1//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9//  This file defines the TableGen definitions from which the ARM NEON header
10//  file will be generated.  See ARM document DUI0348B.
11//
12//===----------------------------------------------------------------------===//
13
14include "arm_neon_incl.td"
15
// Expansion patterns for arithmetic pseudo-intrinsics. Each Op<> gives the
// expression a generated intrinsic expands to; $p0/$p1/$p2 name the
// intrinsic's parameters in order.
def OP_ADD      : Op<(op "+", $p0, $p1)>;
// "L" variants widen both operands with vmovl before the operation;
// "LHi" variants widen the high halves with vmovl_high.
def OP_ADDL     : Op<(op "+", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_ADDLHi   : Op<(op "+", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
// "W" (wide) variants widen only the second operand.
def OP_ADDW     : Op<(op "+", $p0, (call "vmovl", $p1))>;
def OP_ADDWHi   : Op<(op "+", $p0, (call "vmovl_high", $p1))>;
def OP_SUB      : Op<(op "-", $p0, $p1)>;
def OP_SUBL     : Op<(op "-", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_SUBLHi   : Op<(op "-", (call "vmovl_high", $p0),
                              (call "vmovl_high", $p1))>;
def OP_SUBW     : Op<(op "-", $p0, (call "vmovl", $p1))>;
def OP_SUBWHi   : Op<(op "-", $p0, (call "vmovl_high", $p1))>;
def OP_MUL      : Op<(op "*", $p0, $p1)>;
// Multiply-accumulate / multiply-subtract: $p0 is the accumulator.
def OP_MLA      : Op<(op "+", $p0, (op "*", $p1, $p2))>;
def OP_MLAL     : Op<(op "+", $p0, (call "vmull", $p1, $p2))>;
// "Hi" forms operate on the high halves extracted with vget_high.
def OP_MULLHi   : Op<(call "vmull", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
// Polynomial (poly64) variant needs explicit casts of the high halves.
def OP_MULLHi_P64 : Op<(call "vmull",
                         (cast "poly64_t", (call "vget_high", $p0)),
                         (cast "poly64_t", (call "vget_high", $p1)))>;
def OP_MULLHi_N : Op<(call "vmull_n", (call "vget_high", $p0), $p1)>;
def OP_MLALHi   : Op<(call "vmlal", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLALHi_N : Op<(call "vmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MLS      : Op<(op "-", $p0, (op "*", $p1, $p2))>;
// Fused multiply-subtract: negate one multiplicand and reuse vfma.
def OP_FMLS     : Op<(call "vfma", $p0, (op "-", $p1), $p2)>;
def OP_MLSL     : Op<(op "-", $p0, (call "vmull", $p1, $p2))>;
def OP_MLSLHi   : Op<(call "vmlsl", $p0, (call "vget_high", $p1),
                                         (call "vget_high", $p2))>;
def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
// "_N" variants broadcast a scalar operand across a vector with (dup ...).
def OP_MUL_N    : Op<(op "*", $p0, (dup $p1))>;
def OP_MULX_N   : Op<(call "vmulx", $p0, (dup $p1))>;
def OP_MLA_N    : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>;
def OP_MLS_N    : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>;
def OP_FMLA_N   : Op<(call "vfma", $p0, $p1, (dup $p2))>;
def OP_FMLS_N   : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>;
def OP_MLAL_N   : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MLSL_N   : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>;
// "_LN" (lane) variants splat the lane selected by the last immediate
// parameter via the mangled splat_lane helper, then operate on it.
def OP_MUL_LN   : Op<(op "*", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULX_LN  : Op<(call "vmulx", $p0, (call_mangled "splat_lane", $p1, $p2))>;
// Widening multiply by a scalar broadcast across a vector.
// (Realigned ':' to the column used by every surrounding OP_* def;
// no semantic change.)
def OP_MULL_N   : Op<(call "vmull", $p0, (dup $p1))>;
// Widening and saturating lane operations. "Hi" forms first take the high
// half of the long vector operand with vget_high.
def OP_MULL_LN  : Op<(call "vmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (call_mangled "splat_lane", $p1, $p2))>;
def OP_MLA_LN   : Op<(op "+", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLS_LN   : Op<(op "-", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLAL_LN  : Op<(op "+", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1),
                                                  (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSL_LN  : Op<(op "-", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1),
                                                   (call_mangled "splat_lane", $p2, $p3)))>;
// Saturating doubling multiply (vqdmull/vqdmlal/vqdmlsl) scalar and lane forms.
def OP_QDMULL_N : Op<(call "vqdmull", $p0, (dup $p1))>;
def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0),
                                         (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMLAL_N : Op<(call "vqdmlal", $p0, $p1, (dup $p2))>;
def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSL_N : Op<(call "vqdmlsl", $p0, $p1, (dup $p2))>;
def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                              (call_mangled "splat_lane", $p2, $p3))>;
// Saturating (rounding) doubling-multiply-high scalar and lane forms.
def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
// Fused multiply-subtract by lane: negate $p1 and forward to vfma_lane(q).
def OP_FMS_LN   : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ  : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
// Permute operations built as shuffles. mask0/mask1 are the identity lane
// masks of the two operands; decimate/interleave/rotl/add combine them into
// the transpose (TRN), zip (ZIP) and unzip (UZP) lane patterns.
def OP_TRN1     : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
                                                    (decimate mask1, 2)))>;
def OP_ZIP1     : Op<(shuffle $p0, $p1, (lowhalf (interleave mask0, mask1)))>;
def OP_UZP1     : Op<(shuffle $p0, $p1, (add (decimate mask0, 2),
                                             (decimate mask1, 2)))>;
// The "2" variants rotate the masks by one lane first, selecting the odd
// elements instead of the even ones.
def OP_TRN2     : Op<(shuffle $p0, $p1, (interleave
                                          (decimate (rotl mask0, 1), 2),
                                          (decimate (rotl mask1, 1), 2)))>;
def OP_ZIP2     : Op<(shuffle $p0, $p1, (highhalf (interleave mask0, mask1)))>;
def OP_UZP2     : Op<(shuffle $p0, $p1, (add (decimate (rotl mask0, 1), 2),
                                             (decimate (rotl mask1, 1), 2)))>;
// Comparisons: apply the C operator and cast the result to the intrinsic's
// return type "R" (an unsigned mask vector).
def OP_EQ       : Op<(cast "R", (op "==", $p0, $p1))>;
def OP_GE       : Op<(cast "R", (op ">=", $p0, $p1))>;
def OP_LE       : Op<(cast "R", (op "<=", $p0, $p1))>;
def OP_GT       : Op<(cast "R", (op ">", $p0, $p1))>;
def OP_LT       : Op<(cast "R", (op "<", $p0, $p1))>;
// Unary and bitwise operations map directly onto C operators.
def OP_NEG      : Op<(op "-", $p0)>;
def OP_NOT      : Op<(op "~", $p0)>;
def OP_AND      : Op<(op "&", $p0, $p1)>;
def OP_OR       : Op<(op "|", $p0, $p1)>;
def OP_XOR      : Op<(op "^", $p0, $p1)>;
def OP_ANDN     : Op<(op "&", $p0, (op "~", $p1))>;
def OP_ORN      : Op<(op "|", $p0, (op "~", $p1))>;
// LOp = multi-statement expansion; save_temp materializes an intermediate.
def OP_CAST     : LOp<[(save_temp $promote, $p0),
                       (cast "R", $promote)]>;
def OP_HI       : Op<(shuffle $p0, $p0, (highhalf mask0))>;
def OP_LO       : Op<(shuffle $p0, $p0, (lowhalf mask0))>;
def OP_CONC     : Op<(shuffle $p0, $p1, (add mask0, mask1))>;
def OP_DUP      : Op<(dup $p0)>;
def OP_DUP_LN   : Op<(call_mangled "splat_lane", $p0, $p1)>;
// Bitwise select: (mask & a) | (~mask & b), cast through the mask's type.
def OP_SEL      : Op<(cast "R", (op "|",
                                    (op "&", $p0, (cast $p0, $p1)),
                                    (op "&", (op "~", $p0), (cast $p0, $p2))))>;
// Reverse elements within 16/32/64-bit containers via shuffle masks.
def OP_REV16    : Op<(shuffle $p0, $p0, (rev 16, mask0))>;
def OP_REV32    : Op<(shuffle $p0, $p0, (rev 32, mask0))>;
def OP_REV64    : Op<(shuffle $p0, $p0, (rev 64, mask0))>;
// "high" narrowing ops: narrow $p1 and place it in the top half of $p0.
def OP_XTN      : Op<(call "vcombine", $p0, (call "vmovn", $p1))>;
def OP_SQXTUN   : Op<(call "vcombine", (cast $p0, "U", $p0),
                                       (call "vqmovun", $p1))>;
def OP_QXTN     : Op<(call "vcombine", $p0, (call "vqmovn", $p1))>;
// Narrowing float conversions combined into the high half, and widening
// conversions taken from the high half.
def OP_VCVT_NA_HI_F16 : Op<(call "vcombine", $p0, (call "vcvt_f16_f32", $p1))>;
def OP_VCVT_NA_HI_F32 : Op<(call "vcombine", $p0, (call "vcvt_f32_f64", $p1))>;
def OP_VCVT_EX_HI_F32 : Op<(call "vcvt_f32_f16", (call "vget_high", $p0))>;
def OP_VCVT_EX_HI_F64 : Op<(call "vcvt_f64_f32", (call "vget_high", $p0))>;
def OP_VCVTX_HI : Op<(call "vcombine", $p0, (call "vcvtx_f32", $p1))>;
// Reinterpret: pure bit-pattern cast to the return type.
def OP_REINT    : Op<(cast "R", $p0)>;
def OP_ADDHNHi  : Op<(call "vcombine", $p0, (call "vaddhn", $p1, $p2))>;
def OP_RADDHNHi : Op<(call "vcombine", $p0, (call "vraddhn", $p1, $p2))>;
def OP_SUBHNHi  : Op<(call "vcombine", $p0, (call "vsubhn", $p1, $p2))>;
def OP_RSUBHNHi : Op<(call "vcombine", $p0, (call "vrsubhn", $p1, $p2))>;
// Widening absolute difference: vabd result is cast unsigned before vmovl.
def OP_ABDL     : Op<(cast "R", (call "vmovl", (cast $p0, "U",
                                                     (call "vabd", $p0, $p1))))>;
def OP_ABDLHi   : Op<(call "vabdl", (call "vget_high", $p0),
                                    (call "vget_high", $p1))>;
def OP_ABA      : Op<(op "+", $p0, (call "vabd", $p1, $p2))>;
def OP_ABAL     : Op<(op "+", $p0, (call "vabdl", $p1, $p2))>;
def OP_ABALHi   : Op<(call "vabal", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_QDMULLHi : Op<(call "vqdmull", (call "vget_high", $p0),
                                      (call "vget_high", $p1))>;
def OP_QDMULLHi_N : Op<(call "vqdmull_n", (call "vget_high", $p0), $p1)>;
def OP_QDMLALHi : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLALHi_N : Op<(call "vqdmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_QDMLSLHi : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLSLHi_N : Op<(call "vqdmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_DIV  : Op<(op "/", $p0, $p1)>;
// Generic "_high_" expansions: name_replace rewrites the intrinsic's own
// name (e.g. vfoo_high_bar -> vfoo_bar) and calls it on the high half.
def OP_LONG_HI : Op<(cast "R", (call (name_replace "_high_", "_"),
                                                (call "vget_high", $p0), $p1))>;
def OP_NARROW_HI : Op<(cast "R", (call "vcombine",
                                       (cast "R", "H", $p0),
                                       (cast "R", "H",
                                           (call (name_replace "_high_", "_"),
                                                 $p1, $p2))))>;
// vmovl_high equivalent: shift-left-long by 0 on the high half.
def OP_MOVL_HI  : LOp<[(save_temp $a1, (call "vget_high", $p0)),
                       (cast "R",
                            (call "vshll_n", $a1, (literal "int32_t", "0")))]>;
// Copy lane $p3 of $p2 into lane $p1 of $p0.
def OP_COPY_LN : Op<(call "vset_lane", (call "vget_lane", $p2, $p3), $p0, $p1)>;
// Scalar-by-lane operations: extract one lane and operate on it.
def OP_SCALAR_MUL_LN : Op<(op "*", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_MULX_LN : Op<(call "vmulx", $p0, (call "vget_lane", $p1, $p2))>;
// vmulx on a single lane: pull lane 0 of $p0 and lane $p2 of $p1, multiply,
// and write the product back into lane $p2 of $p0.
def OP_SCALAR_VMULX_LN : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                    (literal "int32_t", "0"))),
                              (save_temp $y, (call "vget_lane", $p1, $p2)),
                              (save_temp $z, (call "vmulx", $x, $y)),
                              (call "vset_lane", $z, $p0, $p2)]>;
// Same as above for the laneq form, but the result goes into lane 0.
def OP_SCALAR_VMULX_LNQ : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                                     (literal "int32_t", "0"))),
                               (save_temp $y, (call "vget_lane", $p1, $p2)),
                               (save_temp $z, (call "vmulx", $x, $y)),
                               (call "vset_lane", $z, $p0, (literal "int32_t",
                                                                     "0"))]>;
// Helper class: scalar multiply ops that share the "opname($p0, lane)" shape.
class ScalarMulOp<string opname> :
  Op<(call opname, $p0, (call "vget_lane", $p1, $p2))>;

def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;

def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
                                (call "vget_lane", $p2, $p3))>;

// fp16 lane access is routed through int16 vectors with bitcasts (there is
// no direct float16 lane get/set here).
def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
                                   (call "vget_lane",
                                         (bitcast "int16x4_t", $p0), $p1))>;
def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t",
                                    (call "vget_lane",
                                          (bitcast "int16x8_t", $p0), $p1))>;
def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t",
                                   (call "vset_lane",
                                         (bitcast "int16_t", $p0),
                                         (bitcast "int16x4_t", $p1), $p2))>;
def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
                                    (call "vset_lane",
                                          (bitcast "int16_t", $p0),
                                          (bitcast "int16x8_t", $p1), $p2))>;
205
// Dot-product by lane: view $p2 as 32-bit elements, splat the chosen lane,
// then view the splat back as $p1's element type for vdot.
def OP_DOT_LN
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_DOT_LNQ
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;

// FP16 fused multiply long by lane: broadcast the selected half-precision
// lane (dup_typed matches $p1's type) into vfmlal/vfmlsl low/high.
def OP_FMLAL_LN     : Op<(call "vfmlal_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN     : Op<(call "vfmlsl_low", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLAL_LN_Hi  : Op<(call "vfmlal_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN_Hi  : Op<(call "vfmlsl_high", $p0, $p1,
                           (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

// Mixed-sign dot products: splat an int32 lane of $p2, then cast to 8-bit
// signed ("S") elements for the vusdot signed operand.
def OP_USDOT_LN
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)))>;
def OP_USDOT_LNQ
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)))>;

// sudot splats the second vector and then calls vusdot
// (note the operand order: the splatted, unsigned-cast vector is passed
// before $p1).
def OP_SUDOT_LN
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)), $p1)>;
def OP_SUDOT_LNQ
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)), $p1)>;

// BFloat16 dot product by lane: splat through a float32 view of $p2.
def OP_BFDOT_LN
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x2_t", $p2), $p3)))>;

def OP_BFDOT_LNQ
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x4_t", $p2), $p3)))>;

// BFloat16 multiply-add-long (bottom/top) by lane.
def OP_BFMLALB_LN
    : Op<(call "vbfmlalb", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_BFMLALT_LN
    : Op<(call "vbfmlalt", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
252
// bf16 -> f32: a bfloat16 is the top 16 bits of a float32, so widen by
// shifting left 16 through an int16/int32 view and bitcast to the result.
def OP_VCVT_F32_BF16
    : Op<(bitcast "R",
          (call "vshll_n", (bitcast "int16x4_t", $p0),
                           (literal "int32_t", "16")))>;
def OP_VCVT_F32_BF16_LO
    : Op<(call "vcvt_f32_bf16", (call "vget_low", $p0))>;
def OP_VCVT_F32_BF16_HI
    : Op<(call "vcvt_f32_bf16", (call "vget_high", $p0))>;

// f32 -> bf16 on AArch64: route through the __a64_vcvtq_low_bf16 builtin.
def OP_VCVT_BF16_F32_LO_A64
    : Op<(call "__a64_vcvtq_low_bf16", $p0)>;
def OP_VCVT_BF16_F32_A64
    : Op<(call "vget_low", (call "__a64_vcvtq_low_bf16", $p0))>;

// f32 -> bf16 on AArch32: route through the __a32_vcvt_bf16 builtin.
def OP_VCVT_BF16_F32_A32
    : Op<(call "__a32_vcvt_bf16", $p0)>;

// LO form zeroes the high half; HI form keeps the low half of $p0.
def OP_VCVT_BF16_F32_LO_A32
    : Op<(call "vcombine", (cast "bfloat16x4_t", (literal "uint64_t", "0ULL")),
                           (call "__a32_vcvt_bf16", $p0))>;
def OP_VCVT_BF16_F32_HI_A32
    : Op<(call "vcombine", (call "__a32_vcvt_bf16", $p1),
                           (call "vget_low", $p0))>;

// Scalar bf16 -> f32: shift the 16-bit pattern into the top of an int32.
def OP_CVT_F32_BF16
    : Op<(bitcast "R", (op "<<", (bitcast "int32_t", $p0),
                                 (literal "int32_t", "16")))>;
280
//===----------------------------------------------------------------------===//
// Auxiliary Instructions
//===----------------------------------------------------------------------===//

// Splat operation - performs a range-checked splat over a vector
def SPLAT  : WInst<"splat_lane", ".(!q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl">;
// laneq form: the lane index ranges over a 128-bit source vector.
def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
  let isLaneQ = 1;
}
// bfloat16 splats are only available under the bf16 target feature.
let TargetGuard = "bf16" in {
  def SPLAT_BF  : WInst<"splat_lane", ".(!q)I", "bQb">;
  def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
    let isLaneQ = 1;
  }
}
298
//===----------------------------------------------------------------------===//
// Intrinsics
//===----------------------------------------------------------------------===//

////////////////////////////////////////////////////////////////////////////////
// E.3.1 Addition
// Inst classes: SInst = signed/typed builtin, IInst = integer builtin,
// WInst = works on wider set; *OpInst variants expand via the OP_* patterns
// above instead of a builtin.
def VADD    : IOpInst<"vadd", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
def VADDL   : SOpInst<"vaddl", "(>Q)..", "csiUcUsUi", OP_ADDL>;
def VADDW   : SOpInst<"vaddw", "(>Q)(>Q).", "csiUcUsUi", OP_ADDW>;
def VHADD   : SInst<"vhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VRHADD  : SInst<"vrhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VQADD   : SInst<"vqadd", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VADDHN  : IInst<"vaddhn", "<QQ", "silUsUiUl">;
def VRADDHN : IInst<"vraddhn", "<QQ", "silUsUiUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.2 Multiplication
def VMUL     : IOpInst<"vmul", "...", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
def VMULP    : SInst<"vmul", "...", "PcQPc">;
def VMLA     : IOpInst<"vmla", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
def VMLAL    : SOpInst<"vmlal", "(>Q)(>Q)..", "csiUcUsUi", OP_MLAL>;
def VMLS     : IOpInst<"vmls", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
def VMLSL    : SOpInst<"vmlsl", "(>Q)(>Q)..", "csiUcUsUi", OP_MLSL>;
def VQDMULH  : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;

// Rounding doubling multiply-accumulate/-subtract require ARMv8.1-A.
let TargetGuard = "v8.1a" in {
def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}

def VQDMLAL  : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
def VQDMLSL  : SInst<"vqdmlsl", "(>Q)(>Q)..", "si">;
def VMULL    : SInst<"vmull", "(>Q)..", "csiUcUsUiPc">;
def VQDMULL  : SInst<"vqdmull", "(>Q)..", "si">;
335
////////////////////////////////////////////////////////////////////////////////
// E.3.3 Subtraction
def VSUB    : IOpInst<"vsub", "...",
                      "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
def VSUBL   : SOpInst<"vsubl", "(>Q)..", "csiUcUsUi", OP_SUBL>;
def VSUBW   : SOpInst<"vsubw", "(>Q)(>Q).", "csiUcUsUi", OP_SUBW>;
def VQSUB   : SInst<"vqsub", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VHSUB   : SInst<"vhsub", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VSUBHN  : IInst<"vsubhn", "<QQ", "silUsUiUl">;
def VRSUBHN : IInst<"vrsubhn", "<QQ", "silUsUiUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.4 Comparison
// vcle/vclt share the assembler instruction of vcge/vcgt (operands swapped),
// hence the InstName overrides.
def VCEQ  : IOpInst<"vceq", "U..", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
def VCGE  : SOpInst<"vcge", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
let InstName = "vcge" in
def VCLE  : SOpInst<"vcle", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
def VCGT  : SOpInst<"vcgt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
let InstName = "vcgt" in
def VCLT  : SOpInst<"vclt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
// Absolute comparisons (float only).
let InstName = "vacge" in {
def VCAGE : IInst<"vcage", "U..", "fQf">;
def VCALE : IInst<"vcale", "U..", "fQf">;
}
let InstName = "vacgt" in {
def VCAGT : IInst<"vcagt", "U..", "fQf">;
def VCALT : IInst<"vcalt", "U..", "fQf">;
}
def VTST  : WInst<"vtst", "U..", "csiUcUsUiPcPsQcQsQiQUcQUsQUiQPcQPs">;
365
////////////////////////////////////////////////////////////////////////////////
// E.3.5 Absolute Difference
def VABD  : SInst<"vabd", "...",  "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VABDL : SOpInst<"vabdl", "(>Q)..",  "csiUcUsUi", OP_ABDL>;
def VABA  : SOpInst<"vaba", "....", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
def VABAL : SOpInst<"vabal", "(>Q)(>Q)..", "csiUcUsUi", OP_ABAL>;

////////////////////////////////////////////////////////////////////////////////
// E.3.6 Max/Min
def VMAX : SInst<"vmax", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VMIN : SInst<"vmin", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.7 Pairwise Addition
def VPADD  : IInst<"vpadd", "...", "csiUcUsUif">;
def VPADDL : SInst<"vpaddl", ">.",  "csiUcUsUiQcQsQiQUcQUsQUi">;
def VPADAL : SInst<"vpadal", ">>.", "csiUcUsUiQcQsQiQUcQUsQUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.8-9 Folding Max/Min
def VPMAX : SInst<"vpmax", "...", "csiUcUsUif">;
def VPMIN : SInst<"vpmin", "...", "csiUcUsUif">;

////////////////////////////////////////////////////////////////////////////////
// E.3.10 Reciprocal/Sqrt
def VRECPS  : IInst<"vrecps", "...", "fQf">;
def VRSQRTS : IInst<"vrsqrts", "...", "fQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.11 Shifts by signed variable
def VSHL   : SInst<"vshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL  : SInst<"vqshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHL  : SInst<"vrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQRSHL : SInst<"vqrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
400
////////////////////////////////////////////////////////////////////////////////
// E.3.12 Shifts by constant
// isShift enables range checking of the immediate shift amount ("I").
let isShift = 1 in {
def VSHR_N     : SInst<"vshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSHL_N     : IInst<"vshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHR_N    : SInst<"vrshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSRA_N     : SInst<"vsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSRA_N    : SInst<"vrsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL_N    : SInst<"vqshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHLU_N   : SInst<"vqshlu_n", "U.I", "csilQcQsQiQl">;
def VSHRN_N    : IInst<"vshrn_n", "<QI", "silUsUiUl">;
def VQSHRUN_N  : SInst<"vqshrun_n", "(<U)QI", "sil">;
def VQRSHRUN_N : SInst<"vqrshrun_n", "(<U)QI", "sil">;
def VQSHRN_N   : SInst<"vqshrn_n", "<QI", "silUsUiUl">;
def VRSHRN_N   : IInst<"vrshrn_n", "<QI", "silUsUiUl">;
def VQRSHRN_N  : SInst<"vqrshrn_n", "<QI", "silUsUiUl">;
def VSHLL_N    : SInst<"vshll_n", "(>Q).I", "csiUcUsUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.13 Shifts with insert
def VSRI_N : WInst<"vsri_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
def VSLI_N : WInst<"vsli_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
}
426
////////////////////////////////////////////////////////////////////////////////
// E.3.14 Loads and stores of a single vector
// Prototype codes: "c*" = const pointer argument, "!" = pointer to the
// vector's element type, digits = N-vector structs, "v" = void return.
def VLD1      : WInst<"vld1", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_X2   : WInst<"vld1_x2", "2(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X3   : WInst<"vld1_x3", "3(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X4   : WInst<"vld1_x4", "4(c*!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_LANE : WInst<"vld1_lane", ".(c*!).I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_DUP  : WInst<"vld1_dup", ".(c*!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1      : WInst<"vst1", "v*(.!)",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1_X2   : WInst<"vst1_x2", "v*(2!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X3   : WInst<"vst1_x3", "v*(3!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X4   : WInst<"vst1_x4", "v*(4!)",
                      "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_LANE : WInst<"vst1_lane", "v*(.!)I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
// float16 forms require half-precision FP support.
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD1_F16      : WInst<"vld1", ".(c*!)", "hQh">;
def VLD1_X2_F16   : WInst<"vld1_x2", "2(c*!)", "hQh">;
def VLD1_X3_F16   : WInst<"vld1_x3", "3(c*!)", "hQh">;
def VLD1_X4_F16   : WInst<"vld1_x4", "4(c*!)", "hQh">;
def VLD1_LANE_F16 : WInst<"vld1_lane", ".(c*!).I", "hQh">;
def VLD1_DUP_F16  : WInst<"vld1_dup", ".(c*!)", "hQh">;
def VST1_F16      : WInst<"vst1", "v*(.!)", "hQh">;
def VST1_X2_F16   : WInst<"vst1_x2", "v*(2!)", "hQh">;
def VST1_X3_F16   : WInst<"vst1_x3", "v*(3!)", "hQh">;
def VST1_X4_F16   : WInst<"vst1_x4", "v*(4!)", "hQh">;
def VST1_LANE_F16 : WInst<"vst1_lane", "v*(.!)I", "hQh">;
}
464
////////////////////////////////////////////////////////////////////////////////
// E.3.15 Loads and stores of an N-element structure
// vld2/3/4 deinterleave into 2/3/4 vectors; vst2/3/4 interleave back.
def VLD2 : WInst<"vld2", "2(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD3 : WInst<"vld3", "3(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD4 : WInst<"vld4", "4(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD2_DUP  : WInst<"vld2_dup", "2(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD3_DUP  : WInst<"vld3_dup", "3(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD4_DUP  : WInst<"vld4_dup", "4(c*!)",
                      "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST2 : WInst<"vst2", "v*(2!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST3 : WInst<"vst3", "v*(3!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST4 : WInst<"vst4", "v*(4!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST2_LANE : WInst<"vst2_lane", "v*(2!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST3_LANE : WInst<"vst3_lane", "v*(3!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST4_LANE : WInst<"vst4_lane", "v*(4!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
// float16 forms require half-precision FP support.
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD2_F16      : WInst<"vld2", "2(c*!)", "hQh">;
def VLD3_F16      : WInst<"vld3", "3(c*!)", "hQh">;
def VLD4_F16      : WInst<"vld4", "4(c*!)", "hQh">;
def VLD2_DUP_F16  : WInst<"vld2_dup", "2(c*!)", "hQh">;
def VLD3_DUP_F16  : WInst<"vld3_dup", "3(c*!)", "hQh">;
def VLD4_DUP_F16  : WInst<"vld4_dup", "4(c*!)", "hQh">;
def VLD2_LANE_F16 : WInst<"vld2_lane", "2(c*!)2I", "hQh">;
def VLD3_LANE_F16 : WInst<"vld3_lane", "3(c*!)3I", "hQh">;
def VLD4_LANE_F16 : WInst<"vld4_lane", "4(c*!)4I", "hQh">;
def VST2_F16      : WInst<"vst2", "v*(2!)", "hQh">;
def VST3_F16      : WInst<"vst3", "v*(3!)", "hQh">;
def VST4_F16      : WInst<"vst4", "v*(4!)", "hQh">;
def VST2_LANE_F16 : WInst<"vst2_lane", "v*(2!)I", "hQh">;
def VST3_LANE_F16 : WInst<"vst3_lane", "v*(3!)I", "hQh">;
def VST4_LANE_F16 : WInst<"vst4_lane", "v*(4!)I", "hQh">;
}
502
////////////////////////////////////////////////////////////////////////////////
// E.3.16 Extract lanes from a vector
// Lane get/set map onto the vmov instruction, hence InstName = "vmov".
let InstName = "vmov" in
def VGET_LANE : IInst<"vget_lane", "1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.17 Set lanes within a vector
let InstName = "vmov" in
def VSET_LANE : IInst<"vset_lane", ".1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.18 Initialize a vector from bit pattern
// vcreate reinterprets a 64-bit scalar; marked BigEndianSafe because the
// expansion is a pure bit cast.
def VCREATE : NoTestOpInst<"vcreate", ".(IU>)", "csihfUcUsUiUlPcPsl", OP_CAST> {
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.19 Set all lanes to same value
let InstName = "vmov" in {
def VDUP_N   : WOpInst<"vdup_n", ".1",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP>;
// vmov_n is an alias spelling of vdup_n with the same expansion.
def VMOV_N   : WOpInst<"vmov_n", ".1",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP>;
}
let InstName = "" in
def VDUP_LANE: WOpInst<"vdup_lane", ".qI",
                       "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                       OP_DUP_LN>;
535
////////////////////////////////////////////////////////////////////////////////
// E.3.20 Combining vectors
def VCOMBINE : NoTestOpInst<"vcombine", "Q..", "csilhfUcUsUiUlPcPs", OP_CONC>;

////////////////////////////////////////////////////////////////////////////////
// E.3.21 Splitting vectors
// Note that the ARM NEON Reference 2.0 mistakenly document the vget_high_f16()
// and vget_low_f16() intrinsics as AArch64-only. We (and GCC) support all
// versions of these intrinsics in both AArch32 and AArch64 architectures. See
// D45668 for more details.
let InstName = "vmov" in {
def VGET_HIGH : NoTestOpInst<"vget_high", ".Q", "csilhfUcUsUiUlPcPs", OP_HI>;
def VGET_LOW  : NoTestOpInst<"vget_low", ".Q", "csilhfUcUsUiUlPcPs", OP_LO>;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.22 Converting vectors

// f16<->f32 conversions require half-precision FP support.
let ArchGuard = "(__ARM_FP & 2)" in {
  def VCVT_F16_F32 : SInst<"vcvt_f16_f32", "(<q)(.!)", "Hf">;
  def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "(>Q)(.!)", "h">;
}

def VCVT_S32     : SInst<"vcvt_s32", "S.",  "fQf">;
def VCVT_U32     : SInst<"vcvt_u32", "U.",  "fQf">;
def VCVT_F32     : SInst<"vcvt_f32", "F(.!)",  "iUiQiQUi">;
// isVCVT_N enables range checking of the fixed-point fraction bits immediate.
let isVCVT_N = 1 in {
def VCVT_N_S32   : SInst<"vcvt_n_s32", "S.I", "fQf">;
def VCVT_N_U32   : SInst<"vcvt_n_u32", "U.I", "fQf">;
def VCVT_N_F32   : SInst<"vcvt_n_f32", "F(.!)I", "iUiQiQUi">;
}

def VMOVN        : IInst<"vmovn", "<Q",  "silUsUiUl">;
def VMOVL        : SInst<"vmovl", "(>Q).",  "csiUcUsUi">;
def VQMOVN       : SInst<"vqmovn", "<Q",  "silUsUiUl">;
def VQMOVUN      : SInst<"vqmovun", "(<U)Q",  "sil">;
572
////////////////////////////////////////////////////////////////////////////////
// E.3.23-24 Table lookup, Extended table lookup
// vtbl: out-of-range indices yield 0; vtbx: out-of-range lanes keep the
// destination operand ($p0) — NOTE(review): semantics per the NEON ISA,
// not visible in this file.
let InstName = "vtbl" in {
def VTBL1 : WInst<"vtbl1", "..p",  "UccPc">;
def VTBL2 : WInst<"vtbl2", ".2p",  "UccPc">;
def VTBL3 : WInst<"vtbl3", ".3p",  "UccPc">;
def VTBL4 : WInst<"vtbl4", ".4p",  "UccPc">;
}
let InstName = "vtbx" in {
def VTBX1 : WInst<"vtbx1", "...p", "UccPc">;
def VTBX2 : WInst<"vtbx2", "..2p", "UccPc">;
def VTBX3 : WInst<"vtbx3", "..3p", "UccPc">;
def VTBX4 : WInst<"vtbx4", "..4p", "UccPc">;
}
587
588////////////////////////////////////////////////////////////////////////////////
589// E.3.25 Operations with a scalar value
590def VMLA_LANE     : IOpInst<"vmla_lane", "...qI",
591                            "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
592def VMLAL_LANE    : SOpInst<"vmlal_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLAL_LN>;
593def VQDMLAL_LANE  : SOpInst<"vqdmlal_lane", "(>Q)(>Q)..I", "si", OP_QDMLAL_LN>;
594def VMLS_LANE     : IOpInst<"vmls_lane", "...qI",
595                            "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
596def VMLSL_LANE    : SOpInst<"vmlsl_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLSL_LN>;
597def VQDMLSL_LANE  : SOpInst<"vqdmlsl_lane", "(>Q)(>Q)..I", "si", OP_QDMLSL_LN>;
598def VMUL_N        : IOpInst<"vmul_n", "..1", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
599def VMUL_LANE     : IOpInst<"vmul_lane", "..qI",
600                            "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
601def VMULL_N       : SOpInst<"vmull_n", "(>Q).1", "siUsUi", OP_MULL_N>;
602def VMULL_LANE    : SOpInst<"vmull_lane", "(>Q)..I", "siUsUi", OP_MULL_LN>;
603def VQDMULL_N     : SOpInst<"vqdmull_n", "(>Q).1", "si", OP_QDMULL_N>;
604def VQDMULL_LANE  : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
605def VQDMULH_N     : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
606def VQRDMULH_N    : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;
607
608let ArchGuard = "!defined(__aarch64__)" in {
609def VQDMULH_LANE  : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
610def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
611}
612let ArchGuard = "defined(__aarch64__)" in {
613def A64_VQDMULH_LANE  : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
614def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
615}
616
617let TargetGuard = "v8.1a" in {
618def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
619def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "...qI", "siQsQi", OP_QRDMLSH_LN>;
620}
621
622def VMLA_N        : IOpInst<"vmla_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
623def VMLAL_N       : SOpInst<"vmlal_n", "(>Q)(>Q).1", "siUsUi", OP_MLAL_N>;
624def VQDMLAL_N     : SOpInst<"vqdmlal_n", "(>Q)(>Q).1", "si", OP_QDMLAL_N>;
625def VMLS_N        : IOpInst<"vmls_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
626def VMLSL_N       : SOpInst<"vmlsl_n", "(>Q)(>Q).1", "siUsUi", OP_MLSL_N>;
627def VQDMLSL_N     : SOpInst<"vqdmlsl_n", "(>Q)(>Q).1", "si", OP_QDMLSL_N>;
628
629////////////////////////////////////////////////////////////////////////////////
630// E.3.26 Vector Extract
631def VEXT : WInst<"vext", "...I",
632                 "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">;
633
634////////////////////////////////////////////////////////////////////////////////
635// E.3.27 Reverse vector elements
636def VREV64 : WOpInst<"vrev64", "..", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
637                  OP_REV64>;
638def VREV32 : WOpInst<"vrev32", "..", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
639def VREV16 : WOpInst<"vrev16", "..", "cUcPcQcQUcQPc", OP_REV16>;
640
641////////////////////////////////////////////////////////////////////////////////
642// E.3.28 Other single operand arithmetic
643def VABS    : SInst<"vabs", "..", "csifQcQsQiQf">;
644def VQABS   : SInst<"vqabs", "..", "csiQcQsQi">;
645def VNEG    : SOpInst<"vneg", "..", "csifQcQsQiQf", OP_NEG>;
646def VQNEG   : SInst<"vqneg", "..", "csiQcQsQi">;
647def VCLS    : SInst<"vcls", "S.", "csiUcUsUiQcQsQiQUcQUsQUi">;
648def VCLZ    : IInst<"vclz", "..", "csiUcUsUiQcQsQiQUcQUsQUi">;
649def VCNT    : WInst<"vcnt", "..", "UccPcQUcQcQPc">;
650def VRECPE  : SInst<"vrecpe", "..", "fUiQfQUi">;
651def VRSQRTE : SInst<"vrsqrte", "..", "fUiQfQUi">;
652
653////////////////////////////////////////////////////////////////////////////////
654// E.3.29 Logical operations
655def VMVN : LOpInst<"vmvn", "..", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
656def VAND : LOpInst<"vand", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
657def VORR : LOpInst<"vorr", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
658def VEOR : LOpInst<"veor", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
659def VBIC : LOpInst<"vbic", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
660def VORN : LOpInst<"vorn", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
661let isHiddenLInst = 1 in
662def VBSL : SInst<"vbsl", ".U..",
663                "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">;
664
665////////////////////////////////////////////////////////////////////////////////
666// E.3.30 Transposition operations
667def VTRN : WInst<"vtrn", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
668def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
669def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
670
671////////////////////////////////////////////////////////////////////////////////
672
673class REINTERPRET_CROSS_SELF<string Types> :
674  NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
675    let CartesianProductWith = Types;
676}
677
678multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
679  def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
680    let CartesianProductWith = TypesB;
681  }
682  def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
683    let CartesianProductWith = TypesA;
684  }
685}
686
687// E.3.31 Vector reinterpret cast operations
688def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
689  let ArchGuard = "!defined(__aarch64__)";
690  let BigEndianSafe = 1;
691}
692
693////////////////////////////////////////////////////////////////////////////////
694// Vector fused multiply-add operations
695
696let ArchGuard = "defined(__ARM_FEATURE_FMA)" in {
697  def VFMA : SInst<"vfma", "....", "fQf">;
698  def VFMS : SOpInst<"vfms", "....", "fQf", OP_FMLS>;
699  def FMLA_N_F32 : SOpInst<"vfma_n", "...1", "fQf", OP_FMLA_N>;
700}
701
702////////////////////////////////////////////////////////////////////////////////
703// fp16 vector operations
704def SCALAR_HALF_GET_LANE : IOpInst<"vget_lane", "1.I", "h", OP_SCALAR_HALF_GET_LN>;
705def SCALAR_HALF_SET_LANE : IOpInst<"vset_lane", ".1.I", "h", OP_SCALAR_HALF_SET_LN>;
706def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "1.I", "Qh", OP_SCALAR_HALF_GET_LNQ>;
707def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", ".1.I", "Qh", OP_SCALAR_HALF_SET_LNQ>;
708
709////////////////////////////////////////////////////////////////////////////////
710// Non poly128_t vaddp for Arm and AArch64
711// TODO: poly128_t not implemented on arm32
712def VADDP   : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;
713
714////////////////////////////////////////////////////////////////////////////////
715// AArch64 Intrinsics
716
717let ArchGuard = "defined(__aarch64__)" in {
718
719////////////////////////////////////////////////////////////////////////////////
720// Load/Store
721def LD1 : WInst<"vld1", ".(c*!)", "dQdPlQPl">;
722def LD2 : WInst<"vld2", "2(c*!)", "QUlQldQdPlQPl">;
723def LD3 : WInst<"vld3", "3(c*!)", "QUlQldQdPlQPl">;
724def LD4 : WInst<"vld4", "4(c*!)", "QUlQldQdPlQPl">;
725def ST1 : WInst<"vst1", "v*(.!)", "dQdPlQPl">;
726def ST2 : WInst<"vst2", "v*(2!)", "QUlQldQdPlQPl">;
727def ST3 : WInst<"vst3", "v*(3!)", "QUlQldQdPlQPl">;
728def ST4 : WInst<"vst4", "v*(4!)", "QUlQldQdPlQPl">;
729
730def LD1_X2 : WInst<"vld1_x2", "2(c*!)",
731                   "dQdPlQPl">;
732def LD1_X3 : WInst<"vld1_x3", "3(c*!)",
733                   "dQdPlQPl">;
734def LD1_X4 : WInst<"vld1_x4", "4(c*!)",
735                   "dQdPlQPl">;
736
737def ST1_X2 : WInst<"vst1_x2", "v*(2!)", "dQdPlQPl">;
738def ST1_X3 : WInst<"vst1_x3", "v*(3!)", "dQdPlQPl">;
739def ST1_X4 : WInst<"vst1_x4", "v*(4!)", "dQdPlQPl">;
740
741def LD1_LANE : WInst<"vld1_lane", ".(c*!).I", "dQdPlQPl">;
742def LD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "lUlQcQUcQPcQlQUldQdPlQPl">;
743def LD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "lUlQcQUcQPcQlQUldQdPlQPl">;
744def LD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "lUlQcQUcQPcQlQUldQdPlQPl">;
745def ST1_LANE : WInst<"vst1_lane", "v*(.!)I", "dQdPlQPl">;
746def ST2_LANE : WInst<"vst2_lane", "v*(2!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
747def ST3_LANE : WInst<"vst3_lane", "v*(3!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
748def ST4_LANE : WInst<"vst4_lane", "v*(4!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
749
750def LD1_DUP  : WInst<"vld1_dup", ".(c*!)", "dQdPlQPl">;
751def LD2_DUP  : WInst<"vld2_dup", "2(c*!)", "dQdPlQPl">;
752def LD3_DUP  : WInst<"vld3_dup", "3(c*!)", "dQdPlQPl">;
753def LD4_DUP  : WInst<"vld4_dup", "4(c*!)", "dQdPlQPl">;
754
755def VLDRQ : WInst<"vldrq", "1(c*!)", "Pk">;
756def VSTRQ : WInst<"vstrq", "v*(1!)", "Pk">;
757
758////////////////////////////////////////////////////////////////////////////////
759// Addition
760def ADD : IOpInst<"vadd", "...", "dQd", OP_ADD>;
761
762////////////////////////////////////////////////////////////////////////////////
763// Subtraction
764def SUB : IOpInst<"vsub", "...", "dQd", OP_SUB>;
765
766////////////////////////////////////////////////////////////////////////////////
767// Multiplication
768def MUL     : IOpInst<"vmul", "...", "dQd", OP_MUL>;
769def MLA     : IOpInst<"vmla", "....", "dQd", OP_MLA>;
770def MLS     : IOpInst<"vmls", "....", "dQd", OP_MLS>;
771
772////////////////////////////////////////////////////////////////////////////////
773// Multiplication Extended
774def MULX : SInst<"vmulx", "...", "fdQfQd">;
775
776////////////////////////////////////////////////////////////////////////////////
777// Division
778def FDIV : IOpInst<"vdiv", "...",  "fdQfQd", OP_DIV>;
779
780////////////////////////////////////////////////////////////////////////////////
781// Vector fused multiply-add operations
782def FMLA : SInst<"vfma", "....", "dQd">;
783def FMLS : SOpInst<"vfms", "....", "dQd", OP_FMLS>;
784
785////////////////////////////////////////////////////////////////////////////////
786// MUL, MLA, MLS, FMA, FMS definitions with scalar argument
787def VMUL_N_A64 : IOpInst<"vmul_n", "..1", "Qd", OP_MUL_N>;
788
789def FMLA_N : SOpInst<"vfma_n", "...1", "dQd", OP_FMLA_N>;
790def FMLS_N : SOpInst<"vfms_n", "...1", "fdQfQd", OP_FMLS_N>;
791
792////////////////////////////////////////////////////////////////////////////////
793// Logical operations
794def BSL : SInst<"vbsl", ".U..", "dPlQdQPl">;
795
796////////////////////////////////////////////////////////////////////////////////
797// Absolute Difference
798def ABD  : SInst<"vabd", "...",  "dQd">;
799
800////////////////////////////////////////////////////////////////////////////////
801// saturating absolute/negate
802def ABS    : SInst<"vabs", "..", "dQdlQl">;
803def QABS   : SInst<"vqabs", "..", "lQl">;
804def NEG    : SOpInst<"vneg", "..", "dlQdQl", OP_NEG>;
805def QNEG   : SInst<"vqneg", "..", "lQl">;
806
807////////////////////////////////////////////////////////////////////////////////
808// Signed Saturating Accumulated of Unsigned Value
809def SUQADD : SInst<"vuqadd", "..U", "csilQcQsQiQl">;
810
811////////////////////////////////////////////////////////////////////////////////
812// Unsigned Saturating Accumulated of Signed Value
813def USQADD : SInst<"vsqadd", "..S", "UcUsUiUlQUcQUsQUiQUl">;
814
815////////////////////////////////////////////////////////////////////////////////
816// Reciprocal/Sqrt
817def FRECPS  : IInst<"vrecps", "...", "dQd">;
818def FRSQRTS : IInst<"vrsqrts", "...", "dQd">;
819def FRECPE  : SInst<"vrecpe", "..", "dQd">;
820def FRSQRTE : SInst<"vrsqrte", "..", "dQd">;
821def FSQRT   : SInst<"vsqrt", "..", "fdQfQd">;
822
823////////////////////////////////////////////////////////////////////////////////
824// bitwise reverse
825def RBIT : IInst<"vrbit", "..", "cUcPcQcQUcQPc">;
826
827////////////////////////////////////////////////////////////////////////////////
828// Integer extract and narrow to high
829def XTN2 : SOpInst<"vmovn_high", "(<Q)<Q", "silUsUiUl", OP_XTN>;
830
831////////////////////////////////////////////////////////////////////////////////
832// Signed integer saturating extract and unsigned narrow to high
833def SQXTUN2 : SOpInst<"vqmovun_high", "(<U)(<Uq).", "HsHiHl", OP_SQXTUN>;
834
835////////////////////////////////////////////////////////////////////////////////
836// Integer saturating extract and narrow to high
837def QXTN2 : SOpInst<"vqmovn_high", "(<Q)<Q", "silUsUiUl", OP_QXTN>;
838
839////////////////////////////////////////////////////////////////////////////////
840// Converting vectors
841
842def VCVT_F32_F64 : SInst<"vcvt_f32_f64", "(<q).", "Qd">;
843def VCVT_F64_F32 : SInst<"vcvt_f64_f32", "(>Q).", "f">;
844
845def VCVT_S64 : SInst<"vcvt_s64", "S.",  "dQd">;
846def VCVT_U64 : SInst<"vcvt_u64", "U.",  "dQd">;
847def VCVT_F64 : SInst<"vcvt_f64", "F(.!)",  "lUlQlQUl">;
848
849def VCVT_HIGH_F16_F32 : SOpInst<"vcvt_high_f16", "<(<q!)Q", "Hf", OP_VCVT_NA_HI_F16>;
850def VCVT_HIGH_F32_F16 : SOpInst<"vcvt_high_f32", "(>Q)(Q!)", "h", OP_VCVT_EX_HI_F32>;
851def VCVT_HIGH_F32_F64 : SOpInst<"vcvt_high_f32", "(<Q)(F<!)Q", "d", OP_VCVT_NA_HI_F32>;
852def VCVT_HIGH_F64_F32 : SOpInst<"vcvt_high_f64", "(>Q)(Q!)", "f", OP_VCVT_EX_HI_F64>;
853
854def VCVTX_F32_F64      : SInst<"vcvtx_f32", "(F<)(Q!)",  "d">;
855def VCVTX_HIGH_F32_F64 : SOpInst<"vcvtx_high_f32", "(<Q)(F<!)Q", "d", OP_VCVTX_HI>;
856
857////////////////////////////////////////////////////////////////////////////////
858// Comparison
859def FCAGE : IInst<"vcage", "U..", "dQd">;
860def FCAGT : IInst<"vcagt", "U..", "dQd">;
861def FCALE : IInst<"vcale", "U..", "dQd">;
862def FCALT : IInst<"vcalt", "U..", "dQd">;
863def CMTST  : WInst<"vtst", "U..", "lUlPlQlQUlQPl">;
864def CFMEQ  : SOpInst<"vceq", "U..", "lUldQdQlQUlPlQPl", OP_EQ>;
865def CFMGE  : SOpInst<"vcge", "U..", "lUldQdQlQUl", OP_GE>;
866def CFMLE  : SOpInst<"vcle", "U..", "lUldQdQlQUl", OP_LE>;
867def CFMGT  : SOpInst<"vcgt", "U..", "lUldQdQlQUl", OP_GT>;
868def CFMLT  : SOpInst<"vclt", "U..", "lUldQdQlQUl", OP_LT>;
869
870def CMEQ  : SInst<"vceqz", "U.",
871                  "csilfUcUsUiUlPcPlQcQsQiQlQfQUcQUsQUiQUlQPcdQdQPl">;
872def CMGE  : SInst<"vcgez", "U.", "csilfdQcQsQiQlQfQd">;
873def CMLE  : SInst<"vclez", "U.", "csilfdQcQsQiQlQfQd">;
874def CMGT  : SInst<"vcgtz", "U.", "csilfdQcQsQiQlQfQd">;
875def CMLT  : SInst<"vcltz", "U.", "csilfdQcQsQiQlQfQd">;
876
877////////////////////////////////////////////////////////////////////////////////
878// Max/Min Integer
879def MAX : SInst<"vmax", "...", "dQd">;
880def MIN : SInst<"vmin", "...", "dQd">;
881
882////////////////////////////////////////////////////////////////////////////////
883// Pairwise Max/Min
884def MAXP : SInst<"vpmax", "...", "QcQsQiQUcQUsQUiQfQd">;
885def MINP : SInst<"vpmin", "...", "QcQsQiQUcQUsQUiQfQd">;
886
887////////////////////////////////////////////////////////////////////////////////
888// Pairwise MaxNum/MinNum Floating Point
889def FMAXNMP : SInst<"vpmaxnm", "...", "fQfQd">;
890def FMINNMP : SInst<"vpminnm", "...", "fQfQd">;
891
892////////////////////////////////////////////////////////////////////////////////
893// Pairwise Addition
894def ADDP  : IInst<"vpadd", "...", "QcQsQiQlQUcQUsQUiQUlQfQd">;
895
896////////////////////////////////////////////////////////////////////////////////
897// Shifts by constant
898let isShift = 1 in {
899// Left shift long high
900def SHLL_HIGH_N    : SOpInst<"vshll_high_n", ">.I", "HcHsHiHUcHUsHUi",
901                             OP_LONG_HI>;
902
903////////////////////////////////////////////////////////////////////////////////
904def SRI_N : WInst<"vsri_n", "...I", "PlQPl">;
905def SLI_N : WInst<"vsli_n", "...I", "PlQPl">;
906
907// Right shift narrow high
908def SHRN_HIGH_N    : IOpInst<"vshrn_high_n", "<(<q).I",
909                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
910def QSHRUN_HIGH_N  : SOpInst<"vqshrun_high_n", "<(<q).I",
911                             "HsHiHl", OP_NARROW_HI>;
912def RSHRN_HIGH_N   : IOpInst<"vrshrn_high_n", "<(<q).I",
913                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
914def QRSHRUN_HIGH_N : SOpInst<"vqrshrun_high_n", "<(<q).I",
915                             "HsHiHl", OP_NARROW_HI>;
916def QSHRN_HIGH_N   : SOpInst<"vqshrn_high_n", "<(<q).I",
917                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
918def QRSHRN_HIGH_N  : SOpInst<"vqrshrn_high_n", "<(<q).I",
919                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
920}
921
922////////////////////////////////////////////////////////////////////////////////
923// Converting vectors
924def VMOVL_HIGH   : SOpInst<"vmovl_high", ">.", "HcHsHiHUcHUsHUi", OP_MOVL_HI>;
925
926let isVCVT_N = 1 in {
927def CVTF_N_F64   : SInst<"vcvt_n_f64", "F(.!)I", "lUlQlQUl">;
928def FCVTZS_N_S64 : SInst<"vcvt_n_s64", "S.I", "dQd">;
929def FCVTZS_N_U64 : SInst<"vcvt_n_u64", "U.I", "dQd">;
930}
931
932////////////////////////////////////////////////////////////////////////////////
933// 3VDiff class using high 64-bit in operands
934def VADDL_HIGH   : SOpInst<"vaddl_high", "(>Q)QQ", "csiUcUsUi", OP_ADDLHi>;
935def VADDW_HIGH   : SOpInst<"vaddw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_ADDWHi>;
936def VSUBL_HIGH   : SOpInst<"vsubl_high", "(>Q)QQ", "csiUcUsUi", OP_SUBLHi>;
937def VSUBW_HIGH   : SOpInst<"vsubw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_SUBWHi>;
938
939def VABDL_HIGH   : SOpInst<"vabdl_high", "(>Q)QQ",  "csiUcUsUi", OP_ABDLHi>;
940def VABAL_HIGH   : SOpInst<"vabal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_ABALHi>;
941
942def VMULL_HIGH   : SOpInst<"vmull_high", "(>Q)QQ", "csiUcUsUiPc", OP_MULLHi>;
943def VMULL_HIGH_N : SOpInst<"vmull_high_n", "(>Q)Q1", "siUsUi", OP_MULLHi_N>;
944def VMLAL_HIGH   : SOpInst<"vmlal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLALHi>;
945def VMLAL_HIGH_N : SOpInst<"vmlal_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLALHi_N>;
946def VMLSL_HIGH   : SOpInst<"vmlsl_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLSLHi>;
947def VMLSL_HIGH_N : SOpInst<"vmlsl_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLSLHi_N>;
948
949def VADDHN_HIGH  : SOpInst<"vaddhn_high", "(<Q)<QQ", "silUsUiUl", OP_ADDHNHi>;
950def VRADDHN_HIGH : SOpInst<"vraddhn_high", "(<Q)<QQ", "silUsUiUl", OP_RADDHNHi>;
951def VSUBHN_HIGH  : SOpInst<"vsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_SUBHNHi>;
952def VRSUBHN_HIGH : SOpInst<"vrsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_RSUBHNHi>;
953
954def VQDMULL_HIGH : SOpInst<"vqdmull_high", "(>Q)QQ", "si", OP_QDMULLHi>;
955def VQDMULL_HIGH_N : SOpInst<"vqdmull_high_n", "(>Q)Q1", "si", OP_QDMULLHi_N>;
956def VQDMLAL_HIGH : SOpInst<"vqdmlal_high", "(>Q)(>Q)QQ", "si", OP_QDMLALHi>;
957def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLALHi_N>;
958def VQDMLSL_HIGH : SOpInst<"vqdmlsl_high", "(>Q)(>Q)QQ", "si", OP_QDMLSLHi>;
959def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLSLHi_N>;
960let TargetGuard = "aes" in {
961  def VMULL_P64    : SInst<"vmull", "(1>)11", "Pl">;
962  def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;
963}
964
965
966////////////////////////////////////////////////////////////////////////////////
967// Extract or insert element from vector
968def GET_LANE : IInst<"vget_lane", "1.I", "dQdPlQPl">;
969def SET_LANE : IInst<"vset_lane", ".1.I", "dQdPlQPl">;
970def COPY_LANE : IOpInst<"vcopy_lane", "..I.I",
971                        "csilUcUsUiUlPcPsPlfd", OP_COPY_LN>;
972def COPYQ_LANE : IOpInst<"vcopy_lane", "..IqI",
973                        "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
974def COPY_LANEQ : IOpInst<"vcopy_laneq", "..IQI",
975                     "csilPcPsPlUcUsUiUlfd", OP_COPY_LN> {
976  let isLaneQ = 1;
977}
978def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
979                     "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN> {
980  let isLaneQ = 1;
981}
982
983////////////////////////////////////////////////////////////////////////////////
984// Set all lanes to same value
985def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "dQdPlQPl", OP_DUP_LN>;
986def VDUP_LANE2: WOpInst<"vdup_laneq", ".QI",
987                  "csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
988                        OP_DUP_LN> {
989  let isLaneQ = 1;
990}
991def DUP_N   : WOpInst<"vdup_n", ".1", "dQdPlQPl", OP_DUP>;
992def MOV_N   : WOpInst<"vmov_n", ".1", "dQdPlQPl", OP_DUP>;
993
994////////////////////////////////////////////////////////////////////////////////
995def COMBINE : NoTestOpInst<"vcombine", "Q..", "dPl", OP_CONC>;
996
997////////////////////////////////////////////////////////////////////////////////
998//Initialize a vector from bit pattern
999def CREATE : NoTestOpInst<"vcreate", ".(IU>)", "dPl", OP_CAST> {
1000  let BigEndianSafe = 1;
1001}
1002
1003////////////////////////////////////////////////////////////////////////////////
1004
1005def VMLA_LANEQ   : IOpInst<"vmla_laneq", "...QI",
1006                           "siUsUifQsQiQUsQUiQf", OP_MLA_LN> {
1007  let isLaneQ = 1;
1008}
1009def VMLS_LANEQ   : IOpInst<"vmls_laneq", "...QI",
1010                           "siUsUifQsQiQUsQUiQf", OP_MLS_LN> {
1011  let isLaneQ = 1;
1012}
1013
1014def VFMA_LANE    : IInst<"vfma_lane", "...qI", "fdQfQd">;
1015def VFMA_LANEQ   : IInst<"vfma_laneq", "...QI", "fdQfQd"> {
1016  let isLaneQ = 1;
1017}
1018def VFMS_LANE    : IOpInst<"vfms_lane", "...qI", "fdQfQd", OP_FMS_LN>;
1019def VFMS_LANEQ   : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ> {
1020  let isLaneQ = 1;
1021}
1022
1023def VMLAL_LANEQ  : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN> {
1024  let isLaneQ = 1;
1025}
1026def VMLAL_HIGH_LANE   : SOpInst<"vmlal_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1027                                OP_MLALHi_LN>;
1028def VMLAL_HIGH_LANEQ  : SOpInst<"vmlal_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1029                                OP_MLALHi_LN> {
1030  let isLaneQ = 1;
1031}
1032def VMLSL_LANEQ  : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN> {
1033  let isLaneQ = 1;
1034}
1035def VMLSL_HIGH_LANE   : SOpInst<"vmlsl_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
1036                                OP_MLSLHi_LN>;
1037def VMLSL_HIGH_LANEQ  : SOpInst<"vmlsl_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
1038                                OP_MLSLHi_LN> {
1039  let isLaneQ = 1;
1040}
1041
1042def VQDMLAL_LANEQ  : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN> {
1043  let isLaneQ = 1;
1044}
1045def VQDMLAL_HIGH_LANE   : SOpInst<"vqdmlal_high_lane", "(>Q)(>Q)Q.I", "si",
1046                                OP_QDMLALHi_LN>;
1047def VQDMLAL_HIGH_LANEQ  : SOpInst<"vqdmlal_high_laneq", "(>Q)(>Q)QQI", "si",
1048                                OP_QDMLALHi_LN> {
1049  let isLaneQ = 1;
1050}
1051def VQDMLSL_LANEQ  : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN> {
1052  let isLaneQ = 1;
1053}
1054def VQDMLSL_HIGH_LANE   : SOpInst<"vqdmlsl_high_lane", "(>Q)(>Q)Q.I", "si",
1055                                OP_QDMLSLHi_LN>;
1056def VQDMLSL_HIGH_LANEQ  : SOpInst<"vqdmlsl_high_laneq", "(>Q)(>Q)QQI", "si",
1057                                OP_QDMLSLHi_LN> {
1058  let isLaneQ = 1;
1059}
1060
// Newly added double parameter for vmul_lane in aarch64
// Note: d type is handled by SCALAR_VMUL_LANE
def VMUL_LANE_A64 : IOpInst<"vmul_lane", "..qI", "Qd", OP_MUL_LN>;

// Note: d type is handled by SCALAR_VMUL_LANEQ
def VMUL_LANEQ   : IOpInst<"vmul_laneq", "..QI",
                           "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN> {
  let isLaneQ = 1;
}
def VMULL_LANEQ  : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN> {
  let isLaneQ = 1;
}
def VMULL_HIGH_LANE   : SOpInst<"vmull_high_lane", "(>Q)Q.I", "siUsUi",
                                OP_MULLHi_LN>;
def VMULL_HIGH_LANEQ  : SOpInst<"vmull_high_laneq", "(>Q)QQI", "siUsUi",
                                OP_MULLHi_LN> {
  let isLaneQ = 1;
}

def VQDMULL_LANEQ  : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN> {
  let isLaneQ = 1;
}
def VQDMULL_HIGH_LANE   : SOpInst<"vqdmull_high_lane", "(>Q)Q.I", "si",
                                  OP_QDMULLHi_LN>;
def VQDMULL_HIGH_LANEQ  : SOpInst<"vqdmull_high_laneq", "(>Q)QQI", "si",
                                  OP_QDMULLHi_LN> {
  let isLaneQ = 1;
}

let isLaneQ = 1 in {
def VQDMULH_LANEQ  : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
}
// Armv8.1-A rounding doubling multiply accumulate, 128-bit lane source.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
  let isLaneQ = 1;
}
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
  let isLaneQ = 1;
}
} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"

// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
// Note: d type is implemented by SCALAR_VMULX_LANEQ
def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN> {
  let isLaneQ = 1;
}
1109
1110////////////////////////////////////////////////////////////////////////////////
1111// Across vectors class
1112def VADDLV  : SInst<"vaddlv", "(1>).", "csiUcUsUiQcQsQiQUcQUsQUi">;
1113def VMAXV   : SInst<"vmaxv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1114def VMINV   : SInst<"vminv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
1115def VADDV   : SInst<"vaddv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">;
1116def FMAXNMV : SInst<"vmaxnmv", "1.", "fQfQd">;
1117def FMINNMV : SInst<"vminnmv", "1.", "fQfQd">;
1118
1119////////////////////////////////////////////////////////////////////////////////
1120// Newly added Vector Extract for f64
1121def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl">;
1122
1123////////////////////////////////////////////////////////////////////////////////
1124// Crypto
1125let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "aes" in {
1126def AESE : SInst<"vaese", "...", "QUc">;
1127def AESD : SInst<"vaesd", "...", "QUc">;
1128def AESMC : SInst<"vaesmc", "..", "QUc">;
1129def AESIMC : SInst<"vaesimc", "..", "QUc">;
1130}
1131
1132let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "sha2" in {
1133def SHA1H : SInst<"vsha1h", "11", "Ui">;
1134def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
1135def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;
1136
1137def SHA1C : SInst<"vsha1c", "..1.", "QUi">;
1138def SHA1P : SInst<"vsha1p", "..1.", "QUi">;
1139def SHA1M : SInst<"vsha1m", "..1.", "QUi">;
1140def SHA1SU0 : SInst<"vsha1su0", "....", "QUi">;
1141def SHA256H : SInst<"vsha256h", "....", "QUi">;
1142def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
1143def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
1144}
1145
1146let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
1147def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
1148def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
1149def RAX1 : SInst<"vrax1", "...", "QUl">;
1150
1151let isVXAR = 1 in {
1152def XAR :  SInst<"vxar", "...I", "QUl">;
1153}
1154}
1155
// SHA-512 hash-update and schedule-update operations (gated by the sha3
// target feature on AArch64, which implies FEAT_SHA512 here).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
// Record renamed from "SHA512su1" for consistent casing with SHA512SU0 /
// SHA512H / SHA512H2; the record name is internal to TableGen and does not
// change the emitted intrinsic name ("vsha512su1").
def SHA512SU1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}
1162
// SM3 hash operations (Chinese commercial cryptography standard).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
def SM3TT2A : SInst<"vsm3tt2a", "....I", "QUi">;
def SM3TT2B : SInst<"vsm3tt2b", "....I", "QUi">;
def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}

// SM4 block cipher round and key-schedule operations.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}

////////////////////////////////////////////////////////////////////////////////
// poly128_t vadd for AArch64 only see VADDP for the rest
def VADDP_Q   : WInst<"vadd", "...", "QPk">;
1181
1182////////////////////////////////////////////////////////////////////////////////
1183// Float -> Int conversions with explicit rounding mode
1184
1185let ArchGuard = "__ARM_ARCH >= 8" in {
1186def FCVTNS_S32 : SInst<"vcvtn_s32", "S.", "fQf">;
1187def FCVTNU_S32 : SInst<"vcvtn_u32", "U.", "fQf">;
1188def FCVTPS_S32 : SInst<"vcvtp_s32", "S.", "fQf">;
1189def FCVTPU_S32 : SInst<"vcvtp_u32", "U.", "fQf">;
1190def FCVTMS_S32 : SInst<"vcvtm_s32", "S.", "fQf">;
1191def FCVTMU_S32 : SInst<"vcvtm_u32", "U.", "fQf">;
1192def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
1193def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
1194}
1195
1196let ArchGuard = "defined(__aarch64__)" in {
1197def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
1198def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
1199def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
1200def FCVTPU_S64 : SInst<"vcvtp_u64", "U.", "dQd">;
1201def FCVTMS_S64 : SInst<"vcvtm_s64", "S.", "dQd">;
1202def FCVTMU_S64 : SInst<"vcvtm_u64", "U.", "dQd">;
1203def FCVTAS_S64 : SInst<"vcvta_s64", "S.", "dQd">;
1204def FCVTAU_S64 : SInst<"vcvta_u64", "U.", "dQd">;
1205}
1206
1207////////////////////////////////////////////////////////////////////////////////
1208// Round to Integral
1209
1210let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
1211def FRINTN_S32 : SInst<"vrndn", "..", "fQf">;
1212def FRINTA_S32 : SInst<"vrnda", "..", "fQf">;
1213def FRINTP_S32 : SInst<"vrndp", "..", "fQf">;
1214def FRINTM_S32 : SInst<"vrndm", "..", "fQf">;
1215def FRINTX_S32 : SInst<"vrndx", "..", "fQf">;
1216def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
1217def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
1218}
1219
1220let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
1221def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
1222def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
1223def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
1224def FRINTM_S64 : SInst<"vrndm", "..", "dQd">;
1225def FRINTX_S64 : SInst<"vrndx", "..", "dQd">;
1226def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
1227def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
1228}
1229
1230let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.5a" in {
1231def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
1232def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
1233def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
1234def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;
1235}
1236
1237////////////////////////////////////////////////////////////////////////////////
1238// MaxNum/MinNum Floating Point
1239
1240let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
1241def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
1242def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
1243}
1244
1245let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
1246def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
1247def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
1248}
1249
1250////////////////////////////////////////////////////////////////////////////////
1251// Permutation
1252def VTRN1 : SOpInst<"vtrn1", "...",
1253                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN1>;
1254def VZIP1 : SOpInst<"vzip1", "...",
1255                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP1>;
1256def VUZP1 : SOpInst<"vuzp1", "...",
1257                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP1>;
1258def VTRN2 : SOpInst<"vtrn2", "...",
1259                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN2>;
1260def VZIP2 : SOpInst<"vzip2", "...",
1261                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP2>;
1262def VUZP2 : SOpInst<"vuzp2", "...",
1263                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP2>;
1264
1265////////////////////////////////////////////////////////////////////////////////
1266// Table lookup
1267let InstName = "vtbl" in {
1268def VQTBL1_A64 : WInst<"vqtbl1", ".QU",  "UccPcQUcQcQPc">;
1269def VQTBL2_A64 : WInst<"vqtbl2", ".(2Q)U",  "UccPcQUcQcQPc">;
1270def VQTBL3_A64 : WInst<"vqtbl3", ".(3Q)U",  "UccPcQUcQcQPc">;
1271def VQTBL4_A64 : WInst<"vqtbl4", ".(4Q)U",  "UccPcQUcQcQPc">;
1272}
1273let InstName = "vtbx" in {
1274def VQTBX1_A64 : WInst<"vqtbx1", "..QU", "UccPcQUcQcQPc">;
1275def VQTBX2_A64 : WInst<"vqtbx2", "..(2Q)U", "UccPcQUcQcQPc">;
1276def VQTBX3_A64 : WInst<"vqtbx3", "..(3Q)U", "UccPcQUcQcQPc">;
1277def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
1278}
1279
1280////////////////////////////////////////////////////////////////////////////////
1281// Vector reinterpret cast operations
1282
1283// NeonEmitter implicitly takes the cartesian product of the type string with
1284// itself during generation so, unlike all other intrinsics, this one should
1285// include *all* types, not just additional ones.
1286def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
1287  let ArchGuard = "defined(__aarch64__)";
1288  let BigEndianSafe = 1;
1289}
1290
1291////////////////////////////////////////////////////////////////////////////////
1292// Scalar Intrinsics
1293// Scalar Arithmetic
1294
1295// Scalar Addition
1296def SCALAR_ADD : SInst<"vadd", "111",  "SlSUl">;
1297// Scalar  Saturating Add
1298def SCALAR_QADD   : SInst<"vqadd", "111", "ScSsSiSlSUcSUsSUiSUl">;
1299
1300// Scalar Subtraction
1301def SCALAR_SUB : SInst<"vsub", "111",  "SlSUl">;
1302// Scalar  Saturating Sub
1303def SCALAR_QSUB   : SInst<"vqsub", "111", "ScSsSiSlSUcSUsSUiSUl">;
1304
1305let InstName = "vmov" in {
1306def VGET_HIGH_A64 : NoTestOpInst<"vget_high", ".Q", "dPl", OP_HI>;
1307def VGET_LOW_A64  : NoTestOpInst<"vget_low", ".Q", "dPl", OP_LO>;
1308}
1309
1310////////////////////////////////////////////////////////////////////////////////
1311// Scalar Shift
1312// Scalar Shift Left
1313def SCALAR_SHL: SInst<"vshl", "11(S1)", "SlSUl">;
1314// Scalar Saturating Shift Left
1315def SCALAR_QSHL: SInst<"vqshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
1316// Scalar Saturating Rounding Shift Left
1317def SCALAR_QRSHL: SInst<"vqrshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
1318// Scalar Shift Rounding Left
1319def SCALAR_RSHL: SInst<"vrshl", "11(S1)", "SlSUl">;
1320
1321////////////////////////////////////////////////////////////////////////////////
1322// Scalar Shift (Immediate)
1323let isScalarShift = 1 in {
1324// Signed/Unsigned Shift Right (Immediate)
1325def SCALAR_SSHR_N: SInst<"vshr_n", "11I", "SlSUl">;
1326// Signed/Unsigned Rounding Shift Right (Immediate)
1327def SCALAR_SRSHR_N: SInst<"vrshr_n", "11I", "SlSUl">;
1328
1329// Signed/Unsigned Shift Right and Accumulate (Immediate)
1330def SCALAR_SSRA_N: SInst<"vsra_n", "111I", "SlSUl">;
1331// Signed/Unsigned Rounding Shift Right and Accumulate (Immediate)
1332def SCALAR_SRSRA_N: SInst<"vrsra_n", "111I", "SlSUl">;
1333
1334// Shift Left (Immediate)
1335def SCALAR_SHL_N: SInst<"vshl_n", "11I", "SlSUl">;
1336// Signed/Unsigned Saturating Shift Left (Immediate)
1337def SCALAR_SQSHL_N: SInst<"vqshl_n", "11I", "ScSsSiSlSUcSUsSUiSUl">;
1338// Signed Saturating Shift Left Unsigned (Immediate)
1339def SCALAR_SQSHLU_N: SInst<"vqshlu_n", "11I", "ScSsSiSl">;
1340
1341// Shift Right And Insert (Immediate)
1342def SCALAR_SRI_N: SInst<"vsri_n", "111I", "SlSUl">;
1343// Shift Left And Insert (Immediate)
1344def SCALAR_SLI_N: SInst<"vsli_n", "111I", "SlSUl">;
1345
1346let isScalarNarrowShift = 1 in {
1347  // Signed/Unsigned Saturating Shift Right Narrow (Immediate)
1348  def SCALAR_SQSHRN_N: SInst<"vqshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
1349  // Signed/Unsigned Saturating Rounded Shift Right Narrow (Immediate)
1350  def SCALAR_SQRSHRN_N: SInst<"vqrshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
1351  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
1352  def SCALAR_SQSHRUN_N: SInst<"vqshrun_n", "(1<)1I", "SsSiSl">;
1353  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
1354  def SCALAR_SQRSHRUN_N: SInst<"vqrshrun_n", "(1<)1I", "SsSiSl">;
1355}
1356
1357////////////////////////////////////////////////////////////////////////////////
1358// Scalar Signed/Unsigned Fixed-point Convert To Floating-Point (Immediate)
1359def SCALAR_SCVTF_N_F32: SInst<"vcvt_n_f32", "(1F)(1!)I", "SiSUi">;
1360def SCALAR_SCVTF_N_F64: SInst<"vcvt_n_f64", "(1F)(1!)I", "SlSUl">;
1361
1362////////////////////////////////////////////////////////////////////////////////
1363// Scalar Floating-point Convert To Signed/Unsigned Fixed-point (Immediate)
1364def SCALAR_FCVTZS_N_S32 : SInst<"vcvt_n_s32", "(1S)1I", "Sf">;
1365def SCALAR_FCVTZU_N_U32 : SInst<"vcvt_n_u32", "(1U)1I", "Sf">;
1366def SCALAR_FCVTZS_N_S64 : SInst<"vcvt_n_s64", "(1S)1I", "Sd">;
1367def SCALAR_FCVTZU_N_U64 : SInst<"vcvt_n_u64", "(1U)1I", "Sd">;
1368}
1369
1370////////////////////////////////////////////////////////////////////////////////
1371// Scalar Floating-point Round to Integral
1372let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
1373def SCALAR_FRINTN_S32 : SInst<"vrndn", "11", "Sf">;
1374}
1375
1376////////////////////////////////////////////////////////////////////////////////
1377// Scalar Reduce Pairwise Addition (Scalar and Floating Point)
1378def SCALAR_ADDP  : SInst<"vpadd", "1.", "SfSHlSHdSHUl">;
1379
1380////////////////////////////////////////////////////////////////////////////////
1381// Scalar Reduce Floating Point Pairwise Max/Min
1382def SCALAR_FMAXP : SInst<"vpmax", "1.", "SfSQd">;
1383
1384def SCALAR_FMINP : SInst<"vpmin", "1.", "SfSQd">;
1385
1386////////////////////////////////////////////////////////////////////////////////
1387// Scalar Reduce Floating Point Pairwise maxNum/minNum
1388def SCALAR_FMAXNMP : SInst<"vpmaxnm", "1.", "SfSQd">;
1389def SCALAR_FMINNMP : SInst<"vpminnm", "1.", "SfSQd">;
1390
1391////////////////////////////////////////////////////////////////////////////////
1392// Scalar Integer Saturating Doubling Multiply Half High
1393def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;
1394
1395////////////////////////////////////////////////////////////////////////////////
1396// Scalar Integer Saturating Rounding Doubling Multiply Half High
1397def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;
1398
1399let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
1400////////////////////////////////////////////////////////////////////////////////
1401// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
1402def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;
1403
1404////////////////////////////////////////////////////////////////////////////////
1405// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
1406def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
1407} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"
1408
1409////////////////////////////////////////////////////////////////////////////////
1410// Scalar Floating-point Multiply Extended
1411def SCALAR_FMULX : IInst<"vmulx", "111", "SfSd">;
1412
1413////////////////////////////////////////////////////////////////////////////////
1414// Scalar Floating-point Reciprocal Step
1415def SCALAR_FRECPS : IInst<"vrecps", "111", "SfSd">;
1416
1417////////////////////////////////////////////////////////////////////////////////
1418// Scalar Floating-point Reciprocal Square Root Step
1419def SCALAR_FRSQRTS : IInst<"vrsqrts", "111", "SfSd">;
1420
1421////////////////////////////////////////////////////////////////////////////////
1422// Scalar Signed Integer Convert To Floating-point
1423def SCALAR_SCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "Si">;
1424def SCALAR_SCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "Sl">;
1425
1426////////////////////////////////////////////////////////////////////////////////
1427// Scalar Unsigned Integer Convert To Floating-point
1428def SCALAR_UCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "SUi">;
1429def SCALAR_UCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "SUl">;
1430
1431////////////////////////////////////////////////////////////////////////////////
1432// Scalar Floating-point Converts
1433def SCALAR_FCVTXN  : IInst<"vcvtx_f32", "(1F<)(1!)", "Sd">;
1434def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "(1S)1", "Sf">;
1435def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "(1U)1", "Sf">;
1436def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "(1S)1", "Sd">;
1437def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "(1U)1", "Sd">;
1438def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "(1S)1", "Sf">;
1439def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "(1U)1", "Sf">;
1440def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "(1S)1", "Sd">;
1441def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "(1U)1", "Sd">;
1442def SCALAR_FCVTASS : SInst<"vcvta_s32", "(1S)1", "Sf">;
1443def SCALAR_FCVTAUS : SInst<"vcvta_u32", "(1U)1", "Sf">;
1444def SCALAR_FCVTASD : SInst<"vcvta_s64", "(1S)1", "Sd">;
1445def SCALAR_FCVTAUD : SInst<"vcvta_u64", "(1U)1", "Sd">;
1446def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "(1S)1", "Sf">;
1447def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "(1U)1", "Sf">;
1448def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "(1S)1", "Sd">;
1449def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "(1U)1", "Sd">;
1450def SCALAR_FCVTZSS : SInst<"vcvt_s32", "(1S)1", "Sf">;
1451def SCALAR_FCVTZUS : SInst<"vcvt_u32", "(1U)1", "Sf">;
1452def SCALAR_FCVTZSD : SInst<"vcvt_s64", "(1S)1", "Sd">;
1453def SCALAR_FCVTZUD : SInst<"vcvt_u64", "(1U)1", "Sd">;
1454
1455////////////////////////////////////////////////////////////////////////////////
1456// Scalar Floating-point Reciprocal Estimate
1457def SCALAR_FRECPE : IInst<"vrecpe", "11", "SfSd">;
1458
1459////////////////////////////////////////////////////////////////////////////////
1460// Scalar Floating-point Reciprocal Exponent
1461def SCALAR_FRECPX : IInst<"vrecpx", "11", "SfSd">;
1462
1463////////////////////////////////////////////////////////////////////////////////
1464// Scalar Floating-point Reciprocal Square Root Estimate
1465def SCALAR_FRSQRTE : IInst<"vrsqrte", "11", "SfSd">;
1466
1467////////////////////////////////////////////////////////////////////////////////
1468// Scalar Integer Comparison
1469def SCALAR_CMEQ : SInst<"vceq", "(U1)11", "SlSUl">;
1470def SCALAR_CMEQZ : SInst<"vceqz", "(U1)1", "SlSUl">;
1471def SCALAR_CMGE : SInst<"vcge", "(U1)11", "Sl">;
1472def SCALAR_CMGEZ : SInst<"vcgez", "(U1)1", "Sl">;
1473def SCALAR_CMHS : SInst<"vcge", "(U1)11", "SUl">;
1474def SCALAR_CMLE : SInst<"vcle", "(U1)11", "SlSUl">;
1475def SCALAR_CMLEZ : SInst<"vclez", "(U1)1", "Sl">;
1476def SCALAR_CMLT : SInst<"vclt", "(U1)11", "SlSUl">;
1477def SCALAR_CMLTZ : SInst<"vcltz", "(U1)1", "Sl">;
1478def SCALAR_CMGT : SInst<"vcgt", "(U1)11", "Sl">;
1479def SCALAR_CMGTZ : SInst<"vcgtz", "(U1)1", "Sl">;
1480def SCALAR_CMHI : SInst<"vcgt", "(U1)11", "SUl">;
1481def SCALAR_CMTST : SInst<"vtst", "(U1)11", "SlSUl">;
1482
1483////////////////////////////////////////////////////////////////////////////////
1484// Scalar Floating-point Comparison
1485def SCALAR_FCMEQ : IInst<"vceq", "(1U)11", "SfSd">;
1486def SCALAR_FCMEQZ : IInst<"vceqz", "(1U)1", "SfSd">;
1487def SCALAR_FCMGE : IInst<"vcge", "(1U)11", "SfSd">;
1488def SCALAR_FCMGEZ : IInst<"vcgez", "(1U)1", "SfSd">;
1489def SCALAR_FCMGT : IInst<"vcgt", "(1U)11", "SfSd">;
1490def SCALAR_FCMGTZ : IInst<"vcgtz", "(1U)1", "SfSd">;
1491def SCALAR_FCMLE : IInst<"vcle", "(1U)11", "SfSd">;
1492def SCALAR_FCMLEZ : IInst<"vclez", "(1U)1", "SfSd">;
1493def SCALAR_FCMLT : IInst<"vclt", "(1U)11", "SfSd">;
1494def SCALAR_FCMLTZ : IInst<"vcltz", "(1U)1", "SfSd">;
1495
1496////////////////////////////////////////////////////////////////////////////////
1497// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
1498def SCALAR_FACGE : IInst<"vcage", "(1U)11", "SfSd">;
1499def SCALAR_FACLE : IInst<"vcale", "(1U)11", "SfSd">;
1500
1501////////////////////////////////////////////////////////////////////////////////
1502// Scalar Floating-point Absolute Compare Mask Greater Than
1503def SCALAR_FACGT : IInst<"vcagt", "(1U)11", "SfSd">;
1504def SCALAR_FACLT : IInst<"vcalt", "(1U)11", "SfSd">;
1505
1506////////////////////////////////////////////////////////////////////////////////
1507// Scalar Absolute Value
1508def SCALAR_ABS : SInst<"vabs", "11", "Sl">;
1509
1510////////////////////////////////////////////////////////////////////////////////
1511// Scalar Absolute Difference
1512def SCALAR_ABD : IInst<"vabd", "111", "SfSd">;
1513
1514////////////////////////////////////////////////////////////////////////////////
1515// Scalar Signed Saturating Absolute Value
1516def SCALAR_SQABS : SInst<"vqabs", "11", "ScSsSiSl">;
1517
1518////////////////////////////////////////////////////////////////////////////////
1519// Scalar Negate
1520def SCALAR_NEG : SInst<"vneg", "11", "Sl">;
1521
1522////////////////////////////////////////////////////////////////////////////////
1523// Scalar Signed Saturating Negate
1524def SCALAR_SQNEG : SInst<"vqneg", "11", "ScSsSiSl">;
1525
1526////////////////////////////////////////////////////////////////////////////////
1527// Scalar Signed Saturating Accumulated of Unsigned Value
1528def SCALAR_SUQADD : SInst<"vuqadd", "11(1U)", "ScSsSiSl">;
1529
1530////////////////////////////////////////////////////////////////////////////////
1531// Scalar Unsigned Saturating Accumulated of Signed Value
1532def SCALAR_USQADD : SInst<"vsqadd", "11(1S)", "SUcSUsSUiSUl">;
1533
1534////////////////////////////////////////////////////////////////////////////////
1535// Signed Saturating Doubling Multiply-Add Long
1536def SCALAR_SQDMLAL : SInst<"vqdmlal", "(1>)(1>)11", "SsSi">;
1537
1538////////////////////////////////////////////////////////////////////////////////
1539// Signed Saturating Doubling Multiply-Subtract Long
1540def SCALAR_SQDMLSL : SInst<"vqdmlsl", "(1>)(1>)11", "SsSi">;
1541
1542////////////////////////////////////////////////////////////////////////////////
1543// Signed Saturating Doubling Multiply Long
1544def SCALAR_SQDMULL : SInst<"vqdmull", "(1>)11", "SsSi">;
1545
1546////////////////////////////////////////////////////////////////////////////////
1547// Scalar Signed Saturating Extract Unsigned Narrow
1548def SCALAR_SQXTUN : SInst<"vqmovun", "(U1<)1", "SsSiSl">;
1549
1550////////////////////////////////////////////////////////////////////////////////
1551// Scalar Signed Saturating Extract Narrow
1552def SCALAR_SQXTN : SInst<"vqmovn", "(1<)1", "SsSiSl">;
1553
1554////////////////////////////////////////////////////////////////////////////////
1555// Scalar Unsigned Saturating Extract Narrow
1556def SCALAR_UQXTN : SInst<"vqmovn", "(1<)1", "SUsSUiSUl">;
1557
// Scalar Floating Point multiply (scalar, by element)
// _lane forms index a 64-bit vector ('.'/'q'); _laneq forms index a 128-bit
// vector ('Q') and are tagged isLaneQ.
def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "11.I", "SfSd", OP_SCALAR_MUL_LN>;
def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN> {
  let isLaneQ = 1;
}

// Scalar Floating Point multiply extended (scalar, by element)
def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "11.I", "SfSd", OP_SCALAR_MULX_LN>;
def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN> {
  let isLaneQ = 1;
}

def SCALAR_VMUL_N : IInst<"vmul_n", "..1", "d">;

// VMUL_LANE_A64 d type implemented using scalar mul lane
def SCALAR_VMUL_LANE : IInst<"vmul_lane", "..qI", "d">;

// VMUL_LANEQ d type implemented using scalar mul lane
def SCALAR_VMUL_LANEQ   : IInst<"vmul_laneq", "..QI", "d"> {
  let isLaneQ = 1;
}

// VMULX_LANE d type implemented using scalar vmulx_lane
def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "d", OP_SCALAR_VMULX_LN>;

// VMULX_LANEQ d type implemented using scalar vmulx_laneq
def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ> {
  let isLaneQ = 1;
}
1587
// Scalar by-element fused multiply-add/subtract and saturating-doubling
// multiply families.  Each pair follows the same _lane / _laneq (isLaneQ)
// convention as the scalar multiplies above.

// Scalar Floating Point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANE : IInst<"vfma_lane", "111.I", "SfSd">;
def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd"> {
  let isLaneQ = 1;
}

// Scalar Floating Point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "111.I", "SfSd", OP_FMS_LN>;
def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply Long (scalar by element)
def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "(1>)1.I", "SsSi", OP_SCALAR_QDMULL_LN>;
def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Add Long (scalar by element)
def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Subtract Long (scalar by element)
def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Doubling Multiply Half High (scalar by element)
def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "11.I", "SsSi", OP_SCALAR_QDMULH_LN>;
def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "11.I", "SsSi", OP_SCALAR_QRDMULH_LN>;
def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN> {
  let isLaneQ = 1;
}
1629
1630let TargetGuard = "v8.1a" in {
1631// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
1632def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
1633def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN> {
1634  let isLaneQ = 1;
1635}
1636
1637// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
1638def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_QRDMLSH_LN>;
1639def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN> {
1640  let isLaneQ = 1;
1641}
1642} // TargetGuard = "v8.1a"
1643
// Extract one lane of a vector as a scalar ('1.I' / '1QI').
def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs"> {
  let isLaneQ = 1;
}
1648
1649} // ArchGuard = "defined(__aarch64__)"
1650
// ARMv8.2-A FP16 vector intrinsics for A32/A64.
let TargetGuard = "fullfp16" in {

  // ARMv8.2-A FP16 one-operand vector intrinsics.

  // Comparison against zero; results are uint16 element masks ('U').
  def CMEQH    : SInst<"vceqz", "U.", "hQh">;
  def CMGEH    : SInst<"vcgez", "U.", "hQh">;
  def CMGTH    : SInst<"vcgtz", "U.", "hQh">;
  def CMLEH    : SInst<"vclez", "U.", "hQh">;
  def CMLTH    : SInst<"vcltz", "U.", "hQh">;

  // Vector conversion (a/m/n/p pick the rounding mode, as for f32 above).
  def VCVT_F16     : SInst<"vcvt_f16", "F(.!)",  "sUsQsQUs">;
  def VCVT_S16     : SInst<"vcvt_s16", "S.",  "hQh">;
  def VCVT_U16     : SInst<"vcvt_u16", "U.",  "hQh">;
  def VCVTA_S16    : SInst<"vcvta_s16", "S.", "hQh">;
  def VCVTA_U16    : SInst<"vcvta_u16", "U.", "hQh">;
  def VCVTM_S16    : SInst<"vcvtm_s16", "S.", "hQh">;
  def VCVTM_U16    : SInst<"vcvtm_u16", "U.", "hQh">;
  def VCVTN_S16    : SInst<"vcvtn_s16", "S.", "hQh">;
  def VCVTN_U16    : SInst<"vcvtn_u16", "U.", "hQh">;
  def VCVTP_S16    : SInst<"vcvtp_s16", "S.", "hQh">;
  def VCVTP_U16    : SInst<"vcvtp_u16", "U.", "hQh">;

  // Vector rounding
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)", TargetGuard = "fullfp16" in {
    def FRINTZH      : SInst<"vrnd",  "..", "hQh">;
    def FRINTNH      : SInst<"vrndn", "..", "hQh">;
    def FRINTAH      : SInst<"vrnda", "..", "hQh">;
    def FRINTPH      : SInst<"vrndp", "..", "hQh">;
    def FRINTMH      : SInst<"vrndm", "..", "hQh">;
    def FRINTXH      : SInst<"vrndx", "..", "hQh">;
  }

  // Misc.
  def VABSH        : SInst<"vabs", "..", "hQh">;
  def VNEGH        : SOpInst<"vneg", "..", "hQh", OP_NEG>;
  def VRECPEH      : SInst<"vrecpe", "..", "hQh">;
  def FRSQRTEH     : SInst<"vrsqrte", "..", "hQh">;

  // ARMv8.2-A FP16 two-operands vector intrinsics.

  // Misc.
  def VADDH        : SOpInst<"vadd", "...", "hQh", OP_ADD>;
  def VABDH        : SInst<"vabd", "...",  "hQh">;
  def VSUBH         : SOpInst<"vsub", "...", "hQh", OP_SUB>;

  // Comparison
  // vcale/vcalt are grouped under the vacge/vacgt instruction names for
  // test generation.
  let InstName = "vacge" in {
    def VCAGEH     : SInst<"vcage", "U..", "hQh">;
    def VCALEH     : SInst<"vcale", "U..", "hQh">;
  }
  let InstName = "vacgt" in {
    def VCAGTH     : SInst<"vcagt", "U..", "hQh">;
    def VCALTH     : SInst<"vcalt", "U..", "hQh">;
  }
  def VCEQH        : SOpInst<"vceq", "U..", "hQh", OP_EQ>;
  def VCGEH        : SOpInst<"vcge", "U..", "hQh", OP_GE>;
  def VCGTH        : SOpInst<"vcgt", "U..", "hQh", OP_GT>;
  let InstName = "vcge" in
    def VCLEH      : SOpInst<"vcle", "U..", "hQh", OP_LE>;
  let InstName = "vcgt" in
    def VCLTH      : SOpInst<"vclt", "U..", "hQh", OP_LT>;

  // Vector conversion with a fixed-point fraction-bits immediate ('I').
  let isVCVT_N = 1 in {
    def VCVT_N_F16 : SInst<"vcvt_n_f16", "F(.!)I", "sUsQsQUs">;
    def VCVT_N_S16 : SInst<"vcvt_n_s16", "S.I", "hQh">;
    def VCVT_N_U16 : SInst<"vcvt_n_u16", "U.I", "hQh">;
  }

  // Max/Min
  def VMAXH         : SInst<"vmax", "...", "hQh">;
  def VMINH         : SInst<"vmin", "...", "hQh">;
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)", TargetGuard = "fullfp16" in {
    def FMAXNMH       : SInst<"vmaxnm", "...", "hQh">;
    def FMINNMH       : SInst<"vminnm", "...", "hQh">;
  }

  // Multiplication/Division
  def VMULH         : SOpInst<"vmul", "...", "hQh", OP_MUL>;

  // Pairwise addition (64-bit form only; the 128-bit form is A64-only).
  def VPADDH        : SInst<"vpadd", "...", "h">;

  // Pairwise Max/Min (64-bit forms only, as above)
  def VPMAXH        : SInst<"vpmax", "...", "h">;
  def VPMINH        : SInst<"vpmin", "...", "h">;

  // Reciprocal/Sqrt
  def VRECPSH       : SInst<"vrecps", "...", "hQh">;
  def VRSQRTSH      : SInst<"vrsqrts", "...", "hQh">;

  // ARMv8.2-A FP16 three-operands vector intrinsics.

  // Vector fused multiply-add operations
  def VFMAH        : SInst<"vfma", "....", "hQh">;
  def VFMSH        : SOpInst<"vfms", "....", "hQh", OP_FMLS>;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // Mul lane
  def VMUL_LANEH    : IOpInst<"vmul_lane", "..qI", "hQh", OP_MUL_LN>;
  def VMUL_NH       : IOpInst<"vmul_n", "..1", "hQh", OP_MUL_N>;

  // Data processing intrinsics - section 5

  // Logical operations
  let isHiddenLInst = 1 in
  def VBSLH    : SInst<"vbsl", ".U..", "hQh">;

  // Transposition operations ('2' = two-vector result struct)
  def VZIPH    : WInst<"vzip", "2..", "hQh">;
  def VUZPH    : WInst<"vuzp", "2..", "hQh">;
  def VTRNH    : WInst<"vtrn", "2..", "hQh">;

  // Vector Extract
  def VEXTH      : WInst<"vext", "...I", "hQh">;

  // Reverse vector elements
  def VREV64H    : WOpInst<"vrev64", "..", "hQh", OP_REV64>;
}
1774
// ARMv8.2-A FP16 vector intrinsics for A64 only.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in {

  // Vector rounding
  def FRINTIH      : SInst<"vrndi", "..", "hQh">;

  // Misc.
  def FSQRTH       : SInst<"vsqrt", "..", "hQh">;

  // Multiplication/Division
  def MULXH         : SInst<"vmulx", "...", "hQh">;
  def FDIVH         : IOpInst<"vdiv", "...",  "hQh", OP_DIV>;

  // Pairwise addition (128-bit form; the 64-bit form is defined above)
  def VPADDH1       : SInst<"vpadd", "...", "Qh">;

  // Pairwise Max/Min (128-bit forms)
  def VPMAXH1       : SInst<"vpmax", "...", "Qh">;
  def VPMINH1       : SInst<"vpmin", "...", "Qh">;

  // Pairwise MaxNum/MinNum
  def FMAXNMPH      : SInst<"vpmaxnm", "...", "hQh">;
  def FMINNMPH      : SInst<"vpminnm", "...", "hQh">;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // FMA lane
  def VFMA_LANEH   : IInst<"vfma_lane", "...qI", "hQh">;
  def VFMA_LANEQH  : IInst<"vfma_laneq", "...QI", "hQh"> {
    let isLaneQ = 1;
  }

  // FMA lane with scalar argument
  def FMLA_NH      : SOpInst<"vfma_n", "...1", "hQh", OP_FMLA_N>;
  // Scalar floating point fused multiply-add (scalar, by element)
  def SCALAR_FMLA_LANEH  : IInst<"vfma_lane", "111.I", "Sh">;
  def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh"> {
    let isLaneQ = 1;
  }

  // FMS lane
  def VFMS_LANEH   : IOpInst<"vfms_lane", "...qI", "hQh", OP_FMS_LN>;
  def VFMS_LANEQH  : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }
  // FMS lane with scalar argument
  def FMLS_NH      : SOpInst<"vfms_n", "...1", "hQh", OP_FMLS_N>;
  // Scalar floating point fused multiply-subtract (scalar, by element)
  def SCALAR_FMLS_LANEH  : IOpInst<"vfms_lane", "111.I", "Sh", OP_FMS_LN>;
  def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }

  // Mul lane
  def VMUL_LANEQH   : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN> {
    let isLaneQ = 1;
  }
  // Scalar floating point multiply (scalar, by element)
  def SCALAR_FMUL_LANEH  : IOpInst<"vmul_lane", "11.I", "Sh", OP_SCALAR_MUL_LN>;
  def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN> {
    let isLaneQ = 1;
  }

  // Mulx lane
  def VMULX_LANEH   : IOpInst<"vmulx_lane", "..qI", "hQh", OP_MULX_LN>;
  def VMULX_LANEQH  : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN> {
    let isLaneQ = 1;
  }
  def VMULX_NH      : IOpInst<"vmulx_n", "..1", "hQh", OP_MULX_N>;
  // Scalar floating point mulx (scalar, by element)
  def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "11.I", "Sh">;
  def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh"> {
    let isLaneQ = 1;
  }

  // ARMv8.2-A FP16 reduction vector intrinsics (vector reduced to scalar).
  def VMAXVH   : SInst<"vmaxv", "1.", "hQh">;
  def VMINVH   : SInst<"vminv", "1.", "hQh">;
  def FMAXNMVH : SInst<"vmaxnmv", "1.", "hQh">;
  def FMINNMVH : SInst<"vminnmv", "1.", "hQh">;

  // Permutation
  def VTRN1H     : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>;
  def VZIP1H     : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>;
  def VUZP1H     : SOpInst<"vuzp1", "...", "hQh", OP_UZP1>;
  def VTRN2H     : SOpInst<"vtrn2", "...", "hQh", OP_TRN2>;
  def VZIP2H     : SOpInst<"vzip2", "...", "hQh", OP_ZIP2>;
  def VUZP2H     : SOpInst<"vuzp2", "...", "hQh", OP_UZP2>;

  // Extract one f16 lane as a scalar.
  def SCALAR_VDUP_LANEH  : IInst<"vdup_lane", "1.I", "Sh">;
  def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh"> {
    let isLaneQ = 1;
  }
}
1869
// v8.2-A dot product instructions.
// '(<<)' operands are two steps narrower than the 32-bit accumulator lanes
// (i.e. 8-bit elements accumulated into 32-bit results).
let TargetGuard = "dotprod" in {
  def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
  def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
let ArchGuard = "defined(__aarch64__)", TargetGuard = "dotprod" in {
  // Variants indexing into a 128-bit vector are A64 only.
  def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
    let isLaneQ = 1;
  }
}
1881
// v8.2-A FP16 fused multiply-add long instructions.
// f16 operands are widened and accumulated into an f32 vector ('>>' / 'F>').
let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp16fml" in {
  def VFMLAL_LOW  : SInst<"vfmlal_low",  ">>..", "hQh">;
  def VFMLSL_LOW  : SInst<"vfmlsl_low",  ">>..", "hQh">;
  def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
  def VFMLSL_HIGH : SInst<"vfmlsl_high", ">>..", "hQh">;

  def VFMLAL_LANE_LOW  : SOpInst<"vfmlal_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN>;
  def VFMLSL_LANE_LOW  : SOpInst<"vfmlsl_lane_low",  "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN>;
  def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN_Hi>;
  def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN_Hi>;

  def VFMLAL_LANEQ_LOW  : SOpInst<"vfmlal_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_LOW  : SOpInst<"vfmlsl_laneq_low",  "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN> {
    let isLaneQ = 1;
  }
  def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi> {
    let isLaneQ = 1;
  }
}
1907
1908let TargetGuard = "i8mm" in {
1909  def VMMLA   : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
1910  def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;
1911
1912  def VUSDOT  : SInst<"vusdot", "..(<<U)(<<)", "iQi">;
1913
1914  def VUSDOT_LANE  : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
1915  def VSUDOT_LANE  : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;
1916
1917  let ArchGuard = "defined(__aarch64__)" in {
1918    let isLaneQ = 1 in {
1919      def VUSDOT_LANEQ  : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
1920      def VSUDOT_LANEQ  : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
1921    }
1922  }
1923}
1924
1925let TargetGuard = "bf16" in {
1926  def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
1927  def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
1928  def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ> {
1929    let isLaneQ = 1;
1930  }
1931
1932  def VFMMLA_BF : SInst<"vbfmmla", "..BB", "Qf">;
1933
1934  def VFMLALB_BF : SInst<"vbfmlalb", "..BB", "Qf">;
1935  def VFMLALT_BF : SInst<"vbfmlalt", "..BB", "Qf">;
1936
1937  def VFMLALB_LANE_BF : SOpInst<"vbfmlalb_lane", "..B(Bq)I", "Qf", OP_BFMLALB_LN>;
1938  def VFMLALB_LANEQ_BF : SOpInst<"vbfmlalb_laneq", "..B(BQ)I", "Qf", OP_BFMLALB_LN>;
1939
1940  def VFMLALT_LANE_BF : SOpInst<"vbfmlalt_lane", "..B(Bq)I", "Qf", OP_BFMLALT_LN>;
1941  def VFMLALT_LANEQ_BF : SOpInst<"vbfmlalt_laneq", "..B(BQ)I", "Qf", OP_BFMLALT_LN>;
1942}
1943
// Complex multiply-accumulate (vcmla) with rotations of 0/90/180/270
// degrees, plus the indexed _lane/_laneq variants, instantiated for both the
// 64-bit and 128-bit ("Q") forms of `type`.
// The lane operand is bitcast to an unsigned-integer vector (lanety/laneqty)
// whose lane width spans one (real, imag) element pair, so a single
// vget_lane + dup_typed moves the pair as a unit; the result is then bitcast
// back to $p0's type before the vcmla call.
multiclass VCMLA_ROTS<string type, string lanety, string laneqty> {
  foreach ROT = ["", "_rot90", "_rot180", "_rot270" ] in {
    def   : SInst<"vcmla" # ROT, "....", type # "Q" # type>;

    // vcmla{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", type, Op<(call "vcmla" # ROT, $p0, $p1,
           (bitcast $p0, (dup_typed lanety , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;

    // vcmlaq{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
           (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;

    let isLaneQ = 1 in  {
      // vcmla{ROT}_laneq
      def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", type,  Op<(call "vcmla" # ROT, $p0, $p1,
              (bitcast $p0, (dup_typed lanety, (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;

      // vcmlaq{ROT}_laneq
      def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
             (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
    }
  }
}
1967
// v8.3-A Vector complex addition intrinsics, half-precision forms
// (require both the v8.3a and fullfp16 features).
let TargetGuard = "v8.3a,fullfp16" in {
  // Complex addition with the operand rotated by 90 / 270 degrees.
  def VCADD_ROT90_FP16   : SInst<"vcadd_rot90", "...", "h">;
  def VCADD_ROT270_FP16  : SInst<"vcadd_rot270", "...", "h">;
  def VCADDQ_ROT90_FP16  : SInst<"vcaddq_rot90", "QQQ", "h">;
  def VCADDQ_ROT270_FP16 : SInst<"vcaddq_rot270", "QQQ", "h">;

  // fp16 complex multiply-accumulate; uint32 lane types cover one
  // (real, imag) fp16 pair per lane.
  defm VCMLA_FP16  : VCMLA_ROTS<"h", "uint32x2_t", "uint32x4_t">;
}
// v8.3-A complex addition / multiply-accumulate, single-precision forms.
let TargetGuard = "v8.3a" in {
  def VCADD_ROT90   : SInst<"vcadd_rot90", "...", "f">;
  def VCADD_ROT270  : SInst<"vcadd_rot270", "...", "f">;
  def VCADDQ_ROT90  : SInst<"vcaddq_rot90", "QQQ", "f">;
  def VCADDQ_ROT270 : SInst<"vcaddq_rot270", "QQQ", "f">;

  // f32 complex multiply-accumulate; uint64 lane types cover one
  // (real, imag) f32 pair per lane.
  defm VCMLA_F32        : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
}
// v8.3-A complex arithmetic, double-precision forms (AArch64 only;
// 128-bit "q" variants only, since one f64 complex pair needs 128 bits).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.3a" in {
  def VCADDQ_ROT90_FP64  : SInst<"vcaddq_rot90", "QQQ", "d">;
  def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;

  // lanety == laneqty here: only the 128-bit form exists for f64.
  defm VCMLA_FP64 : VCMLA_ROTS<"d", "uint64x2_t", "uint64x2_t">;
}
1991
// V8.2-A BFloat intrinsics: vector creation, duplication, lane access,
// loads/stores, and bf16 <-> f32 conversions.
let TargetGuard = "bf16" in {
  // vcreate: reinterpret a 64-bit scalar as bfloat16x4_t; marked
  // BigEndianSafe since it is a pure bitcast.
  def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
    let BigEndianSafe = 1;
  }

  // Broadcast a bf16 scalar to all lanes.
  def VDUP_N_BF    : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;

  // Broadcast one lane of a 64-bit (_lane) or 128-bit (_laneq) vector.
  def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
  def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN> {
    let isLaneQ = 1;
  }

  // Concatenate two 64-bit vectors into one 128-bit vector.
  def VCOMBINE_BF  : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;

  // Extract the high/low 64-bit half of a 128-bit vector.
  def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
  def VGET_LOW_BF  : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;

  // Scalar lane read/write and scalar lane duplication.
  def VGET_LANE_BF : IInst<"vget_lane", "1.I", "bQb">;
  def VSET_LANE_BF : IInst<"vset_lane", ".1.I", "bQb">;
  def SCALAR_VDUP_LANE_BF : IInst<"vdup_lane", "1.I", "Sb">;
  def SCALAR_VDUP_LANEQ_BF : IInst<"vdup_laneq", "1QI", "Sb"> {
    let isLaneQ = 1;
  }

  // Interleaved loads: vldN reads N vectors with N-way de-interleave.
  def VLD1_BF : WInst<"vld1", ".(c*!)", "bQb">;
  def VLD2_BF : WInst<"vld2", "2(c*!)", "bQb">;
  def VLD3_BF : WInst<"vld3", "3(c*!)", "bQb">;
  def VLD4_BF : WInst<"vld4", "4(c*!)", "bQb">;

  // Interleaved stores, mirroring the loads above.
  def VST1_BF : WInst<"vst1", "v*(.!)", "bQb">;
  def VST2_BF : WInst<"vst2", "v*(2!)", "bQb">;
  def VST3_BF : WInst<"vst3", "v*(3!)", "bQb">;
  def VST4_BF : WInst<"vst4", "v*(4!)", "bQb">;

  // vld1_xN / vst1_xN: N consecutive vectors, no interleaving.
  def VLD1_X2_BF : WInst<"vld1_x2", "2(c*!)", "bQb">;
  def VLD1_X3_BF : WInst<"vld1_x3", "3(c*!)", "bQb">;
  def VLD1_X4_BF : WInst<"vld1_x4", "4(c*!)", "bQb">;

  def VST1_X2_BF : WInst<"vst1_x2", "v*(2!)", "bQb">;
  def VST1_X3_BF : WInst<"vst1_x3", "v*(3!)", "bQb">;
  def VST1_X4_BF : WInst<"vst1_x4", "v*(4!)", "bQb">;

  // Single-lane loads/stores.
  def VLD1_LANE_BF : WInst<"vld1_lane", ".(c*!).I", "bQb">;
  def VLD2_LANE_BF : WInst<"vld2_lane", "2(c*!)2I", "bQb">;
  def VLD3_LANE_BF : WInst<"vld3_lane", "3(c*!)3I", "bQb">;
  def VLD4_LANE_BF : WInst<"vld4_lane", "4(c*!)4I", "bQb">;
  def VST1_LANE_BF : WInst<"vst1_lane", "v*(.!)I", "bQb">;
  def VST2_LANE_BF : WInst<"vst2_lane", "v*(2!)I", "bQb">;
  def VST3_LANE_BF : WInst<"vst3_lane", "v*(3!)I", "bQb">;
  def VST4_LANE_BF : WInst<"vst4_lane", "v*(4!)I", "bQb">;

  // Load-and-duplicate: broadcast the loaded element(s) to all lanes.
  def VLD1_DUP_BF : WInst<"vld1_dup", ".(c*!)", "bQb">;
  def VLD2_DUP_BF : WInst<"vld2_dup", "2(c*!)", "bQb">;
  def VLD3_DUP_BF : WInst<"vld3_dup", "3(c*!)", "bQb">;
  def VLD4_DUP_BF : WInst<"vld4_dup", "4(c*!)", "bQb">;

  // Widening conversions bf16 -> f32 (whole 64-bit vector, or the
  // low/high half of a 128-bit vector).
  def VCVT_F32_BF16 : SOpInst<"vcvt_f32_bf16", "(F>)(Bq!)",  "Qb", OP_VCVT_F32_BF16>;
  def VCVT_LOW_F32_BF16 : SOpInst<"vcvt_low_f32", "(F>)(BQ!)",  "Qb", OP_VCVT_F32_BF16_LO>;
  def VCVT_HIGH_F32_BF16 : SOpInst<"vcvt_high_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_HI>;

  // Scalar conversions: f32 -> bf16 and bf16 -> f32.
  def SCALAR_CVT_BF16_F32 : SInst<"vcvth_bf16", "(1B)1", "f">;
  def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
}
2056
// Narrowing conversions f32 -> bf16, AArch32-only lowering.  The
// __a32_vcvt_bf16 builtin is the internal primitive the OP_VCVT_* ops
// in arm_neon_incl.td expand to.
let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
  def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
  def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
  def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16",  "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
  def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
}
2063
// Narrowing conversions f32 -> bf16 (AArch64 lowering) plus bf16
// lane-copy intrinsics.  __a64_vcvtq_low_bf16 is the internal primitive
// behind the OP_VCVT_* expansions.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
  def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
  def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
  def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
  def VCVT_BF16_F32 : SOpInst<"vcvt_bf16",    "BQ", "f", OP_VCVT_BF16_F32_A64>;

  // vcopy_lane(q): insert one lane of the source into one lane of the
  // destination; all four 64/128-bit source/destination combinations.
  def COPY_LANE_BF16 : IOpInst<"vcopy_lane", "..I.I", "b", OP_COPY_LN>;
  def COPYQ_LANE_BF16 : IOpInst<"vcopy_lane", "..IqI", "Qb", OP_COPY_LN>;
  def COPY_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..IQI", "b", OP_COPY_LN>;
  def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
}
2075
// vreinterpret between bf16 vectors and every other AArch32 vector type
// (no f64/poly128 — those exist only on AArch64, see the block below).
let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
  let BigEndianSafe = 1 in {
    defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
  }
}
2082
// vreinterpret between bf16 vectors and every other AArch64 vector type;
// extends the A32 set with "d"/"Qd" (float64) and "QPk" (poly128).
// NOTE(review): the "VV" in the defm name looks like a typo for "V",
// but renaming would change the generated record names — confirm upstream.
let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
  let BigEndianSafe = 1 in {
    defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
  }
}
2089
// v8.9a/v9.4a LRCPC3 intrinsics: single-lane load-acquire (vldap1_lane)
// and store-release (vstl1_lane), for 64-bit element types only
// (int64/uint64/float64/poly64, 64- and 128-bit vectors).
let ArchGuard = "defined(__aarch64__)", TargetGuard = "rcpc3" in {
  def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">;
  def VSTL1_LANE  : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">;
}
2095