//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the TableGen definitions from which the ARM NEON header
// file will be generated. See ARM document DUI0348B.
//
//===----------------------------------------------------------------------===//

include "arm_neon_incl.td"

def OP_ADD : Op<(op "+", $p0, $p1)>;
def OP_ADDL : Op<(op "+", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_ADDLHi : Op<(op "+", (call "vmovl_high", $p0),
                            (call "vmovl_high", $p1))>;
def OP_ADDW : Op<(op "+", $p0, (call "vmovl", $p1))>;
def OP_ADDWHi : Op<(op "+", $p0, (call "vmovl_high", $p1))>;
def OP_SUB : Op<(op "-", $p0, $p1)>;
def OP_SUBL : Op<(op "-", (call "vmovl", $p0), (call "vmovl", $p1))>;
def OP_SUBLHi : Op<(op "-", (call "vmovl_high", $p0),
                            (call "vmovl_high", $p1))>;
def OP_SUBW : Op<(op "-", $p0, (call "vmovl", $p1))>;
def OP_SUBWHi : Op<(op "-", $p0, (call "vmovl_high", $p1))>;
def OP_MUL : Op<(op "*", $p0, $p1)>;
def OP_MLA : Op<(op "+", $p0, (op "*", $p1, $p2))>;
def OP_MLAL : Op<(op "+", $p0, (call "vmull", $p1, $p2))>;
def OP_MULLHi : Op<(call "vmull", (call "vget_high", $p0),
                                  (call "vget_high", $p1))>;
def OP_MULLHi_P64 : Op<(call "vmull",
                        (cast "poly64_t", (call "vget_high", $p0)),
                        (cast "poly64_t", (call "vget_high", $p1)))>;
def OP_MULLHi_N : Op<(call "vmull_n", (call "vget_high", $p0), $p1)>;
def OP_MLALHi : Op<(call "vmlal", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_MLALHi_N : Op<(call "vmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MLS : Op<(op "-", $p0, (op "*", $p1, $p2))>;
def OP_FMLS : Op<(call "vfma", $p0, (op "-", $p1), $p2)>;
def OP_MLSL : Op<(op "-", $p0, (call "vmull", $p1, $p2))>;
def OP_MLSLHi : Op<(call "vmlsl", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_MUL_N : Op<(op "*", $p0, (dup $p1))>;
def OP_MULX_N : Op<(call "vmulx", $p0, (dup $p1))>;
def OP_MLA_N : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>;
def OP_MLS_N : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>;
def OP_FMLA_N : Op<(call "vfma", $p0, $p1, (dup $p2))>;
def OP_FMLS_N : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>;
def OP_MLAL_N : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MLSL_N : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>;
def OP_MUL_LN : Op<(op "*", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULX_LN : Op<(call "vmulx", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULL_N : Op<(call "vmull", $p0, (dup $p1))>;
def OP_MULL_LN : Op<(call "vmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (call_mangled "splat_lane", $p1, $p2))>;
def OP_MLA_LN : Op<(op "+", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLS_LN : Op<(op "-", $p0, (op "*", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLAL_LN : Op<(op "+", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1),
                                    (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSL_LN : Op<(op "-", $p0, (call "vmull", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1),
                                     (call_mangled "splat_lane", $p2, $p3)))>;
def OP_QDMULL_N : Op<(call "vqdmull", $p0, (dup $p1))>;
def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0),
                         (call_mangled "splat_lane", $p1, $p2))>;
def OP_QDMLAL_N : Op<(call "vqdmlal", $p0, $p1, (dup $p2))>;
def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                         (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSL_N : Op<(call "vqdmlsl", $p0, $p1, (dup $p2))>;
def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                         (call_mangled "splat_lane", $p2, $p3))>;
def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_FMS_LN : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
def OP_TRN1 : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
                                                (decimate mask1, 2)))>;
def OP_ZIP1 : Op<(shuffle $p0, $p1, (lowhalf (interleave mask0, mask1)))>;
def OP_UZP1 : Op<(shuffle $p0, $p1, (add (decimate mask0, 2),
                                         (decimate mask1, 2)))>;
def OP_TRN2 : Op<(shuffle $p0, $p1, (interleave
                                      (decimate (rotl mask0, 1), 2),
                                      (decimate (rotl mask1, 1), 2)))>;
def OP_ZIP2 : Op<(shuffle $p0, $p1, (highhalf (interleave mask0, mask1)))>;
def OP_UZP2 : Op<(shuffle $p0, $p1, (add (decimate (rotl mask0, 1), 2),
                                         (decimate (rotl mask1, 1), 2)))>;
def OP_EQ : Op<(cast "R", (op "==", $p0, $p1))>;
def OP_GE : Op<(cast "R", (op ">=", $p0, $p1))>;
def OP_LE : Op<(cast "R", (op "<=", $p0, $p1))>;
def OP_GT : Op<(cast "R", (op ">", $p0, $p1))>;
def OP_LT : Op<(cast "R", (op "<", $p0, $p1))>;
def OP_NEG : Op<(op "-", $p0)>;
def OP_NOT : Op<(op "~", $p0)>;
def OP_AND : Op<(op "&", $p0, $p1)>;
def OP_OR : Op<(op "|", $p0, $p1)>;
def OP_XOR : Op<(op "^", $p0, $p1)>;
def OP_ANDN : Op<(op "&", $p0, (op "~", $p1))>;
def OP_ORN : Op<(op "|", $p0, (op "~", $p1))>;
def OP_CAST : LOp<[(save_temp $promote, $p0),
                   (cast "R", $promote)]>;
def OP_HI : Op<(shuffle $p0, $p0, (highhalf mask0))>;
def OP_LO : Op<(shuffle $p0, $p0, (lowhalf mask0))>;
def OP_CONC : Op<(shuffle $p0, $p1, (add mask0, mask1))>;
def OP_DUP : Op<(dup $p0)>;
def OP_DUP_LN : Op<(call_mangled "splat_lane", $p0, $p1)>;
def OP_SEL : Op<(cast "R", (op "|",
                            (op "&", $p0, (cast $p0, $p1)),
                            (op "&", (op "~", $p0), (cast $p0, $p2))))>;
def OP_REV16 : Op<(shuffle $p0, $p0, (rev 16, mask0))>;
def OP_REV32 : Op<(shuffle $p0, $p0, (rev 32, mask0))>;
def OP_REV64 : Op<(shuffle $p0, $p0, (rev 64, mask0))>;
def OP_XTN : Op<(call "vcombine", $p0, (call "vmovn", $p1))>;
def OP_SQXTUN : Op<(call "vcombine", (cast $p0, "U", $p0),
                    (call "vqmovun", $p1))>;
def OP_QXTN : Op<(call "vcombine", $p0, (call "vqmovn", $p1))>;
def OP_VCVT_NA_HI_F16 : Op<(call "vcombine", $p0, (call "vcvt_f16_f32", $p1))>;
def OP_VCVT_NA_HI_F32 : Op<(call "vcombine", $p0, (call "vcvt_f32_f64", $p1))>;
def OP_VCVT_EX_HI_F32 : Op<(call "vcvt_f32_f16", (call "vget_high", $p0))>;
def OP_VCVT_EX_HI_F64 : Op<(call "vcvt_f64_f32", (call "vget_high", $p0))>;
def OP_VCVTX_HI : Op<(call "vcombine", $p0, (call "vcvtx_f32", $p1))>;
def OP_REINT : Op<(cast "R", $p0)>;
def OP_ADDHNHi : Op<(call "vcombine", $p0, (call "vaddhn", $p1, $p2))>;
def OP_RADDHNHi : Op<(call "vcombine", $p0, (call "vraddhn", $p1, $p2))>;
def OP_SUBHNHi : Op<(call "vcombine", $p0, (call "vsubhn", $p1, $p2))>;
def OP_RSUBHNHi : Op<(call "vcombine", $p0, (call "vrsubhn", $p1, $p2))>;
def OP_ABDL : Op<(cast "R", (call "vmovl", (cast $p0, "U",
                                            (call "vabd", $p0, $p1))))>;
def OP_ABDLHi : Op<(call "vabdl", (call "vget_high", $p0),
                                  (call "vget_high", $p1))>;
def OP_ABA : Op<(op "+", $p0, (call "vabd", $p1, $p2))>;
def OP_ABAL : Op<(op "+", $p0, (call "vabdl", $p1, $p2))>;
def OP_ABALHi : Op<(call "vabal", $p0, (call "vget_high", $p1),
                                       (call "vget_high", $p2))>;
def OP_QDMULLHi : Op<(call "vqdmull", (call "vget_high", $p0),
                                      (call "vget_high", $p1))>;
def OP_QDMULLHi_N : Op<(call "vqdmull_n", (call "vget_high", $p0), $p1)>;
def OP_QDMLALHi : Op<(call "vqdmlal", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLALHi_N : Op<(call "vqdmlal_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_QDMLSLHi : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1),
                                           (call "vget_high", $p2))>;
def OP_QDMLSLHi_N : Op<(call "vqdmlsl_n", $p0, (call "vget_high", $p1), $p2)>;
def OP_DIV : Op<(op "/", $p0, $p1)>;
def OP_LONG_HI : Op<(cast "R", (call (name_replace "_high_", "_"),
                                (call "vget_high", $p0), $p1))>;
def OP_NARROW_HI : Op<(cast "R", (call "vcombine",
                                  (cast "R", "H", $p0),
                                  (cast "R", "H",
                                   (call (name_replace "_high_", "_"),
                                    $p1, $p2))))>;
def OP_MOVL_HI : LOp<[(save_temp $a1, (call "vget_high", $p0)),
                      (cast "R",
                       (call "vshll_n", $a1, (literal "int32_t", "0")))]>;
def OP_COPY_LN : Op<(call "vset_lane", (call "vget_lane", $p2, $p3), $p0, $p1)>;
def OP_SCALAR_MUL_LN : Op<(op "*", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_MULX_LN : Op<(call "vmulx", $p0, (call "vget_lane", $p1, $p2))>;
def OP_SCALAR_VMULX_LN : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                              (literal "int32_t", "0"))),
                              (save_temp $y, (call "vget_lane", $p1, $p2)),
                              (save_temp $z, (call "vmulx", $x, $y)),
                              (call "vset_lane", $z, $p0, $p2)]>;
def OP_SCALAR_VMULX_LNQ : LOp<[(save_temp $x, (call "vget_lane", $p0,
                                               (literal "int32_t", "0"))),
                               (save_temp $y, (call "vget_lane", $p1, $p2)),
                               (save_temp $z, (call "vmulx", $x, $y)),
                               (call "vset_lane", $z, $p0, (literal "int32_t",
                                                            "0"))]>;
class ScalarMulOp<string opname> :
  Op<(call opname, $p0, (call "vget_lane", $p1, $p2))>;

def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;

def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
                               (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
                               (call "vget_lane", $p2, $p3))>;

def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
                                (call "vget_lane",
                                 (bitcast "int16x4_t", $p0), $p1))>;
def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t",
                                 (call "vget_lane",
                                  (bitcast "int16x8_t", $p0), $p1))>;
def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t",
                                (call "vset_lane",
                                 (bitcast "int16_t", $p0),
                                 (bitcast "int16x4_t", $p1), $p2))>;
def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
                                 (call "vset_lane",
                                  (bitcast "int16_t", $p0),
                                  (bitcast "int16x8_t", $p1), $p2))>;

def OP_DOT_LN
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;
def OP_DOT_LNQ
    : Op<(call "vdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "32", $p2), $p3)))>;

def OP_FMLAL_LN : Op<(call "vfmlal_low", $p0, $p1,
                      (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN : Op<(call "vfmlsl_low", $p0, $p1,
                      (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLAL_LN_Hi : Op<(call "vfmlal_high", $p0, $p1,
                         (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;
def OP_FMLSL_LN_Hi : Op<(call "vfmlsl_high", $p0, $p1,
                         (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_USDOT_LN
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)))>;
def OP_USDOT_LNQ
    : Op<(call "vusdot", $p0, $p1,
          (cast "8", "S", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)))>;

// sudot splats the second vector and then calls vusdot
def OP_SUDOT_LN
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x2_t", $p2), $p3)), $p1)>;
def OP_SUDOT_LNQ
    : Op<(call "vusdot", $p0,
          (cast "8", "U", (call_mangled "splat_lane", (bitcast "int32x4_t", $p2), $p3)), $p1)>;

def OP_BFDOT_LN
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x2_t", $p2), $p3)))>;

def OP_BFDOT_LNQ
    : Op<(call "vbfdot", $p0, $p1,
          (bitcast $p1, (call_mangled "splat_lane", (bitcast "float32x4_t", $p2), $p3)))>;

def OP_BFMLALB_LN
    : Op<(call "vbfmlalb", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_BFMLALT_LN
    : Op<(call "vbfmlalt", $p0, $p1,
          (dup_typed $p1, (call "vget_lane", $p2, $p3)))>;

def OP_VCVT_F32_BF16
    : Op<(bitcast "R",
          (call "vshll_n", (bitcast "int16x4_t", $p0),
           (literal "int32_t", "16")))>;
def OP_VCVT_F32_BF16_LO
    : Op<(call "vcvt_f32_bf16", (call "vget_low", $p0))>;
def OP_VCVT_F32_BF16_HI
    : Op<(call "vcvt_f32_bf16", (call "vget_high", $p0))>;

def OP_VCVT_BF16_F32_LO_A64
    : Op<(call "__a64_vcvtq_low_bf16", $p0)>;
def OP_VCVT_BF16_F32_A64
    : Op<(call "vget_low", (call "__a64_vcvtq_low_bf16", $p0))>;

def OP_VCVT_BF16_F32_A32
    : Op<(call "__a32_vcvt_bf16", $p0)>;

def OP_VCVT_BF16_F32_LO_A32
    : Op<(call "vcombine", (cast "bfloat16x4_t", (literal "uint64_t", "0ULL")),
          (call "__a32_vcvt_bf16", $p0))>;
def OP_VCVT_BF16_F32_HI_A32
    : Op<(call "vcombine", (call "__a32_vcvt_bf16", $p1),
          (call "vget_low", $p0))>;

def OP_CVT_F32_BF16
    : Op<(bitcast "R", (op "<<", (bitcast "int32_t", $p0),
                        (literal "int32_t", "16")))>;

//===----------------------------------------------------------------------===//
// Auxiliary Instructions
//===----------------------------------------------------------------------===//

// Splat operation - performs a range-checked splat over a vector
def SPLAT : WInst<"splat_lane", ".(!q)I",
                  "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl">;
def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
                   "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
  let isLaneQ = 1;
}
let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
  def SPLAT_BF : WInst<"splat_lane", ".(!q)I", "bQb">;
  def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
    let isLaneQ = 1;
  }
}

//===----------------------------------------------------------------------===//
// Intrinsics
//===----------------------------------------------------------------------===//

////////////////////////////////////////////////////////////////////////////////
// E.3.1 Addition
def VADD : IOpInst<"vadd", "...",
                   "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
def VADDL : SOpInst<"vaddl", "(>Q)..", "csiUcUsUi", OP_ADDL>;
def VADDW : SOpInst<"vaddw", "(>Q)(>Q).", "csiUcUsUi", OP_ADDW>;
def VHADD : SInst<"vhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VRHADD : SInst<"vrhadd", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VQADD : SInst<"vqadd", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VADDHN : IInst<"vaddhn", "<QQ", "silUsUiUl">;
def VRADDHN : IInst<"vraddhn", "<QQ", "silUsUiUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.2 Multiplication
def VMUL : IOpInst<"vmul", "...", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
def VMULP : SInst<"vmul", "...", "PcQPc">;
def VMLA : IOpInst<"vmla", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
def VMLAL : SOpInst<"vmlal", "(>Q)(>Q)..", "csiUcUsUi", OP_MLAL>;
def VMLS : IOpInst<"vmls", "....", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
def VMLSL : SOpInst<"vmlsl", "(>Q)(>Q)..", "csiUcUsUi", OP_MLSL>;
def VQDMULH : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;

let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}

def VQDMLAL : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
def VQDMLSL : SInst<"vqdmlsl", "(>Q)(>Q)..", "si">;
def VMULL : SInst<"vmull", "(>Q)..", "csiUcUsUiPc">;
def VQDMULL : SInst<"vqdmull", "(>Q)..", "si">;

////////////////////////////////////////////////////////////////////////////////
// E.3.3 Subtraction
def VSUB : IOpInst<"vsub", "...",
                   "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
def VSUBL : SOpInst<"vsubl", "(>Q)..", "csiUcUsUi", OP_SUBL>;
def VSUBW : SOpInst<"vsubw", "(>Q)(>Q).", "csiUcUsUi", OP_SUBW>;
def VQSUB : SInst<"vqsub", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VHSUB : SInst<"vhsub", "...", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VSUBHN : IInst<"vsubhn", "<QQ", "silUsUiUl">;
def VRSUBHN : IInst<"vrsubhn", "<QQ", "silUsUiUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.4 Comparison
def VCEQ : IOpInst<"vceq", "U..", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
def VCGE : SOpInst<"vcge", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
let InstName = "vcge" in
def VCLE : SOpInst<"vcle", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
def VCGT : SOpInst<"vcgt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
let InstName = "vcgt" in
def VCLT : SOpInst<"vclt", "U..", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
let InstName = "vacge" in {
def VCAGE : IInst<"vcage", "U..", "fQf">;
def VCALE : IInst<"vcale", "U..", "fQf">;
}
let InstName = "vacgt" in {
def VCAGT : IInst<"vcagt", "U..", "fQf">;
def VCALT : IInst<"vcalt", "U..", "fQf">;
}
def VTST : WInst<"vtst", "U..", "csiUcUsUiPcPsQcQsQiQUcQUsQUiQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.5 Absolute Difference
def VABD : SInst<"vabd", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VABDL : SOpInst<"vabdl", "(>Q)..", "csiUcUsUi", OP_ABDL>;
def VABA : SOpInst<"vaba", "....", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
def VABAL : SOpInst<"vabal", "(>Q)(>Q)..", "csiUcUsUi", OP_ABAL>;

////////////////////////////////////////////////////////////////////////////////
// E.3.6 Max/Min
def VMAX : SInst<"vmax", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
def VMIN : SInst<"vmin", "...", "csiUcUsUifQcQsQiQUcQUsQUiQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.7 Pairwise Addition
def VPADD : IInst<"vpadd", "...", "csiUcUsUif">;
def VPADDL : SInst<"vpaddl", ">.", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VPADAL : SInst<"vpadal", ">>.", "csiUcUsUiQcQsQiQUcQUsQUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.8-9 Folding Max/Min
def VPMAX : SInst<"vpmax", "...", "csiUcUsUif">;
def VPMIN : SInst<"vpmin", "...", "csiUcUsUif">;

////////////////////////////////////////////////////////////////////////////////
// E.3.10 Reciprocal/Sqrt
def VRECPS : IInst<"vrecps", "...", "fQf">;
def VRSQRTS : IInst<"vrsqrts", "...", "fQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.11 Shifts by signed variable
def VSHL : SInst<"vshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL : SInst<"vqshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHL : SInst<"vrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQRSHL : SInst<"vqrshl", "..S", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.12 Shifts by constant
let isShift = 1 in {
def VSHR_N : SInst<"vshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSHL_N : IInst<"vshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSHR_N : SInst<"vrshr_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VSRA_N : SInst<"vsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VRSRA_N : SInst<"vrsra_n", "...I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHL_N : SInst<"vqshl_n", "..I", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
def VQSHLU_N : SInst<"vqshlu_n", "U.I", "csilQcQsQiQl">;
def VSHRN_N : IInst<"vshrn_n", "<QI", "silUsUiUl">;
def VQSHRUN_N : SInst<"vqshrun_n", "(<U)QI", "sil">;
def VQRSHRUN_N : SInst<"vqrshrun_n", "(<U)QI", "sil">;
def VQSHRN_N : SInst<"vqshrn_n", "<QI", "silUsUiUl">;
def VRSHRN_N : IInst<"vrshrn_n", "<QI", "silUsUiUl">;
def VQRSHRN_N : SInst<"vqrshrn_n", "<QI", "silUsUiUl">;
def VSHLL_N : SInst<"vshll_n", "(>Q).I", "csiUcUsUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.13 Shifts with insert
def VSRI_N : WInst<"vsri_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
def VSLI_N : WInst<"vsli_n", "...I",
                   "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.14 Loads and stores of a single vector
def VLD1 : WInst<"vld1", ".(c*!)",
                 "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_X2 : WInst<"vld1_x2", "2(c*!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X3 : WInst<"vld1_x3", "3(c*!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_X4 : WInst<"vld1_x4", "4(c*!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VLD1_LANE : WInst<"vld1_lane", ".(c*!).I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD1_DUP : WInst<"vld1_dup", ".(c*!)",
                     "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1 : WInst<"vst1", "v*(.!)",
                 "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST1_X2 : WInst<"vst1_x2", "v*(2!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X3 : WInst<"vst1_x3", "v*(3!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_X4 : WInst<"vst1_x4", "v*(4!)",
                    "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">;
def VST1_LANE : WInst<"vst1_lane", "v*(.!)I",
                      "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">;
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD1_F16 : WInst<"vld1", ".(c*!)", "hQh">;
def VLD1_X2_F16 : WInst<"vld1_x2", "2(c*!)", "hQh">;
def VLD1_X3_F16 : WInst<"vld1_x3", "3(c*!)", "hQh">;
def VLD1_X4_F16 : WInst<"vld1_x4", "4(c*!)", "hQh">;
def VLD1_LANE_F16 : WInst<"vld1_lane", ".(c*!).I", "hQh">;
def VLD1_DUP_F16 : WInst<"vld1_dup", ".(c*!)", "hQh">;
def VST1_F16 : WInst<"vst1", "v*(.!)", "hQh">;
def VST1_X2_F16 : WInst<"vst1_x2", "v*(2!)", "hQh">;
def VST1_X3_F16 : WInst<"vst1_x3", "v*(3!)", "hQh">;
def VST1_X4_F16 : WInst<"vst1_x4", "v*(4!)", "hQh">;
def VST1_LANE_F16 : WInst<"vst1_lane", "v*(.!)I", "hQh">;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.15 Loads and stores of an N-element structure
def VLD2 : WInst<"vld2", "2(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD3 : WInst<"vld3", "3(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD4 : WInst<"vld4", "4(c*!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VLD2_DUP : WInst<"vld2_dup", "2(c*!)",
                     "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD3_DUP : WInst<"vld3_dup", "3(c*!)",
                     "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD4_DUP : WInst<"vld4_dup", "4(c*!)",
                     "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">;
def VLD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VLD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST2 : WInst<"vst2", "v*(2!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST3 : WInst<"vst3", "v*(3!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST4 : WInst<"vst4", "v*(4!)", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">;
def VST2_LANE : WInst<"vst2_lane", "v*(2!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST3_LANE : WInst<"vst3_lane", "v*(3!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
def VST4_LANE : WInst<"vst4_lane", "v*(4!)I", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">;
let ArchGuard = "(__ARM_FP & 2)" in {
def VLD2_F16 : WInst<"vld2", "2(c*!)", "hQh">;
def VLD3_F16 : WInst<"vld3", "3(c*!)", "hQh">;
def VLD4_F16 : WInst<"vld4", "4(c*!)", "hQh">;
def VLD2_DUP_F16 : WInst<"vld2_dup", "2(c*!)", "hQh">;
def VLD3_DUP_F16 : WInst<"vld3_dup", "3(c*!)", "hQh">;
def VLD4_DUP_F16 : WInst<"vld4_dup", "4(c*!)", "hQh">;
def VLD2_LANE_F16 : WInst<"vld2_lane", "2(c*!)2I", "hQh">;
def VLD3_LANE_F16 : WInst<"vld3_lane", "3(c*!)3I", "hQh">;
def VLD4_LANE_F16 : WInst<"vld4_lane", "4(c*!)4I", "hQh">;
def VST2_F16 : WInst<"vst2", "v*(2!)", "hQh">;
def VST3_F16 : WInst<"vst3", "v*(3!)", "hQh">;
def VST4_F16 : WInst<"vst4", "v*(4!)", "hQh">;
def VST2_LANE_F16 : WInst<"vst2_lane", "v*(2!)I", "hQh">;
def VST3_LANE_F16 : WInst<"vst3_lane", "v*(3!)I", "hQh">;
def VST4_LANE_F16 : WInst<"vst4_lane", "v*(4!)I", "hQh">;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.16 Extract lanes from a vector
let InstName = "vmov" in
def VGET_LANE : IInst<"vget_lane", "1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;

////////////////////////////////////////////////////////////////////////////////
// E.3.17 Set lanes within a vector
let InstName = "vmov" in
def VSET_LANE : IInst<"vset_lane", ".1.I",
                      "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
////////////////////////////////////////////////////////////////////////////////
// E.3.18 Initialize a vector from bit pattern
def VCREATE : NoTestOpInst<"vcreate", ".(IU>)", "csihfUcUsUiUlPcPsl", OP_CAST> {
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.19 Set all lanes to same value
let InstName = "vmov" in {
def VDUP_N : WOpInst<"vdup_n", ".1",
                     "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                     OP_DUP>;
def VMOV_N : WOpInst<"vmov_n", ".1",
                     "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
                     OP_DUP>;
}
let InstName = "" in
def VDUP_LANE: WOpInst<"vdup_lane", ".qI",
                       "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",
                       OP_DUP_LN>;

////////////////////////////////////////////////////////////////////////////////
// E.3.20 Combining vectors
def VCOMBINE : NoTestOpInst<"vcombine", "Q..", "csilhfUcUsUiUlPcPs", OP_CONC>;

////////////////////////////////////////////////////////////////////////////////
// E.3.21 Splitting vectors
// Note that the ARM NEON Reference 2.0 mistakenly document the vget_high_f16()
// and vget_low_f16() intrinsics as AArch64-only. We (and GCC) support all
// versions of these intrinsics in both AArch32 and AArch64 architectures. See
// D45668 for more details.
let InstName = "vmov" in {
def VGET_HIGH : NoTestOpInst<"vget_high", ".Q", "csilhfUcUsUiUlPcPs", OP_HI>;
def VGET_LOW : NoTestOpInst<"vget_low", ".Q", "csilhfUcUsUiUlPcPs", OP_LO>;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.22 Converting vectors

let ArchGuard = "(__ARM_FP & 2)" in {
  def VCVT_F16_F32 : SInst<"vcvt_f16_f32", "(<q)(.!)", "Hf">;
  def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "(>Q)(.!)", "h">;
}

def VCVT_S32 : SInst<"vcvt_s32", "S.", "fQf">;
def VCVT_U32 : SInst<"vcvt_u32", "U.", "fQf">;
def VCVT_F32 : SInst<"vcvt_f32", "F(.!)", "iUiQiQUi">;
let isVCVT_N = 1 in {
def VCVT_N_S32 : SInst<"vcvt_n_s32", "S.I", "fQf">;
def VCVT_N_U32 : SInst<"vcvt_n_u32", "U.I", "fQf">;
def VCVT_N_F32 : SInst<"vcvt_n_f32", "F(.!)I", "iUiQiQUi">;
}

def VMOVN : IInst<"vmovn", "<Q", "silUsUiUl">;
def VMOVL : SInst<"vmovl", "(>Q).", "csiUcUsUi">;
def VQMOVN : SInst<"vqmovn", "<Q", "silUsUiUl">;
def VQMOVUN : SInst<"vqmovun", "(<U)Q", "sil">;

////////////////////////////////////////////////////////////////////////////////
// E.3.23-24 Table lookup, Extended table lookup
let InstName = "vtbl" in {
def VTBL1 : WInst<"vtbl1", "..p", "UccPc">;
def VTBL2 : WInst<"vtbl2", ".2p", "UccPc">;
def VTBL3 : WInst<"vtbl3", ".3p", "UccPc">;
def VTBL4 : WInst<"vtbl4", ".4p", "UccPc">;
}
let InstName = "vtbx" in {
def VTBX1 : WInst<"vtbx1", "...p", "UccPc">;
def VTBX2 : WInst<"vtbx2", "..2p", "UccPc">;
def VTBX3 : WInst<"vtbx3", "..3p", "UccPc">;
def VTBX4 : WInst<"vtbx4", "..4p", "UccPc">;
}

////////////////////////////////////////////////////////////////////////////////
// E.3.25 Operations with a scalar value
def VMLA_LANE : IOpInst<"vmla_lane", "...qI",
                        "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
def VMLAL_LANE : SOpInst<"vmlal_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLAL_LN>;
def VQDMLAL_LANE : SOpInst<"vqdmlal_lane", "(>Q)(>Q)..I", "si", OP_QDMLAL_LN>;
def VMLS_LANE : IOpInst<"vmls_lane", "...qI",
                        "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
def VMLSL_LANE : SOpInst<"vmlsl_lane", "(>Q)(>Q)..I", "siUsUi", OP_MLSL_LN>;
def VQDMLSL_LANE : SOpInst<"vqdmlsl_lane", "(>Q)(>Q)..I", "si", OP_QDMLSL_LN>;
def VMUL_N : IOpInst<"vmul_n", "..1", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
def VMUL_LANE : IOpInst<"vmul_lane", "..qI",
                        "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
def VMULL_N : SOpInst<"vmull_n", "(>Q).1", "siUsUi", OP_MULL_N>;
def VMULL_LANE : SOpInst<"vmull_lane", "(>Q)..I", "siUsUi", OP_MULL_LN>;
def VQDMULL_N : SOpInst<"vqdmull_n", "(>Q).1", "si", OP_QDMULL_N>;
def VQDMULL_LANE : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
def VQDMULH_N : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
def VQRDMULH_N : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;

let ArchGuard = "!defined(__aarch64__)" in {
def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
}
let ArchGuard = "defined(__aarch64__)" in {
def A64_VQDMULH_LANE : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
}

let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "...qI", "siQsQi", OP_QRDMLSH_LN>;
}

def VMLA_N : IOpInst<"vmla_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
def VMLAL_N : SOpInst<"vmlal_n", "(>Q)(>Q).1", "siUsUi", OP_MLAL_N>;
def VQDMLAL_N : SOpInst<"vqdmlal_n", "(>Q)(>Q).1", "si", OP_QDMLAL_N>;
def VMLS_N : IOpInst<"vmls_n", "...1", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
def VMLSL_N : SOpInst<"vmlsl_n", "(>Q)(>Q).1", "siUsUi", OP_MLSL_N>;
def VQDMLSL_N : SOpInst<"vqdmlsl_n", "(>Q)(>Q).1", "si", OP_QDMLSL_N>;
////////////////////////////////////////////////////////////////////////////////
// E.3.26 Vector Extract
def VEXT : WInst<"vext", "...I",
                 "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">;

////////////////////////////////////////////////////////////////////////////////
// E.3.27 Reverse vector elements
def VREV64 : WOpInst<"vrev64", "..", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
                     OP_REV64>;
def VREV32 : WOpInst<"vrev32", "..", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
def VREV16 : WOpInst<"vrev16", "..", "cUcPcQcQUcQPc", OP_REV16>;

////////////////////////////////////////////////////////////////////////////////
// E.3.28 Other single operand arithmetic
def VABS : SInst<"vabs", "..", "csifQcQsQiQf">;
def VQABS : SInst<"vqabs", "..", "csiQcQsQi">;
def VNEG : SOpInst<"vneg", "..", "csifQcQsQiQf", OP_NEG>;
def VQNEG : SInst<"vqneg", "..", "csiQcQsQi">;
def VCLS : SInst<"vcls", "S.", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VCLZ : IInst<"vclz", "..", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VCNT : WInst<"vcnt", "..", "UccPcQUcQcQPc">;
def VRECPE : SInst<"vrecpe", "..", "fUiQfQUi">;
def VRSQRTE : SInst<"vrsqrte", "..", "fUiQfQUi">;

////////////////////////////////////////////////////////////////////////////////
// E.3.29 Logical operations
def VMVN : LOpInst<"vmvn", "..", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
def VAND : LOpInst<"vand", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
def VORR : LOpInst<"vorr", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
def VEOR : LOpInst<"veor", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
def VBIC : LOpInst<"vbic", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
def VORN : LOpInst<"vorn", "...", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
let isHiddenLInst = 1 in
def VBSL : SInst<"vbsl", ".U..",
                 "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
// E.3.30 Transposition operations
def VTRN : WInst<"vtrn", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;

////////////////////////////////////////////////////////////////////////////////

// Generates every vreinterpret between all pairs drawn from one type list.
class REINTERPRET_CROSS_SELF<string Types> :
  NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
  let CartesianProductWith = Types;
}

// Generates every vreinterpret between the two type lists, in both directions.
multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
  def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
    let CartesianProductWith = TypesB;
  }
  def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
    let CartesianProductWith = TypesA;
  }
}

// E.3.31 Vector reinterpret cast operations
def VREINTERPRET
  : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
  let ArchGuard = "!defined(__aarch64__)";
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// Vector fused multiply-add operations

let ArchGuard = "defined(__ARM_FEATURE_FMA)" in {
  def VFMA : SInst<"vfma", "....", "fQf">;
  def VFMS : SOpInst<"vfms", "....", "fQf", OP_FMLS>;
  def FMLA_N_F32 : SOpInst<"vfma_n", "...1", "fQf", OP_FMLA_N>;
}

////////////////////////////////////////////////////////////////////////////////
// fp16 vector operations
def SCALAR_HALF_GET_LANE : IOpInst<"vget_lane", "1.I", "h", OP_SCALAR_HALF_GET_LN>;
def SCALAR_HALF_SET_LANE : IOpInst<"vset_lane", ".1.I", "h", OP_SCALAR_HALF_SET_LN>;
def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "1.I", "Qh", OP_SCALAR_HALF_GET_LNQ>;
def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", ".1.I", "Qh", OP_SCALAR_HALF_SET_LNQ>;

////////////////////////////////////////////////////////////////////////////////
// Non poly128_t vaddp for Arm and AArch64
// TODO: poly128_t not implemented on arm32
def VADDP : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;

////////////////////////////////////////////////////////////////////////////////
// AArch64 Intrinsics

let ArchGuard = "defined(__aarch64__)" in {

////////////////////////////////////////////////////////////////////////////////
// Load/Store
def LD1 : WInst<"vld1", ".(c*!)", "dQdPlQPl">;
def LD2 : WInst<"vld2", "2(c*!)", "QUlQldQdPlQPl">;
def LD3 : WInst<"vld3", "3(c*!)", "QUlQldQdPlQPl">;
def LD4 : WInst<"vld4", "4(c*!)", "QUlQldQdPlQPl">;
def ST1 : WInst<"vst1", "v*(.!)", "dQdPlQPl">;
def ST2 : WInst<"vst2", "v*(2!)", "QUlQldQdPlQPl">;
def ST3 : WInst<"vst3", "v*(3!)", "QUlQldQdPlQPl">;
def ST4 : WInst<"vst4", "v*(4!)", "QUlQldQdPlQPl">;

def LD1_X2 : WInst<"vld1_x2", "2(c*!)", "dQdPlQPl">;
def LD1_X3 : WInst<"vld1_x3", "3(c*!)", "dQdPlQPl">;
def LD1_X4 : WInst<"vld1_x4", "4(c*!)", "dQdPlQPl">;

def ST1_X2 : WInst<"vst1_x2", "v*(2!)", "dQdPlQPl">;
def ST1_X3 : WInst<"vst1_x3", "v*(3!)", "dQdPlQPl">;
def ST1_X4 : WInst<"vst1_x4", "v*(4!)", "dQdPlQPl">;

def LD1_LANE : WInst<"vld1_lane", ".(c*!).I", "dQdPlQPl">;
def LD2_LANE : WInst<"vld2_lane", "2(c*!)2I", "lUlQcQUcQPcQlQUldQdPlQPl">;
def LD3_LANE : WInst<"vld3_lane", "3(c*!)3I", "lUlQcQUcQPcQlQUldQdPlQPl">;
def LD4_LANE : WInst<"vld4_lane", "4(c*!)4I", "lUlQcQUcQPcQlQUldQdPlQPl">;
def ST1_LANE : WInst<"vst1_lane", "v*(.!)I", "dQdPlQPl">;
def ST2_LANE : WInst<"vst2_lane", "v*(2!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
def ST3_LANE : WInst<"vst3_lane", "v*(3!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;
def ST4_LANE : WInst<"vst4_lane", "v*(4!)I", "lUlQcQUcQPcQlQUldQdPlQPl">;

def LD1_DUP : WInst<"vld1_dup", ".(c*!)", "dQdPlQPl">;
def LD2_DUP : WInst<"vld2_dup", "2(c*!)", "dQdPlQPl">;
def LD3_DUP : WInst<"vld3_dup", "3(c*!)", "dQdPlQPl">;
def LD4_DUP : WInst<"vld4_dup", "4(c*!)", "dQdPlQPl">;

def VLDRQ : WInst<"vldrq", "1(c*!)", "Pk">;
def VSTRQ : WInst<"vstrq", "v*(1!)", "Pk">;

////////////////////////////////////////////////////////////////////////////////
// Addition
def ADD : IOpInst<"vadd", "...", "dQd", OP_ADD>;

////////////////////////////////////////////////////////////////////////////////
// Subtraction
def SUB : IOpInst<"vsub", "...", "dQd", OP_SUB>;

////////////////////////////////////////////////////////////////////////////////
// Multiplication
def MUL : IOpInst<"vmul", "...", "dQd", OP_MUL>;
def MLA : IOpInst<"vmla", "....", "dQd", OP_MLA>;
def MLS : IOpInst<"vmls", "....", "dQd", OP_MLS>;

////////////////////////////////////////////////////////////////////////////////
// Multiplication Extended
def MULX : SInst<"vmulx", "...", "fdQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Division
def FDIV : IOpInst<"vdiv", "...", "fdQfQd", OP_DIV>;

////////////////////////////////////////////////////////////////////////////////
// Vector fused multiply-add operations
def FMLA : SInst<"vfma", "....", "dQd">;
def FMLS : SOpInst<"vfms", "....", "dQd", OP_FMLS>;

////////////////////////////////////////////////////////////////////////////////
// MUL, MLA, MLS, FMA, FMS definitions with scalar argument
def VMUL_N_A64 : IOpInst<"vmul_n", "..1", "Qd", OP_MUL_N>;

def FMLA_N : SOpInst<"vfma_n", "...1", "dQd", OP_FMLA_N>;
def FMLS_N : SOpInst<"vfms_n", "...1", "fdQfQd", OP_FMLS_N>;

////////////////////////////////////////////////////////////////////////////////
// Logical operations
def BSL : SInst<"vbsl", ".U..", "dPlQdQPl">;

////////////////////////////////////////////////////////////////////////////////
// Absolute Difference
def ABD : SInst<"vabd", "...", "dQd">;

////////////////////////////////////////////////////////////////////////////////
// saturating absolute/negate
def ABS  : SInst<"vabs", "..", "dQdlQl">;
def QABS : SInst<"vqabs", "..", "lQl">;
def NEG  : SOpInst<"vneg", "..", "dlQdQl", OP_NEG>;
def QNEG : SInst<"vqneg", "..", "lQl">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Accumulated of Unsigned Value
def SUQADD : SInst<"vuqadd", "..U", "csilQcQsQiQl">;

////////////////////////////////////////////////////////////////////////////////
// Unsigned Saturating Accumulated of Signed Value
def USQADD : SInst<"vsqadd", "..S", "UcUsUiUlQUcQUsQUiQUl">;

////////////////////////////////////////////////////////////////////////////////
// Reciprocal/Sqrt
def FRECPS  : IInst<"vrecps", "...", "dQd">;
def FRSQRTS : IInst<"vrsqrts", "...", "dQd">;
def FRECPE  : SInst<"vrecpe", "..", "dQd">;
def FRSQRTE : SInst<"vrsqrte", "..", "dQd">;
def FSQRT   : SInst<"vsqrt", "..", "fdQfQd">;

////////////////////////////////////////////////////////////////////////////////
// bitwise reverse
def RBIT : IInst<"vrbit", "..", "cUcPcQcQUcQPc">;

////////////////////////////////////////////////////////////////////////////////
// Integer extract and narrow to high
def XTN2 : SOpInst<"vmovn_high", "(<Q)<Q", "silUsUiUl", OP_XTN>;

////////////////////////////////////////////////////////////////////////////////
// Signed integer saturating extract and unsigned narrow to high
def SQXTUN2 : SOpInst<"vqmovun_high", "(<U)(<Uq).", "HsHiHl", OP_SQXTUN>;

////////////////////////////////////////////////////////////////////////////////
// Integer saturating extract and narrow to high
def QXTN2 : SOpInst<"vqmovn_high", "(<Q)<Q", "silUsUiUl", OP_QXTN>;

////////////////////////////////////////////////////////////////////////////////
// Converting vectors

def VCVT_F32_F64 : SInst<"vcvt_f32_f64", "(<q).", "Qd">;
def VCVT_F64_F32 : SInst<"vcvt_f64_f32", "(>Q).", "f">;

def VCVT_S64 : SInst<"vcvt_s64", "S.", "dQd">;
def VCVT_U64 : SInst<"vcvt_u64", "U.", "dQd">;
def VCVT_F64 : SInst<"vcvt_f64", "F(.!)", "lUlQlQUl">;

def VCVT_HIGH_F16_F32 : SOpInst<"vcvt_high_f16", "<(<q!)Q", "Hf", OP_VCVT_NA_HI_F16>;
def VCVT_HIGH_F32_F16 : SOpInst<"vcvt_high_f32", "(>Q)(Q!)", "h", OP_VCVT_EX_HI_F32>;
def VCVT_HIGH_F32_F64 : SOpInst<"vcvt_high_f32", "(<Q)(F<!)Q", "d", OP_VCVT_NA_HI_F32>;
def VCVT_HIGH_F64_F32 : SOpInst<"vcvt_high_f64", "(>Q)(Q!)", "f", OP_VCVT_EX_HI_F64>;

def VCVTX_F32_F64      : SInst<"vcvtx_f32", "(F<)(Q!)", "d">;
def VCVTX_HIGH_F32_F64 : SOpInst<"vcvtx_high_f32", "(<Q)(F<!)Q", "d", OP_VCVTX_HI>;

////////////////////////////////////////////////////////////////////////////////
// Comparison
def FCAGE : IInst<"vcage", "U..", "dQd">;
def FCAGT : IInst<"vcagt", "U..", "dQd">;
def FCALE : IInst<"vcale", "U..", "dQd">;
def FCALT : IInst<"vcalt", "U..", "dQd">;
def CMTST : WInst<"vtst", "U..", "lUlPlQlQUlQPl">;
def CFMEQ : SOpInst<"vceq", "U..", "lUldQdQlQUlPlQPl", OP_EQ>;
def CFMGE : SOpInst<"vcge", "U..", "lUldQdQlQUl", OP_GE>;
def CFMLE : SOpInst<"vcle", "U..", "lUldQdQlQUl", OP_LE>;
def CFMGT : SOpInst<"vcgt", "U..", "lUldQdQlQUl", OP_GT>;
def CFMLT : SOpInst<"vclt", "U..", "lUldQdQlQUl", OP_LT>;

// Compare against zero.
def CMEQ : SInst<"vceqz", "U.",
                 "csilfUcUsUiUlPcPlQcQsQiQlQfQUcQUsQUiQUlQPcdQdQPl">;
def CMGE : SInst<"vcgez", "U.", "csilfdQcQsQiQlQfQd">;
def CMLE : SInst<"vclez", "U.", "csilfdQcQsQiQlQfQd">;
def CMGT : SInst<"vcgtz", "U.", "csilfdQcQsQiQlQfQd">;
def CMLT : SInst<"vcltz", "U.", "csilfdQcQsQiQlQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Max/Min Integer
def MAX : SInst<"vmax", "...", "dQd">;
def MIN : SInst<"vmin", "...", "dQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise Max/Min
def MAXP : SInst<"vpmax", "...", "QcQsQiQUcQUsQUiQfQd">;
def MINP : SInst<"vpmin", "...", "QcQsQiQUcQUsQUiQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise MaxNum/MinNum Floating Point
def FMAXNMP : SInst<"vpmaxnm", "...", "fQfQd">;
def FMINNMP : SInst<"vpminnm", "...", "fQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Pairwise Addition
def ADDP : IInst<"vpadd", "...", "QcQsQiQlQUcQUsQUiQUlQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Shifts by constant
let isShift = 1 in {
// Left shift long high
def SHLL_HIGH_N : SOpInst<"vshll_high_n", ">.I", "HcHsHiHUcHUsHUi",
                          OP_LONG_HI>;

////////////////////////////////////////////////////////////////////////////////
def SRI_N : WInst<"vsri_n", "...I", "PlQPl">;
def SLI_N : WInst<"vsli_n", "...I", "PlQPl">;

// Right shift narrow high
def SHRN_HIGH_N    : IOpInst<"vshrn_high_n", "<(<q).I",
                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
def QSHRUN_HIGH_N  : SOpInst<"vqshrun_high_n", "<(<q).I",
                             "HsHiHl", OP_NARROW_HI>;
def RSHRN_HIGH_N   : IOpInst<"vrshrn_high_n", "<(<q).I",
                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
def QRSHRUN_HIGH_N : SOpInst<"vqrshrun_high_n", "<(<q).I",
                             "HsHiHl", OP_NARROW_HI>;
def QSHRN_HIGH_N   : SOpInst<"vqshrn_high_n", "<(<q).I",
                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
def QRSHRN_HIGH_N  : SOpInst<"vqrshrn_high_n", "<(<q).I",
                             "HsHiHlHUsHUiHUl", OP_NARROW_HI>;
}

////////////////////////////////////////////////////////////////////////////////
// Converting vectors
def VMOVL_HIGH : SOpInst<"vmovl_high", ">.", "HcHsHiHUcHUsHUi", OP_MOVL_HI>;

let isVCVT_N = 1 in {
def CVTF_N_F64   : SInst<"vcvt_n_f64", "F(.!)I", "lUlQlQUl">;
def FCVTZS_N_S64 : SInst<"vcvt_n_s64", "S.I", "dQd">;
def FCVTZS_N_U64 : SInst<"vcvt_n_u64", "U.I", "dQd">;
}

////////////////////////////////////////////////////////////////////////////////
// 3VDiff class using high 64-bit in operands
def VADDL_HIGH : SOpInst<"vaddl_high", "(>Q)QQ", "csiUcUsUi", OP_ADDLHi>;
def VADDW_HIGH : SOpInst<"vaddw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_ADDWHi>;
def VSUBL_HIGH : SOpInst<"vsubl_high", "(>Q)QQ", "csiUcUsUi", OP_SUBLHi>;
def VSUBW_HIGH : SOpInst<"vsubw_high", "(>Q)(>Q)Q", "csiUcUsUi", OP_SUBWHi>;

def VABDL_HIGH : SOpInst<"vabdl_high", "(>Q)QQ", "csiUcUsUi", OP_ABDLHi>;
def VABAL_HIGH : SOpInst<"vabal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_ABALHi>;

def VMULL_HIGH   : SOpInst<"vmull_high", "(>Q)QQ", "csiUcUsUiPc", OP_MULLHi>;
def VMULL_HIGH_N : SOpInst<"vmull_high_n", "(>Q)Q1", "siUsUi", OP_MULLHi_N>;
def VMLAL_HIGH   : SOpInst<"vmlal_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLALHi>;
def VMLAL_HIGH_N : SOpInst<"vmlal_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLALHi_N>;
def VMLSL_HIGH   : SOpInst<"vmlsl_high", "(>Q)(>Q)QQ", "csiUcUsUi", OP_MLSLHi>;
def VMLSL_HIGH_N : SOpInst<"vmlsl_high_n", "(>Q)(>Q)Q1", "siUsUi", OP_MLSLHi_N>;

def VADDHN_HIGH  : SOpInst<"vaddhn_high", "(<Q)<QQ", "silUsUiUl", OP_ADDHNHi>;
def VRADDHN_HIGH : SOpInst<"vraddhn_high", "(<Q)<QQ", "silUsUiUl", OP_RADDHNHi>;
def VSUBHN_HIGH  : SOpInst<"vsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_SUBHNHi>;
def VRSUBHN_HIGH : SOpInst<"vrsubhn_high", "(<Q)<QQ", "silUsUiUl", OP_RSUBHNHi>;

def VQDMULL_HIGH   : SOpInst<"vqdmull_high", "(>Q)QQ", "si", OP_QDMULLHi>;
def VQDMULL_HIGH_N : SOpInst<"vqdmull_high_n", "(>Q)Q1", "si", OP_QDMULLHi_N>;
def VQDMLAL_HIGH   : SOpInst<"vqdmlal_high", "(>Q)(>Q)QQ", "si", OP_QDMLALHi>;
def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLALHi_N>;
def VQDMLSL_HIGH   : SOpInst<"vqdmlsl_high", "(>Q)(>Q)QQ", "si", OP_QDMLSLHi>;
def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLSLHi_N>;
def VMULL_P64      : SInst<"vmull", "(1>)11", "Pl">;
def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;


////////////////////////////////////////////////////////////////////////////////
// Extract or insert element from vector
def GET_LANE : IInst<"vget_lane", "1.I", "dQdPlQPl">;
def SET_LANE : IInst<"vset_lane", ".1.I", "dQdPlQPl">;
def COPY_LANE : IOpInst<"vcopy_lane", "..I.I",
                        "csilUcUsUiUlPcPsPlfd", OP_COPY_LN>;
def COPYQ_LANE : IOpInst<"vcopy_lane", "..IqI",
                         "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>;
def COPY_LANEQ : IOpInst<"vcopy_laneq", "..IQI",
                         "csilPcPsPlUcUsUiUlfd", OP_COPY_LN> {
  let isLaneQ = 1;
}
def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
                          "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN> {
  let isLaneQ = 1;
}

////////////////////////////////////////////////////////////////////////////////
// Set all lanes to same value
def VDUP_LANE1 : WOpInst<"vdup_lane", ".qI", "hdQhQdPlQPl", OP_DUP_LN>;
def VDUP_LANE2 : WOpInst<"vdup_laneq", ".QI",
                         "csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
                         OP_DUP_LN> {
  let isLaneQ = 1;
}
def DUP_N : WOpInst<"vdup_n", ".1", "dQdPlQPl", OP_DUP>;
def MOV_N : WOpInst<"vmov_n", ".1", "dQdPlQPl", OP_DUP>;

////////////////////////////////////////////////////////////////////////////////
def COMBINE : NoTestOpInst<"vcombine", "Q..", "dPl", OP_CONC>;

////////////////////////////////////////////////////////////////////////////////
// Initialize a vector from bit pattern
def CREATE : NoTestOpInst<"vcreate", ".(IU>)", "dPl", OP_CAST> {
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////

def VMLA_LANEQ : IOpInst<"vmla_laneq", "...QI",
                         "siUsUifQsQiQUsQUiQf", OP_MLA_LN> {
  let isLaneQ = 1;
}
def VMLS_LANEQ : IOpInst<"vmls_laneq", "...QI",
                         "siUsUifQsQiQUsQUiQf", OP_MLS_LN> {
  let isLaneQ = 1;
}

def VFMA_LANE : IInst<"vfma_lane", "...qI", "fdQfQd">;
def VFMA_LANEQ : IInst<"vfma_laneq", "...QI", "fdQfQd"> {
  let isLaneQ = 1;
}
def VFMS_LANE : IOpInst<"vfms_lane", "...qI", "fdQfQd", OP_FMS_LN>;
def VFMS_LANEQ : IOpInst<"vfms_laneq", "...QI", "fdQfQd", OP_FMS_LNQ> {
  let isLaneQ = 1;
}

def VMLAL_LANEQ : SOpInst<"vmlal_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLAL_LN> {
  let isLaneQ = 1;
}
def VMLAL_HIGH_LANE : SOpInst<"vmlal_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
                              OP_MLALHi_LN>;
def VMLAL_HIGH_LANEQ : SOpInst<"vmlal_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
                               OP_MLALHi_LN> {
  let isLaneQ = 1;
}
def VMLSL_LANEQ : SOpInst<"vmlsl_laneq", "(>Q)(>Q).QI", "siUsUi", OP_MLSL_LN> {
  let isLaneQ = 1;
}
def VMLSL_HIGH_LANE : SOpInst<"vmlsl_high_lane", "(>Q)(>Q)Q.I", "siUsUi",
                              OP_MLSLHi_LN>;
def VMLSL_HIGH_LANEQ : SOpInst<"vmlsl_high_laneq", "(>Q)(>Q)QQI", "siUsUi",
                               OP_MLSLHi_LN> {
  let isLaneQ = 1;
}

def VQDMLAL_LANEQ : SOpInst<"vqdmlal_laneq", "(>Q)(>Q).QI", "si", OP_QDMLAL_LN> {
  let isLaneQ = 1;
}
def VQDMLAL_HIGH_LANE : SOpInst<"vqdmlal_high_lane", "(>Q)(>Q)Q.I", "si",
                                OP_QDMLALHi_LN>;
def VQDMLAL_HIGH_LANEQ : SOpInst<"vqdmlal_high_laneq", "(>Q)(>Q)QQI", "si",
                                 OP_QDMLALHi_LN> {
  let isLaneQ = 1;
}
def VQDMLSL_LANEQ : SOpInst<"vqdmlsl_laneq", "(>Q)(>Q).QI", "si", OP_QDMLSL_LN> {
  let isLaneQ = 1;
}
def VQDMLSL_HIGH_LANE : SOpInst<"vqdmlsl_high_lane", "(>Q)(>Q)Q.I", "si",
                                OP_QDMLSLHi_LN>;
def VQDMLSL_HIGH_LANEQ : SOpInst<"vqdmlsl_high_laneq", "(>Q)(>Q)QQI", "si",
                                 OP_QDMLSLHi_LN> {
  let isLaneQ = 1;
}

// Newly add double parameter for vmul_lane in aarch64
// Note: d type is handled by SCALAR_VMUL_LANE
def VMUL_LANE_A64 : IOpInst<"vmul_lane", "..qI", "Qd", OP_MUL_LN>;

// Note: d type is handled by SCALAR_VMUL_LANEQ
def VMUL_LANEQ : IOpInst<"vmul_laneq", "..QI",
                         "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN> {
  let isLaneQ = 1;
}
def VMULL_LANEQ : SOpInst<"vmull_laneq", "(>Q).QI", "siUsUi", OP_MULL_LN> {
  let isLaneQ = 1;
}
def VMULL_HIGH_LANE : SOpInst<"vmull_high_lane", "(>Q)Q.I", "siUsUi",
                              OP_MULLHi_LN>;
def VMULL_HIGH_LANEQ : SOpInst<"vmull_high_laneq", "(>Q)QQI", "siUsUi",
                               OP_MULLHi_LN> {
  let isLaneQ = 1;
}

def VQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(>Q).QI", "si", OP_QDMULL_LN> {
  let isLaneQ = 1;
}
def VQDMULL_HIGH_LANE : SOpInst<"vqdmull_high_lane", "(>Q)Q.I", "si",
                                OP_QDMULLHi_LN>;
def VQDMULL_HIGH_LANEQ : SOpInst<"vqdmull_high_laneq", "(>Q)QQI", "si",
                                 OP_QDMULLHi_LN> {
  let isLaneQ = 1;
}

let isLaneQ = 1 in {
def VQDMULH_LANEQ  : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
}
let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
  let isLaneQ = 1;
}
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
  let isLaneQ = 1;
}
}

// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
// Note: d type is implemented by SCALAR_VMULX_LANEQ
def VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "fQfQd", OP_MULX_LN> {
  let isLaneQ = 1;
}

////////////////////////////////////////////////////////////////////////////////
// Across vectors class
def VADDLV  : SInst<"vaddlv", "(1>).", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VMAXV   : SInst<"vmaxv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
def VMINV   : SInst<"vminv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">;
def VADDV   : SInst<"vaddv", "1.", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">;
def FMAXNMV : SInst<"vmaxnmv", "1.", "fQfQd">;
def FMINNMV : SInst<"vminnmv", "1.", "fQfQd">;

////////////////////////////////////////////////////////////////////////////////
// Newly added Vector Extract for f64
def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl">;

////////////////////////////////////////////////////////////////////////////////
// Crypto
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES)" in {
def AESE : SInst<"vaese", "...", "QUc">;
def AESD : SInst<"vaesd", "...", "QUc">;
def AESMC : SInst<"vaesmc", "..", "QUc">;
def AESIMC : SInst<"vaesimc", "..", "QUc">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2)" in {
def SHA1H : SInst<"vsha1h", "11", "Ui">;
def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;

def SHA1C : SInst<"vsha1c", "..1.", "QUi">;
def SHA1P : SInst<"vsha1p", "..1.", "QUi">;
def SHA1M : SInst<"vsha1m", "..1.", "QUi">;
def SHA1SU0 : SInst<"vsha1su0", "....", "QUi">;
def SHA256H : SInst<"vsha256h", "....", "QUi">;
def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)" in {
def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
def RAX1 : SInst<"vrax1", "...", "QUl">;

let isVXAR = 1 in {
def XAR : SInst<"vxar", "...I", "QUl">;
}
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)" in {

def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
// Record renamed from SHA512su1 to match the all-caps convention of its
// siblings (SHA512SU0/SHA512H/SHA512H2); generated intrinsic is unchanged.
def SHA512SU1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
def SM3TT2A : SInst<"vsm3tt2a", "....I", "QUi">;
def SM3TT2B : SInst<"vsm3tt2b", "....I", "QUi">;
def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}

////////////////////////////////////////////////////////////////////////////////
// poly128_t vadd for AArch64 only see VADDP for the rest
def VADDP_Q : WInst<"vadd", "...", "QPk">;

////////////////////////////////////////////////////////////////////////////////
// Float -> Int conversions with explicit rounding mode

let ArchGuard = "__ARM_ARCH >= 8" in {
def FCVTNS_S32 : SInst<"vcvtn_s32", "S.", "fQf">;
def FCVTNU_S32 : SInst<"vcvtn_u32", "U.", "fQf">;
def FCVTPS_S32 : SInst<"vcvtp_s32", "S.", "fQf">;
def FCVTPU_S32 : SInst<"vcvtp_u32", "U.", "fQf">;
def FCVTMS_S32 : SInst<"vcvtm_s32", "S.", "fQf">;
def FCVTMU_S32 : SInst<"vcvtm_u32", "U.", "fQf">;
def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)" in {
def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
def FCVTPU_S64 : SInst<"vcvtp_u64", "U.", "dQd">;
def FCVTMS_S64 : SInst<"vcvtm_s64", "S.", "dQd">;
def FCVTMU_S64 : SInst<"vcvtm_u64", "U.", "dQd">;
def FCVTAS_S64 : SInst<"vcvta_s64", "S.", "dQd">;
def FCVTAU_S64 : SInst<"vcvta_u64", "U.", "dQd">;
}

////////////////////////////////////////////////////////////////////////////////
// Round to Integral

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S32 : SInst<"vrndn", "..", "fQf">;
def FRINTA_S32 : SInst<"vrnda", "..", "fQf">;
def FRINTP_S32 : SInst<"vrndp", "..", "fQf">;
def FRINTM_S32 : SInst<"vrndm", "..", "fQf">;
def FRINTX_S32 : SInst<"vrndx", "..", "fQf">;
def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
def FRINTM_S64 : SInst<"vrndm", "..", "dQd">;
def FRINTX_S64 : SInst<"vrndx", "..", "dQd">;
def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)" in {
def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;
}

////////////////////////////////////////////////////////////////////////////////
// MaxNum/MinNum Floating Point

let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
}

let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
}

////////////////////////////////////////////////////////////////////////////////
// Permutation
def VTRN1 : SOpInst<"vtrn1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN1>;
def VZIP1 : SOpInst<"vzip1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP1>;
def VUZP1 : SOpInst<"vuzp1", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP1>;
def VTRN2 : SOpInst<"vtrn2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN2>;
def VZIP2 : SOpInst<"vzip2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP2>;
def VUZP2 : SOpInst<"vuzp2", "...",
                    "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP2>;

////////////////////////////////////////////////////////////////////////////////
// Table lookup
let InstName = "vtbl" in {
def VQTBL1_A64 : WInst<"vqtbl1", ".QU", "UccPcQUcQcQPc">;
def VQTBL2_A64 : WInst<"vqtbl2", ".(2Q)U", "UccPcQUcQcQPc">;
def VQTBL3_A64 : WInst<"vqtbl3", ".(3Q)U", "UccPcQUcQcQPc">;
def VQTBL4_A64 : WInst<"vqtbl4", ".(4Q)U", "UccPcQUcQcQPc">;
}
let InstName = "vtbx" in {
def VQTBX1_A64 : WInst<"vqtbx1", "..QU", "UccPcQUcQcQPc">;
def VQTBX2_A64 : WInst<"vqtbx2", "..(2Q)U", "UccPcQUcQcQPc">;
def VQTBX3_A64 : WInst<"vqtbx3", "..(3Q)U", "UccPcQUcQcQPc">;
def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
}

////////////////////////////////////////////////////////////////////////////////
// Vector reinterpret cast operations

// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
def VVREINTERPRET
  : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)";
  let BigEndianSafe = 1;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Intrinsics
// Scalar Arithmetic

// Scalar Addition
def SCALAR_ADD : SInst<"vadd", "111", "SlSUl">;
// Scalar Saturating Add
def SCALAR_QADD : SInst<"vqadd", "111", "ScSsSiSlSUcSUsSUiSUl">;

// Scalar Subtraction
def SCALAR_SUB : SInst<"vsub", "111", "SlSUl">;
// Scalar Saturating Sub
def SCALAR_QSUB : SInst<"vqsub", "111", "ScSsSiSlSUcSUsSUiSUl">;

let InstName = "vmov" in {
def VGET_HIGH_A64 : NoTestOpInst<"vget_high", ".Q", "dPl", OP_HI>;
def VGET_LOW_A64 : NoTestOpInst<"vget_low", ".Q", "dPl", OP_LO>;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Shift
// Scalar Shift Left
def SCALAR_SHL : SInst<"vshl", "11(S1)", "SlSUl">;
// Scalar Saturating Shift Left
def SCALAR_QSHL : SInst<"vqshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Saturating Rounding Shift Left
def SCALAR_QRSHL : SInst<"vqrshl", "11(S1)", "ScSsSiSlSUcSUsSUiSUl">;
// Scalar Shift Rounding Left
def SCALAR_RSHL : SInst<"vrshl", "11(S1)", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Shift (Immediate)
let isScalarShift = 1 in {
// Signed/Unsigned Shift Right (Immediate)
def SCALAR_SSHR_N : SInst<"vshr_n", "11I", "SlSUl">;
// Signed/Unsigned Rounding Shift Right (Immediate)
def SCALAR_SRSHR_N : SInst<"vrshr_n", "11I", "SlSUl">;

// Signed/Unsigned Shift Right and Accumulate (Immediate)
def SCALAR_SSRA_N : SInst<"vsra_n", "111I", "SlSUl">;
// Signed/Unsigned Rounding Shift Right and Accumulate (Immediate)
def SCALAR_SRSRA_N : SInst<"vrsra_n", "111I", "SlSUl">;

// Shift Left (Immediate)
def SCALAR_SHL_N : SInst<"vshl_n", "11I", "SlSUl">;
// Signed/Unsigned Saturating Shift Left (Immediate)
def SCALAR_SQSHL_N : SInst<"vqshl_n", "11I", "ScSsSiSlSUcSUsSUiSUl">;
// Signed Saturating Shift Left Unsigned (Immediate)
def SCALAR_SQSHLU_N : SInst<"vqshlu_n", "11I", "ScSsSiSl">;

// Shift Right And Insert (Immediate)
def SCALAR_SRI_N : SInst<"vsri_n", "111I", "SlSUl">;
// Shift Left And Insert (Immediate)
def SCALAR_SLI_N : SInst<"vsli_n", "111I", "SlSUl">;

let isScalarNarrowShift = 1 in {
  // Signed/Unsigned Saturating Shift Right Narrow (Immediate)
  def SCALAR_SQSHRN_N : SInst<"vqshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
  // Signed/Unsigned Saturating Rounded Shift Right Narrow (Immediate)
  def SCALAR_SQRSHRN_N : SInst<"vqrshrn_n", "(1<)1I", "SsSiSlSUsSUiSUl">;
  // Signed Saturating Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQSHRUN_N : SInst<"vqshrun_n", "(1<)1I", "SsSiSl">;
  // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
  def SCALAR_SQRSHRUN_N : SInst<"vqrshrun_n", "(1<)1I", "SsSiSl">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed/Unsigned Fixed-point Convert To Floating-Point (Immediate)
def SCALAR_SCVTF_N_F32 : SInst<"vcvt_n_f32", "(1F)(1!)I", "SiSUi">;
def SCALAR_SCVTF_N_F64 : SInst<"vcvt_n_f64", "(1F)(1!)I", "SlSUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Convert To Signed/Unsigned Fixed-point (Immediate)
def SCALAR_FCVTZS_N_S32 : SInst<"vcvt_n_s32", "(1S)1I", "Sf">;
def SCALAR_FCVTZU_N_U32 : SInst<"vcvt_n_u32", "(1U)1I", "Sf">;
def SCALAR_FCVTZS_N_S64 : SInst<"vcvt_n_s64", "(1S)1I", "Sd">;
def SCALAR_FCVTZU_N_U64 : SInst<"vcvt_n_u64", "(1U)1I", "Sd">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Round to Integral
let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def SCALAR_FRINTN_S32 : SInst<"vrndn", "11", "Sf">;
}

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Pairwise Addition (Scalar and Floating Point)
def SCALAR_ADDP : SInst<"vpadd", "1.", "SfSHlSHdSHUl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise Max/Min
def SCALAR_FMAXP : SInst<"vpmax", "1.", "SfSQd">;

def SCALAR_FMINP : SInst<"vpmin", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Reduce Floating Point Pairwise maxNum/minNum
def SCALAR_FMAXNMP : SInst<"vpmaxnm", "1.", "SfSQd">;
def SCALAR_FMINNMP : SInst<"vpminnm", "1.", "SfSQd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Doubling Multiply Half High
def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;

let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
}
1407 1408//////////////////////////////////////////////////////////////////////////////// 1409// Scalar Floating-point Multiply Extended 1410def SCALAR_FMULX : IInst<"vmulx", "111", "SfSd">; 1411 1412//////////////////////////////////////////////////////////////////////////////// 1413// Scalar Floating-point Reciprocal Step 1414def SCALAR_FRECPS : IInst<"vrecps", "111", "SfSd">; 1415 1416//////////////////////////////////////////////////////////////////////////////// 1417// Scalar Floating-point Reciprocal Square Root Step 1418def SCALAR_FRSQRTS : IInst<"vrsqrts", "111", "SfSd">; 1419 1420//////////////////////////////////////////////////////////////////////////////// 1421// Scalar Signed Integer Convert To Floating-point 1422def SCALAR_SCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "Si">; 1423def SCALAR_SCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "Sl">; 1424 1425//////////////////////////////////////////////////////////////////////////////// 1426// Scalar Unsigned Integer Convert To Floating-point 1427def SCALAR_UCVTFS : SInst<"vcvt_f32", "(1F)(1!)", "SUi">; 1428def SCALAR_UCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "SUl">; 1429 1430//////////////////////////////////////////////////////////////////////////////// 1431// Scalar Floating-point Converts 1432def SCALAR_FCVTXN : IInst<"vcvtx_f32", "(1F<)(1!)", "Sd">; 1433def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "(1S)1", "Sf">; 1434def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "(1U)1", "Sf">; 1435def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "(1S)1", "Sd">; 1436def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "(1U)1", "Sd">; 1437def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "(1S)1", "Sf">; 1438def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "(1U)1", "Sf">; 1439def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "(1S)1", "Sd">; 1440def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "(1U)1", "Sd">; 1441def SCALAR_FCVTASS : SInst<"vcvta_s32", "(1S)1", "Sf">; 1442def SCALAR_FCVTAUS : SInst<"vcvta_u32", "(1U)1", "Sf">; 1443def SCALAR_FCVTASD : SInst<"vcvta_s64", "(1S)1", "Sd">; 1444def SCALAR_FCVTAUD 
: SInst<"vcvta_u64", "(1U)1", "Sd">; 1445def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "(1S)1", "Sf">; 1446def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "(1U)1", "Sf">; 1447def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "(1S)1", "Sd">; 1448def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "(1U)1", "Sd">; 1449def SCALAR_FCVTZSS : SInst<"vcvt_s32", "(1S)1", "Sf">; 1450def SCALAR_FCVTZUS : SInst<"vcvt_u32", "(1U)1", "Sf">; 1451def SCALAR_FCVTZSD : SInst<"vcvt_s64", "(1S)1", "Sd">; 1452def SCALAR_FCVTZUD : SInst<"vcvt_u64", "(1U)1", "Sd">; 1453 1454//////////////////////////////////////////////////////////////////////////////// 1455// Scalar Floating-point Reciprocal Estimate 1456def SCALAR_FRECPE : IInst<"vrecpe", "11", "SfSd">; 1457 1458//////////////////////////////////////////////////////////////////////////////// 1459// Scalar Floating-point Reciprocal Exponent 1460def SCALAR_FRECPX : IInst<"vrecpx", "11", "SfSd">; 1461 1462//////////////////////////////////////////////////////////////////////////////// 1463// Scalar Floating-point Reciprocal Square Root Estimate 1464def SCALAR_FRSQRTE : IInst<"vrsqrte", "11", "SfSd">; 1465 1466//////////////////////////////////////////////////////////////////////////////// 1467// Scalar Integer Comparison 1468def SCALAR_CMEQ : SInst<"vceq", "(U1)11", "SlSUl">; 1469def SCALAR_CMEQZ : SInst<"vceqz", "(U1)1", "SlSUl">; 1470def SCALAR_CMGE : SInst<"vcge", "(U1)11", "Sl">; 1471def SCALAR_CMGEZ : SInst<"vcgez", "(U1)1", "Sl">; 1472def SCALAR_CMHS : SInst<"vcge", "(U1)11", "SUl">; 1473def SCALAR_CMLE : SInst<"vcle", "(U1)11", "SlSUl">; 1474def SCALAR_CMLEZ : SInst<"vclez", "(U1)1", "Sl">; 1475def SCALAR_CMLT : SInst<"vclt", "(U1)11", "SlSUl">; 1476def SCALAR_CMLTZ : SInst<"vcltz", "(U1)1", "Sl">; 1477def SCALAR_CMGT : SInst<"vcgt", "(U1)11", "Sl">; 1478def SCALAR_CMGTZ : SInst<"vcgtz", "(U1)1", "Sl">; 1479def SCALAR_CMHI : SInst<"vcgt", "(U1)11", "SUl">; 1480def SCALAR_CMTST : SInst<"vtst", "(U1)11", "SlSUl">; 1481 
////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Comparison
// Results are unsigned masks of the operand width ("(1U)").
def SCALAR_FCMEQ : IInst<"vceq", "(1U)11", "SfSd">;
def SCALAR_FCMEQZ : IInst<"vceqz", "(1U)1", "SfSd">;
def SCALAR_FCMGE : IInst<"vcge", "(1U)11", "SfSd">;
def SCALAR_FCMGEZ : IInst<"vcgez", "(1U)1", "SfSd">;
def SCALAR_FCMGT : IInst<"vcgt", "(1U)11", "SfSd">;
def SCALAR_FCMGTZ : IInst<"vcgtz", "(1U)1", "SfSd">;
def SCALAR_FCMLE : IInst<"vcle", "(1U)11", "SfSd">;
def SCALAR_FCMLEZ : IInst<"vclez", "(1U)1", "SfSd">;
def SCALAR_FCMLT : IInst<"vclt", "(1U)11", "SfSd">;
def SCALAR_FCMLTZ : IInst<"vcltz", "(1U)1", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
def SCALAR_FACGE : IInst<"vcage", "(1U)11", "SfSd">;
def SCALAR_FACLE : IInst<"vcale", "(1U)11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Absolute Compare Mask Greater Than
def SCALAR_FACGT : IInst<"vcagt", "(1U)11", "SfSd">;
def SCALAR_FACLT : IInst<"vcalt", "(1U)11", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Value
def SCALAR_ABS : SInst<"vabs", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Absolute Difference
def SCALAR_ABD : IInst<"vabd", "111", "SfSd">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Absolute Value
def SCALAR_SQABS : SInst<"vqabs", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Negate
def SCALAR_NEG : SInst<"vneg", "11", "Sl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Negate
def SCALAR_SQNEG : SInst<"vqneg", "11", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Accumulated of Unsigned Value
def SCALAR_SUQADD : SInst<"vuqadd", "11(1U)", "ScSsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Accumulated of Signed Value
def SCALAR_USQADD : SInst<"vsqadd", "11(1S)", "SUcSUsSUiSUl">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Add Long
// "(1>)" widens the scalar result/accumulator to double element width.
def SCALAR_SQDMLAL : SInst<"vqdmlal", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply-Subtract Long
def SCALAR_SQDMLSL : SInst<"vqdmlsl", "(1>)(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Doubling Multiply Long
def SCALAR_SQDMULL : SInst<"vqdmull", "(1>)11", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Unsigned Narrow
def SCALAR_SQXTUN : SInst<"vqmovun", "(U1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Signed Saturating Extract Narrow
def SCALAR_SQXTN : SInst<"vqmovn", "(1<)1", "SsSiSl">;

////////////////////////////////////////////////////////////////////////////////
// Scalar Unsigned Saturating Extract Narrow
def SCALAR_UQXTN : SInst<"vqmovn", "(1<)1", "SUsSUiSUl">;

// Scalar Floating Point multiply (scalar, by element)
// "isLaneQ = 1" marks the _laneq form, whose lane index selects from a
// 128-bit ("Q") vector; the trailing "I" is the constant lane-index argument.
def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "11.I", "SfSd", OP_SCALAR_MUL_LN>;
def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "11QI", "SfSd", OP_SCALAR_MUL_LN> {
  let isLaneQ = 1;
}

// Scalar Floating Point multiply extended (scalar, by element)
def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "11.I", "SfSd", OP_SCALAR_MULX_LN>;
def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "11QI", "SfSd", OP_SCALAR_MULX_LN> {
  let isLaneQ = 1;
}

def SCALAR_VMUL_N : IInst<"vmul_n", "..1", "d">;

// VMUL_LANE_A64 d type implemented using scalar mul lane
def SCALAR_VMUL_LANE : IInst<"vmul_lane", "..qI", "d">;

// VMUL_LANEQ d type implemented using scalar mul lane
def SCALAR_VMUL_LANEQ : IInst<"vmul_laneq", "..QI", "d"> {
  let isLaneQ = 1;
}

// VMULX_LANE d type implemented using scalar vmulx_lane
def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "d", OP_SCALAR_VMULX_LN>;

// VMULX_LANEQ d type implemented using scalar vmulx_laneq
def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "..QI", "d", OP_SCALAR_VMULX_LNQ> {
  let isLaneQ = 1;
}

// Scalar Floating Point fused multiply-add (scalar, by element)
def SCALAR_FMLA_LANE : IInst<"vfma_lane", "111.I", "SfSd">;
def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "111QI", "SfSd"> {
  let isLaneQ = 1;
}

// Scalar Floating Point fused multiply-subtract (scalar, by element)
def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "111.I", "SfSd", OP_FMS_LN>;
def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "111QI", "SfSd", OP_FMS_LNQ> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply Long (scalar by element)
def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "(1>)1.I", "SsSi", OP_SCALAR_QDMULL_LN>;
def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "(1>)1QI", "SsSi", OP_SCALAR_QDMULL_LN> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Add Long (scalar by element)
def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Signed Saturating Doubling Multiply-Subtract Long (scalar by element)
def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "(1>)(1>)1.I", "SsSi">;
def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "(1>)(1>)1QI", "SsSi"> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Doubling Multiply Half High (scalar by element)
def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "11.I", "SsSi", OP_SCALAR_QDMULH_LN>;
def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QDMULH_LN> {
  let isLaneQ = 1;
}

// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "11.I", "SsSi", OP_SCALAR_QRDMULH_LN>;
def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_QRDMULH_LN> {
  let isLaneQ = 1;
}

let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN> {
  let isLaneQ = 1;
}

// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_QRDMLSH_LN>;
def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN> {
  let isLaneQ = 1;
}
}

def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs"> {
  let isLaneQ = 1;
}
// Closes the enclosing let-block that begins before this section (opened
// earlier in the file).
}

// ARMv8.2-A FP16 vector intrinsics for A32/A64.
// These FP16 vector intrinsics are available on both A32 and A64 when the
// half-precision vector arithmetic extension is present.
let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {

  // ARMv8.2-A FP16 one-operand vector intrinsics.

  // Comparison (against zero; result is an unsigned mask, "U").
  def CMEQH : SInst<"vceqz", "U.", "hQh">;
  def CMGEH : SInst<"vcgez", "U.", "hQh">;
  def CMGTH : SInst<"vcgtz", "U.", "hQh">;
  def CMLEH : SInst<"vclez", "U.", "hQh">;
  def CMLTH : SInst<"vcltz", "U.", "hQh">;

  // Vector conversion
  def VCVT_F16 : SInst<"vcvt_f16", "F(.!)", "sUsQsQUs">;
  def VCVT_S16 : SInst<"vcvt_s16", "S.", "hQh">;
  def VCVT_U16 : SInst<"vcvt_u16", "U.", "hQh">;
  def VCVTA_S16 : SInst<"vcvta_s16", "S.", "hQh">;
  def VCVTA_U16 : SInst<"vcvta_u16", "U.", "hQh">;
  def VCVTM_S16 : SInst<"vcvtm_s16", "S.", "hQh">;
  def VCVTM_U16 : SInst<"vcvtm_u16", "U.", "hQh">;
  def VCVTN_S16 : SInst<"vcvtn_s16", "S.", "hQh">;
  def VCVTN_U16 : SInst<"vcvtn_u16", "U.", "hQh">;
  def VCVTP_S16 : SInst<"vcvtp_s16", "S.", "hQh">;
  def VCVTP_U16 : SInst<"vcvtp_u16", "U.", "hQh">;

  // Vector rounding — additionally requires the directed-rounding extension.
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
    def FRINTZH : SInst<"vrnd", "..", "hQh">;
    def FRINTNH : SInst<"vrndn", "..", "hQh">;
    def FRINTAH : SInst<"vrnda", "..", "hQh">;
    def FRINTPH : SInst<"vrndp", "..", "hQh">;
    def FRINTMH : SInst<"vrndm", "..", "hQh">;
    def FRINTXH : SInst<"vrndx", "..", "hQh">;
  }

  // Misc.
  def VABSH : SInst<"vabs", "..", "hQh">;
  def VNEGH : SOpInst<"vneg", "..", "hQh", OP_NEG>;
  def VRECPEH : SInst<"vrecpe", "..", "hQh">;
  def FRSQRTEH : SInst<"vrsqrte", "..", "hQh">;

  // ARMv8.2-A FP16 two-operands vector intrinsics.

  // Misc.
  def VADDH : SOpInst<"vadd", "...", "hQh", OP_ADD>;
  def VABDH : SInst<"vabd", "...", "hQh">;
  def VSUBH : SOpInst<"vsub", "...", "hQh", OP_SUB>;

  // Comparison
  // InstName overrides the instruction name used for testing; vcale/vcalt are
  // emitted as vacge/vacgt with swapped operands.
  let InstName = "vacge" in {
    def VCAGEH : SInst<"vcage", "U..", "hQh">;
    def VCALEH : SInst<"vcale", "U..", "hQh">;
  }
  let InstName = "vacgt" in {
    def VCAGTH : SInst<"vcagt", "U..", "hQh">;
    def VCALTH : SInst<"vcalt", "U..", "hQh">;
  }
  def VCEQH : SOpInst<"vceq", "U..", "hQh", OP_EQ>;
  def VCGEH : SOpInst<"vcge", "U..", "hQh", OP_GE>;
  def VCGTH : SOpInst<"vcgt", "U..", "hQh", OP_GT>;
  let InstName = "vcge" in
  def VCLEH : SOpInst<"vcle", "U..", "hQh", OP_LE>;
  let InstName = "vcgt" in
  def VCLTH : SOpInst<"vclt", "U..", "hQh", OP_LT>;

  // Vector conversion (fixed-point; "I" is the constant fraction-bits arg).
  let isVCVT_N = 1 in {
    def VCVT_N_F16 : SInst<"vcvt_n_f16", "F(.!)I", "sUsQsQUs">;
    def VCVT_N_S16 : SInst<"vcvt_n_s16", "S.I", "hQh">;
    def VCVT_N_U16 : SInst<"vcvt_n_u16", "U.I", "hQh">;
  }

  // Max/Min
  def VMAXH : SInst<"vmax", "...", "hQh">;
  def VMINH : SInst<"vmin", "...", "hQh">;
  let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
    def FMAXNMH : SInst<"vmaxnm", "...", "hQh">;
    def FMINNMH : SInst<"vminnm", "...", "hQh">;
  }

  // Multiplication/Division
  def VMULH : SOpInst<"vmul", "...", "hQh", OP_MUL>;

  // Pairwise addition
  def VPADDH : SInst<"vpadd", "...", "h">;

  // Pairwise Max/Min
  def VPMAXH : SInst<"vpmax", "...", "h">;
  def VPMINH : SInst<"vpmin", "...", "h">;

  // Reciprocal/Sqrt
  def VRECPSH : SInst<"vrecps", "...", "hQh">;
  def VRSQRTSH : SInst<"vrsqrts", "...", "hQh">;

  // ARMv8.2-A FP16 three-operands vector intrinsics.

  // Vector fused multiply-add operations
  def VFMAH : SInst<"vfma", "....", "hQh">;
  def VFMSH : SOpInst<"vfms", "....", "hQh", OP_FMLS>;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // Mul lane
  def VMUL_LANEH : IOpInst<"vmul_lane", "..qI", "hQh", OP_MUL_LN>;
  def VMUL_NH : IOpInst<"vmul_n", "..1", "hQh", OP_MUL_N>;

  // Data processing intrinsics - section 5

  // Logical operations
  let isHiddenLInst = 1 in
  def VBSLH : SInst<"vbsl", ".U..", "hQh">;

  // Transposition operations
  def VZIPH : WInst<"vzip", "2..", "hQh">;
  def VUZPH : WInst<"vuzp", "2..", "hQh">;
  def VTRNH : WInst<"vtrn", "2..", "hQh">;


  let ArchGuard = "!defined(__aarch64__)" in {
    // Set all lanes to same value.
    // Already implemented prior to ARMv8.2-A.
    def VMOV_NH : WOpInst<"vmov_n", ".1", "hQh", OP_DUP>;
    def VDUP_NH : WOpInst<"vdup_n", ".1", "hQh", OP_DUP>;
    def VDUP_LANE1H : WOpInst<"vdup_lane", ".qI", "hQh", OP_DUP_LN>;
  }

  // Vector Extract
  def VEXTH : WInst<"vext", "...I", "hQh">;

  // Reverse vector elements
  def VREV64H : WOpInst<"vrev64", "..", "hQh", OP_REV64>;
}

// ARMv8.2-A FP16 vector intrinsics for A64 only.
let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {

  // Vector rounding
  def FRINTIH : SInst<"vrndi", "..", "hQh">;

  // Misc.
  def FSQRTH : SInst<"vsqrt", "..", "hQh">;

  // Multiplication/Division
  def MULXH : SInst<"vmulx", "...", "hQh">;
  def FDIVH : IOpInst<"vdiv", "...", "hQh", OP_DIV>;

  // Pairwise addition (the 128-bit "Qh" form is A64-only; the 64-bit form is
  // defined in the A32/A64 section above).
  def VPADDH1 : SInst<"vpadd", "...", "Qh">;

  // Pairwise Max/Min
  def VPMAXH1 : SInst<"vpmax", "...", "Qh">;
  def VPMINH1 : SInst<"vpmin", "...", "Qh">;

  // Pairwise MaxNum/MinNum
  def FMAXNMPH : SInst<"vpmaxnm", "...", "hQh">;
  def FMINNMPH : SInst<"vpminnm", "...", "hQh">;

  // ARMv8.2-A FP16 lane vector intrinsics.

  // FMA lane
  def VFMA_LANEH : IInst<"vfma_lane", "...qI", "hQh">;
  def VFMA_LANEQH : IInst<"vfma_laneq", "...QI", "hQh"> {
    let isLaneQ = 1;
  }

  // FMA lane with scalar argument
  def FMLA_NH : SOpInst<"vfma_n", "...1", "hQh", OP_FMLA_N>;
  // Scalar floating point fused multiply-add (scalar, by element)
  def SCALAR_FMLA_LANEH : IInst<"vfma_lane", "111.I", "Sh">;
  def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "111QI", "Sh"> {
    let isLaneQ = 1;
  }

  // FMS lane
  def VFMS_LANEH : IOpInst<"vfms_lane", "...qI", "hQh", OP_FMS_LN>;
  def VFMS_LANEQH : IOpInst<"vfms_laneq", "...QI", "hQh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }
  // FMS lane with scalar argument
  def FMLS_NH : SOpInst<"vfms_n", "...1", "hQh", OP_FMLS_N>;
  // Scalar floating point fused multiply-subtract (scalar, by element)
  def SCALAR_FMLS_LANEH : IOpInst<"vfms_lane", "111.I", "Sh", OP_FMS_LN>;
  def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "111QI", "Sh", OP_FMS_LNQ> {
    let isLaneQ = 1;
  }

  // Mul lane
  def VMUL_LANEQH : IOpInst<"vmul_laneq", "..QI", "hQh", OP_MUL_LN> {
    let isLaneQ = 1;
  }
  // Scalar floating point multiply (scalar, by element)
  def SCALAR_FMUL_LANEH : IOpInst<"vmul_lane", "11.I", "Sh", OP_SCALAR_MUL_LN>;
  def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "11QI", "Sh", OP_SCALAR_MUL_LN> {
    let isLaneQ = 1;
  }

  // Mulx lane
  def VMULX_LANEH : IOpInst<"vmulx_lane", "..qI", "hQh", OP_MULX_LN>;
  def VMULX_LANEQH : IOpInst<"vmulx_laneq", "..QI", "hQh", OP_MULX_LN> {
    let isLaneQ = 1;
  }
  def VMULX_NH : IOpInst<"vmulx_n", "..1", "hQh", OP_MULX_N>;
  // Scalar floating point mulx (scalar, by element)
  def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "11.I", "Sh">;
  def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "11QI", "Sh"> {
    let isLaneQ = 1;
  }

  // ARMv8.2-A FP16 reduction vector intrinsics ("1." reduces to a scalar).
  def VMAXVH : SInst<"vmaxv", "1.", "hQh">;
  def VMINVH : SInst<"vminv", "1.", "hQh">;
  def FMAXNMVH : SInst<"vmaxnmv", "1.", "hQh">;
  def FMINNMVH : SInst<"vminnmv", "1.", "hQh">;

  // Permutation
  def VTRN1H : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>;
  def VZIP1H : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>;
  def VUZP1H : SOpInst<"vuzp1", "...", "hQh", OP_UZP1>;
  def VTRN2H : SOpInst<"vtrn2", "...", "hQh", OP_TRN2>;
  def VZIP2H : SOpInst<"vzip2", "...", "hQh", OP_ZIP2>;
  def VUZP2H : SOpInst<"vuzp2", "...", "hQh", OP_UZP2>;

  def SCALAR_VDUP_LANEH : IInst<"vdup_lane", "1.I", "Sh">;
  def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "1QI", "Sh"> {
    let isLaneQ = 1;
  }
}

// v8.2-A dot product instructions.
let ArchGuard = "defined(__ARM_FEATURE_DOTPROD)" in {
  def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
  def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in {
  // Variants indexing into a 128-bit vector are A64 only.
  def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
    let isLaneQ = 1;
  }
}

// v8.2-A FP16 fused multiply-add long instructions.
let ArchGuard = "defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)" in {
  // ">>" widens the fp16 operands to a float32 accumulator/result.
  def VFMLAL_LOW : SInst<"vfmlal_low", ">>..", "hQh">;
  def VFMLSL_LOW : SInst<"vfmlsl_low", ">>..", "hQh">;
  def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
  def VFMLSL_HIGH : SInst<"vfmlsl_high", ">>..", "hQh">;

  def VFMLAL_LANE_LOW : SOpInst<"vfmlal_lane_low", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN>;
  def VFMLSL_LANE_LOW : SOpInst<"vfmlsl_lane_low", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN>;
  def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLAL_LN_Hi>;
  def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "(F>)(F>)F(Fq)I", "hQh", OP_FMLSL_LN_Hi>;

  def VFMLAL_LANEQ_LOW : SOpInst<"vfmlal_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_LOW : SOpInst<"vfmlsl_laneq_low", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN> {
    let isLaneQ = 1;
  }
  def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLAL_LN_Hi> {
    let isLaneQ = 1;
  }
  def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "(F>)(F>)F(FQ)I", "hQh", OP_FMLSL_LN_Hi> {
    let isLaneQ = 1;
  }
}

// v8.6-A int8 matrix-multiply and mixed-sign dot-product instructions.
let ArchGuard = "defined(__ARM_FEATURE_MATMUL_INT8)" in {
  def VMMLA : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
  def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;

  def VUSDOT : SInst<"vusdot", "..(<<U)(<<)", "iQi">;

  def VUSDOT_LANE : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
  def VSUDOT_LANE : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;

  let ArchGuard = "defined(__aarch64__)" in {
    let isLaneQ = 1 in {
      def VUSDOT_LANEQ : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
      def VSUDOT_LANEQ : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
    }
  }
}

let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
  // "B" selects the bfloat16 vector type for the argument position.
  def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
  def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
  def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ> {
    let isLaneQ = 1;
  }

  def VFMMLA_BF : SInst<"vbfmmla", "..BB", "Qf">;

  def VFMLALB_BF : SInst<"vbfmlalb", "..BB", "Qf">;
  def VFMLALT_BF : SInst<"vbfmlalt", "..BB", "Qf">;

  def VFMLALB_LANE_BF : SOpInst<"vbfmlalb_lane", "..B(Bq)I", "Qf", OP_BFMLALB_LN>;
  def VFMLALB_LANEQ_BF : SOpInst<"vbfmlalb_laneq", "..B(BQ)I", "Qf", OP_BFMLALB_LN>;

  def VFMLALT_LANE_BF : SOpInst<"vbfmlalt_lane", "..B(Bq)I", "Qf", OP_BFMLALT_LN>;
  def VFMLALT_LANEQ_BF : SOpInst<"vbfmlalt_laneq", "..B(BQ)I", "Qf", OP_BFMLALT_LN>;
}

// Generates the four rotation variants (0/90/180/270 degrees) of vcmla and
// their by-lane forms for one element type. The lane operand is bitcast to
// lanety (64-bit) / laneqty (128-bit) so a complex (real, imag) pair is
// selected and duplicated as a unit before the multiply-accumulate.
multiclass VCMLA_ROTS<string type, string lanety, string laneqty> {
  foreach ROT = ["", "_rot90", "_rot180", "_rot270" ] in {
    def : SInst<"vcmla" # ROT, "....", type # "Q" # type>;

    // vcmla{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", type, Op<(call "vcmla" # ROT, $p0, $p1,
          (bitcast $p0, (dup_typed lanety , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;

    // vcmlaq{ROT}_lane
    def : SOpInst<"vcmla" # ROT # "_lane", "...qI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
          (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast lanety, $p2), $p3))))>>;

    let isLaneQ = 1 in {
      // vcmla{ROT}_laneq
      def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", type, Op<(call "vcmla" # ROT, $p0, $p1,
            (bitcast $p0, (dup_typed lanety, (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;

      // vcmlaq{ROT}_laneq
      def : SOpInst<"vcmla" # ROT # "_laneq", "...QI", "Q" # type, Op<(call "vcmla" # ROT, $p0, $p1,
            (bitcast $p0, (dup_typed laneqty , (call "vget_lane", (bitcast laneqty, $p2), $p3))))>>;
    }
  }
}

// v8.3-A Vector complex addition intrinsics
let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
  def VCADD_ROT90_FP16 : SInst<"vcadd_rot90", "...", "h">;
  def VCADD_ROT270_FP16 : SInst<"vcadd_rot270", "...", "h">;
  def VCADDQ_ROT90_FP16 : SInst<"vcaddq_rot90", "QQQ", "h">;
  def VCADDQ_ROT270_FP16 : SInst<"vcaddq_rot270", "QQQ", "h">;

  defm VCMLA_FP16 : VCMLA_ROTS<"h", "uint32x2_t", "uint32x4_t">;
}
let ArchGuard = "defined(__ARM_FEATURE_COMPLEX)" in {
  def VCADD_ROT90 : SInst<"vcadd_rot90", "...", "f">;
  def VCADD_ROT270 : SInst<"vcadd_rot270", "...", "f">;
  def VCADDQ_ROT90 : SInst<"vcaddq_rot90", "QQQ", "f">;
  def VCADDQ_ROT270 : SInst<"vcaddq_rot270", "QQQ", "f">;

  defm VCMLA_F32 : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
}
let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)" in {
  def VCADDQ_ROT90_FP64 : SInst<"vcaddq_rot90", "QQQ", "d">;
  def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;

  defm VCMLA_FP64 : VCMLA_ROTS<"d", "uint64x2_t", "uint64x2_t">;
}

// V8.2-A BFloat intrinsics
let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
  def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
    let BigEndianSafe = 1;
  }

  def VDUP_N_BF : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;

  def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
  def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN> {
    let isLaneQ = 1;
  }

  def VCOMBINE_BF : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;

  def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
  def VGET_LOW_BF : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;

  def VGET_LANE_BF : IInst<"vget_lane", "1.I", "bQb">;
  def VSET_LANE_BF : IInst<"vset_lane", ".1.I", "bQb">;
  def SCALAR_VDUP_LANE_BF : IInst<"vdup_lane", "1.I", "Sb">;
  def SCALAR_VDUP_LANEQ_BF : IInst<"vdup_laneq", "1QI", "Sb"> {
    let isLaneQ = 1;
  }

  // BFloat16 loads/stores: "c*" = const pointer argument, "v*" = void-pointer
  // destination, digits 2/3/4 = multi-vector structure types.
  def VLD1_BF : WInst<"vld1", ".(c*!)", "bQb">;
  def VLD2_BF : WInst<"vld2", "2(c*!)", "bQb">;
  def VLD3_BF : WInst<"vld3", "3(c*!)", "bQb">;
  def VLD4_BF : WInst<"vld4", "4(c*!)", "bQb">;

  def VST1_BF : WInst<"vst1", "v*(.!)", "bQb">;
  def VST2_BF : WInst<"vst2", "v*(2!)", "bQb">;
  def VST3_BF : WInst<"vst3", "v*(3!)", "bQb">;
  def VST4_BF : WInst<"vst4", "v*(4!)", "bQb">;

  def VLD1_X2_BF : WInst<"vld1_x2", "2(c*!)", "bQb">;
  def VLD1_X3_BF : WInst<"vld1_x3", "3(c*!)", "bQb">;
  def VLD1_X4_BF : WInst<"vld1_x4", "4(c*!)", "bQb">;

  def VST1_X2_BF : WInst<"vst1_x2", "v*(2!)", "bQb">;
  def VST1_X3_BF : WInst<"vst1_x3", "v*(3!)", "bQb">;
  def VST1_X4_BF : WInst<"vst1_x4", "v*(4!)", "bQb">;

  def VLD1_LANE_BF : WInst<"vld1_lane", ".(c*!).I", "bQb">;
  def VLD2_LANE_BF : WInst<"vld2_lane", "2(c*!)2I", "bQb">;
  def VLD3_LANE_BF : WInst<"vld3_lane", "3(c*!)3I", "bQb">;
  def VLD4_LANE_BF : WInst<"vld4_lane", "4(c*!)4I", "bQb">;
  def VST1_LANE_BF : WInst<"vst1_lane", "v*(.!)I", "bQb">;
  def VST2_LANE_BF : WInst<"vst2_lane", "v*(2!)I", "bQb">;
  def VST3_LANE_BF : WInst<"vst3_lane", "v*(3!)I", "bQb">;
  def VST4_LANE_BF : WInst<"vst4_lane", "v*(4!)I", "bQb">;

  def VLD1_DUP_BF : WInst<"vld1_dup", ".(c*!)", "bQb">;
  def VLD2_DUP_BF : WInst<"vld2_dup", "2(c*!)", "bQb">;
  def VLD3_DUP_BF : WInst<"vld3_dup", "3(c*!)", "bQb">;
  def VLD4_DUP_BF : WInst<"vld4_dup", "4(c*!)", "bQb">;

  def VCVT_F32_BF16 : SOpInst<"vcvt_f32_bf16", "(F>)(Bq!)", "Qb", OP_VCVT_F32_BF16>;
  def VCVT_LOW_F32_BF16 : SOpInst<"vcvt_low_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_LO>;
  def VCVT_HIGH_F32_BF16 : SOpInst<"vcvt_high_f32", "(F>)(BQ!)", "Qb", OP_VCVT_F32_BF16_HI>;

  def SCALAR_CVT_BF16_F32 : SInst<"vcvth_bf16", "(1B)1", "f">;
  def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
}

// A32 float32 -> bfloat16 conversions go through an internal __a32_vcvt_bf16
// helper; the public intrinsics wrap it via the OP_VCVT_BF16_* emitters.
let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)" in {
  def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
  def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
  def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
  def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
}

let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
  def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
  def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
  def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
  def VCVT_BF16_F32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A64>;

  def COPY_LANE_BF16 : IOpInst<"vcopy_lane", "..I.I", "b", OP_COPY_LN>;
  def COPYQ_LANE_BF16 : IOpInst<"vcopy_lane", "..IqI", "Qb", OP_COPY_LN>;
  def COPY_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..IQI", "b", OP_COPY_LN>;
  def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
}

// Reinterpret casts between bfloat16 vectors and every other vector type.
let ArchGuard = "defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)" in {
  let BigEndianSafe = 1 in {
    defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
  }
}

let ArchGuard = "defined(__ARM_FEATURE_BF16) && defined(__aarch64__)" in {
  let BigEndianSafe = 1 in {
    defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
        "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
  }
}