//===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM VFP instruction set.
//
//===----------------------------------------------------------------------===//

def SDT_CMPFP0  : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisVT<1, i32>]>;
def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                       SDTCisSameAs<1, 2>]>;
def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                       SDTCisVT<2, f64>]>;

def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>;

def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
def arm_cmpfp  : SDNode<"ARMISD::CMPFP", SDT_ARMFCmp, [SDNPOutGlue]>;
def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
def arm_fmdrr  : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
def arm_fmrrd  : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>;
def arm_vmovsr : SDNode<"ARMISD::VMOVSR", SDT_VMOVSR>;

def SDT_VMOVhr : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, i32>] >;
def SDT_VMOVrh : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisFP<1>] >;
def arm_vmovhr : SDNode<"ARMISD::VMOVhr", SDT_VMOVhr>;
def arm_vmovrh : SDNode<"ARMISD::VMOVrh", SDT_VMOVrh>;

//===----------------------------------------------------------------------===//
// Operand Definitions.
//

// 8-bit floating-point immediate encodings.
def FPImmOperand : AsmOperandClass {
  let Name = "FPImm";
  let ParserMethod = "parseFPImm";
}

def vfp_f16imm : Operand<f16>,
                 PatLeaf<(f16 fpimm), [{
      return ARM_AM::getFP16Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP16Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f32imm : Operand<f32>,
                 PatLeaf<(f32 fpimm), [{
      return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP32Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def vfp_f64imm : Operand<f64>,
                 PatLeaf<(f64 fpimm), [{
      return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
    }], SDNodeXForm<fpimm, [{
      APFloat InVal = N->getValueAPF();
      uint32_t enc = ARM_AM::getFP64Imm(InVal);
      return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i32);
    }]>> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}

def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 4;
}]>;

def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 2;
}]>;

def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                             (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 4;
}]>;

// The VCVT to/from fixed-point instructions encode the 'fbits' operand
// (the number of fixed bits) differently than it appears in the assembly
// source. It's encoded as "Size - fbits" where Size is the size of the
// fixed-point representation (32 or 16) and fbits is the value appearing
// in the assembly source, an integer in [0,16] or (0,32], depending on size.
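// For example, "vcvt.s32.f32 s0, s0, #1" has Size == 32 and fbits == 1, so
// the value encoded in the instruction is 32 - 1 = 31.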
def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
def fbits32 : Operand<i32> {
  let PrintMethod = "printFBits32";
  let ParserMatchClass = fbits32_asm_operand;
}

def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
def fbits16 : Operand<i32> {
  let PrintMethod = "printFBits16";
  let ParserMatchClass = fbits16_asm_operand;
}

//===----------------------------------------------------------------------===//
// Load / store Instructions.
//

let canFoldAsLoad = 1, isReMaterializable = 1 in {

def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
                 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
                 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;

def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
                 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
                 [(set SPR:$Sd, (alignedload32 addrmode5:$addr))]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VLDRH : AHI5<0b1101, 0b01, (outs HPR:$Sd), (ins addrmode5fp16:$addr),
                 IIC_fpLoad16, "vldr", ".16\t$Sd, $addr",
                 [(set HPR:$Sd, (alignedload16 addrmode5fp16:$addr))]>,
            Requires<[HasFullFP16]>;

} // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'

def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
                 IIC_fpStore64, "vstr", "\t$Dd, $addr",
                 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;

def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
                 IIC_fpStore32, "vstr", "\t$Sd, $addr",
                 [(alignedstore32 SPR:$Sd, addrmode5:$addr)]> {
  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

def VSTRH : AHI5<0b1101, 0b00, (outs), (ins HPR:$Sd, addrmode5fp16:$addr),
                 IIC_fpStore16, "vstr", ".16\t$Sd, $addr",
                 [(alignedstore16 HPR:$Sd, addrmode5fp16:$addr)]>,
            Requires<[HasFullFP16]>;

//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//

multiclass vfp_ldst_mult<string asm, bit L_bit,
                         InstrItinClass itin, InstrItinClass itin_upd> {
  // Double Precision
  def DIA :
    AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 0;       // No writeback
    let Inst{20} = L_bit;
  }
  def DIA_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;
  }
  def DDB_UPD :
    AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10; // Decrement Before
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;
  }

  // Single Precision
  def SIA :
    AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
          IndexModeNone, itin,
          !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 0;       // No writeback
    let Inst{20} = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SIA_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
  def SDB_UPD :
    AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
                               variable_ops),
          IndexModeUpd, itin_upd,
          !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10; // Decrement Before
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;

    // Some single precision VFP instructions may be executed on both NEON and
    // VFP pipelines.
    let D = VFPNeonDomain;
  }
}

let hasSideEffects = 0 in {

let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;

let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpStore_m, IIC_fpStore_mu>;

} // hasSideEffects

def : MnemonicAlias<"vldm", "vldmia">;
def : MnemonicAlias<"vstm", "vstmia">;


//===----------------------------------------------------------------------===//
// Lazy load / store multiple Instructions
//
let mayLoad = 1 in
def VLLDM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  IIC_fpLoad_m, "vlldm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 1;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayLoad = 1;
}

let mayStore = 1 in
def VLSTM : AXSI4<(outs), (ins GPRnopc:$Rn, pred:$p), IndexModeNone,
                  IIC_fpStore_m, "vlstm${p}\t$Rn", "", []>,
            Requires<[HasV8MMainline, Has8MSecExt]> {
  let Inst{24-23} = 0b00;
  let Inst{22} = 0;
  let Inst{21} = 1;
  let Inst{20} = 0;
  let Inst{15-12} = 0;
  let Inst{7-0} = 0;
  let mayStore = 1;
}

def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r), 0>,
      Requires<[HasVFP2]>;
def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r), 0>,
      Requires<[HasVFP2]>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
                         (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
                         (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;

// FLDMX, FSTMX - Load and store multiple unknown precision registers for
// pre-armv6 cores.
// These instructions are deprecated so we don't want them to get selected.
// However, there is no UAL syntax for them, so we keep them around for
// (dis)assembly only.
multiclass vfp_ldstx_mult<string asm, bit L_bit> {
  // Unknown precision
  def XIA :
    AXXI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeNone, !strconcat(asm, "iax${p}\t$Rn, $regs"), "", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 0;       // No writeback
    let Inst{20} = L_bit;
  }
  def XIA_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "iax${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b01; // Increment After
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;
  }
  def XDB_UPD :
    AXXI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
          IndexModeUpd, !strconcat(asm, "dbx${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
    let Inst{24-23} = 0b10; // Decrement Before
    let Inst{21} = 1;       // Writeback
    let Inst{20} = L_bit;
  }
}

defm FLDM : vfp_ldstx_mult<"fldm", 1>;
defm FSTM : vfp_ldstx_mult<"fstm", 0>;

def : VFP2MnemonicAlias<"fldmeax", "fldmdbx">;
def : VFP2MnemonicAlias<"fldmfdx", "fldmiax">;

def : VFP2MnemonicAlias<"fstmeax", "fstmiax">;
def : VFP2MnemonicAlias<"fstmfdx", "fstmdbx">;

//===----------------------------------------------------------------------===//
// FP Binary Operations.
//

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VADDD : ADbI<0b11100, 0b11, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDS : ASbIn<0b11100, 0b11, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VADDH : AHbI<0b11100, 0b11, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vadd", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fadd HPR:$Sn, HPR:$Sm))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VSUBD : ADbI<0b11100, 0b11, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPALU64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPALU32]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VSUBH : AHbI<0b11100, 0b11, 1, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpALU16, "vsub", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fsub HPR:$Sn, HPR:$Sm))]>,
            Sched<[WriteFPALU32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VDIVD : ADbI<0b11101, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPDIV64]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVS : ASbI<0b11101, 0b00, 0, 0,
                 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
                 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VDIVH : AHbI<0b11101, 0b00, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpDIV16, "vdiv", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fdiv HPR:$Sn, HPR:$Sm))]>,
            Sched<[WriteFPDIV32]>;

let TwoOperandAliasConstraint = "$Dn = $Dd" in
def VMULD : ADbI<0b11100, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>,
            Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULS : ASbIn<0b11100, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

let TwoOperandAliasConstraint = "$Sn = $Sd" in
def VMULH : AHbI<0b11100, 0b10, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                 IIC_fpMUL16, "vmul", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fmul HPR:$Sn, HPR:$Sm))]>,
            Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

def VNMULD : ADbI<0b11100, 0b10, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                  IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>,
             Sched<[WriteFPMUL64, ReadFPMUL, ReadFPMUL]>;

def VNMULS : ASbI<0b11100, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                  IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMULH : AHbI<0b11100, 0b10, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                  IIC_fpMUL16, "vnmul", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fneg (fmul HPR:$Sn, HPR:$Sm)))]>,
             Sched<[WriteFPMUL32, ReadFPMUL, ReadFPMUL]>;

multiclass vsel_inst<string op, bits<2> opc, int CC> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "",
      Uses = [CPSR], AddedComplexity = 4 in {
    def H : AHbInp<0b11100, opc, 0,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set HPR:$Sd, (ARMcmov HPR:$Sm, HPR:$Sn, CC))]>,
            Requires<[HasFullFP16]>;

    def S : ASbInp<0b11100, opc, 0,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat("vsel", op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (ARMcmov SPR:$Sm, SPR:$Sn, CC))]>,
            Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11100, opc, 0,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat("vsel", op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (ARMcmov (f64 DPR:$Dm), (f64 DPR:$Dn), CC))]>,
            Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

// The CC constants here match ARMCC::CondCodes.
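// (EQ = 0, VS = 6, GE = 10, GT = 12.)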
defm VSELGT : vsel_inst<"gt", 0b11, 12>;
defm VSELGE : vsel_inst<"ge", 0b10, 10>;
defm VSELEQ : vsel_inst<"eq", 0b00, 0>;
defm VSELVS : vsel_inst<"vs", 0b01, 6>;

multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
  let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
    def H : AHbInp<0b11101, 0b00, opc,
                   (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
                   NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
                   [(set HPR:$Sd, (SD HPR:$Sn, HPR:$Sm))]>,
            Requires<[HasFullFP16]>;

    def S : ASbInp<0b11101, 0b00, opc,
                   (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
                   NoItinerary, !strconcat(op, ".f32\t$Sd, $Sn, $Sm"),
                   [(set SPR:$Sd, (SD SPR:$Sn, SPR:$Sm))]>,
            Requires<[HasFPARMv8]>;

    def D : ADbInp<0b11101, 0b00, opc,
                   (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
                   NoItinerary, !strconcat(op, ".f64\t$Dd, $Dn, $Dm"),
                   [(set DPR:$Dd, (f64 (SD (f64 DPR:$Dn), (f64 DPR:$Dm))))]>,
            Requires<[HasFPARMv8, HasDPVFP]>;
  }
}

defm VMAXNM : vmaxmin_inst<"vmaxnm", 0, fmaxnum>;
defm VMINNM : vmaxmin_inst<"vminnm", 1, fminnum>;

// Match reassociated forms only if not sign dependent rounding.
def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
          (VNMULD DPR:$a, DPR:$b)>,
      Requires<[NoHonorSignDependentRounding,HasDPVFP]>;
def : Pat<(fmul (fneg SPR:$a), SPR:$b),
          (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;

// These are encoded as unary instructions.
let Defs = [FPSCR_NZCV] in {
def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 1))]>;

def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
                  [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 1))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
                  (outs), (ins HPR:$Sd, HPR:$Sm),
                  IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm",
                  [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 1))]>;

def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins DPR:$Dd, DPR:$Dm),
                 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
                 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 0))]>;

def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins SPR:$Sd, SPR:$Sm),
                 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
                 [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 0))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0,
                 (outs), (ins HPR:$Sd, HPR:$Sm),
                 IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm",
                 [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 0))]>;
} // Defs = [FPSCR_NZCV]

//===----------------------------------------------------------------------===//
// FP Unary Operations.
//

def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;

def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fabs SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VABSH : AHuI<0b11101, 0b11, 0b0000, 0b11, 0,
                 (outs SPR:$Sd), (ins SPR:$Sm),
                 IIC_fpUNA16, "vabs", ".f16\t$Sd, $Sm",
                 []>;

let Defs = [FPSCR_NZCV] in {
def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
                   [(arm_cmpfp0 (f64 DPR:$Dd), (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;
}

def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
                   [(arm_cmpfp0 SPR:$Sd, (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
                   (outs), (ins HPR:$Sd),
                   IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0",
                   [(arm_cmpfp0 HPR:$Sd, (i32 1))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;
}

def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins DPR:$Dd),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
                  [(arm_cmpfp0 (f64 DPR:$Dd), (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;
}

def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins SPR:$Sd),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
                  [(arm_cmpfp0 SPR:$Sd, (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0,
                  (outs), (ins HPR:$Sd),
                  IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0",
                  [(arm_cmpfp0 HPR:$Sd, (i32 0))]> {
  let Inst{3-0} = 0b0000;
  let Inst{5} = 0;
}
} // Defs = [FPSCR_NZCV]

def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
                  (outs DPR:$Dd), (ins SPR:$Sm),
                  IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
                  [(set DPR:$Dd, (fpextend SPR:$Sm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22} = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}

// Special case encoding: bits 11-8 is 0b1011.
def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
                   IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
                   [(set SPR:$Sd, (fpround DPR:$Dm))]>,
             Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};

  let Inst{27-23} = 0b11101;
  let Inst{21-16} = 0b110111;
  let Inst{11-8} = 0b1011;
  let Inst{7-6} = 0b11;
  let Inst{4} = 0;

  let Predicates = [HasVFP2, HasDPVFP];
}

// Between half, single and double-precision.
def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FullFP16Pat<(f32 (fpextend HPR:$Sm)),
                  (VCVTBHS (COPY_TO_REGCLASS HPR:$Sm, SPR))>;
def : FP16Pat<(f16_to_fp GPR:$a),
              (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
                  [/* Intentionally left blank, see patterns below */]>,
                  Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def : FullFP16Pat<(f16 (fpround SPR:$Sm)),
                  (COPY_TO_REGCLASS (VCVTBSH SPR:$Sm), HPR)>;
def : FP16Pat<(fp_to_f16 SPR:$a),
              (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>,
                  Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
                  /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
                  [/* For disassembly only; pattern left blank */]>,
                  Requires<[HasFP16]>,
             Sched<[WriteFPCVT]>;

def VCVTBHD : ADuI<0b11101, 0b11, 0b0010, 0b01, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtb", ".f64.f16\t$Dd, $Sm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]>,
                   Sched<[WriteFPCVT]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
}

def : FullFP16Pat<(f64 (fpextend HPR:$Sm)),
                  (VCVTBHD (COPY_TO_REGCLASS HPR:$Sm, SPR))>;
def : FP16Pat<(f64 (f16_to_fp GPR:$a)),
              (VCVTBHD (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VCVTBDH : ADuI<0b11101, 0b11, 0b0011, 0b01, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtb", ".f16.f64\t$Sd, $Dm",
                   [/* Intentionally left blank, see patterns below */]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};
}

def : FullFP16Pat<(f16 (fpround DPR:$Dm)),
                  (COPY_TO_REGCLASS (VCVTBDH DPR:$Dm), HPR)>;
def : FP16Pat<(fp_to_f16 (f64 DPR:$a)),
              (i32 (COPY_TO_REGCLASS (VCVTBDH DPR:$a), GPR))>;

def VCVTTHD : ADuI<0b11101, 0b11, 0b0010, 0b11, 0,
                   (outs DPR:$Dd), (ins SPR:$Sm),
                   NoItinerary, "vcvtt", ".f64.f16\t$Dd, $Sm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
}

def VCVTTDH : ADuI<0b11101, 0b11, 0b0011, 0b11, 0,
                   (outs SPR:$Sd), (ins DPR:$Dm),
                   NoItinerary, "vcvtt", ".f16.f64\t$Sd, $Dm",
                   []>, Requires<[HasFPARMv8, HasDPVFP]> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
}

multiclass vcvt_inst<string opc, bits<2> rm,
                     SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def SH : AHuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def UH : AHuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins HPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f16\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }

    def SS : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def US : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins SPR:$Sm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f32\t$Sd, $Sm"),
                    []>,
                    Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }

    def SD : ASuInp<0b11101, 0b11, 0b1100, 0b11, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".s32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5} = Dm{4};
      let Inst{8} = 1;
    }

    def UD : ASuInp<0b11101, 0b11, 0b1100, 0b01, 0,
                    (outs SPR:$Sd), (ins DPR:$Dm),
                    NoItinerary, !strconcat("vcvt", opc, ".u32.f64\t$Sd, $Dm"),
                    []>,
                    Requires<[HasFPARMv8, HasDPVFP]> {
      bits<5> Dm;

      let Inst{17-16} = rm;

      // Encode instruction operands
      let Inst{3-0} = Dm{3-0};
      let Inst{5} = Dm{4};
      let Inst{8} = 1;
    }
  }

  let Predicates = [HasFPARMv8] in {
    let Predicates = [HasFullFP16] in {
    def : Pat<(i32 (fp_to_sint (node HPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SH") HPR:$a),
                GPR)>;

    def : Pat<(i32 (fp_to_uint (node HPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UH") HPR:$a),
                GPR)>;
    }
    def : Pat<(i32 (fp_to_sint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SS") SPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node SPR:$a))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"US") SPR:$a),
                GPR)>;
  }
  let Predicates = [HasFPARMv8, HasDPVFP] in {
    def : Pat<(i32 (fp_to_sint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"SD") DPR:$a),
                GPR)>;
    def : Pat<(i32 (fp_to_uint (node (f64 DPR:$a)))),
              (COPY_TO_REGCLASS
                (!cast<Instruction>(NAME#"UD") DPR:$a),
                GPR)>;
  }
}

defm VCVTA : vcvt_inst<"a", 0b00, fround>;
defm VCVTN : vcvt_inst<"n", 0b01>;
defm VCVTP : vcvt_inst<"p", 0b10, fceil>;
defm VCVTM : vcvt_inst<"m", 0b11, ffloor>;

def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
                 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;

def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fneg SPR:$Sm))]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNEGH : AHuI<0b11101, 0b11, 0b0001, 0b01, 0,
                 (outs HPR:$Sd), (ins HPR:$Sm),
                 IIC_fpUNA16, "vneg", ".f16\t$Sd, $Sm",
                 [(set HPR:$Sd, (fneg HPR:$Sm))]>;

multiclass vrint_inst_zrx<string opc, bit op, bit op2, SDPatternOperator node> {
  def H : AHuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f16\t$Sd, $Sm",
               []>,
               Requires<[HasFullFP16]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }

  def S : ASuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs SPR:$Sd), (ins SPR:$Sm),
               NoItinerary, !strconcat("vrint", opc), ".f32\t$Sd, $Sm",
               [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
               Requires<[HasFPARMv8]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }
  def D : ADuI<0b11101, 0b11, 0b0110, 0b11, 0,
               (outs DPR:$Dd), (ins DPR:$Dm),
               NoItinerary, !strconcat("vrint", opc), ".f64\t$Dd, $Dm",
               [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
               Requires<[HasFPARMv8, HasDPVFP]> {
    let Inst{7} = op2;
    let Inst{16} = op;
  }

  def : InstAlias<!strconcat("vrint", opc, "$p.f16.f16\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"H") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFullFP16]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm, pred:$p), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, "$p.f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm, pred:$p), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTZ : vrint_inst_zrx<"z", 0, 1, ftrunc>;
defm VRINTR : vrint_inst_zrx<"r", 0, 0, fnearbyint>;
defm VRINTX : vrint_inst_zrx<"x", 1, 0, frint>;

multiclass vrint_inst_anpm<string opc, bits<2> rm,
                           SDPatternOperator node = null_frag> {
  let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
    def H : AHuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f16\t$Sd, $Sm"),
                   []>,
                   Requires<[HasFullFP16]> {
      let Inst{17-16} = rm;
    }
    def S : ASuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   NoItinerary, !strconcat("vrint", opc, ".f32\t$Sd, $Sm"),
                   [(set (f32 SPR:$Sd), (node (f32 SPR:$Sm)))]>,
                   Requires<[HasFPARMv8]> {
      let Inst{17-16} = rm;
    }
    def D : ADuInp<0b11101, 0b11, 0b1000, 0b01, 0,
                   (outs DPR:$Dd), (ins DPR:$Dm),
                   NoItinerary, !strconcat("vrint", opc, ".f64\t$Dd, $Dm"),
                   [(set (f64 DPR:$Dd), (node (f64 DPR:$Dm)))]>,
                   Requires<[HasFPARMv8, HasDPVFP]> {
      let Inst{17-16} = rm;
    }
  }

  def : InstAlias<!strconcat("vrint", opc, ".f32.f32\t$Sd, $Sm"),
                  (!cast<Instruction>(NAME#"S") SPR:$Sd, SPR:$Sm), 0>,
        Requires<[HasFPARMv8]>;
  def : InstAlias<!strconcat("vrint", opc, ".f64.f64\t$Dd, $Dm"),
                  (!cast<Instruction>(NAME#"D") DPR:$Dd, DPR:$Dm), 0>,
        Requires<[HasFPARMv8,HasDPVFP]>;
}

defm VRINTA : vrint_inst_anpm<"a", 0b00, fround>;
defm VRINTN : vrint_inst_anpm<"n", 0b01, int_arm_neon_vrintn>;
defm VRINTP : vrint_inst_anpm<"p", 0b10, fceil>;
defm VRINTM : vrint_inst_anpm<"m", 0b11, ffloor>;

def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs DPR:$Dd), (ins DPR:$Dm),
                  IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
                  [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>,
             Sched<[WriteFPSQRT64]>;

def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
                  [(set SPR:$Sd, (fsqrt SPR:$Sm))]>,
             Sched<[WriteFPSQRT32]>;

def VSQRTH : AHuI<0b11101, 0b11, 0b0001, 0b11, 0,
                  (outs SPR:$Sd), (ins SPR:$Sm),
                  IIC_fpSQRT16, "vsqrt", ".f16\t$Sd, $Sm",
                  []>;

let hasSideEffects = 0 in {
let isMoveReg = 1 in {
def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs DPR:$Dd), (ins DPR:$Dm),
                 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;

def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
                 (outs SPR:$Sd), (ins SPR:$Sm),
                 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
} // isMoveReg

let PostEncoderMethod = "", DecoderNamespace = "VFPV8" in {
def VMOVH : ASuInp<0b11101, 0b11, 0b0000, 0b01, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA16, "vmovx.f16\t$Sd, $Sm", []>,
            Requires<[HasFullFP16]>;

def VINSH : ASuInp<0b11101, 0b11, 0b0000, 0b11, 0,
                   (outs SPR:$Sd), (ins SPR:$Sm),
                   IIC_fpUNA16, "vins.f16\t$Sd, $Sm", []>,
            Requires<[HasFullFP16]>;
} // PostEncoderMethod
} // hasSideEffects

//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//

let isMoveReg = 1 in {
def VMOVRS : AVConv2I<0b11100001, 0b1010,
                      (outs GPR:$Rt), (ins SPR:$Sn),
                      IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
                      [(set GPR:$Rt, (bitconvert SPR:$Sn))]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7} = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5} = 0b00;
  let Inst{3-0} = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}

// Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
def VMOVSR : AVConv4I<0b11100000, 0b1010,
                      (outs SPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
                      [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
             Requires<[HasVFP2, UseVMOVSR]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7} = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5} = 0b00;
  let Inst{3-0} = 0b0000;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
}
} // isMoveReg
def : Pat<(arm_vmovsr GPR:$Rt), (VMOVSR GPR:$Rt)>, Requires<[HasVFP2, UseVMOVSR]>;

let hasSideEffects = 0 in {
def VMOVRRD : AVConv3I<0b11000101, 0b1011,
                       (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
                       [(set GPR:$Rt, GPR:$Rt2, (arm_fmrrd DPR:$Dm))]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Rt = EXTRACT_SUBREG $Dm, ssub_0
  // $Rt2 = EXTRACT_SUBREG $Dm, ssub_1
  let isExtractSubreg = 1;
}

def VMOVRRS : AVConv3I<0b11000101, 0b1010,
                       (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
                       IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]>,
              Sched<[WriteFPMOV]> {
  bits<5> src1;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0} = src1{4-1};
  let Inst{5} = src1{0};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;
  let DecoderMethod = "DecodeVMOVRRS";
}
} // hasSideEffects

// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR

def VMOVDRR : AVConv5I<0b11000100, 0b1011,
                       (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
                       IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
                       [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Dm;
  bits<4> Rt;
  bits<4> Rt2;

  // Encode instruction operands.
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
  let Inst{15-12} = Rt;
  let Inst{19-16} = Rt2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  // This instruction is equivalent to
  // $Dm = REG_SEQUENCE $Rt, ssub_0, $Rt2, ssub_1
  let isRegSequence = 1;
}

// Hoist an fabs or a fneg of a value coming from integer registers
// and do the fabs/fneg on the integer value. This is never a loss
// and could enable the conversion to float to be removed completely.
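// (For an f64 assembled from two GPRs, fabs just clears bit 31 of the high
// word and fneg just toggles it, which is what the BFC/EOR patterns below do.)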
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsARM, HasV6T2]>;
def : Pat<(fabs (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2BFC GPR:$Rh, (i32 0x7FFFFFFF)))>,
      Requires<[IsThumb2, HasV6T2]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsARM]>;
def : Pat<(fneg (arm_fmdrr GPR:$Rl, GPR:$Rh)),
          (VMOVDRR GPR:$Rl, (t2EORri GPR:$Rh, (i32 0x80000000)))>,
      Requires<[IsThumb2]>;

let hasSideEffects = 0 in
def VMOVSRR : AVConv5I<0b11000100, 0b1010,
                       (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
                       IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
                       [/* For disassembly only; pattern left blank */]>,
              Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> dst1;
  bits<4> src1;
  bits<4> src2;

  // Encode instruction operands.
  let Inst{3-0} = dst1{4-1};
  let Inst{5} = dst1{0};
  let Inst{15-12} = src1;
  let Inst{19-16} = src2;

  let Inst{7-6} = 0b00;

  // Some single precision VFP instructions may be executed on both NEON and VFP
  // pipelines.
  let D = VFPNeonDomain;

  let DecoderMethod = "DecodeVMOVSRR";
}

// Move H->R, clearing top 16 bits
def VMOVRH : AVConv2I<0b11100001, 0b1001,
                      (outs GPR:$Rt), (ins HPR:$Sn),
                      IIC_fpMOVSI, "vmov", ".f16\t$Rt, $Sn",
                      [(set GPR:$Rt, (arm_vmovrh HPR:$Sn))]>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<4> Rt;
  bits<5> Sn;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7} = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5} = 0b00;
  let Inst{3-0} = 0b0000;
}

// Move R->H, clearing top 16 bits
def VMOVHR : AVConv4I<0b11100000, 0b1001,
                      (outs HPR:$Sn), (ins GPR:$Rt),
                      IIC_fpMOVIS, "vmov", ".f16\t$Sn, $Rt",
                      [(set HPR:$Sn, (arm_vmovhr GPR:$Rt))]>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPMOV]> {
  // Instruction operands.
  bits<5> Sn;
  bits<4> Rt;

  // Encode instruction operands.
  let Inst{19-16} = Sn{4-1};
  let Inst{7} = Sn{0};
  let Inst{15-12} = Rt;

  let Inst{6-5} = 0b00;
  let Inst{3-0} = 0b0000;
}

// FMRDH: SPR -> GPR
// FMRDL: SPR -> GPR
// FMRRS: SPR -> GPR
// FMRX: SPR system reg -> GPR
// FMSRR: GPR -> SPR
// FMXR: GPR -> VFP system reg


// Int -> FP:

class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Dd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Dd{3-0};
  let Inst{22} = Dd{4};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops, InstrItinClass itin,
                         string opc, string asm, list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
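  // (Single-precision register numbers are split across the encoding: the top
  // four bits of Sm go in Vm (Inst{3-0}) and the low bit in M (Inst{5}), and
  // likewise Sd is split across Vd (Inst{15-12}) and D (Inst{22}).)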
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};
}

class AVConv1IHs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};

  let Predicates = [HasFullFP16];
}

def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (sint_to_fp GPR:$a)),
               (VSITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VSITOD (VLDRS addrmode5:$a))>;
}

def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd),(ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (sint_to_fp GPR:$a)),
                   (VSITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (sint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VSITOS (VLDRS addrmode5:$a))>;

def VSITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.s32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // s32
}

def : VFPNoNEONPat<(f16 (sint_to_fp GPR:$a)),
                   (VSITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
                               (outs DPR:$Dd), (ins SPR:$Sm),
                               IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(f64 (uint_to_fp GPR:$a)),
               (VUITOD (COPY_TO_REGCLASS GPR:$a, SPR))>;

  def : VFPPat<(f64 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
               (VUITOD (VLDRS addrmode5:$a))>;
}

def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
                                []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(f32 (uint_to_fp GPR:$a)),
                   (VUITOS (COPY_TO_REGCLASS GPR:$a, SPR))>;

def : VFPNoNEONPat<(f32 (uint_to_fp (i32 (alignedload32 addrmode5:$a)))),
                   (VUITOS (VLDRS addrmode5:$a))>;

def VUITOH : AVConv1IHs_Encode<0b11101, 0b11, 0b1000, 0b1001,
                               (outs HPR:$Sd), (ins SPR:$Sm),
                               IIC_fpCVTIH, "vcvt", ".f16.u32\t$Sd, $Sm",
                               []>,
             Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // u32
}

def : VFPNoNEONPat<(f16 (uint_to_fp GPR:$a)),
                   (VUITOH (COPY_TO_REGCLASS GPR:$a, SPR))>;

// FP -> Int:

class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Dm;

  // Encode instruction operands.
  let Inst{3-0} = Dm{3-0};
  let Inst{5} = Dm{4};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};

  let Predicates = [HasVFP2, HasDPVFP];
}

class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                         bits<4> opcod4, dag oops, dag iops,
                         InstrItinClass itin, string opc, string asm,
                         list<dag> pattern>
  : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
              pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};
}

class AVConv1IsH_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
                        bits<4> opcod4, dag oops, dag iops,
                        InstrItinClass itin, string opc, string asm,
                        list<dag> pattern>
  : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
             pattern> {
  // Instruction operands.
  bits<5> Sd;
  bits<5> Sm;

  // Encode instruction operands.
  let Inst{3-0} = Sm{4-1};
  let Inst{5} = Sm{0};
  let Inst{15-12} = Sd{4-1};
  let Inst{22} = Sd{0};

  let Predicates = [HasFullFP16];
}

// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_sint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOSIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_sint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOSIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_sint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOSIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_sint (f32 SPR:$a))),
                                   addrmode5:$ptr),
                   (VSTRS (VTOSIZS SPR:$a), addrmode5:$ptr)>;

def VTOSIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
                                (outs SPR:$Sd), (ins HPR:$Sm),
                                IIC_fpCVTHI, "vcvt", ".s32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

def : VFPNoNEONPat<(i32 (fp_to_sint HPR:$a)),
                   (COPY_TO_REGCLASS (VTOSIZH HPR:$a), GPR)>;

def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

let Predicates=[HasVFP2, HasDPVFP] in {
  def : VFPPat<(i32 (fp_to_uint (f64 DPR:$a))),
               (COPY_TO_REGCLASS (VTOUIZD DPR:$a), GPR)>;

  def : VFPPat<(alignedstore32 (i32 (fp_to_uint (f64 DPR:$a))), addrmode5:$ptr),
               (VSTRS (VTOUIZD DPR:$a), addrmode5:$ptr)>;
}

def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
                                 []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit

  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def : VFPNoNEONPat<(i32 (fp_to_uint SPR:$a)),
                   (COPY_TO_REGCLASS (VTOUIZS SPR:$a), GPR)>;

def : VFPNoNEONPat<(alignedstore32 (i32 (fp_to_uint (f32 SPR:$a))),
                                   addrmode5:$ptr),
                   (VSTRS (VTOUIZS SPR:$a), addrmode5:$ptr)>;

def VTOUIZH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
                                (outs SPR:$Sd), (ins HPR:$Sm),
                                IIC_fpCVTHI, "vcvt", ".u32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 1; // Z bit
}

def : VFPNoNEONPat<(i32 (fp_to_uint HPR:$a)),
                   (COPY_TO_REGCLASS (VTOUIZH HPR:$a), GPR)>;

// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
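// The f32/f64 forms below are only selected through the int_arm_vcvtr /
// int_arm_vcvtru intrinsics; the generic fp_to_sint/fp_to_uint nodes must
// truncate, so they use the Z-bit variants above.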
let Uses = [FPSCR] in {
def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}

def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}

def VTOSIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1101, 0b1001,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTHI, "vcvtr", ".s32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
                                (outs SPR:$Sd), (ins DPR:$Dm),
                                IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
                                [(set SPR:$Sd, (int_arm_vcvtru (f64 DPR:$Dm)))]>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
                                 (outs SPR:$Sd), (ins SPR:$Sm),
                                 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
                                 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}

def VTOUIRH : AVConv1IsH_Encode<0b11101, 0b11, 0b1100, 0b1001,
                                (outs SPR:$Sd), (ins SPR:$Sm),
                                IIC_fpCVTHI, "vcvtr", ".u32.f16\t$Sd, $Sm",
                                []>,
              Sched<[WriteFPCVT]> {
  let Inst{7} = 0; // Z bit
}
}

// v8.3-a Javascript Convert to Signed fixed-point
def VJCVT : AVConv1IsD_Encode<0b11101, 0b11, 0b1001, 0b1011,
                              (outs SPR:$Sd), (ins DPR:$Dm),
                              IIC_fpCVTDI, "vjcvt", ".s32.f64\t$Sd, $Dm",
                              []>,
            Requires<[HasFPARMv8, HasV8_3a]> {
  let Inst{7} = 1; // Z bit
}

// Convert between floating-point and fixed-point
// Data type for fixed-point naming convention:
//   S16 (U=0, sx=0) -> SH
//   U16 (U=1, sx=0) -> UH
//   S32 (U=0, sx=1) -> SL
//   U32 (U=1, sx=1) -> UL

let Constraints = "$a = $dst" in {

// FP to Fixed-Point:

// Single Precision register
class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
                          bit op5, dag oops, dag iops, InstrItinClass itin,
                          string opc, string asm, list<dag> pattern>
  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  let Inst{22} = dst{0};
  let Inst{15-12} = dst{4-1};
}

// Double Precision register
class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
                          bit op5, dag oops, dag iops, InstrItinClass itin,
                          string opc, string asm, list<dag> pattern>
  : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
  bits<5> dst;
  // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
  let Inst{22} = dst{4};
  let Inst{15-12} = dst{3-0};

  let Predicates = [HasVFP2, HasDPVFP];
}

def VTOSHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTHI, "vcvt", ".s16.f16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VTOUHH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTHI, "vcvt", ".u16.f16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VTOSLH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1001, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTHI, "vcvt", ".s32.f16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VTOULH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1001, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTHI, "vcvt", ".u32.f16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                       IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                       IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

// Fixed-Point to FP:

def VSHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTIH, "vcvt", ".f16.s16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VUHTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTIH, "vcvt", ".f16.u16\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VSLTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1001, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTIH, "vcvt", ".f16.s32\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VULTOH : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1001, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTIH, "vcvt", ".f16.u32\t$dst, $a, $fbits", []>,
             Requires<[HasFullFP16]>,
             Sched<[WriteFPCVT]>;

def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
                       (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
                       (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
                       IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
                       (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
                       (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
                       IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>,
             Sched<[WriteFPCVT]>;

} // End of 'let Constraints = "$a = $dst" in'
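
// Illustrative note (not from the original sources): the fixed-point forms
// above take an fbits operand giving the number of fraction bits. With 16
// fraction bits, for example, one would expect:
//   vcvt.s32.f32  s0, s0, #16   @ f32 1.5 -> fixed 0x00018000 (1.5 * 2^16)
//   vcvt.f32.s32  s0, s0, #16   @ fixed 0x00018000 -> f32 1.5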

//===----------------------------------------------------------------------===//
// FP Multiply-Accumulate Operations.
//

def VMLAD : ADbI<0b11100, 0b00, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VMLAH : AHbI<0b11100, 0b00, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                 IIC_fpMAC16, "vmla", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
                                          HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
          (VMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
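
// Illustrative note (assumed example, not part of the original comments):
// when the multiply has a single use and fused MAC is not in use, IR such as
//   %m = fmul float %a, %b
//   %r = fadd float %acc, %m
// is expected to select to a single "vmla.f32 Sacc, Sa, Sb" rather than a
// separate vmul/vadd pair, subject to the UseFPVMLx/DontUseFusedMAC
// predicates above.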

def VMLSD : ADbI<0b11100, 0b00, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VMLSH : AHbI<0b11100, 0b00, 1, 0,
                 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                 IIC_fpMAC16, "vmls", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                          HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
          (VMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMLAH : AHbI<0b11100, 0b01, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                  IIC_fpMAC16, "vnmla", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;

// (-(a * b) - dst) -> -(dst + (a * b))
def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su HPR:$a, HPR:$b)), HPR:$dstin),
          (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

// (-dst - (a * b)) -> -(dst + (a * b))
def : Pat<(fsub_mlx (fneg DPR:$dstin), (fmul_su DPR:$a, (f64 DPR:$b))),
          (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg SPR:$dstin), (fmul_su SPR:$a, SPR:$b)),
          (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fneg HPR:$dstin), (fmul_su HPR:$a, HPR:$b)),
          (VNMLAH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;

def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines on A8.
  let D = VFPNeonA8Domain;
}

def VNMLSH : AHbI<0b11100, 0b01, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                  IIC_fpMAC16, "vnmls", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFPVMLx,DontUseFusedMAC]>;

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP2,HasDPVFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su HPR:$a, HPR:$b), HPR:$dstin),
          (VNMLSH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
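
// Illustrative summary (assumed, for readability): with d as the accumulator,
// vnmla computes d = -(d + n*m) and vnmls computes d = n*m - d, which is why
// the patterns above match the negated and the plain product respectively.
// For instance, IR of the form
//   %m = fmul double %a, %b
//   %r = fsub double %m, %acc
// is expected to become "vnmls.f64 Dacc, Da, Db" under the same predicates.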

//===----------------------------------------------------------------------===//
// Fused FP Multiply-Accumulate Operations.
//
def VFMAD : ADbI<0b11101, 0b10, 0, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def VFMAH : AHbI<0b11101, 0b10, 0, 0,
                 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                 IIC_fpFMAC16, "vfma", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fadd_mlx (fmul_su HPR:$Sn, HPR:$Sm),
                                          HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
def : Pat<(fadd_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
          (VFMAH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma x, y, z) -> (vfma z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
          (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
          (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
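
// Illustrative example (assumed, not taken from the original comments): a
// call such as
//   %r = call float @llvm.fma.f32(float %x, float %y, float %z)
// is expected to match the f32 pattern above and select "vfma.f32 Sz, Sx, Sy",
// with the accumulator register tied to the destination.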

def VFMSD : ADbI<0b11101, 0b10, 1, 0,
                 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
                 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def VFMSH : AHbI<0b11101, 0b10, 1, 0,
                 (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                 IIC_fpFMAC16, "vfms", ".f16\t$Sd, $Sn, $Sm",
                 [(set HPR:$Sd, (fadd_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                          HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
          (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
          (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
def : Pat<(fsub_mlx HPR:$dstin, (fmul_su HPR:$a, HPR:$b)),
          (VFMSH HPR:$dstin, HPR:$a, HPR:$b)>,
      Requires<[HasFullFP16,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fma (fneg x), y, z) -> (vfms z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma x, (fneg y), z) -> (vfms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
          (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
          (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
                                          (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
                                           SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def VFNMAH : AHbI<0b11101, 0b01, 1, 0,
                  (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                  IIC_fpFMAC16, "vfnma", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fneg (fmul_su HPR:$Sn, HPR:$Sm)),
                                           HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
          (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
          (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics
// (fneg (fma x, y, z)) -> (vfnma z, x, y)
def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
                  (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
                  IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
                  [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
                                           (f64 DPR:$Ddin)))]>,
              RegConstraint<"$Ddin = $Dd">,
              Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>,
              Sched<[WriteFPMAC64, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
                  (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
                  IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
                  [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]> {
  // Some single precision VFP instructions may be executed on both NEON and
  // VFP pipelines.
}

def VFNMSH : AHbI<0b11101, 0b01, 0, 0,
                  (outs HPR:$Sd), (ins HPR:$Sdin, HPR:$Sn, HPR:$Sm),
                  IIC_fpFMAC16, "vfnms", ".f16\t$Sd, $Sn, $Sm",
                  [(set HPR:$Sd, (fsub_mlx (fmul_su HPR:$Sn, HPR:$Sm), HPR:$Sdin))]>,
              RegConstraint<"$Sdin = $Sd">,
              Requires<[HasFullFP16,UseFusedMAC]>,
              Sched<[WriteFPMAC32, ReadFPMAC, ReadFPMUL, ReadFPMUL]>;

def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
          (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
      Requires<[HasVFP4,HasDPVFP,UseFusedMAC]>;
def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
          (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
      Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;

// Match @llvm.fma.* intrinsics

// (fma x, y, (fneg z)) -> (vfnms z, x, y)
def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;
// (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
          (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
      Requires<[HasVFP4,HasDPVFP]>;
def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
          (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
      Requires<[HasVFP4]>;

//===----------------------------------------------------------------------===//
// FP Conditional moves.
//

let hasSideEffects = 0 in {
def VMOVDcc : PseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, cmovpred:$p),
                         IIC_fpUNA64,
                         [(set (f64 DPR:$Dd),
                               (ARMcmov DPR:$Dn, DPR:$Dm, cmovpred:$p))]>,
              RegConstraint<"$Dn = $Dd">, Requires<[HasVFP2,HasDPVFP]>;

def VMOVScc : PseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, cmovpred:$p),
                         IIC_fpUNA32,
                         [(set (f32 SPR:$Sd),
                               (ARMcmov SPR:$Sn, SPR:$Sm, cmovpred:$p))]>,
              RegConstraint<"$Sn = $Sd">, Requires<[HasVFP2]>;
} // hasSideEffects

//===----------------------------------------------------------------------===//
// Move from VFP System Register to ARM core register.
//

class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
                 list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> Rt;

  let Inst{27-20} = 0b11101111;
  let Inst{19-16} = opc19_16;
  let Inst{15-12} = Rt;
  let Inst{11-8} = 0b1010;
  let Inst{7} = 0;
  let Inst{6-5} = 0b00;
  let Inst{4} = 1;
  let Inst{3-0} = 0b0000;
}

// APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C, V
// flags to APSR.
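// For example (illustrative only), a typical floating-point compare-and-branch
// sequence is:
//   vcmp.f32  s0, s1
//   vmrs      APSR_nzcv, fpscr   @ copy the FPSCR condition flags into APSR
//   bgt       .Lgreater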
let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
                        "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;

let DecoderMethod = "DecodeForVMRSandVMSR" in {
  // Application level FPSCR -> GPR
  let hasSideEffects = 1, Uses = [FPSCR] in
  def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPRnopc:$Rt), (ins),
                        "vmrs", "\t$Rt, fpscr",
                        [(set GPRnopc:$Rt, (int_arm_get_fpscr))]>;

  // System level FPEXC, FPSID -> GPR
  let Uses = [FPSCR] in {
    def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPRnopc:$Rt), (ins),
                                "vmrs", "\t$Rt, fpexc", []>;
    def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPRnopc:$Rt), (ins),
                                "vmrs", "\t$Rt, fpsid", []>;
    def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPRnopc:$Rt), (ins),
                                "vmrs", "\t$Rt, mvfr0", []>;
    def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPRnopc:$Rt), (ins),
                                "vmrs", "\t$Rt, mvfr1", []>;
    let Predicates = [HasFPARMv8] in {
      def VMRS_MVFR2 : MovFromVFP<0b0101 /* mvfr2 */, (outs GPRnopc:$Rt), (ins),
                                  "vmrs", "\t$Rt, mvfr2", []>;
    }
    def VMRS_FPINST : MovFromVFP<0b1001 /* fpinst */, (outs GPRnopc:$Rt), (ins),
                                 "vmrs", "\t$Rt, fpinst", []>;
    def VMRS_FPINST2 : MovFromVFP<0b1010 /* fpinst2 */, (outs GPRnopc:$Rt),
                                  (ins), "vmrs", "\t$Rt, fpinst2", []>;
  }
}

//===----------------------------------------------------------------------===//
// Move from ARM core register to VFP System Register.
//

class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
               list<dag> pattern>:
  VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {

  // Instruction operand.
  bits<4> src;

  // Encode instruction operand.
  let Inst{15-12} = src;

  let Inst{27-20} = 0b11101110;
  let Inst{19-16} = opc19_16;
  let Inst{11-8} = 0b1010;
  let Inst{7} = 0;
  let Inst{4} = 1;
}

let DecoderMethod = "DecodeForVMRSandVMSR" in {
  let Defs = [FPSCR] in {
    // Application level GPR -> FPSCR
    def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPRnopc:$src),
                        "vmsr", "\tfpscr, $src",
                        [(int_arm_set_fpscr GPRnopc:$src)]>;
    // System level GPR -> FPEXC
    def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPRnopc:$src),
                              "vmsr", "\tfpexc, $src", []>;
    // System level GPR -> FPSID
    def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPRnopc:$src),
                              "vmsr", "\tfpsid, $src", []>;
    def VMSR_FPINST : MovToVFP<0b1001 /* fpinst */, (outs), (ins GPRnopc:$src),
                               "vmsr", "\tfpinst, $src", []>;
    def VMSR_FPINST2 : MovToVFP<0b1010 /* fpinst2 */, (outs), (ins GPRnopc:$src),
                                "vmsr", "\tfpinst2, $src", []>;
  }
}
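
// Illustrative usage (assumed example): changing the rounding mode is a
// read-modify-write of FPSCR through these instructions, e.g.
//   vmrs  r0, fpscr
//   orr   r0, r0, #0x00c00000   @ RMode[23:22] = 0b11, round towards zero
//   vmsr  fpscr, r0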

//===----------------------------------------------------------------------===//
// Misc.
//

// Materialize FP immediates. VFP3 only.
let isReMaterializable = 1 in {
def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
                    VFPMiscFrm, IIC_fpUNA64,
                    "vmov", ".f64\t$Dd, $imm",
                    [(set DPR:$Dd, vfp_f64imm:$imm)]>,
              Requires<[HasVFP3,HasDPVFP]> {
  bits<5> Dd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22} = Dd{4};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Dd{3-0};
  let Inst{11-9} = 0b101;
  let Inst{8} = 1;          // Double precision.
  let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}

def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
                    VFPMiscFrm, IIC_fpUNA32,
                    "vmov", ".f32\t$Sd, $imm",
                    [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
  bits<5> Sd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22} = Sd{0};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Sd{4-1};
  let Inst{11-9} = 0b101;
  let Inst{8} = 0;          // Single precision.
  let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}

def FCONSTH : VFPAI<(outs HPR:$Sd), (ins vfp_f16imm:$imm),
                    VFPMiscFrm, IIC_fpUNA16,
                    "vmov", ".f16\t$Sd, $imm",
                    [(set HPR:$Sd, vfp_f16imm:$imm)]>,
              Requires<[HasFullFP16]> {
  bits<5> Sd;
  bits<8> imm;

  let Inst{27-23} = 0b11101;
  let Inst{22} = Sd{0};
  let Inst{21-20} = 0b11;
  let Inst{19-16} = imm{7-4};
  let Inst{15-12} = Sd{4-1};
  let Inst{11-8} = 0b1001;  // Half precision.
  let Inst{7-4} = 0b0000;
  let Inst{3-0} = imm{3-0};
}
}

//===----------------------------------------------------------------------===//
// Assembler aliases.
//
// A few mnemonic aliases for pre-unified (pre-UAL) syntax. We don't guarantee
// to support them all, but accepting at least some of the basics keeps the
// assembler friendly to older code.
def : VFP2MnemonicAlias<"flds", "vldr">;
def : VFP2MnemonicAlias<"fldd", "vldr">;
def : VFP2MnemonicAlias<"fmrs", "vmov">;
def : VFP2MnemonicAlias<"fmsr", "vmov">;
def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
def : VFP2MnemonicAlias<"fmrdd", "vmov">;
def : VFP2MnemonicAlias<"fmrds", "vmov">;
def : VFP2MnemonicAlias<"fmrrd", "vmov">;
def : VFP2MnemonicAlias<"fmdrr", "vmov">;
def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
def : VFP2MnemonicAlias<"fsts", "vstr">;
def : VFP2MnemonicAlias<"fstd", "vstr">;
def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
def : VFP2MnemonicAlias<"fmrx", "vmrs">;
def : VFP2MnemonicAlias<"fmxr", "vmsr">;
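
// For instance (illustrative), with these aliases the assembler accepts the
// pre-UAL spelling
//   flds  s0, [r0]
// and treats it as the UAL form
//   vldr  s0, [r0]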

// Be friendly and accept the old form of zero-compare.
def : VFP2DPInstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;

def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
                    (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"faddd${p} $Dd, $Dn, $Dm",
                      (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
                    (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"fsubd${p} $Dd, $Dn, $Dm",
                      (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;

// No need for the size suffix on VSQRT. It's implied by the register classes.
def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
def : VFP2DPInstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;

// VLDR/VSTR accept an optional type suffix.
def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
                    (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
                    (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
                    (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
                    (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;

// VMOV can accept an optional data type suffix of 32 bits or narrower.
def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
                    (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
                    (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;

def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
                    (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
                    (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;

// VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
// VMOVD does.
def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
                    (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;

// FCONSTD/FCONSTS alias for vmov.f64/vmov.f32
// These aliases provide added functionality over vmov.f instructions by
// allowing users to write assembly containing encoded floating-point constants
// (e.g. #0x70 vs #1.0). Without these aliases there is no way for the
// assembler to accept encoded fp constants (but the equivalent fp-literal is
// accepted directly by the vmov.f instructions).
def : VFP3InstAlias<"fconstd${p} $Dd, $val",
                    (FCONSTD DPR:$Dd, vfp_f64imm:$val, pred:$p)>;
def : VFP3InstAlias<"fconsts${p} $Sd, $val",
                    (FCONSTS SPR:$Sd, vfp_f32imm:$val, pred:$p)>;
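
// Illustrative note (assumed worked example): the 8-bit VFP immediate packs a
// sign bit, a 3-bit exponent and a 4-bit fraction, so only a limited set of
// values (e.g. 1.0, 0.5, 31.0 and their negations) can be encoded. 1.0 encodes
// as 0x70, so "fconsts s0, #0x70" and "vmov.f32 s0, #1.0" denote the same
// instruction.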