Searched refs: f64 (Results 1 – 25 of 1795), sorted by relevance

/minix/external/bsd/llvm/dist/llvm/test/MC/ARM/
single-precision-fp.s
5 vadd.f64 d0, d1, d2
6 vsub.f64 d2, d3, d4
7 vdiv.f64 d4, d5, d6
8 vmul.f64 d6, d7, d8
9 vnmul.f64 d8, d9, d10
46 vneg.f64 d15, d14
56 vcmpe.f64 d0, d1
57 vcmp.f64 d2, d3
58 vabs.f64 d4, d5
59 vcmpe.f64 d5, #0
[all …]
fp-armv8.s
5 vcvtt.f64.f16 d3, s1
7 vcvtt.f16.f64 s5, d12
10 vcvtb.f64.f16 d3, s1
12 vcvtb.f16.f64 s4, d1
15 vcvttge.f64.f16 d3, s1
30 vcvta.s32.f64 s2, d3
38 vcvtp.s32.f64 s0, d4
109 vrinta.f64 d3, d4
113 vrintn.f64 d3, d4
117 vrintp.f64 d3, d4
[all …]
thumb-fp-armv8.s
5 vcvtt.f64.f16 d3, s1
7 vcvtt.f16.f64 s5, d12
10 vcvtb.f64.f16 d3, s1
12 vcvtb.f16.f64 s4, d1
16 vcvttge.f64.f16 d3, s1
33 vcvta.s32.f64 s2, d3
41 vcvtp.s32.f64 s0, d4
115 vrinta.f64 d3, d4
119 vrintn.f64 d3, d4
123 vrintp.f64 d3, d4
[all …]
directive-arch_extension-fp.s
96 vrintz.f64.f64 d0, d0
104 vrintr.f64.f64 d0, d0
112 vrintx.f64.f64 d0, d0
121 vrinta.f64.f64 d0, d0
129 vrintn.f64.f64 d0, d0
137 vrintp.f64.f64 d0, d0
145 vrintm.f64.f64 d0, d0
232 vrintz.f64.f64 d0, d0
240 vrintr.f64.f64 d0, d0
248 vrintx.f64.f64 d0, d0
[all …]
directive-arch_extension-simd.s
68 vrintz.f64.f64 d0, d0
76 vrintr.f64.f64 d0, d0
84 vrintx.f64.f64 d0, d0
93 vrinta.f64.f64 d0, d0
101 vrintn.f64.f64 d0, d0
109 vrintp.f64.f64 d0, d0
117 vrintm.f64.f64 d0, d0
176 vrintz.f64.f64 d0, d0
184 vrintr.f64.f64 d0, d0
192 vrintx.f64.f64 d0, d0
[all …]
simple-fp-encoding.s
3 vadd.f64 d16, d17, d16
8 vsub.f64 d16, d17, d16
13 vdiv.f64 d16, d17, d16
16 vdiv.f64 d5, d7
24 vmul.f64 d16, d17, d16
25 vmul.f64 d20, d17
47 vcmpe.f64 d16, #0
59 vcvt.f32.f64 s0, d16
60 vcvt.f64.f32 d16, s0
146 vmov.f64 r1, r5, d2
[all …]
d16.s
7 @ D16-NEXT: vadd.f64 d1, d2, d16
8 vadd.f64 d1, d2, d16
11 @ D16-NEXT: vadd.f64 d1, d17, d6
12 vadd.f64 d1, d17, d6
15 @ D16-NEXT: vadd.f64 d19, d7, d6
16 vadd.f64 d19, d7, d6
19 @ D16-NEXT: vcvt.f64.f32 d22, s4
20 vcvt.f64.f32 d22, s4
23 @ D16-NEXT: vcvt.f32.f64 s26, d30
24 vcvt.f32.f64 s26, d30
invalid-fp-armv8.s
5 vcvtt.f64.f16 d3, s1
7 vcvtt.f16.f64 s5, d12
39 vselgt.f64 s3, s2, s1
43 vselgt.f64 q0, s3, q1
48 vminnm.f64 s3, s2, s1
52 vmaxnm.f64 q0, s3, q1
57 vcvta.s32.f64 d3, s2
61 vcvtn.u32.f64 d3, s2
65 vcvtnge.u32.f64 d3, s2
68 vcvtbgt.f64.f16 q0, d3
[all …]
vfp4.s
7 @ ARM: vfma.f64 d16, d18, d17 @ encoding: [0xa1,0x0b,0xe2,0xee]
8 @ THUMB: vfma.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xa1,0x0b]
10 @ THUMB_V7EM-ERRORS-NEXT: vfma.f64 d16, d18, d17
11 vfma.f64 d16, d18, d17
33 @ THUMB_V7EM-ERRORS-NEXT: vfnma.f64 d16, d18, d17
34 vfnma.f64 d16, d18, d17
41 @ ARM: vfms.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xe2,0xee]
44 @ THUMB_V7EM-ERRORS-NEXT: vfms.f64 d16, d18, d17
45 vfms.f64 d16, d18, d17
67 @ THUMB_V7EM-ERRORS-NEXT: vfnms.f64 d16, d18, d17
[all …]
/minix/external/bsd/llvm/dist/llvm/test/CodeGen/Thumb2/
float-intrinsics-double.ll
12 ; HARD: vsqrt.f64 d0, d0
102 ; HARD: vfma.f64
115 ; DP: vabs.f64 d0, d0
139 ; FP-ARMv8: vrintm.f64
149 ; FP-ARMv8: vrintp.f64
159 ; FP-ARMv8: vrintz.f64
169 ; FP-ARMv8: vrintx.f64
199 ; VFP4: vmul.f64
200 ; VFP4: vadd.f64
201 ; FP-ARMv8: vmla.f64
[all …]
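
The float-intrinsics-double.ll checks above pin how double-precision math intrinsics are selected for Thumb2 targets. As a rough sketch of the kind of IR behind such a test (the function name is invented, not from the file), with a hard-float VFP target the sqrt call below is expected to collapse to the single vsqrt.f64 d0, d0 checked on file line 12:

    declare double @llvm.sqrt.f64(double)

    define double @sqrt_d(double %x) {
      ; with a hard-float ABI, %x arrives in d0 and this call
      ; is selected as the lone instruction vsqrt.f64 d0, d0
      %r = call double @llvm.sqrt.f64(double %x)
      ret double %r
    }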
/minix/external/bsd/llvm/dist/llvm/test/MC/Disassembler/ARM/
fp-armv8.txt
4 # CHECK: vcvtt.f64.f16 d3, s1
7 # CHECK: vcvtt.f16.f64 s5, d12
10 # CHECK: vcvtb.f64.f16 d3, s1
13 # CHECK: vcvtb.f16.f64 s4, d1
16 # CHECK: vcvttge.f64.f16 d3, s1
32 # CHECK: vcvta.s32.f64 s2, d3
44 # CHECK: vcvtp.s32.f64 s0, d4
134 # CHECK: vrinta.f64 d3, d4
140 # CHECK: vrintn.f64 d3, d4
146 # CHECK: vrintp.f64 d3, d4
[all …]
thumb-fp-armv8.txt
4 # CHECK: vcvtt.f64.f16 d3, s1
7 # CHECK: vcvtt.f16.f64 s5, d12
10 # CHECK: vcvtb.f64.f16 d3, s1
13 # CHECK: vcvtb.f16.f64 s4, d1
17 # CHECK: vcvttge.f64.f16 d3, s1
36 # CHECK: vcvta.s32.f64 s2, d3
48 # CHECK: vcvtp.s32.f64 s0, d4
142 # CHECK: vrinta.f64 d3, d4
148 # CHECK: vrintn.f64 d3, d4
154 # CHECK: vrintp.f64 d3, d4
[all …]
/minix/lib/libc_vfp/
vfpdf.S
105 vneg.f64 d0, d0
156 vcmp.f64 d0, d1
177 vcmp.f64 d0, d1
186 vcmp.f64 d0, d1
195 vcmp.f64 d0, d1
204 vcmp.f64 d0, d1
213 vcmp.f64 d0, d1
222 vcmp.f64 d0, d1
238 vcmp.f64 d0, d1
248 vcmp.f64 d0, d1
[all …]
/minix/external/bsd/llvm/dist/llvm/test/ExecutionEngine/Interpreter/
intrinsics.ll
5 declare double @llvm.sin.f64(double)
7 declare double @llvm.cos.f64(double)
9 declare double @llvm.floor.f64(double)
11 declare double @llvm.ceil.f64(double)
13 declare double @llvm.trunc.f64(double)
15 declare double @llvm.round.f64(double)
17 declare double @llvm.copysign.f64(double, double)
21 %sin64 = call double @llvm.sin.f64(double 0.000000e+00)
23 %cos64 = call double @llvm.cos.f64(double 0.000000e+00)
25 %floor64 = call double @llvm.floor.f64(double 0.000000e+00)
[all …]
/minix/external/bsd/llvm/dist/llvm/test/CodeGen/XCore/
float-intrinsics.ll
2 declare double @llvm.cos.f64(double)
3 declare double @llvm.exp.f64(double)
4 declare double @llvm.exp2.f64(double)
5 declare double @llvm.log.f64(double)
6 declare double @llvm.log10.f64(double)
7 declare double @llvm.log2.f64(double)
8 declare double @llvm.pow.f64(double, double)
9 declare double @llvm.powi.f64(double, i32)
10 declare double @llvm.sin.f64(double)
11 declare double @llvm.sqrt.f64(double)
[all …]
/minix/external/bsd/llvm/dist/llvm/test/Transforms/InstSimplify/
fold-builtin-fma.ll
7 declare double @llvm.fma.f64(double, double, double)
11 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0.0)
19 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0)
27 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF8000000000000)
34 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF8000000000000)
42 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF0000000000000)
49 %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF0000000000000)
57 %1 = call double @llvm.fma.f64(double 0x7FF8000000000000, double 8.0, double 0.0)
65 %1 = call double @llvm.fma.f64(double 7.0, double 0x7FF8000000000000, double 0.0)
73 %1 = call double @llvm.fma.f64(double 0xFFF8000000000000, double 8.0, double 0.0)
[all …]
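
The fold-builtin-fma.ll tests above exercise InstSimplify's constant folding of @llvm.fma.f64. A minimal sketch of the pattern being tested (the function name is invented): every operand is a constant, so the optimizer can evaluate 7.0 * 8.0 + 5.0 at compile time and replace the call with 61.0; the NaN (0x7FF8...) and infinity (0x7FF0...) lines probe the edge cases of that same fold.

    declare double @llvm.fma.f64(double, double, double)

    define double @fold_fma() {
      ; all-constant fma: simplifies to the constant 61.0
      %r = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0)
      ret double %r
    }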
/minix/external/bsd/llvm/dist/llvm/lib/Target/ARM/
ARMCallingConv.td
26 // Handle all vector types as either f64 or v2f64.
37 CCIfType<[f64], CCAssignToStack<8, 4>>,
45 // Handle all vector types as either f64 or v2f64.
59 // Handle all vector types as either f64 or v2f64.
78 // Handle all vector types as either f64 or v2f64.
94 // Handle all vector types as either f64 or v2f64.
99 CCIfType<[f64], CCAssignToReg<[D8, D9, D10, D11]>>,
117 // i64/f64 is passed in even pairs of GPRs
145 // Handle all vector types as either f64 or v2f64.
155 // Handle all vector types as either f64 or v2f64.
[all …]
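
ARMCallingConv.td encodes, in TableGen, where f64 and vector values live across calls: CCIfType<[f64], CCAssignToStack<8, 4>> spills a double to an 8-byte stack slot with 4-byte alignment, CCAssignToReg<[D8, D9, D10, D11]> binds a double directly to a D register, and the comment on file line 117 notes that soft-float rules pass i64/f64 in even-numbered GPR pairs. As an IR-level illustration (not from the file), under the hard-float AAPCS-VFP convention such rules place both the argument and the result of a one-double function in d0:

    define double @scale(double %x) {
      ; AAPCS-VFP: %x arrives in d0, the result returns in d0
      %r = fmul double %x, 2.0
      ret double %r
    }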
/minix/external/bsd/llvm/dist/llvm/test/CodeGen/ARM/
2011-11-09-IllegalVectorFPIntConvert.ll
5 ; CHECK: vcvt.s32.f64
6 ; CHECK: vcvt.s32.f64
14 ; CHECK: vcvt.u32.f64
15 ; CHECK: vcvt.u32.f64
23 ; CHECK: vcvt.f64.s32
24 ; CHECK: vcvt.f64.s32
32 ; CHECK: vcvt.f64.u32
33 ; CHECK: vcvt.f64.u32
fnmscs.ll
64 ; VFP2: vnmla.f64
67 ; NEON: vnmla.f64
70 ; A8U: vnmul.f64 d
71 ; A8U: vsub.f64 d
74 ; A8: vnmul.f64 d
75 ; A8: vsub.f64 d
85 ; VFP2: vnmla.f64
88 ; NEON: vnmla.f64
92 ; A8U: vsub.f64 d
95 ; A8: vnmul.f64 d
[all …]
neon_fpconv.ll
5 ; CHECK: vcvt.f32.f64 [[S0:s[0-9]+]], [[D0:d[0-9]+]]
6 ; CHECK: vcvt.f32.f64 [[S1:s[0-9]+]], [[D1:d[0-9]+]]
12 ; CHECK: vcvt.f64.f32 [[D0:d[0-9]+]], [[S0:s[0-9]+]]
13 ; CHECK: vcvt.f64.f32 [[D1:d[0-9]+]], [[S1:s[0-9]+]]
24 ; CHECK-NEXT: vcvt.f64.s32
25 ; CHECK-NEXT: vcvt.f64.s32
36 ; CHECK-NEXT: vcvt.f64.u32
37 ; CHECK-NEXT: vcvt.f64.u32
/minix/external/bsd/llvm/dist/llvm/test/CodeGen/SystemZ/
fp-round-01.ll
15 ; Test rint for f64.
16 declare double @llvm.rint.f64(double %f)
21 %res = call double @llvm.rint.f64(double %f)
47 ; Test nearbyint for f64.
70 ; Test floor for f64.
71 declare double @llvm.floor.f64(double %f)
93 ; Test ceil for f64.
94 declare double @llvm.ceil.f64(double %f)
116 ; Test trunc for f64.
117 declare double @llvm.trunc.f64(double %f)
[all …]
fp-sqrt-02.ll
5 declare double @llvm.sqrt.f64(double %f)
13 %res = call double @llvm.sqrt.f64(double %val)
23 %res = call double @llvm.sqrt.f64(double %val)
34 %res = call double @llvm.sqrt.f64(double %val)
47 %res = call double @llvm.sqrt.f64(double %val)
59 %res = call double @llvm.sqrt.f64(double %val)
72 %res = call double @llvm.sqrt.f64(double %val)
100 %sqrt0 = call double @llvm.sqrt.f64(double %val0)
101 %sqrt1 = call double @llvm.sqrt.f64(double %val1)
102 %sqrt2 = call double @llvm.sqrt.f64(double %val2)
[all …]
/minix/external/bsd/llvm/dist/llvm/test/Transforms/BBVectorize/X86/
simple-int.ll
4 declare double @llvm.fma.f64(double, double, double)
5 declare double @llvm.fmuladd.f64(double, double, double)
6 declare double @llvm.cos.f64(double)
7 declare double @llvm.powi.f64(double, i32)
13 %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
41 %Y1 = call double @llvm.cos.f64(double %X1)
42 %Y2 = call double @llvm.cos.f64(double %X2)
55 %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
56 %Y2 = call double @llvm.powi.f64(double %X2, i32 %P)
70 %Y1 = call double @llvm.powi.f64(double %X1, i32 %P)
[all …]
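
The simple-int.ll tests above feed BBVectorize pairs of independent intrinsic calls (%Y1/%Y2) to see whether it fuses them into one vector call. A hedged sketch of that input shape (names invented): the two scalar cos calls below have no dependence on each other, so the pass may combine them into a single call on a <2 x double>.

    declare double @llvm.cos.f64(double)

    define <2 x double> @cos_pair(double %a, double %b) {
      ; two independent scalar calls, candidates for pairing
      %y1 = call double @llvm.cos.f64(double %a)
      %y2 = call double @llvm.cos.f64(double %b)
      %v0 = insertelement <2 x double> undef, double %y1, i32 0
      %v1 = insertelement <2 x double> %v0, double %y2, i32 1
      ret <2 x double> %v1
    }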
/minix/external/bsd/llvm/dist/llvm/lib/Target/PowerPC/
PPCInstrVSX.td
113 [(set f64:$XT, (fadd f64:$XA, f64:$XB))]>;
117 [(set f64:$XT, (fmul f64:$XA, f64:$XB))]>;
144 [(set f64:$XT, (fsub f64:$XA, f64:$XB))]>;
161 [(set f64:$XT, (fma f64:$XA, f64:$XB, f64:$XTi))]>,
177 [(set f64:$XT, (fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
193 [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, f64:$XTi)))]>,
209 [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
352 [(set f64:$XT, (fdiv f64:$XA, f64:$XB))]>;
467 [(set f64:$XT, (fcopysign f64:$XB, f64:$XA))]>;
819 def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B),
[all …]
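
The PPCInstrVSX.td patterns above tie generic DAG nodes to VSX instructions: [(set f64:$XT, (fadd f64:$XA, f64:$XB))] maps a double fadd to one instruction, and the (fma ...) variants at file lines 161–209 cover the four sign combinations of fused multiply-add. A small IR sketch of what the plain fma pattern matches (function name invented):

    declare double @llvm.fma.f64(double, double, double)

    define double @madd(double %a, double %b, double %c) {
      ; matches (fma f64:$XA, f64:$XB, f64:$XTi), selecting a single
      ; VSX fused multiply-add instead of separate fmul + fadd
      %r = call double @llvm.fma.f64(double %a, double %b, double %c)
      ret double %r
    }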
/minix/external/bsd/llvm/dist/llvm/test/CodeGen/AArch64/
arm64-cvt.ll
26 %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
34 %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
66 %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A)
74 %tmp3 = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %A)
106 %tmp3 = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A)
114 %tmp3 = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %A)
146 %tmp3 = call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double %A)
154 %tmp3 = call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %A)
186 %tmp3 = call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double %A)
194 %tmp3 = call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %A)
[all …]
