; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
; RUN:   grep -i ST | not grep "fadd\|fsub\|fdiv\|fmul"

; Test that the load from the memory location is folded into the floating-point operation.
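; With the load folded, each arithmetic instruction should take its memory
; operand directly, roughly "fadd qword ptr [eax]" (register and addressing
; shown only as an illustration), so the grep above should find no
; fadd/fsub/fdiv/fmul that still operates on ST register operands, e.g.
; "fadd st(0), st(1)".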

define double @test_add(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fadd double %X, %Y		; <double> [#uses=1]
	ret double %R
}

define double @test_mul(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fmul double %X, %Y		; <double> [#uses=1]
	ret double %R
}

define double @test_sub(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fsub double %X, %Y		; <double> [#uses=1]
	ret double %R
}

define double @test_subr(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fsub double %Y, %X		; <double> [#uses=1]
	ret double %R
}

define double @test_div(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fdiv double %X, %Y		; <double> [#uses=1]
	ret double %R
}

define double @test_divr(double %X, double* %P) {
	%Y = load double* %P		; <double> [#uses=1]
	%R = fdiv double %Y, %X		; <double> [#uses=1]
	ret double %R
}