// RUN: %clang_cc1 -triple x86_64 -emit-llvm -o - %s | FileCheck %s

// Check that we don't generate unnecessary reloads.
//
// CHECK-LABEL: define void @f0()
// CHECK: [[x_0:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[y_0:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 1, i32* [[x_0]]
// CHECK-NEXT: store i32 1, i32* [[x_0]]
// CHECK-NEXT: store i32 1, i32* [[y_0]]
// CHECK: }
// Non-volatile case: the value of the inner assignment (x = 1) should be
// forwarded to y without reloading x. The exact statement shapes below are
// load-bearing — the CHECK lines above pin the IR they must produce.
void f0() {
  int x, y;
  x = 1;
  y = (x = 1);
}
// This used to test that we generate reloads for volatile access,
// but that does not appear to be correct behavior for C.
//
// CHECK-LABEL: define void @f1()
// CHECK: [[x_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[y_1:%.*]] = alloca i32, align 4
// CHECK-NEXT: store volatile i32 1, i32* [[x_1]]
// CHECK-NEXT: store volatile i32 1, i32* [[x_1]]
// CHECK-NEXT: store volatile i32 1, i32* [[y_1]]
// CHECK: }
// Volatile case: per C semantics the result of (x = 1) need not be re-read
// from x, so only volatile stores (no volatile load of x) are expected.
// Do not restyle these statements — the CHECK lines above match this IR.
void f1() {
  volatile int x, y;
  x = 1;
  y = (x = 1);
}