1; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
2; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck %s
3; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=2 -S | FileCheck %s
4
5target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
6target triple = "x86_64-unknown-linux-gnu"
7
8; atomicrmw xchg: store clean shadow, return clean shadow
9
define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK-LABEL: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
21
22
23; atomicrmw max: exactly the same as above
24
define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK-LABEL: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
36
37
38; cmpxchg: the same as above, but also check %a shadow
39
define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK-LABEL: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
55
56
57; relaxed cmpxchg: bump up to "release monotonic"
58
define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK-LABEL: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
74
75
76; atomic load: preserve alignment, load shadow value after app value
77
define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoad
; CHECK: load atomic i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
89
90
91; atomic load: preserve alignment, load shadow value after app value
92
define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p acquire, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadAcquire
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
104
105
106; atomic load monotonic: bump up to load acquire
107
define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadMonotonic
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
119
120
121; atomic load unordered: bump up to load acquire
122
define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p unordered, align 16
  ret i32 %0
}

; CHECK-LABEL: @AtomicLoadUnordered
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
134
135
136; atomic store: preserve alignment, store clean shadow value before app value
137
define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK-LABEL: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
149
150
151; atomic store: preserve alignment, store clean shadow value before app value
152
define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
164
165
166; atomic store monotonic: bumped up to store release
167
define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
179
180
181; atomic store unordered: bumped up to store release
182
define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK-LABEL: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
194