; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; atomicrmw xchg: store clean shadow, return clean shadow

define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomicrmw max: exactly the same as above

define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32


; cmpxchg: the same as above, but also check %a shadow

define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %0 = cmpxchg i32* %p, i32 %a, i32 %b seq_cst
  ret i32 %0
}

; CHECK: @Cmpxchg
; CHECK: store i32 0,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32


; relaxed cmpxchg: bump up to "release"

define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %0 = cmpxchg i32* %p, i32 %a, i32 %b monotonic
  ret i32 %0
}

; CHECK: @CmpxchgMonotonic
; CHECK: store i32 0,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK: @AtomicLoad
; CHECK: load atomic i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p acquire, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadAcquire
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load monotonic: bump up to load acquire

define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadMonotonic
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic load unordered: bump up to load acquire

define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p unordered, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadUnordered
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32


; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void


; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void


; atomic store monotonic: bumped up to store release

define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void


; atomic store unordered: bumped up to store release

define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
