// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN:         -triple x86_64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN:         -triple x86_64--linux -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM-ARM64
// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM
// RUN: %clang_cc1 -ffreestanding -fms-extensions \
// RUN:         -triple armv7--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM

// LP64 targets use 'long' as 'int' for MS intrinsics (-fms-extensions):
// MS 'LONG' is always 32-bit, so it maps to 'int' when 'long' is 64-bit.
#ifdef __LP64__
#define LONG int
#else
#define LONG long
#endif

// _BitScanForward: per the CHECKs, returns 0 for a zero mask; otherwise
// stores cttz(Mask) through Index and returns 1.
unsigned char test_BitScanForward(unsigned LONG *Index, unsigned LONG Mask) {
  return _BitScanForward(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK:   [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
// CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK:   [[END_LABEL]]:
// CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK:   ret i8 [[RESULT]]
// CHECK:   [[ISNOTZERO_LABEL]]:
// CHECK:   [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
// CHECK:   store i32 [[INDEX]], i32* %Index, align 4
// CHECK:   br label %[[END_LABEL]]

// _BitScanReverse: like _BitScanForward, but the index of the highest set
// bit is derived from ctlz via 'xor 31'.
unsigned char test_BitScanReverse(unsigned LONG *Index, unsigned LONG Mask) {
  return _BitScanReverse(Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK:   [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
// CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK:   [[END_LABEL]]:
// CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK:   ret i8 [[RESULT]]
// CHECK:   [[ISNOTZERO_LABEL]]:
// CHECK:   [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
// CHECK:   [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
// CHECK:   store i32 [[INDEX]], i32* %Index, align 4
// CHECK:   br label %[[END_LABEL]]

53 #if defined(__x86_64__)
test_BitScanForward64(unsigned LONG * Index,unsigned __int64 Mask)54 unsigned char test_BitScanForward64(unsigned LONG *Index, unsigned __int64 Mask) {
55   return _BitScanForward64(Index, Mask);
56 }
57 // CHECK: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
58 // CHECK:   [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
59 // CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
60 // CHECK:   [[END_LABEL]]:
61 // CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
62 // CHECK:   ret i8 [[RESULT]]
63 // CHECK:   [[ISNOTZERO_LABEL]]:
64 // CHECK:   [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
65 // CHECK:   [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
66 // CHECK:   store i32 [[TRUNC_INDEX]], i32* %Index, align 4
67 // CHECK:   br label %[[END_LABEL]]
68 
test_BitScanReverse64(unsigned LONG * Index,unsigned __int64 Mask)69 unsigned char test_BitScanReverse64(unsigned LONG *Index, unsigned __int64 Mask) {
70   return _BitScanReverse64(Index, Mask);
71 }
72 // CHECK: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
73 // CHECK:   [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
74 // CHECK:   br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
75 // CHECK:   [[END_LABEL]]:
76 // CHECK:   [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
77 // CHECK:   ret i8 [[RESULT]]
78 // CHECK:   [[ISNOTZERO_LABEL]]:
79 // CHECK:   [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
80 // CHECK:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
81 // CHECK:   [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
82 // CHECK:   store i32 [[INDEX]], i32* %Index, align 4
83 // CHECK:   br label %[[END_LABEL]]
84 #endif
85 
// _InterlockedExchange lowers to a seq_cst atomicrmw xchg.
LONG test_InterlockedExchange(LONG volatile *value, LONG mask) {
  return _InterlockedExchange(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedExchangeAdd lowers to a seq_cst atomicrmw add.
LONG test_InterlockedExchangeAdd(LONG volatile *value, LONG mask) {
  return _InterlockedExchangeAdd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedExchangeSub lowers to a seq_cst atomicrmw sub.
LONG test_InterlockedExchangeSub(LONG volatile *value, LONG mask) {
  return _InterlockedExchangeSub(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedOr lowers to a seq_cst atomicrmw or.
LONG test_InterlockedOr(LONG volatile *value, LONG mask) {
  return _InterlockedOr(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedXor lowers to a seq_cst atomicrmw xor.
LONG test_InterlockedXor(LONG volatile *value, LONG mask) {
  return _InterlockedXor(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedAnd lowers to a seq_cst atomicrmw and.
LONG test_InterlockedAnd(LONG volatile *value, LONG mask) {
  return _InterlockedAnd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst
// CHECK:   ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

// _InterlockedCompareExchange lowers to a seq_cst/seq_cst volatile cmpxchg;
// the returned value is the loaded (old) element of the result pair.
LONG test_InterlockedCompareExchange(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
  return _InterlockedCompareExchange(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK: ret i32 [[RESULT]]
// CHECK: }

// _InterlockedIncrement: atomicrmw add returns the old value, so the new
// value is reconstructed with an extra 'add 1'.
LONG test_InterlockedIncrement(LONG volatile *Addend) {
  return _InterlockedIncrement(Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 seq_cst
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

// _InterlockedDecrement: atomicrmw sub returns the old value, so the new
// value is reconstructed with 'add -1'.
LONG test_InterlockedDecrement(LONG volatile *Addend) {
  return _InterlockedDecrement(Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

// __lzcnt16 lowers to llvm.ctlz.i16 with is_zero_poison = false.
unsigned short test__lzcnt16(unsigned short x) {
  return __lzcnt16(x);
}
// CHECK: i16 @test__lzcnt16
// CHECK:  [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
// CHECK: ret i16 [[RESULT]]
// CHECK: }

// __lzcnt lowers to llvm.ctlz.i32 with is_zero_poison = false.
unsigned int test__lzcnt(unsigned int x) {
  return __lzcnt(x);
}
// CHECK: i32 @test__lzcnt
// CHECK:  [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
// CHECK: ret i32 [[RESULT]]
// CHECK: }

test__lzcnt64(unsigned __int64 x)177 unsigned __int64 test__lzcnt64(unsigned __int64 x) {
178   return __lzcnt64(x);
179 }
180 // CHECK: i64 @test__lzcnt64
181 // CHECK:  [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
182 // CHECK: ret i64 [[RESULT]]
183 // CHECK: }
184 
// __popcnt16 lowers to llvm.ctpop.i16.
unsigned short test__popcnt16(unsigned short x) {
  return __popcnt16(x);
}
// CHECK: i16 @test__popcnt16
// CHECK:  [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctpop.i16(i16 %x)
// CHECK: ret i16 [[RESULT]]
// CHECK: }

// __popcnt lowers to llvm.ctpop.i32.
unsigned int test__popcnt(unsigned int x) {
  return __popcnt(x);
}
// CHECK: i32 @test__popcnt
// CHECK:  [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctpop.i32(i32 %x)
// CHECK: ret i32 [[RESULT]]
// CHECK: }

test__popcnt64(unsigned __int64 x)201 unsigned __int64 test__popcnt64(unsigned __int64 x) {
202   return __popcnt64(x);
203 }
204 // CHECK: i64 @test__popcnt64
205 // CHECK:  [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctpop.i64(i64 %x)
206 // CHECK: ret i64 [[RESULT]]
207 // CHECK: }
208 
#if defined(__aarch64__)
// _InterlockedAdd (AArch64-only) returns the NEW value: old + Value.
LONG test_InterlockedAdd(LONG volatile *Addend, LONG Value) {
  return _InterlockedAdd(Addend, Value);
}

// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAdd(i32*{{[a-z_ ]*}}%Addend, i32 %Value) {{.*}} {
// CHECK-ARM-ARM64: %[[OLDVAL:[0-9]+]] = atomicrmw add i32* %Addend, i32 %Value seq_cst
// CHECK-ARM-ARM64: %[[NEWVAL:[0-9]+]] = add i32 %[[OLDVAL:[0-9]+]], %Value
// CHECK-ARM-ARM64: ret i32 %[[NEWVAL:[0-9]+]]
#endif

220 #if defined(__arm__) || defined(__aarch64__)
test_InterlockedExchangeAdd_acq(LONG volatile * value,LONG mask)221 LONG test_InterlockedExchangeAdd_acq(LONG volatile *value, LONG mask) {
222   return _InterlockedExchangeAdd_acq(value, mask);
223 }
224 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
225 // CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire
226 // CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
227 // CHECK-ARM: }
test_InterlockedExchangeAdd_rel(LONG volatile * value,LONG mask)228 LONG test_InterlockedExchangeAdd_rel(LONG volatile *value, LONG mask) {
229   return _InterlockedExchangeAdd_rel(value, mask);
230 }
231 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
232 // CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release
233 // CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
234 // CHECK-ARM: }
test_InterlockedExchangeAdd_nf(LONG volatile * value,LONG mask)235 LONG test_InterlockedExchangeAdd_nf(LONG volatile *value, LONG mask) {
236   return _InterlockedExchangeAdd_nf(value, mask);
237 }
238 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
239 // CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic
240 // CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
241 // CHECK-ARM: }
242 
// _InterlockedExchange _acq/_rel/_nf: atomicrmw xchg with the matching
// memory ordering.
LONG test_InterlockedExchange_acq(LONG volatile *value, LONG mask) {
  return _InterlockedExchange_acq(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }
LONG test_InterlockedExchange_rel(LONG volatile *value, LONG mask) {
  return _InterlockedExchange_rel(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }
LONG test_InterlockedExchange_nf(LONG volatile *value, LONG mask) {
  return _InterlockedExchange_nf(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

// _InterlockedCompareExchange _acq/_rel/_nf: volatile cmpxchg with the
// matching success/failure orderings (note _rel uses release/monotonic).
LONG test_InterlockedCompareExchange_acq(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
  return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire
// CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

LONG test_InterlockedCompareExchange_rel(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
  return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic
// CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

LONG test_InterlockedCompareExchange_nf(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
  return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic
// CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

// _InterlockedOr _acq/_rel/_nf: atomicrmw or with the matching ordering.
LONG test_InterlockedOr_acq(LONG volatile *value, LONG mask) {
  return _InterlockedOr_acq(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedOr_rel(LONG volatile *value, LONG mask) {
  return _InterlockedOr_rel(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedOr_nf(LONG volatile *value, LONG mask) {
  return _InterlockedOr_nf(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

// _InterlockedXor _acq/_rel/_nf: atomicrmw xor with the matching ordering.
LONG test_InterlockedXor_acq(LONG volatile *value, LONG mask) {
  return _InterlockedXor_acq(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedXor_rel(LONG volatile *value, LONG mask) {
  return _InterlockedXor_rel(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedXor_nf(LONG volatile *value, LONG mask) {
  return _InterlockedXor_nf(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

// _InterlockedAnd _acq/_rel/_nf: atomicrmw and with the matching ordering.
LONG test_InterlockedAnd_acq(LONG volatile *value, LONG mask) {
  return _InterlockedAnd_acq(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedAnd_rel(LONG volatile *value, LONG mask) {
  return _InterlockedAnd_rel(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }

LONG test_InterlockedAnd_nf(LONG volatile *value, LONG mask) {
  return _InterlockedAnd_nf(value, mask);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM:   [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic
// CHECK-ARM:   ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM: }


// _InterlockedIncrement _acq/_rel/_nf: atomicrmw add returns the old value,
// so the new value is reconstructed with 'add 1'.
LONG test_InterlockedIncrement_acq(LONG volatile *Addend) {
  return _InterlockedIncrement_acq(Addend);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire
// CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

LONG test_InterlockedIncrement_rel(LONG volatile *Addend) {
  return _InterlockedIncrement_rel(Addend);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release
// CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

LONG test_InterlockedIncrement_nf(LONG volatile *Addend) {
  return _InterlockedIncrement_nf(Addend);
}
// CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic
// CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM: ret i32 [[RESULT]]
// CHECK-ARM: }

test_InterlockedDecrement_acq(LONG volatile * Addend)392 LONG test_InterlockedDecrement_acq(LONG volatile *Addend) {
393   return _InterlockedDecrement_acq(Addend);
394 }
395 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
396 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire
397 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
398 // CHECK-ARM: ret i32 [[RESULT]]
399 // CHECK-ARM: }
400 
test_InterlockedDecrement_rel(LONG volatile * Addend)401 LONG test_InterlockedDecrement_rel(LONG volatile *Addend) {
402   return _InterlockedDecrement_rel(Addend);
403 }
404 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
405 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release
406 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
407 // CHECK-ARM: ret i32 [[RESULT]]
408 // CHECK-ARM: }
409 
test_InterlockedDecrement_nf(LONG volatile * Addend)410 LONG test_InterlockedDecrement_nf(LONG volatile *Addend) {
411   return _InterlockedDecrement_nf(Addend);
412 }
413 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
414 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic
415 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
416 // CHECK-ARM: ret i32 [[RESULT]]
417 // CHECK-ARM: }
418 #endif
419