; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
; This is already checked for in Atomics-64.ll
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -ppc-asm-full-reg-names | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64

; FIXME: we don't currently check for the operations themselves with CHECK-NEXT,
;   because they are implemented in a very messy way with lwarx/stwcx.
;   It should be fixed soon in another patch.

; We first check loads, for all sizes from i8 to i64.
; We also vary orderings to check for barriers.
define i8 @load_i8_unordered(i8* %mem) {
; CHECK-LABEL: load_i8_unordered
; CHECK: lbz
; CHECK-NOT: sync
; An unordered atomic load needs no barrier; a plain byte load suffices.
  %v = load atomic i8, i8* %mem unordered, align 1
  ret i8 %v
}
define i16 @load_i16_monotonic(i16* %mem) {
; CHECK-LABEL: load_i16_monotonic
; CHECK: lhz
; CHECK-NOT: sync
; A monotonic atomic load likewise requires no barrier on PPC.
  %v = load atomic i16, i16* %mem monotonic, align 2
  ret i16 %v
}
define i32 @load_i32_acquire(i32* %mem) {
; CHECK-LABEL: load_i32_acquire
; CHECK: lwz [[VAL:r[0-9]+]]
  %val = load atomic i32, i32* %mem acquire, align 4
; NOTE(review): the prefixes "CHECK-PPC32"/"CHECK-PPC64" used here before
; were never registered with FileCheck (the RUN lines only register CHECK,
; PPC32 and PPC64), so the barrier checks below were dead.  They now use the
; registered prefixes so the acquire barrier is actually verified.
; PPC32: lwsync
; PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; PPC64: bne- [[CR]], .+4
; PPC64: isync
  ret i32 %val
}
define i64 @load_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: load_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ld [[VAL:r[0-9]+]]
  %val = load atomic i64, i64* %mem seq_cst, align 8
; NOTE(review): the "CHECK-PPC64" prefix used here before was never
; registered with FileCheck, so the trailing-barrier checks were dead;
; "PPC64" is the registered prefix.  On PPC32 the 8-byte access becomes a
; __sync_* libcall (checked above), which provides its own ordering, so no
; trailing barrier is checked for that run.
; PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; PPC64: bne- [[CR]], .+4
; PPC64: isync
  ret i64 %val
}

; Stores
define void @store_i8_unordered(i8* %mem) {
; CHECK-LABEL: store_i8_unordered
; CHECK-NOT: sync
; CHECK: stb
; No barrier is required for an unordered atomic store.
  store atomic i8 42, i8* %mem unordered, align 1
  ret void
}
define void @store_i16_monotonic(i16* %mem) {
; CHECK-LABEL: store_i16_monotonic
; CHECK-NOT: sync
; CHECK: sth
; A monotonic atomic store likewise needs no barrier.
  store atomic i16 42, i16* %mem monotonic, align 2
  ret void
}
define void @store_i32_release(i32* %mem) {
; CHECK-LABEL: store_i32_release
; CHECK: lwsync
; CHECK: stw
; A release store must be preceded by a lightweight sync.
  store atomic i32 42, i32* %mem release, align 4
  ret void
}
define void @store_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: store_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: std
; A seq_cst store gets a full sync; on PPC32 the 8-byte access is expanded
; to a __sync_* libcall instead of an inline store.
  store atomic i64 42, i64* %mem seq_cst, align 8
  ret void
}

; Atomic CmpXchg
define i8 @cas_strong_i8_sc_sc(i8* %mem) {
; CHECK-LABEL: cas_strong_i8_sc_sc
; CHECK: sync
  %pair = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
; CHECK: lwsync
  %old = extractvalue { i8, i1 } %pair, 0
  ret i8 %old
}
define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
; CHECK-LABEL: cas_weak_i16_acquire_acquire
; CHECK-NOT: sync
  %pair = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
; CHECK: lwsync
  %old = extractvalue { i16, i1 } %pair, 0
  ret i16 %old
}
define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire
; CHECK: lwsync
  %pair = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
; CHECK: lwsync
  %old = extractvalue { i32, i1 } %pair, 0
  ret i32 %old
}
define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
; CHECK-LABEL: cas_weak_i64_release_monotonic
; CHECK: lwsync
  %pair = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
; NOTE(review): the old pattern "[sync ]" was matched literally by FileCheck
; (only {{...}} is regex), so it could never match and the NOT check was
; vacuous.  Match a whitespace-preceded "sync" so a trailing full barrier is
; rejected without tripping on "lwsync" or "__sync_" libcall names.
; CHECK-NOT: {{[[:space:]]sync}}
  %old = extractvalue { i64, i1 } %pair, 0
  ret i64 %old
}

; AtomicRMW
define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
; CHECK-LABEL: add_i8_monotonic
; CHECK-NOT: sync
; Monotonic RMW operations need no surrounding barriers.
  %res = atomicrmw add i8* %mem, i8 %operand monotonic
  ret i8 %res
}
define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
; CHECK-LABEL: xor_i16_seq_cst
; CHECK: sync
  %res = atomicrmw xor i16* %mem, i16 %operand seq_cst
; CHECK: lwsync
  ret i16 %res
}
define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel
; CHECK: lwsync
  %res = atomicrmw xchg i32* %mem, i32 %operand acq_rel
; CHECK: lwsync
  ret i32 %res
}
define i64 @and_i64_release(i64* %mem, i64 %operand) {
; CHECK-LABEL: and_i64_release
; CHECK: lwsync
  %res = atomicrmw and i64* %mem, i64 %operand release
; NOTE(review): "[sync ]" is a literal pattern to FileCheck (regex requires
; {{...}}), so this NOT check previously matched nothing and verified
; nothing.  Check for a whitespace-preceded "sync" instead, which still
; permits "lwsync" and "__sync_" libcall names.
; CHECK-NOT: {{[[:space:]]sync}}
  ret i64 %res
}