; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr8 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr7 -mattr=+crypto < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr9 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-LE
; FIXME: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
; FIXME: The original intent was to add a check-next for the blr after every
; check. However, this currently fails since we don't eliminate stores to the
; unused locals, and these stores are sometimes scheduled after the crypto
; instruction.

; Function Attrs: nounwind
define <16 x i8> @test_vpmsumb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  %0 = load <16 x i8>,  <16 x i8>* %a, align 16
  %1 = load <16 x i8>,  <16 x i8>* %b, align 16
  %2 = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8> %0, <16 x i8> %1)
  ret <16 x i8> %2
; CHECK: vpmsumb 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpmsumh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  %0 = load <8 x i16>,  <8 x i16>* %a, align 16
  %1 = load <8 x i16>,  <8 x i16>* %b, align 16
  %2 = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16> %0, <8 x i16> %1)
  ret <8 x i16> %2
; CHECK: vpmsumh 2,
}

; Function Attrs: nounwind readnone
declare <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh(<8 x i16>, <8 x i16>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vpmsumw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = load <4 x i32>,  <4 x i32>* %b, align 16
  %2 = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32> %0, <4 x i32> %1)
  ret <4 x i32> %2
; CHECK: vpmsumw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw(<4 x i32>, <4 x i32>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vpmsumd() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vpmsumd 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vsbox() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64> %0)
  ret <2 x i64> %1
; CHECK: vsbox 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vsbox(<2 x i64>) #1

; Function Attrs: nounwind
define <16 x i8> @test_vpermxorb() #0 {
entry:
  %a = alloca <16 x i8>, align 16
  %b = alloca <16 x i8>, align 16
  %c = alloca <16 x i8>, align 16
  store <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8>* %a, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %b, align 16
  store <16 x i8> <i8 113, i8 114, i8 115, i8 116, i8 117, i8 118, i8 119, i8 120, i8 121, i8 122, i8 123, i8 124, i8 125, i8 126, i8 127, i8 112>, <16 x i8>* %c, align 16
  %0 = load <16 x i8>,  <16 x i8>* %a, align 16
  %1 = load <16 x i8>,  <16 x i8>* %b, align 16
  %2 = load <16 x i8>,  <16 x i8>* %c, align 16
  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
  ret <16 x i8> %3
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8>, <16 x i8>, <16 x i8>) #1

; Function Attrs: nounwind
define <8 x i16> @test_vpermxorh() #0 {
entry:
  %a = alloca <8 x i16>, align 16
  %b = alloca <8 x i16>, align 16
  %c = alloca <8 x i16>, align 16
  store <8 x i16> <i16 258, i16 772, i16 1286, i16 1800, i16 2314, i16 2828, i16 3342, i16 3856>, <8 x i16>* %a, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %b, align 16
  store <8 x i16> <i16 29042, i16 29556, i16 30070, i16 30584, i16 31098, i16 31612, i16 32126, i16 32624>, <8 x i16>* %c, align 16
  %0 = load <8 x i16>,  <8 x i16>* %a, align 16
  %1 = bitcast <8 x i16> %0 to <16 x i8>
  %2 = load <8 x i16>,  <8 x i16>* %b, align 16
  %3 = bitcast <8 x i16> %2 to <16 x i8>
  %4 = load <8 x i16>,  <8 x i16>* %c, align 16
  %5 = bitcast <8 x i16> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  ret <8 x i16> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <4 x i32> @test_vpermxorw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  %b = alloca <4 x i32>, align 16
  %c = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %b, align 16
  store <4 x i32> <i32 1903326068, i32 1970698104, i32 2038070140, i32 2105442160>, <4 x i32>* %c, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = bitcast <4 x i32> %0 to <16 x i8>
  %2 = load <4 x i32>,  <4 x i32>* %b, align 16
  %3 = bitcast <4 x i32> %2 to <16 x i8>
  %4 = load <4 x i32>,  <4 x i32>* %c, align 16
  %5 = bitcast <4 x i32> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  ret <4 x i32> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vpermxord() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  %c = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %c, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = bitcast <2 x i64> %0 to <16 x i8>
  %2 = load <2 x i64>,  <2 x i64>* %b, align 16
  %3 = bitcast <2 x i64> %2 to <16 x i8>
  %4 = load <2 x i64>,  <2 x i64>* %c, align 16
  %5 = bitcast <2 x i64> %4 to <16 x i8>
  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  ret <2 x i64> %7
; CHECK-LE: xxlnor
; CHECK: vpermxor 2,
}

; Function Attrs: nounwind
define <2 x i64> @test_vcipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vcipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vcipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipher() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipher 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipher(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <2 x i64> @test_vncipherlast() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  %b = alloca <2 x i64>, align 16
  store <2 x i64> <i64 72623859790382856, i64 651345242494996240>, <2 x i64>* %a, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %b, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = load <2 x i64>,  <2 x i64>* %b, align 16
  %2 = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64> %0, <2 x i64> %1)
  ret <2 x i64> %2
; CHECK: vncipherlast 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast(<2 x i64>, <2 x i64>) #1

; Function Attrs: nounwind
define <4 x i32> @test_vshasigmaw() #0 {
entry:
  %a = alloca <4 x i32>, align 16
  store <4 x i32> <i32 16909060, i32 84281096, i32 151653132, i32 219025168>, <4 x i32>* %a, align 16
  %0 = load <4 x i32>,  <4 x i32>* %a, align 16
  %1 = call <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32> %0, i32 1, i32 15)
  ret <4 x i32> %1
; CHECK: vshasigmaw 2,
}

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.altivec.crypto.vshasigmaw(<4 x i32>, i32, i32) #1

; Function Attrs: nounwind
define <2 x i64> @test_vshasigmad() #0 {
entry:
  %a = alloca <2 x i64>, align 16
  store <2 x i64> <i64 8174723217654970232, i64 8753444600359583600>, <2 x i64>* %a, align 16
  %0 = load <2 x i64>,  <2 x i64>* %a, align 16
  %1 = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %0, i32 1, i32 15)
  ret <2 x i64> %1
; CHECK: vshasigmad 2,
}

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64>, i32, i32) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }

!llvm.ident = !{!0}

!0 = !{!"clang version 3.7.0 (trunk 230949) (llvm/trunk 230946)"}