1; Test the MSA intrinsics that are encoded with the VEC instruction format.
2
3; RUN: llc -march=mips -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
4; RUN: llc -march=mipsel -mattr=+msa,+fp64 < %s | FileCheck -check-prefix=ANYENDIAN %s
5
; Inputs and an all-zero output buffer for the and.v <16 x i8> test.
@llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_and_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; and.v via intrinsic. The bitcasts are no-ops for the byte case but keep the
; body in the same shape as the other element widths below.
define void @llvm_mips_and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; The intrinsic operates on <16 x i8>, so byte loads/stores are expected
; regardless of endianness.
; ANYENDIAN: llvm_mips_and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_b_test
;
; Inputs and an all-zero output buffer for the and.v <8 x i16> test.
@llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_and_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the element-size-agnostic
; and.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; The intrinsic's <16 x i8> type dominates, so ld.b/st.b are expected here.
; ANYENDIAN: llvm_mips_and_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_h_test
;
; Inputs and an all-zero output buffer for the and.v <4 x i32> test.
@llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_and_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the element-size-agnostic
; and.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; The intrinsic's <16 x i8> type dominates, so ld.b/st.b are expected here.
; ANYENDIAN: llvm_mips_and_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_w_test
;
; Inputs and an all-zero output buffer for the and.v <2 x i64> test.
@llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_and_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the element-size-agnostic
; and.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; The intrinsic's <16 x i8> type dominates, so ld.b/st.b are expected here.
; ANYENDIAN: llvm_mips_and_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_d_test
;
; Same operation expressed with the IR 'and' operator instead of the
; intrinsic; reuses the and_v_b globals. Codegen should still select and.v.
define void @and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = and <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size and_v_b_test
;
; IR 'and' on typed <8 x i16> vectors: no bitcasts, so the loads/stores keep
; the halfword element size (ld.h/st.h) while the operation is still and.v.
define void @and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = and <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: and_v_h_test:
; ANYENDIAN: ld.h
; ANYENDIAN: ld.h
; ANYENDIAN: and.v
; ANYENDIAN: st.h
; ANYENDIAN: .size and_v_h_test
;
130
; IR 'and' on typed <4 x i32> vectors: word-sized loads/stores plus and.v.
define void @and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = and <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: and_v_w_test:
; ANYENDIAN: ld.w
; ANYENDIAN: ld.w
; ANYENDIAN: and.v
; ANYENDIAN: st.w
; ANYENDIAN: .size and_v_w_test
;
147
; IR 'and' on typed <2 x i64> vectors: doubleword loads/stores plus and.v.
define void @and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = and <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: and_v_d_test:
; ANYENDIAN: ld.d
; ANYENDIAN: ld.d
; ANYENDIAN: and.v
; ANYENDIAN: st.d
; ANYENDIAN: .size and_v_d_test
;
; Three inputs and an all-zero output buffer for the bmnz.v <16 x i8> test.
@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; Three-operand bmnz.v intrinsic; the byte-case bitcasts are no-ops kept for
; symmetry with the wider-element variants.
define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
  ret void
}

; bmnz.v writes into its first operand's register (wd_in), so the checks pin
; the register roles with CHECK-DAG captures rather than just the mnemonics.
; ANYENDIAN: llvm_mips_bmnz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_b_test
193
; Three inputs and an all-zero output buffer for the bmnz.v <8 x i16> test.
@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the element-size-agnostic
; bmnz.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
  ret void
}

; bmnz.v writes into its first operand's register; the DAG checks pin that.
; ANYENDIAN: llvm_mips_bmnz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_h_test
223
; Three inputs and an all-zero output buffer for the bmnz.v <4 x i32> test.
@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the element-size-agnostic
; bmnz.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
  ret void
}

; bmnz.v writes into its first operand's register; the DAG checks pin that.
; ANYENDIAN: llvm_mips_bmnz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_w_test
253
; Three inputs and an all-zero output buffer for the bmnz.v <2 x i64> test.
@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the element-size-agnostic
; bmnz.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
  ret void
}

; bmnz.v writes into its first operand's register; the DAG checks pin that.
; ANYENDIAN: llvm_mips_bmnz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_d_test
283
; Three inputs and an all-zero output buffer for the bmz.v <16 x i8> test.
@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; bmz.v intrinsic; codegen is expected to canonicalize it to bmnz.v (see the
; comment before the bmnz.v check below).
define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_b_test
314
; Three inputs and an all-zero output buffer for the bmz.v <8 x i16> test.
@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the bmz.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v (see the check comment below).
define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_h_test
345
; Three inputs and an all-zero output buffer for the bmz.v <4 x i32> test.
@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the bmz.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v (see the check comment below).
define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_w_test
376
; Three inputs and an all-zero output buffer for the bmz.v <2 x i64> test.
@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the bmz.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v (see the check comment below).
define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_d_test
407
; Three inputs and an all-zero output buffer for the bsel.v <16 x i8> test.
@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; bsel.v intrinsic; codegen is expected to canonicalize it to bmnz.v with the
; operands rotated (see the comment before the bmnz.v check below).
define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2
  %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_b_test
438
; Three inputs and an all-zero output buffer for the bsel.v <8 x i16> test.
@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the bsel.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v with rotated operands.
define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2
  %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_h_test
469
; Three inputs and an all-zero output buffer for the bsel.v <4 x i32> test.
@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the bsel.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v with rotated operands.
define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2
  %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_w_test
500
; Three inputs and an all-zero output buffer for the bsel.v <2 x i64> test.
@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the bsel.v intrinsic, which
; codegen is expected to canonicalize to bmnz.v with rotated operands.
define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2
  %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_d_test
531
; Inputs and an all-zero output buffer for the nor.v <16 x i8> test.
@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_nor_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; nor.v via intrinsic; the byte-case bitcasts are no-ops kept for symmetry.
define void @llvm_mips_nor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_b_test
;
; Inputs and an all-zero output buffer for the nor.v <8 x i16> test.
@llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_nor_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the element-size-agnostic
; nor.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_nor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_h_test
;
; Inputs and an all-zero output buffer for the nor.v <4 x i32> test.
@llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_nor_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the element-size-agnostic
; nor.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_nor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_w_test
;
; Inputs and an all-zero output buffer for the nor.v <2 x i64> test.
@llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_nor_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the element-size-agnostic
; nor.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_nor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_d_test
;
; Inputs and an all-zero output buffer for the or.v <16 x i8> test.
@llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_or_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; or.v via intrinsic; the byte-case bitcasts are no-ops kept for symmetry.
define void @llvm_mips_or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_b_test
;
; Inputs and an all-zero output buffer for the or.v <8 x i16> test.
@llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_or_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; <8 x i16> operands are bitcast to <16 x i8> for the element-size-agnostic
; or.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_h_test
;
; Inputs and an all-zero output buffer for the or.v <4 x i32> test.
@llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_or_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; <4 x i32> operands are bitcast to <16 x i8> for the element-size-agnostic
; or.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_w_test
;
; Inputs and an all-zero output buffer for the or.v <2 x i64> test.
@llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_or_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; <2 x i64> operands are bitcast to <16 x i8> for the element-size-agnostic
; or.v intrinsic, and the result is bitcast back before the store.
define void @llvm_mips_or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_d_test
;
; Test that a generic IR 'or' on <16 x i8> also selects to or.v
; (no intrinsic needed). Element-typed loads/stores select as ld.b/st.b.
define void @or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = or <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}
724
725; ANYENDIAN: or_v_b_test:
726; ANYENDIAN: ld.b
727; ANYENDIAN: ld.b
728; ANYENDIAN: or.v
729; ANYENDIAN: st.b
730; ANYENDIAN: .size or_v_b_test
731;
; Test that a generic IR 'or' on <8 x i16> selects to or.v while the
; element-typed loads/stores select as ld.h/st.h (unlike the intrinsic
; tests above, no bitcasts to <16 x i8> are involved).
define void @or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = or <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}
740
741; ANYENDIAN: or_v_h_test:
742; ANYENDIAN: ld.h
743; ANYENDIAN: ld.h
744; ANYENDIAN: or.v
745; ANYENDIAN: st.h
746; ANYENDIAN: .size or_v_h_test
747;
748
; Test that a generic IR 'or' on <4 x i32> selects to or.v with
; element-typed ld.w/st.w for the memory accesses.
define void @or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = or <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}
757
758; ANYENDIAN: or_v_w_test:
759; ANYENDIAN: ld.w
760; ANYENDIAN: ld.w
761; ANYENDIAN: or.v
762; ANYENDIAN: st.w
763; ANYENDIAN: .size or_v_w_test
764;
765
; Test that a generic IR 'or' on <2 x i64> selects to or.v with
; element-typed ld.d/st.d for the memory accesses.
define void @or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = or <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}
774
775; ANYENDIAN: or_v_d_test:
776; ANYENDIAN: ld.d
777; ANYENDIAN: ld.d
778; ANYENDIAN: or.v
779; ANYENDIAN: st.d
780; ANYENDIAN: .size or_v_d_test
781;
; Fixture globals for the <16 x i8> xor.v intrinsic test below.
@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_xor_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

; Test xor.v via the intrinsic with <16 x i8> operands. The bitcasts are
; no-ops here (<16 x i8> -> <16 x i8>) but are kept so every element-size
; variant of this test follows the identical mechanical pattern.
define void @llvm_mips_xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}
797
798; ANYENDIAN: llvm_mips_xor_v_b_test:
799; ANYENDIAN: ld.b
800; ANYENDIAN: ld.b
801; ANYENDIAN: xor.v
802; ANYENDIAN: st.b
803; ANYENDIAN: .size llvm_mips_xor_v_b_test
804;
; Fixture globals for the <8 x i16> xor.v intrinsic test below.
@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_xor_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

; Test xor.v via the intrinsic with <8 x i16> operands. Bitcasts to
; <16 x i8> adapt the typed vectors to the untyped 128-bit intrinsic,
; so loads/stores are expected to select as ld.b/st.b.
define void @llvm_mips_xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}
820
821; ANYENDIAN: llvm_mips_xor_v_h_test:
822; ANYENDIAN: ld.b
823; ANYENDIAN: ld.b
824; ANYENDIAN: xor.v
825; ANYENDIAN: st.b
826; ANYENDIAN: .size llvm_mips_xor_v_h_test
827;
; Fixture globals for the <4 x i32> xor.v intrinsic test below.
@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_xor_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

; Test xor.v via the intrinsic with <4 x i32> operands. Bitcasts to
; <16 x i8> adapt the typed vectors to the untyped 128-bit intrinsic,
; so loads/stores are expected to select as ld.b/st.b.
define void @llvm_mips_xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}
843
844; ANYENDIAN: llvm_mips_xor_v_w_test:
845; ANYENDIAN: ld.b
846; ANYENDIAN: ld.b
847; ANYENDIAN: xor.v
848; ANYENDIAN: st.b
849; ANYENDIAN: .size llvm_mips_xor_v_w_test
850;
; Fixture globals for the <2 x i64> xor.v intrinsic test below.
@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_xor_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

; Test xor.v via the intrinsic with <2 x i64> operands. Bitcasts to
; <16 x i8> adapt the typed vectors to the untyped 128-bit intrinsic,
; so loads/stores are expected to select as ld.b/st.b.
define void @llvm_mips_xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}
866
867; ANYENDIAN: llvm_mips_xor_v_d_test:
868; ANYENDIAN: ld.b
869; ANYENDIAN: ld.b
870; ANYENDIAN: xor.v
871; ANYENDIAN: st.b
872; ANYENDIAN: .size llvm_mips_xor_v_d_test
873;
; Test that a generic IR 'xor' on <16 x i8> also selects to xor.v
; (no intrinsic needed). Element-typed loads/stores select as ld.b/st.b.
define void @xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = xor <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}
882
883; ANYENDIAN: xor_v_b_test:
884; ANYENDIAN: ld.b
885; ANYENDIAN: ld.b
886; ANYENDIAN: xor.v
887; ANYENDIAN: st.b
888; ANYENDIAN: .size xor_v_b_test
889;
; Test that a generic IR 'xor' on <8 x i16> selects to xor.v while the
; element-typed loads/stores select as ld.h/st.h.
define void @xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = xor <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}
898
899; ANYENDIAN: xor_v_h_test:
900; ANYENDIAN: ld.h
901; ANYENDIAN: ld.h
902; ANYENDIAN: xor.v
903; ANYENDIAN: st.h
904; ANYENDIAN: .size xor_v_h_test
905;
906
; Test that a generic IR 'xor' on <4 x i32> selects to xor.v with
; element-typed ld.w/st.w for the memory accesses.
define void @xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = xor <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}
915
916; ANYENDIAN: xor_v_w_test:
917; ANYENDIAN: ld.w
918; ANYENDIAN: ld.w
919; ANYENDIAN: xor.v
920; ANYENDIAN: st.w
921; ANYENDIAN: .size xor_v_w_test
922;
923
; Test that a generic IR 'xor' on <2 x i64> selects to xor.v with
; element-typed ld.d/st.d for the memory accesses.
define void @xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = xor <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}
932
933; ANYENDIAN: xor_v_d_test:
934; ANYENDIAN: ld.d
935; ANYENDIAN: ld.d
936; ANYENDIAN: xor.v
937; ANYENDIAN: st.d
938; ANYENDIAN: .size xor_v_d_test
939;
940declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
941declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
942declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
943declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
944declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
945declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
946declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind
947