; Test the MSA intrinsics that are encoded with the VEC instruction format.
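; The .v operations tested here (and.v, bmnz.v, bmz.v, bsel.v, nor.v, or.v and
; xor.v) are bitwise operations on the whole 128-bit register, so the element
; size is irrelevant; the intrinsic tests bitcast all operands to <16 x i8> and
; expect ld.b/st.b regardless of the element type of the globals involved.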

; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 -relocation-model=pic < %s \
; RUN:   | FileCheck -check-prefix=ANYENDIAN %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 -relocation-model=pic < %s \
; RUN:   | FileCheck -check-prefix=ANYENDIAN %s

@llvm_mips_and_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_and_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_and_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_b_test
;
@llvm_mips_and_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_and_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_and_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_h_test
;
@llvm_mips_and_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_and_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_and_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_w_test
;
@llvm_mips_and_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_and_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_and_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_and_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_and_v_d_test
;
define void @and_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
  %2 = and <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
  ret void
}

; ANYENDIAN: and_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: and.v
; ANYENDIAN: st.b
; ANYENDIAN: .size and_v_b_test
;
define void @and_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
  %2 = and <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
  ret void
}

; ANYENDIAN: and_v_h_test:
; ANYENDIAN: ld.h
; ANYENDIAN: ld.h
; ANYENDIAN: and.v
; ANYENDIAN: st.h
; ANYENDIAN: .size and_v_h_test
;

define void @and_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
  %2 = and <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
  ret void
}

; ANYENDIAN: and_v_w_test:
; ANYENDIAN: ld.w
; ANYENDIAN: ld.w
; ANYENDIAN: and.v
; ANYENDIAN: st.w
; ANYENDIAN: .size and_v_w_test
;

define void @and_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
  %2 = and <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
  ret void
}

; ANYENDIAN: and_v_d_test:
; ANYENDIAN: ld.d
; ANYENDIAN: ld.d
; ANYENDIAN: and.v
; ANYENDIAN: st.d
; ANYENDIAN: .size and_v_d_test
;
@llvm_mips_bmnz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmnz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmnz_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bmnz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_b_test

@llvm_mips_bmnz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmnz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmnz_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bmnz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_h_test

@llvm_mips_bmnz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmnz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmnz_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bmnz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_w_test

@llvm_mips_bmnz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmnz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmnz_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bmnz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmnz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmnz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; ANYENDIAN-DAG: bmnz.v [[R4]], [[R5]], [[R6]]
; ANYENDIAN-DAG: st.b [[R4]], 0(
; ANYENDIAN: .size llvm_mips_bmnz_v_d_test

@llvm_mips_bmz_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bmz_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bmz_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bmz_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
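; (Per the MSA spec, bmz.v computes wd = (ws AND NOT wt) OR (wd AND wt), while
; bmnz.v computes wd = (ws AND wt) OR (wd AND NOT wt), so swapping ws and wd_in
; produces the same result.)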
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_b_test

@llvm_mips_bmz_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bmz_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bmz_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bmz_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_h_test

@llvm_mips_bmz_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bmz_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bmz_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bmz_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_w_test

@llvm_mips_bmz_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bmz_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bmz_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bmz_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2
  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bmz_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bmz_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bmz.v with ws and wd_in swapped
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R4]], [[R6]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bmz_v_d_test

@llvm_mips_bsel_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_bsel_v_b_ARG3 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_bsel_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_bsel_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2
  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3
  %3 = bitcast <16 x i8> %0 to <16 x i8>
  %4 = bitcast <16 x i8> %1 to <16 x i8>
  %5 = bitcast <16 x i8> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <16 x i8>
  store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_b_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_b_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
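; (Per the MSA spec, bsel.v computes wd = (ws AND NOT wd) OR (wt AND wd);
; permuting the operands as above gives the same bitwise expression as bmnz.v.)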
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_b_test

@llvm_mips_bsel_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_bsel_v_h_ARG3 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_bsel_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_bsel_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2
  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3
  %3 = bitcast <8 x i16> %0 to <16 x i8>
  %4 = bitcast <8 x i16> %1 to <16 x i8>
  %5 = bitcast <8 x i16> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <8 x i16>
  store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_h_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_h_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_h_test

@llvm_mips_bsel_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_bsel_v_w_ARG3 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_bsel_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_bsel_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2
  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3
  %3 = bitcast <4 x i32> %0 to <16 x i8>
  %4 = bitcast <4 x i32> %1 to <16 x i8>
  %5 = bitcast <4 x i32> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <4 x i32>
  store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_w_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_w_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_w_test

@llvm_mips_bsel_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_bsel_v_d_ARG3 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_bsel_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_bsel_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2
  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3
  %3 = bitcast <2 x i64> %0 to <16 x i8>
  %4 = bitcast <2 x i64> %1 to <16 x i8>
  %5 = bitcast <2 x i64> %2 to <16 x i8>
  %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
  %7 = bitcast <16 x i8> %6 to <2 x i64>
  store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_bsel_v_d_test:
; ANYENDIAN-DAG: lw [[R1:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG1)(
; ANYENDIAN-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG2)(
; ANYENDIAN-DAG: lw [[R3:\$[0-9]+]], %got(llvm_mips_bsel_v_d_ARG3)(
; ANYENDIAN-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
; ANYENDIAN-DAG: ld.b [[R5:\$w[0-9]+]], 0([[R2]])
; ANYENDIAN-DAG: ld.b [[R6:\$w[0-9]+]], 0([[R3]])
; bmnz.v is the same as bsel.v with (wd_in, wt, ws) -> (wt, ws, wd_in)
; ANYENDIAN-DAG: bmnz.v [[R5]], [[R6]], [[R4]]
; ANYENDIAN-DAG: st.b [[R5]], 0(
; ANYENDIAN: .size llvm_mips_bsel_v_d_test

@llvm_mips_nor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_nor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_nor_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_nor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_b_test
;
@llvm_mips_nor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_nor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_nor_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_nor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_h_test
;
@llvm_mips_nor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_nor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_nor_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_nor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_w_test
;
@llvm_mips_nor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_nor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_nor_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_nor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_nor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: nor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_nor_v_d_test
;
@llvm_mips_or_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_or_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_or_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_b_test
;
@llvm_mips_or_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_or_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_or_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_h_test
;
@llvm_mips_or_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_or_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_or_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_w_test
;
@llvm_mips_or_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_or_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_or_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_or_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_or_v_d_test
;
define void @or_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
  %2 = or <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
  ret void
}

; ANYENDIAN: or_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: or.v
; ANYENDIAN: st.b
; ANYENDIAN: .size or_v_b_test
;
define void @or_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
  %2 = or <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
  ret void
}

; ANYENDIAN: or_v_h_test:
; ANYENDIAN: ld.h
; ANYENDIAN: ld.h
; ANYENDIAN: or.v
; ANYENDIAN: st.h
; ANYENDIAN: .size or_v_h_test
;

define void @or_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
  %2 = or <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
  ret void
}

; ANYENDIAN: or_v_w_test:
; ANYENDIAN: ld.w
; ANYENDIAN: ld.w
; ANYENDIAN: or.v
; ANYENDIAN: st.w
; ANYENDIAN: .size or_v_w_test
;

define void @or_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
  %2 = or <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
  ret void
}

; ANYENDIAN: or_v_d_test:
; ANYENDIAN: ld.d
; ANYENDIAN: ld.d
; ANYENDIAN: or.v
; ANYENDIAN: st.d
; ANYENDIAN: .size or_v_d_test
;
@llvm_mips_xor_v_b_ARG1 = global <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, align 16
@llvm_mips_xor_v_b_ARG2 = global <16 x i8> <i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31>, align 16
@llvm_mips_xor_v_b_RES  = global <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, align 16

define void @llvm_mips_xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = bitcast <16 x i8> %0 to <16 x i8>
  %3 = bitcast <16 x i8> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_b_test
;
@llvm_mips_xor_v_h_ARG1 = global <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, align 16
@llvm_mips_xor_v_h_ARG2 = global <8 x i16> <i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, align 16
@llvm_mips_xor_v_h_RES  = global <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, align 16

define void @llvm_mips_xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = bitcast <8 x i16> %0 to <16 x i8>
  %3 = bitcast <8 x i16> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_h_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_h_test
;
@llvm_mips_xor_v_w_ARG1 = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@llvm_mips_xor_v_w_ARG2 = global <4 x i32> <i32 4, i32 5, i32 6, i32 7>, align 16
@llvm_mips_xor_v_w_RES  = global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 16

define void @llvm_mips_xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = bitcast <4 x i32> %0 to <16 x i8>
  %3 = bitcast <4 x i32> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_w_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_w_test
;
@llvm_mips_xor_v_d_ARG1 = global <2 x i64> <i64 0, i64 1>, align 16
@llvm_mips_xor_v_d_ARG2 = global <2 x i64> <i64 2, i64 3>, align 16
@llvm_mips_xor_v_d_RES  = global <2 x i64> <i64 0, i64 0>, align 16

define void @llvm_mips_xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = bitcast <2 x i64> %0 to <16 x i8>
  %3 = bitcast <2 x i64> %1 to <16 x i8>
  %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

; ANYENDIAN: llvm_mips_xor_v_d_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size llvm_mips_xor_v_d_test
;
define void @xor_v_b_test() nounwind {
entry:
  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
  %2 = xor <16 x i8> %0, %1
  store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
  ret void
}

; ANYENDIAN: xor_v_b_test:
; ANYENDIAN: ld.b
; ANYENDIAN: ld.b
; ANYENDIAN: xor.v
; ANYENDIAN: st.b
; ANYENDIAN: .size xor_v_b_test
;
define void @xor_v_h_test() nounwind {
entry:
  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
  %2 = xor <8 x i16> %0, %1
  store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
  ret void
}

; ANYENDIAN: xor_v_h_test:
; ANYENDIAN: ld.h
; ANYENDIAN: ld.h
; ANYENDIAN: xor.v
; ANYENDIAN: st.h
; ANYENDIAN: .size xor_v_h_test
;

define void @xor_v_w_test() nounwind {
entry:
  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
  %2 = xor <4 x i32> %0, %1
  store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
  ret void
}

; ANYENDIAN: xor_v_w_test:
; ANYENDIAN: ld.w
; ANYENDIAN: ld.w
; ANYENDIAN: xor.v
; ANYENDIAN: st.w
; ANYENDIAN: .size xor_v_w_test
;

define void @xor_v_d_test() nounwind {
entry:
  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
  %2 = xor <2 x i64> %0, %1
  store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
  ret void
}

; ANYENDIAN: xor_v_d_test:
; ANYENDIAN: ld.d
; ANYENDIAN: ld.d
; ANYENDIAN: xor.v
; ANYENDIAN: st.d
; ANYENDIAN: .size xor_v_d_test
;
declare <16 x i8> @llvm.mips.and.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmnz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bmz.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.bsel.v(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.nor.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.or.v(<16 x i8>, <16 x i8>) nounwind
declare <16 x i8> @llvm.mips.xor.v(<16 x i8>, <16 x i8>) nounwind