; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600

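; Test that sdiv, srem, udiv and urem on 128-bit vector types are selected
; through GlobalISel to the corresponding MSA instructions. sdiv on
; <16 x i8>, <8 x i16>, <4 x i32> and <2 x i64> selects to div_s.{b,h,w,d}.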
define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
; P5600-LABEL: sdiv_v16i8:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    ld.b $w1, 0($5)
; P5600-NEXT:    div_s.b $w0, $w0, $w1
; P5600-NEXT:    st.b $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %div = sdiv <16 x i8> %0, %1
  store <16 x i8> %div, <16 x i8>* %c, align 16
  ret void
}

define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
; P5600-LABEL: sdiv_v8i16:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    ld.h $w1, 0($5)
; P5600-NEXT:    div_s.h $w0, $w0, $w1
; P5600-NEXT:    st.h $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %div = sdiv <8 x i16> %0, %1
  store <8 x i16> %div, <8 x i16>* %c, align 16
  ret void
}

define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
; P5600-LABEL: sdiv_v4i32:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    ld.w $w1, 0($5)
; P5600-NEXT:    div_s.w $w0, $w0, $w1
; P5600-NEXT:    st.w $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %div = sdiv <4 x i32> %0, %1
  store <4 x i32> %div, <4 x i32>* %c, align 16
  ret void
}

define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
; P5600-LABEL: sdiv_v2i64:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    ld.d $w1, 0($5)
; P5600-NEXT:    div_s.d $w0, $w0, $w1
; P5600-NEXT:    st.d $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %div = sdiv <2 x i64> %0, %1
  store <2 x i64> %div, <2 x i64>* %c, align 16
  ret void
}

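; srem on the same vector types selects to mod_s.{b,h,w,d}.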
define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
; P5600-LABEL: srem_v16i8:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    ld.b $w1, 0($5)
; P5600-NEXT:    mod_s.b $w0, $w0, $w1
; P5600-NEXT:    st.b $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %rem = srem <16 x i8> %0, %1
  store <16 x i8> %rem, <16 x i8>* %c, align 16
  ret void
}

define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
; P5600-LABEL: srem_v8i16:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    ld.h $w1, 0($5)
; P5600-NEXT:    mod_s.h $w0, $w0, $w1
; P5600-NEXT:    st.h $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %rem = srem <8 x i16> %0, %1
  store <8 x i16> %rem, <8 x i16>* %c, align 16
  ret void
}

define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
; P5600-LABEL: srem_v4i32:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    ld.w $w1, 0($5)
; P5600-NEXT:    mod_s.w $w0, $w0, $w1
; P5600-NEXT:    st.w $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %rem = srem <4 x i32> %0, %1
  store <4 x i32> %rem, <4 x i32>* %c, align 16
  ret void
}

define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
; P5600-LABEL: srem_v2i64:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    ld.d $w1, 0($5)
; P5600-NEXT:    mod_s.d $w0, $w0, $w1
; P5600-NEXT:    st.d $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %rem = srem <2 x i64> %0, %1
  store <2 x i64> %rem, <2 x i64>* %c, align 16
  ret void
}

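; udiv selects to div_u.{b,h,w,d}.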
define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
; P5600-LABEL: udiv_v16u8:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    ld.b $w1, 0($5)
; P5600-NEXT:    div_u.b $w0, $w0, $w1
; P5600-NEXT:    st.b $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %div = udiv <16 x i8> %0, %1
  store <16 x i8> %div, <16 x i8>* %c, align 16
  ret void
}

define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
; P5600-LABEL: udiv_v8u16:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    ld.h $w1, 0($5)
; P5600-NEXT:    div_u.h $w0, $w0, $w1
; P5600-NEXT:    st.h $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %div = udiv <8 x i16> %0, %1
  store <8 x i16> %div, <8 x i16>* %c, align 16
  ret void
}

define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
; P5600-LABEL: udiv_v4u32:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    ld.w $w1, 0($5)
; P5600-NEXT:    div_u.w $w0, $w0, $w1
; P5600-NEXT:    st.w $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %div = udiv <4 x i32> %0, %1
  store <4 x i32> %div, <4 x i32>* %c, align 16
  ret void
}

define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
; P5600-LABEL: udiv_v2u64:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    ld.d $w1, 0($5)
; P5600-NEXT:    div_u.d $w0, $w0, $w1
; P5600-NEXT:    st.d $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %div = udiv <2 x i64> %0, %1
  store <2 x i64> %div, <2 x i64>* %c, align 16
  ret void
}

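; urem selects to mod_u.{b,h,w,d}.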
define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
; P5600-LABEL: urem_v16u8:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.b $w0, 0($4)
; P5600-NEXT:    ld.b $w1, 0($5)
; P5600-NEXT:    mod_u.b $w0, $w0, $w1
; P5600-NEXT:    st.b $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <16 x i8>, <16 x i8>* %a, align 16
  %1 = load <16 x i8>, <16 x i8>* %b, align 16
  %rem = urem <16 x i8> %0, %1
  store <16 x i8> %rem, <16 x i8>* %c, align 16
  ret void
}

define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
; P5600-LABEL: urem_v8u16:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.h $w0, 0($4)
; P5600-NEXT:    ld.h $w1, 0($5)
; P5600-NEXT:    mod_u.h $w0, $w0, $w1
; P5600-NEXT:    st.h $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <8 x i16>, <8 x i16>* %a, align 16
  %1 = load <8 x i16>, <8 x i16>* %b, align 16
  %rem = urem <8 x i16> %0, %1
  store <8 x i16> %rem, <8 x i16>* %c, align 16
  ret void
}

define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
; P5600-LABEL: urem_v4u32:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.w $w0, 0($4)
; P5600-NEXT:    ld.w $w1, 0($5)
; P5600-NEXT:    mod_u.w $w0, $w0, $w1
; P5600-NEXT:    st.w $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <4 x i32>, <4 x i32>* %a, align 16
  %1 = load <4 x i32>, <4 x i32>* %b, align 16
  %rem = urem <4 x i32> %0, %1
  store <4 x i32> %rem, <4 x i32>* %c, align 16
  ret void
}

define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
; P5600-LABEL: urem_v2u64:
; P5600:       # %bb.0: # %entry
; P5600-NEXT:    ld.d $w0, 0($4)
; P5600-NEXT:    ld.d $w1, 0($5)
; P5600-NEXT:    mod_u.d $w0, $w0, $w1
; P5600-NEXT:    st.d $w0, 0($6)
; P5600-NEXT:    jr $ra
; P5600-NEXT:    nop
entry:
  %0 = load <2 x i64>, <2 x i64>* %a, align 16
  %1 = load <2 x i64>, <2 x i64>* %b, align 16
  %rem = urem <2 x i64> %0, %1
  store <2 x i64> %rem, <2 x i64>* %c, align 16
  ret void
}