; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s

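; This file checks that fp16 vector shuffles, lane moves, splats and
; bit-selects are lowered to single AArch64 SIMD instructions where possible.
;
; In the select tests below, the and/xor/or sequence emitted for vbsl is
; recognised as a bit-select: bif (bit insert if false) keeps the lanes of %a
; (already in v0) where the mask in v2 is set and inserts %b elsewhere.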
; float16x4_t select_64(float16x4_t a, float16x4_t b, uint16x4_t c) { return vbsl_u16(c, a, b); }
define <4 x half> @select_64(<4 x half> %a, <4 x half> %b, <4 x i16> %c) #0 {
; CHECK-LABEL: select_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <4 x half> %a to <4 x i16>
  %1 = bitcast <4 x half> %b to <4 x i16>
  %vbsl3.i = and <4 x i16> %0, %c
  %2 = xor <4 x i16> %c, <i16 -1, i16 -1, i16 -1, i16 -1>
  %vbsl4.i = and <4 x i16> %1, %2
  %vbsl5.i = or <4 x i16> %vbsl3.i, %vbsl4.i
  %3 = bitcast <4 x i16> %vbsl5.i to <4 x half>
  ret <4 x half> %3
}

; float16x8_t select_128(float16x8_t a, float16x8_t b, uint16x8_t c) { return vbslq_u16(c, a, b); }
define <8 x half> @select_128(<8 x half> %a, <8 x half> %b, <8 x i16> %c) #0 {
; CHECK-LABEL: select_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <8 x half> %a to <8 x i16>
  %1 = bitcast <8 x half> %b to <8 x i16>
  %vbsl3.i = and <8 x i16> %0, %c
  %2 = xor <8 x i16> %c, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %vbsl4.i = and <8 x i16> %1, %2
  %vbsl5.i = or <8 x i16> %vbsl3.i, %vbsl4.i
  %3 = bitcast <8 x i16> %vbsl5.i to <8 x half>
  ret <8 x half> %3
}

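; Lane copies select a single ins (mov vD.h[i], vN.h[j]). The "kill" comments
; mark 64-bit D-register arguments that are implicitly widened to Q registers
; so their lanes can be indexed.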
; float16x4_t lane_64_64(float16x4_t a, float16x4_t b) {
;  return vcopy_lane_s16(a, 1, b, 2);
; }
define <4 x half> @lane_64_64(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: lane_64_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
; CHECK-NEXT:    mov v0.h[1], v1.h[2]
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 6, i32 2, i32 3>
  ret <4 x half> %0
}

; float16x8_t lane_128_64(float16x8_t a, float16x4_t b) {
;   return vcopyq_lane_s16(a, 1, b, 2);
; }
define <8 x half> @lane_128_64(<8 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: lane_128_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
; CHECK-NEXT:    mov v0.h[1], v1.h[2]
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <4 x half> %b to <4 x i16>
  %vget_lane = extractelement <4 x i16> %0, i32 2
  %1 = bitcast <8 x half> %a to <8 x i16>
  %vset_lane = insertelement <8 x i16> %1, i16 %vget_lane, i32 1
  %2 = bitcast <8 x i16> %vset_lane to <8 x half>
  ret <8 x half> %2
}

; float16x4_t lane_64_128(float16x4_t a, float16x8_t b) {
;   return vcopy_laneq_s16(a, 3, b, 5);
; }
define <4 x half> @lane_64_128(<4 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: lane_64_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    mov v0.h[3], v1.h[5]
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <8 x half> %b to <8 x i16>
  %vgetq_lane = extractelement <8 x i16> %0, i32 5
  %1 = bitcast <4 x half> %a to <4 x i16>
  %vset_lane = insertelement <4 x i16> %1, i16 %vgetq_lane, i32 3
  %2 = bitcast <4 x i16> %vset_lane to <4 x half>
  ret <4 x half> %2
}

; float16x8_t lane_128_128(float16x8_t a, float16x8_t b) {
;   return vcopyq_laneq_s16(a, 3, b, 5);
; }
define <8 x half> @lane_128_128(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: lane_128_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov v0.h[3], v1.h[5]
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 4, i32 5, i32 6, i32 7>
  ret <8 x half> %0
}

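; ext concatenates the two sources and extracts at a byte offset, so the
; immediate is the lane index scaled by the 2-byte element size:
; vext_s16(a, b, 3) becomes ext with #6.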
; float16x4_t ext_64(float16x4_t a, float16x4_t b) {
;   return vext_s16(a, b, 3);
; }
define <4 x half> @ext_64(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: ext_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ext v0.8b, v0.8b, v1.8b, #6
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x half> %0
}

; float16x8_t ext_128(float16x8_t a, float16x8_t b) {
;   return vextq_s16(a, b, 3);
; }
define <8 x half> @ext_128(<8 x half> %a, <8 x half> %b) #0 {
; CHECK-LABEL: ext_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ext v0.16b, v0.16b, v1.16b, #6
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
  ret <8 x half> %0
}

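; Shuffles that swap halfwords within each 32-bit or 64-bit container match
; the rev32/rev64 element-reverse instructions.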
; float16x4_t rev32_64(float16x4_t a) {
;   return vrev32_s16(a);
; }
define <4 x half> @rev32_64(<4 x half> %a) #0 {
; CHECK-LABEL: rev32_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    rev32 v0.4h, v0.4h
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x half> %0
}

; float16x4_t rev64_64(float16x4_t a) {
;   return vrev64_s16(a);
; }
define <4 x half> @rev64_64(<4 x half> %a) #0 {
; CHECK-LABEL: rev64_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    rev64 v0.4h, v0.4h
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x half> %0
}

; float16x8_t rev32_128(float16x8_t a) {
;   return vrev32q_s16(a);
; }
define <8 x half> @rev32_128(<8 x half> %a) #0 {
; CHECK-LABEL: rev32_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    rev32 v0.8h, v0.8h
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x half> %0
}

; float16x8_t rev64_128(float16x8_t a) {
;   return vrev64q_s16(a);
; }
define <8 x half> @rev64_128(<8 x half> %a) #0 {
; CHECK-LABEL: rev64_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    rev64 v0.8h, v0.8h
; CHECK-NEXT:    ret
entry:
  %0 = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x half> %0
}

; float16x4_t create_64(long long a) { return vcreate_f16(a); }
define <4 x half> @create_64(i64 %a) #0 {
; CHECK-LABEL: create_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmov d0, x0
; CHECK-NEXT:    ret
entry:
  %0 = bitcast i64 %a to <4 x half>
  ret <4 x half> %0
}

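; Splats of a scalar or of a single lane lower to dup. For the scalar case the
; half argument arrives in h0, which already aliases lane 0 of v0.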
; float16x4_t dup_64(__fp16 a) { return vdup_n_f16(a); }
define <4 x half> @dup_64(half %a) #0 {
; CHECK-LABEL: dup_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $q0
; CHECK-NEXT:    dup v0.4h, v0.h[0]
; CHECK-NEXT:    ret
entry:
  %vecinit = insertelement <4 x half> undef, half %a, i32 0
  %vecinit1 = insertelement <4 x half> %vecinit, half %a, i32 1
  %vecinit2 = insertelement <4 x half> %vecinit1, half %a, i32 2
  %vecinit3 = insertelement <4 x half> %vecinit2, half %a, i32 3
  ret <4 x half> %vecinit3
}

; float16x8_t dup_128(__fp16 a) { return vdupq_n_f16(a); }
define <8 x half> @dup_128(half %a) #0 {
; CHECK-LABEL: dup_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $q0
; CHECK-NEXT:    dup v0.8h, v0.h[0]
; CHECK-NEXT:    ret
entry:
  %vecinit = insertelement <8 x half> undef, half %a, i32 0
  %vecinit1 = insertelement <8 x half> %vecinit, half %a, i32 1
  %vecinit2 = insertelement <8 x half> %vecinit1, half %a, i32 2
  %vecinit3 = insertelement <8 x half> %vecinit2, half %a, i32 3
  %vecinit4 = insertelement <8 x half> %vecinit3, half %a, i32 4
  %vecinit5 = insertelement <8 x half> %vecinit4, half %a, i32 5
  %vecinit6 = insertelement <8 x half> %vecinit5, half %a, i32 6
  %vecinit7 = insertelement <8 x half> %vecinit6, half %a, i32 7
  ret <8 x half> %vecinit7
}

; float16x4_t dup_lane_64(float16x4_t a) { return vdup_lane_f16(a, 2); }
define <4 x half> @dup_lane_64(<4 x half> %a) #0 {
; CHECK-LABEL: dup_lane_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    dup v0.4h, v0.h[2]
; CHECK-NEXT:    ret
entry:
  %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  ret <4 x half> %shuffle
}

; float16x8_t dup_lane_128(float16x4_t a) { return vdupq_lane_f16(a, 2); }
define <8 x half> @dup_lane_128(<4 x half> %a) #0 {
; CHECK-LABEL: dup_lane_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    dup v0.8h, v0.h[2]
; CHECK-NEXT:    ret
entry:
  %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x half> %shuffle
}

; float16x4_t dup_laneq_64(float16x8_t a) { return vdup_laneq_f16(a, 2); }
define <4 x half> @dup_laneq_64(<8 x half> %a) #0 {
; CHECK-LABEL: dup_laneq_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    dup v0.4h, v0.h[2]
; CHECK-NEXT:    ret
entry:
  %shuffle = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
  ret <4 x half> %shuffle
}

; float16x8_t dup_laneq_128(float16x8_t a) { return vdupq_laneq_f16(a, 2); }
define <8 x half> @dup_laneq_128(<8 x half> %a) #0 {
; CHECK-LABEL: dup_laneq_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    dup v0.8h, v0.h[2]
; CHECK-NEXT:    ret
entry:
  %shuffle = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x half> %shuffle
}

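; Combining and splitting 64-bit halves: vcombine is an ins of the low 64 bits
; of %b into the high half of %a, vget_high is an ext by #8 bytes, and
; vget_low is free because the low half already lives in d0.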
; float16x8_t vcombine(float16x4_t a, float16x4_t b) { return vcombine_f16(a, b); }
define <8 x half> @vcombine(<4 x half> %a, <4 x half> %b) #0 {
; CHECK-LABEL: vcombine:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
; CHECK-NEXT:    mov v0.d[1], v1.d[0]
; CHECK-NEXT:    ret
entry:
  %shuffle.i = shufflevector <4 x half> %a, <4 x half> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x half> %shuffle.i
}

; float16x4_t get_high(float16x8_t a) { return vget_high_f16(a); }
define <4 x half> @get_high(<8 x half> %a) #0 {
; CHECK-LABEL: get_high:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  %shuffle.i = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  ret <4 x half> %shuffle.i
}


; float16x4_t get_low(float16x8_t a) { return vget_low_f16(a); }
define <4 x half> @get_low(<8 x half> %a) #0 {
; CHECK-LABEL: get_low:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  %shuffle.i = shufflevector <8 x half> %a, <8 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x half> %shuffle.i
}

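; The set/get lane tests bitcast the half element to i16, so the value is
; routed through a general-purpose register: fmov plus a lane mov to insert,
; and umov to extract.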
; float16x4_t set_lane_64(float16x4_t a, __fp16 b) { return vset_lane_f16(b, a, 2); }
define <4 x half> @set_lane_64(<4 x half> %a, half %b) #0 {
; CHECK-LABEL: set_lane_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h1 killed $h1 def $s1
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w8, s1
; CHECK-NEXT:    mov v0.h[2], w8
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
entry:
  %0 = bitcast half %b to i16
  %1 = bitcast <4 x half> %a to <4 x i16>
  %vset_lane = insertelement <4 x i16> %1, i16 %0, i32 2
  %2 = bitcast <4 x i16> %vset_lane to <4 x half>
  ret <4 x half> %2
}


; float16x8_t set_lane_128(float16x8_t a, __fp16 b) { return vsetq_lane_f16(b, a, 2); }
define <8 x half> @set_lane_128(<8 x half> %a, half %b) #0 {
; CHECK-LABEL: set_lane_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h1 killed $h1 def $s1
; CHECK-NEXT:    fmov w8, s1
; CHECK-NEXT:    mov v0.h[2], w8
; CHECK-NEXT:    ret
entry:
  %0 = bitcast half %b to i16
  %1 = bitcast <8 x half> %a to <8 x i16>
  %vset_lane = insertelement <8 x i16> %1, i16 %0, i32 2
  %2 = bitcast <8 x i16> %vset_lane to <8 x half>
  ret <8 x half> %2
}

; __fp16 get_lane_64(float16x4_t a) { return vget_lane_f16(a, 2); }
define half @get_lane_64(<4 x half> %a) #0 {
; CHECK-LABEL: get_lane_64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    umov w8, v0.h[2]
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <4 x half> %a to <4 x i16>
  %vget_lane = extractelement <4 x i16> %0, i32 2
  %1 = bitcast i16 %vget_lane to half
  ret half %1
}

; __fp16 get_lane_128(float16x8_t a) { return vgetq_lane_f16(a, 2); }
define half @get_lane_128(<8 x half> %a) #0 {
; CHECK-LABEL: get_lane_128:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umov w8, v0.h[2]
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %0 = bitcast <8 x half> %a to <8 x i16>
  %vgetq_lane = extractelement <8 x i16> %0, i32 2
  %1 = bitcast i16 %vgetq_lane to half
  ret half %1
}
