; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s

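; Narrowing right shifts by exactly the destination element width should be
; selected as a single VSHRN.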
define <8 x i8> @vshrns8(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vshrns8:
; CHECK: vshrn.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = lshr <8 x i16> %tmp1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
  ret <8 x i8> %tmp3
}

define <4 x i16> @vshrns16(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vshrns16:
; CHECK: vshrn.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = ashr <4 x i32> %tmp1, <i32 16, i32 16, i32 16, i32 16>
  %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
  ret <4 x i16> %tmp3
}

define <2 x i32> @vshrns32(<2 x i64>* %A) nounwind {
; CHECK-LABEL: vshrns32:
; CHECK: vshrn.i64
  %tmp1 = load <2 x i64>* %A
  %tmp2 = ashr <2 x i64> %tmp1, <i64 32, i64 32>
  %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
  ret <2 x i32> %tmp3
}

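; Shift amounts wider than the narrowed element (e.g. 9 when narrowing to i8)
; cannot be encoded in VSHRN, so a plain VSHR followed by VMOVN is expected.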
define <8 x i8> @vshrns8_bad(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vshrns8_bad:
; CHECK: vshr.s16
; CHECK: vmovn.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = ashr <8 x i16> %tmp1, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
  %tmp3 = trunc <8 x i16> %tmp2 to <8 x i8>
  ret <8 x i8> %tmp3
}

define <4 x i16> @vshrns16_bad(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vshrns16_bad:
; CHECK: vshr.u32
; CHECK: vmovn.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = lshr <4 x i32> %tmp1, <i32 17, i32 17, i32 17, i32 17>
  %tmp3 = trunc <4 x i32> %tmp2 to <4 x i16>
  ret <4 x i16> %tmp3
}

define <2 x i32> @vshrns32_bad(<2 x i64>* %A) nounwind {
; CHECK-LABEL: vshrns32_bad:
; CHECK: vshr.u64
; CHECK: vmovn.i64
  %tmp1 = load <2 x i64>* %A
  %tmp2 = lshr <2 x i64> %tmp1, <i64 33, i64 33>
  %tmp3 = trunc <2 x i64> %tmp2 to <2 x i32>
  ret <2 x i32> %tmp3
}

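; Rounding narrowing right shifts go through the vrshiftn intrinsic, where a
; negative shift amount denotes a right shift; these should select VRSHRN.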
define <8 x i8> @vrshrns8(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vrshrns8:
; CHECK: vrshrn.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> <i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8>)
  ret <8 x i8> %tmp2
}

define <4 x i16> @vrshrns16(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vrshrns16:
; CHECK: vrshrn.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> <i32 -16, i32 -16, i32 -16, i32 -16>)
  ret <4 x i16> %tmp2
}

define <2 x i32> @vrshrns32(<2 x i64>* %A) nounwind {
; CHECK-LABEL: vrshrns32:
; CHECK: vrshrn.i64
  %tmp1 = load <2 x i64>* %A
  %tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> <i64 -32, i64 -32>)
  ret <2 x i32> %tmp2
}

declare <8 x i8>  @llvm.arm.neon.vrshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone