; Test that DAGCombiner benefits from ComputeNumSignBitsForTargetNode() with
; vector intrinsics.
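;
; Each function below feeds a constant vector to a sign-extending unpack
; intrinsic, truncates the result and sign-extends it back to the original
; element width.  With enough sign bits reported for the intrinsic result,
; the trunc + sext pair should fold away, so only the unpack instruction is
; expected before the return.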
;
; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 < %s  | FileCheck %s

declare <8 x i16> @llvm.s390.vuphb(<16 x i8>)

; VUPHB
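; Sign-extends the high (leftmost) byte elements to halfwords.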
define <8 x i16> @f0() {
; CHECK-LABEL: f0:
; CHECK:       # %bb.0:
; CHECK:       vuphb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuphb(<16 x i8>
                                         <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1,
                                          i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
  %trunc = trunc <8 x i16> %unp to <8 x i8>
  %ret = sext <8 x i8> %trunc to <8 x i16>
  ret <8 x i16> %ret
}

declare <4 x i32> @llvm.s390.vuphh(<8 x i16>)

; VUPHH
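; Sign-extends the high halfword elements to words.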
define <4 x i32> @f1() {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK:       vuphh %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuphh(<8 x i16>
                                         <i16 0, i16 1, i16 0, i16 1,
                                          i16 0, i16 1, i16 0, i16 1>)
  %trunc = trunc <4 x i32> %unp to <4 x i16>
  %ret = sext <4 x i16> %trunc to <4 x i32>
  ret <4 x i32> %ret
}

declare <2 x i64> @llvm.s390.vuphf(<4 x i32>)

; VUPHF
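; Sign-extends the high word elements to doublewords.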
define <2 x i64> @f2() {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK:       vuphf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuphf(<4 x i32> <i32 0, i32 1, i32 0, i32 1>)
  %trunc = trunc <2 x i64> %unp to <2 x i32>
  %ret = sext <2 x i32> %trunc to <2 x i64>
  ret <2 x i64> %ret
}

declare <8 x i16> @llvm.s390.vuplb(<16 x i8>)

; VUPLB
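; Sign-extends the low (rightmost) byte elements to halfwords.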
define <8 x i16> @f3() {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK:       vuplb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuplb(<16 x i8>
                                         <i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1,
                                          i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1>)
  %trunc = trunc <8 x i16> %unp to <8 x i8>
  %ret = sext <8 x i8> %trunc to <8 x i16>
  ret <8 x i16> %ret
}

declare <4 x i32> @llvm.s390.vuplhw(<8 x i16>)

; VUPLHW
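; Sign-extends the low halfword elements to words.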
define <4 x i32> @f4() {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK:       vuplhw %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuplhw(<8 x i16>
                                          <i16 1, i16 0, i16 1, i16 0,
                                           i16 1, i16 0, i16 1, i16 0>)
  %trunc = trunc <4 x i32> %unp to <4 x i16>
  %ret = sext <4 x i16> %trunc to <4 x i32>
  ret <4 x i32> %ret
}

declare <2 x i64> @llvm.s390.vuplf(<4 x i32>)

; VUPLF
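; Sign-extends the low word elements to doublewords.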
define <2 x i64> @f5() {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK:       vuplf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplf(<4 x i32> <i32 1, i32 0, i32 1, i32 0>)
  %trunc = trunc <2 x i64> %unp to <2 x i32>
  %ret = sext <2 x i32> %trunc to <2 x i64>
  ret <2 x i64> %ret
}