; REQUIRES: asserts

; RUN: llc -march=hexagon -mcpu=hexagonv65 -O3 -debug-only=pipeliner \
; RUN: < %s 2>&1 -pipeliner-experimental-cg=true | FileCheck %s

; This test broke as part of https://reviews.llvm.org/D106308. That is not
; surprising, as the test is full of UB and is run at -O3.
; FIXME: It is unclear what to do with this test now; replacing null/undef
;        with pointer arguments could be a way to go.
; XFAIL: *
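; A minimal sketch of that direction (hypothetical, not wired into the RUN
; lines above): pass the buffer base addresses in as arguments instead of
; deriving them from null/undef, e.g.
;   define void @foo(i32 %size, i16* %src, i32* %dst)
; and route %src/%dst into the inttoptr/getelementptr users so the pointer
; operands of the loads and stores below are derived from real arguments
; rather than from undef/null.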

; Test that the artificial dependences are ignored while computing the
; circuits.

; The recurrence should be 1 here. If we do not ignore artificial deps,
; it will be greater.
; CHECK: rec=1,

define void @foo(i32 %size) #0 {
entry:
  %add = add nsw i32 0, 4
  %shr = ashr i32 %size, 1
  br i1 undef, label %L57.us, label %L57.us.ur

L57.us:
  %R9.0470.us = phi i32 [ %sub40.us.3, %L57.us ], [ undef, %entry ]
  %sub40.us.3 = add i32 %R9.0470.us, -64
  br i1 undef, label %L57.us, label %for.cond22.for.end_crit_edge.us.ur-lcssa

for.cond22.for.end_crit_edge.us.ur-lcssa:
  %inc.us.3.lcssa = phi i32 [ undef, %L57.us ]
  %sub40.us.3.lcssa = phi i32 [ %sub40.us.3, %L57.us ]
  %0 = icmp eq i32 %inc.us.3.lcssa, %shr
  br i1 %0, label %for.cond22.for.end_crit_edge.us, label %L57.us.ur

L57.us.ur:
  %R15_14.0478.us.ur = phi i64 [ %1, %L57.us.ur ], [ 0, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R13_12.0477.us.ur = phi i64 [ %14, %L57.us.ur ], [ 0, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R11_10.0476.us.ur = phi i64 [ %8, %L57.us.ur ], [ 0, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R7_6.0475.us.ur = phi i64 [ %7, %L57.us.ur ], [ 0, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R5_4.2474.us.ur = phi i64 [ %16, %L57.us.ur ], [ undef, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R3_2.0473.us.ur = phi i64 [ %9, %L57.us.ur ], [ 0, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R1_0.0472.us.ur = phi i64 [ %15, %L57.us.ur ], [ undef, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %kk.0471.us.ur = phi i32 [ %inc.us.ur, %L57.us.ur ], [ 0, %entry ], [ %inc.us.3.lcssa, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R9.0470.us.ur = phi i32 [ %sub40.us.ur, %L57.us.ur ], [ undef, %entry ], [ %sub40.us.3.lcssa, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %R8.0469.us.ur = phi i32 [ %sub34.us.ur, %L57.us.ur ], [ undef, %entry ], [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ]
  %1 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R15_14.0478.us.ur, i64 %R1_0.0472.us.ur, i64 %R3_2.0473.us.ur)
  %2 = tail call i64 @llvm.hexagon.S2.shuffeh(i64 %R5_4.2474.us.ur, i64 %R7_6.0475.us.ur)
  %3 = inttoptr i32 %R9.0470.us.ur to i16*
  %4 = load i16, i16* %3, align 2
  %conv27.us.ur = sext i16 %4 to i32
  %sub28.us.ur = add i32 %R9.0470.us.ur, -8
  %5 = inttoptr i32 %R8.0469.us.ur to i16*
  %6 = load i16, i16* %5, align 2
  %conv30.us.ur = sext i16 %6 to i32
  %sub31.us.ur = add i32 %R8.0469.us.ur, -8
  %7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %conv27.us.ur, i32 %conv30.us.ur)
  %8 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R11_10.0476.us.ur, i64 %R1_0.0472.us.ur, i64 %2)
  %9 = tail call i64 @llvm.hexagon.S2.shuffeh(i64 %7, i64 %R5_4.2474.us.ur)
  %10 = inttoptr i32 %sub31.us.ur to i16*
  %11 = load i16, i16* %10, align 2
  %conv33.us.ur = sext i16 %11 to i32
  %sub34.us.ur = add i32 %R8.0469.us.ur, -16
  %conv35.us.ur = trunc i64 %9 to i32
  %12 = inttoptr i32 %sub28.us.ur to i16*
  %13 = load i16, i16* %12, align 2
  %conv39.us.ur = sext i16 %13 to i32
  %sub40.us.ur = add i32 %R9.0470.us.ur, -16
  %14 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 %R13_12.0477.us.ur, i64 %R1_0.0472.us.ur, i64 %9)
  %15 = tail call i64 @llvm.hexagon.A2.combinew(i32 %conv35.us.ur, i32 undef)
  %16 = tail call i64 @llvm.hexagon.A2.combinew(i32 %conv39.us.ur, i32 %conv33.us.ur)
  %inc.us.ur = add nsw i32 %kk.0471.us.ur, 1
  %exitcond535.ur = icmp eq i32 %inc.us.ur, %shr
  br i1 %exitcond535.ur, label %for.cond22.for.end_crit_edge.us.ur-lcssa572, label %L57.us.ur

for.cond22.for.end_crit_edge.us.ur-lcssa572:
  %.lcssa730 = phi i64 [ %14, %L57.us.ur ]
  %.lcssa729 = phi i64 [ %8, %L57.us.ur ]
  %.lcssa728 = phi i64 [ %1, %L57.us.ur ]
  %extract.t652 = trunc i64 %.lcssa730 to i32
  %extract661 = lshr i64 %.lcssa729, 32
  %extract.t662 = trunc i64 %extract661 to i32
  %extract.t664 = trunc i64 %.lcssa728 to i32
  br label %for.cond22.for.end_crit_edge.us

for.cond22.for.end_crit_edge.us:
  %.lcssa551.off0 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t652, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
  %.lcssa550.off32 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t662, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
  %.lcssa549.off0 = phi i32 [ undef, %for.cond22.for.end_crit_edge.us.ur-lcssa ], [ %extract.t664, %for.cond22.for.end_crit_edge.us.ur-lcssa572 ]
  %17 = inttoptr i32 %add to i32*
  store i32 %.lcssa549.off0, i32* %17, align 4
  %add.ptr61.us = getelementptr inbounds i8, i8* null, i32 32
  %18 = bitcast i8* %add.ptr61.us to i32*
  store i32 %.lcssa551.off0, i32* %18, align 4
  %19 = bitcast i8* undef to i32*
  store i32 %.lcssa550.off32, i32* %19, align 4
  call void @llvm.trap()
  unreachable
}

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.S2.shuffeh(i64, i64) #1

; Function Attrs: noreturn nounwind
declare void @llvm.trap() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { noreturn nounwind }
