; RUN: llc -march=mipsel -mattr=+dsp < %s

; 64-bit globals used as sources and sinks for DSP accumulator values
; in @test_acreg_copy below.
@g1 = common global i64 0, align 8
@g2 = common global i64 0, align 8
@g3 = common global i64 0, align 8
6
; Two maddu results derived from the same initial accumulator value are
; live at once, and one of them (%2) must also survive the call to @foo1,
; exercising copies and spills of the MIPS DSP accumulator (ac) registers.
define i64 @test_acreg_copy(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
entry:
  %0 = load i64* @g1, align 8
  ; Both multiply-accumulates start from %0, so two accumulator values
  ; are simultaneously live here.
  %1 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a0, i32 %a1)
  %2 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a2, i32 %a3)
  store i64 %1, i64* @g1, align 8
  store i64 %2, i64* @g2, align 8
  ; The call can clobber accumulators; %2 is used again afterwards.
  tail call void @foo1()
  store i64 %2, i64* @g3, align 8
  ret i64 %1
}
18
; MIPS DSP unsigned multiply-add intrinsic: takes and returns the 64-bit
; accumulator value alongside the two 32-bit multiplicands.
declare i64 @llvm.mips.maddu(i64, i32, i32)

; Opaque external call used as a clobber point in both tests.
declare void @foo1()

; v2i16 globals for the vector compare/select test @test_ccond_spill.
@g4 = common global <2 x i16> zeroinitializer, align 4
@g5 = common global <2 x i16> zeroinitializer, align 4
@g6 = common global <2 x i16> zeroinitializer, align 4
26
; The <2 x i1> compare result %cmp3 is used both before and after the call
; to @foo1, so the DSP condition-code state it lives in must be spilled
; across the call and reloaded for the select.
define { i32 } @test_ccond_spill(i32 %a.coerce, i32 %b.coerce) {
entry:
  ; The i32 arguments are ABI-coerced forms of <2 x i16> values.
  %0 = bitcast i32 %a.coerce to <2 x i16>
  %1 = bitcast i32 %b.coerce to <2 x i16>
  %cmp3 = icmp slt <2 x i16> %0, %1
  %sext = sext <2 x i1> %cmp3 to <2 x i16>
  store <2 x i16> %sext, <2 x i16>* @g4, align 4
  ; Call clobbers whatever register holds the compare result.
  tail call void @foo1()
  %2 = load <2 x i16>* @g5, align 4
  %3 = load <2 x i16>* @g6, align 4
  ; Reuse of %cmp3 after the call forces the spill/reload.
  %or = select <2 x i1> %cmp3, <2 x i16> %2, <2 x i16> %3
  %4 = bitcast <2 x i16> %or to i32
  %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
  ret { i32 } %.fca.0.insert
}
42