; RUN: llc -march=hexagon < %s
; REQUIRES: asserts

; Check that this compiles without crashing.

; Function Attrs: nounwind
define void @f0(i32 %a0) #0 {
b0:
  %v0 = ashr i32 %a0, 1
  br label %b1

b1:                                               ; preds = %b1, %b0
  %v1 = phi i32 [ %v17, %b1 ], [ undef, %b0 ]
  %v2 = phi i32 [ %v19, %b1 ], [ 0, %b0 ]
  %v3 = phi i32 [ %v4, %b1 ], [ undef, %b0 ]
  %v4 = phi i32 [ %v14, %b1 ], [ undef, %b0 ]
  %v5 = phi i32 [ %v18, %b1 ], [ undef, %b0 ]
  %v6 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v1, i32 undef)
  %v7 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v3, i32 %v3)
  %v8 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v6, i64 undef, i32 2)
  %v9 = tail call i64 @llvm.hexagon.M2.vdmacs.s0(i64 undef, i64 %v7, i64 %v8)
  %v10 = inttoptr i32 %v5 to i16*
  %v11 = load i16, i16* %v10, align 2
  %v12 = sext i16 %v11 to i32
  %v13 = add nsw i32 %v5, -8
  %v14 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 %v12, i32 %v1)
  %v15 = inttoptr i32 %v13 to i16*
  %v16 = load i16, i16* %v15, align 2
  %v17 = sext i16 %v16 to i32
  %v18 = add nsw i32 %v5, -16
  %v19 = add nsw i32 %v2, 1
  %v20 = icmp eq i32 %v19, %v0
  br i1 %v20, label %b2, label %b1

b2:                                               ; preds = %b1
  %v21 = phi i64 [ %v9, %b1 ]
  %v22 = trunc i64 %v21 to i32
  %v23 = bitcast i8* undef to i32*
  store i32 %v22, i32* %v23, align 4
  call void @llvm.trap()
  unreachable
}

; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) #1

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.M2.vdmacs.s0(i64, i64, i64) #1

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.S2.valignib(i64, i64, i32) #1

; Function Attrs: noreturn nounwind
declare void @llvm.trap() #2

attributes #0 = { nounwind "target-cpu"="hexagonv60" }
attributes #1 = { nounwind readnone }
attributes #2 = { noreturn nounwind }