1; RUN: opt < %s -S -mtriple=amdgcn-- -basic-aa -loop-unroll | FileCheck %s
2
3; Check that the loop in unroll_default is not fully unrolled using the default
4; unroll threshold
5; CHECK-LABEL: @unroll_default
6; CHECK: entry:
7; CHECK: br i1 %cmp
8; CHECK: ret void
9
10; Check that the same loop in unroll_full is fully unrolled when the default
11; unroll threshold is increased by use of the amdgpu-unroll-threshold attribute
12; CHECK-LABEL: @unroll_full
13; CHECK: entry:
14; CHECK-NOT: br i1 %cmp
15; CHECK: ret void
16
; Source and destination pointer slots for the copy loops below. Both
; loop bodies reinterpret these i32** slots as i64* via a constant
; bitcast and copy the raw 8 bytes from @in's slot to @out's slot.
@in = internal unnamed_addr global i32* null, align 8
@out = internal unnamed_addr global i32* null, align 8
19
; 100-iteration do-while loop copying the 8-byte slot of @in into the
; slot of @out each iteration. This function carries no attributes, so
; the unroller runs with the target's default threshold; the CHECK
; lines above require the latch branch (br i1 %cmp) to survive,
; proving the loop was NOT fully unrolled.
; NOTE: the IR below must stay textually unchanged — the FileCheck
; patterns match the exact names %cmp / entry.
define void @unroll_default() {
entry:
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
  ; Copy 8 bytes: @out's slot = @in's slot (both viewed as i64).
  %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
  store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
  %inc = add nsw i32 %i.0, 1
  %cmp = icmp slt i32 %inc, 100                   ; trip count = 100
  br i1 %cmp, label %do.body, label %do.end       ; latch checked by CHECK lines

do.end:                                           ; preds = %do.body
  ret void
}
35
; Identical 100-iteration copy loop, but tagged with attribute set #0
; ("amdgpu-unroll-threshold"="1000", defined at the bottom of the
; file). The raised threshold lets the unroller fully unroll the loop;
; the CHECK-NOT above requires that no br on %cmp remains.
; NOTE: the IR below must stay textually unchanged — the FileCheck
; patterns match the exact names %cmp / entry.
define void @unroll_full() #0 {
entry:
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %do.body ]
  ; Copy 8 bytes: @out's slot = @in's slot (both viewed as i64).
  %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
  store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
  %inc = add nsw i32 %i.0, 1
  %cmp = icmp slt i32 %inc, 100                   ; trip count = 100
  br i1 %cmp, label %do.body, label %do.end       ; must be eliminated by full unroll

do.end:                                           ; preds = %do.body
  ret void
}
51
; AMDGPU-specific attribute consumed by the target's TTI: raises the
; unroll threshold for @unroll_full to 1000 so its loop fully unrolls.
attributes #0 = { "amdgpu-unroll-threshold"="1000" }
53