1; RUN: opt -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -analyze -divergence -use-gpu-divergence-analysis %s | FileCheck %s
2; RUN: opt -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx908 -analyze -divergence -use-gpu-divergence-analysis %s | FileCheck %s
3; Make sure nothing crashes on targets with or without AGPRs
4
; A single SGPR ("=s") virtual-register output is uniform: a scalar register
; holds one value shared by the whole wave.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_sgpr_virtreg_output':
; CHECK-NOT: DIVERGENT
define i32 @inline_asm_1_sgpr_virtreg_output() {
  %sgpr = call i32 asm "s_mov_b32 $0, 0", "=s"()
  ret i32 %sgpr
}
11
; Same as above, but constraining to a specific physical SGPR ({s0}); the
; result is still uniform.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_sgpr_physreg_output':
; CHECK-NOT: DIVERGENT
define i32 @inline_asm_1_sgpr_physreg_output() {
  %sgpr = call i32 asm "s_mov_b32 s0, 0", "={s0}"()
  ret i32 %sgpr
}
18
; A VGPR ("=v") output is per-lane and must be treated as divergent.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_vgpr_virtreg_output':
; CHECK: DIVERGENT: %vgpr = call i32 asm "v_mov_b32 $0, 0", "=v"()
define i32 @inline_asm_1_vgpr_virtreg_output() {
  %vgpr = call i32 asm "v_mov_b32 $0, 0", "=v"()
  ret i32 %vgpr
}
25
; A specific physical VGPR ({v0}) output is likewise divergent.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_vgpr_physreg_output':
; CHECK: DIVERGENT: %vgpr = call i32 asm "v_mov_b32 v0, 0", "={v0}"()
define i32 @inline_asm_1_vgpr_physreg_output() {
  %vgpr = call i32 asm "v_mov_b32 v0, 0", "={v0}"()
  ret i32 %vgpr
}
32
; An AGPR ("=a") output is per-lane, so it is divergent. This must also not
; crash on tahiti, which has no AGPRs (see the gfx908 vs tahiti RUN lines).
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_agpr_virtreg_output':
; CHECK: DIVERGENT: %vgpr = call i32 asm "; def $0", "=a"()
define i32 @inline_asm_1_agpr_virtreg_output() {
  %vgpr = call i32 asm "; def $0", "=a"()
  ret i32 %vgpr
}
39
; A specific physical AGPR ({a0}) output is divergent, and must not crash on
; subtargets without AGPRs.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_1_agpr_physreg_output':
; CHECK: DIVERGENT: %vgpr = call i32 asm "; def a0", "={a0}"()
define i32 @inline_asm_1_agpr_physreg_output() {
  %vgpr = call i32 asm "; def a0", "={a0}"()
  ret i32 %vgpr
}
46
; Multiple outputs returned as an aggregate: when every element is an SGPR,
; the aggregate and both extracts stay uniform.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_2_sgpr_virtreg_output':
; CHECK-NOT: DIVERGENT
define void @inline_asm_2_sgpr_virtreg_output() {
  %asm = call { i32, i32 } asm "; def $0, $1", "=s,=s"()
  %sgpr0 = extractvalue { i32, i32 } %asm, 0
  %sgpr1 = extractvalue { i32, i32 } %asm, 1
  store i32 %sgpr0, i32 addrspace(1)* undef
  store i32 %sgpr1, i32 addrspace(1)* undef
  ret void
}
57
; One output is SGPR, one is VGPR. Infer divergent for the aggregate, but uniform on the SGPR extract
; (the analysis tracks divergence per struct element, not just per value).
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_sgpr_vgpr_virtreg_output':
; CHECK: DIVERGENT:       %asm = call { i32, i32 } asm "; def $0, $1", "=s,=v"()
; CHECK-NEXT: {{^[ \t]+}}%sgpr = extractvalue { i32, i32 } %asm, 0
; CHECK-NEXT: DIVERGENT:       %vgpr = extractvalue { i32, i32 } %asm, 1
define void @inline_asm_sgpr_vgpr_virtreg_output() {
  %asm = call { i32, i32 } asm "; def $0, $1", "=s,=v"()
  %sgpr = extractvalue { i32, i32 } %asm, 0
  %vgpr = extractvalue { i32, i32 } %asm, 1
  store i32 %sgpr, i32 addrspace(1)* undef
  store i32 %vgpr, i32 addrspace(1)* undef
  ret void
}
71
; Same mixed case with the constraint order swapped ("=v,=s"): element 0 is
; now the divergent VGPR extract and element 1 the uniform SGPR extract.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_vgpr_sgpr_virtreg_output':
; CHECK: DIVERGENT:       %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s"()
; CHECK-NEXT: DIVERGENT:       %vgpr = extractvalue { i32, i32 } %asm, 0
; CHECK-NEXT: {{^[ \t]+}}%sgpr = extractvalue { i32, i32 } %asm, 1
define void @inline_asm_vgpr_sgpr_virtreg_output() {
  %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s"()
  %vgpr = extractvalue { i32, i32 } %asm, 0
  %sgpr = extractvalue { i32, i32 } %asm, 1
  store i32 %vgpr, i32 addrspace(1)* undef
  store i32 %sgpr, i32 addrspace(1)* undef
  ret void
}
84
; Have an extra output constraint
; An additional input constraint ("s" on the i32 1234) must not confuse the
; per-output constraint walk; all-SGPR outputs stay uniform.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'multi_sgpr_inline_asm_output_input_constraint':
; CHECK-NOT: DIVERGENT
define void @multi_sgpr_inline_asm_output_input_constraint() {
  %asm = call { i32, i32 } asm "; def $0, $1", "=s,=s,s"(i32 1234)
  %sgpr0 = extractvalue { i32, i32 } %asm, 0
  %sgpr1 = extractvalue { i32, i32 } %asm, 1
  store i32 %sgpr0, i32 addrspace(1)* undef
  store i32 %sgpr1, i32 addrspace(1)* undef
  ret void
}
96
; Mixed VGPR/SGPR outputs plus a trailing "v" input constraint: only the
; output constraints determine divergence of the extracts, so element 0
; (VGPR) is divergent and element 1 (SGPR) is uniform.
; CHECK: Printing analysis 'Legacy Divergence Analysis' for function 'inline_asm_vgpr_sgpr_virtreg_output_input_constraint':
; CHECK: DIVERGENT:       %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s,v"(i32 1234)
; CHECK-NEXT: DIVERGENT:       %vgpr = extractvalue { i32, i32 } %asm, 0
; CHECK-NEXT: {{^[ \t]+}}%sgpr = extractvalue { i32, i32 } %asm, 1
define void @inline_asm_vgpr_sgpr_virtreg_output_input_constraint() {
  %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s,v"(i32 1234)
  %vgpr = extractvalue { i32, i32 } %asm, 0
  %sgpr = extractvalue { i32, i32 } %asm, 1
  store i32 %vgpr, i32 addrspace(1)* undef
  store i32 %sgpr, i32 addrspace(1)* undef
  ret void
}
109