; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG --check-prefix=FUNC %s
; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s
; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s

;FUNC-LABEL: {{^}}test1:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: v_add_i32_e32 [[REG:v[0-9]+]], {{v[0-9]+, v[0-9]+}}
;SI-NOT: [[REG]]
;SI: buffer_store_dword [[REG]],
; Scalar i32 add: load two consecutive dwords from %in, add them, and
; store the sum to %out.  On SI the add result register must feed the
; store directly (SI-NOT guards against a stray rewrite of [[REG]]).
define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
  %a = load i32 addrspace(1)* %in
  %b = load i32 addrspace(1)* %b_ptr
  %result = add i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

;FUNC-LABEL: {{^}}test2:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

; <2 x i32> add: the vector add must be scalarized into two per-lane
; adds (two ADD_INT on EG, two v_add_i32_e32 on SI).
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32> addrspace(1)* %in
  %b = load <2 x i32> addrspace(1)* %b_ptr
  %result = add <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

;FUNC-LABEL: {{^}}test4:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

; <4 x i32> add: scalarized into four per-lane adds on both targets.
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32> addrspace(1)* %in
  %b = load <4 x i32> addrspace(1)* %b_ptr
  %result = add <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test8:
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; <8 x i32> add of kernel arguments: operands arrive in scalar registers,
; so SI should use eight scalar adds (s_add_i32) rather than vector adds.
define void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
entry:
  %0 = add <8 x i32> %a, %b
  store <8 x i32> %0, <8 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test16:
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; <16 x i32> add of kernel arguments: same as test8 but wider — sixteen
; scalar adds expected on SI, sixteen ADD_INT on EG.
define void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
entry:
  %0 = add <16 x i32> %a, %b
  store <16 x i32> %0, <16 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}add64:
; SI: s_add_u32
; SI: s_addc_u32
; i64 add of scalar kernel arguments: expanded into a 32-bit add plus an
; add-with-carry on the scalar unit (s_add_u32 / s_addc_u32).
define void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
  %0 = add i64 %a, %b
  store i64 %0, i64 addrspace(1)* %out
  ret void
}

; The v_addc_u32 and v_add_i32 instruction can't read SGPRs, because they
; use VCC.  The test is designed so that %a will be stored in an SGPR and
; %0 will be stored in a VGPR, so the compiler will be forced to copy %a
; to a VGPR before doing the add.

; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
; SI-NOT: v_addc_u32_e32 s
; Mixed SGPR/VGPR i64 add: %a is an SGPR kernel argument, the loaded
; value is in VGPRs.  v_addc_u32 cannot take an SGPR operand (it reads
; VCC), so no v_addc_u32_e32 with an s-register operand may be emitted.
define void @add64_sgpr_vgpr(i64 addrspace(1)* %out, i64 %a, i64 addrspace(1)* %in) {
entry:
  %0 = load i64 addrspace(1)* %in
  %1 = add i64 %a, %0
  store i64 %1, i64 addrspace(1)* %out
  ret void
}

; Test i64 add inside a branch.
; FUNC-LABEL: {{^}}add64_in_branch:
; SI: s_add_u32
; SI: s_addc_u32
; The i64 add sits on one side of a diamond; the scalar add/addc pair
; must still be selected even though the result merges through a phi.
define void @add64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64 addrspace(1)* %in
  br label %endif

else:
  %2 = add i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}
