; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s

; FUNC-LABEL: {{^}}xor_v2i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @xor_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1) {
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in0
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %in1
  %result = xor <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}xor_v4i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in0
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %in1
  %result = xor <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

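; The i1 xor of two fcmp results stays in condition registers: the two
; compares produce lane masks, s_xor_b64 combines them, and v_cndmask
; performs the float select.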
; FUNC-LABEL: {{^}}xor_i1:
; EG: XOR_INT {{\** *}}{{T[0-9]+\.[XYZW]}}, {{PS|PV\.[XYZW]}}, {{PS|PV\.[XYZW]}}

; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 1.0, {{v[0-9]+}}
; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, {{v[0-9]+}}
; SI: s_xor_b64 [[XOR:vcc]], [[CMP1]], [[CMP0]]
; SI: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @xor_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
  %a = load float, float addrspace(1)* %in0
  %b = load float, float addrspace(1)* %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %xor = xor i1 %acmp, %bcmp
  %result = select i1 %xor, float %a, float %b
  store float %result, float addrspace(1)* %out
  ret void
}

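; An in-memory i1 is loaded as a byte, xored in a VGPR, and masked back
; down to a single bit before the byte store.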
; FUNC-LABEL: {{^}}v_xor_i1:
; SI: buffer_load_ubyte [[B:v[0-9]+]]
; SI: buffer_load_ubyte [[A:v[0-9]+]]
; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[B]], [[A]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
define amdgpu_kernel void @v_xor_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in0, i1 addrspace(1)* %in1) {
  %a = load volatile i1, i1 addrspace(1)* %in0
  %b = load volatile i1, i1 addrspace(1)* %in1
  %xor = xor i1 %a, %b
  store i1 %xor, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i32:
; SI: v_xor_b32_e32
define amdgpu_kernel void @vector_xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i32:
; SI: s_xor_b32
define amdgpu_kernel void @scalar_xor_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %result = xor i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

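; xor with the all-ones constant -1 is a bitwise not and selects s_not_b32.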
; FUNC-LABEL: {{^}}scalar_not_i32:
; SI: s_not_b32
define amdgpu_kernel void @scalar_not_i32(i32 addrspace(1)* %out, i32 %a) {
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

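; The same not pattern applied to a value loaded into a VGPR selects
; v_not_b32.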
; FUNC-LABEL: {{^}}vector_not_i32:
; SI: v_not_b32
define amdgpu_kernel void @vector_not_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) {
  %a = load i32, i32 addrspace(1)* %in0
  %b = load i32, i32 addrspace(1)* %in1
  %result = xor i32 %a, -1
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i64:
; SI: v_xor_b32_e32
; SI: v_xor_b32_e32
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i64:
; SI: s_xor_b64
; SI: s_endpgm
define amdgpu_kernel void @scalar_xor_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %result = xor i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

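; A uniform 64-bit not can use the full-width s_not_b64.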
; FUNC-LABEL: {{^}}scalar_not_i64:
; SI: s_not_b64
define amdgpu_kernel void @scalar_not_i64(i64 addrspace(1)* %out, i64 %a) {
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

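; The VALU has no 64-bit not, so each 32-bit half is inverted separately.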
; FUNC-LABEL: {{^}}vector_not_i64:
; SI: v_not_b32
; SI: v_not_b32
define amdgpu_kernel void @vector_not_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in0, i64 addrspace(1)* %in1) {
  %a = load i64, i64 addrspace(1)* %in0
  %b = load i64, i64 addrspace(1)* %in1
  %result = xor i64 %a, -1
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; Test that we have a pattern to match xor inside a branch.
; The condition and both xor operands are uniform kernel arguments,
; so the backend is able to select an SALU instruction for this.

; FUNC-LABEL: {{^}}xor_cf:
; SI: s_xor_b64
define amdgpu_kernel void @xor_cf(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = xor i64 %a, %b
  br label %endif

else:
  %2 = load i64, i64 addrspace(1)* %in
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}
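
; A hedged sketch (comment only, neither compiled nor checked): if the
; xor operand instead depended on the workitem ID, the result would be
; divergent and the VALU form would be expected. The names below are
; illustrative assumptions, not part of this test:
;   %tid = call i32 @llvm.amdgcn.workitem.id.x()
;   %val = load i32, i32 addrspace(1)* %in
;   %x = xor i32 %val, %tid    ; divergent result, expect v_xor_b32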
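; This 64-bit literal is not an inline immediate, so it is applied as two
; 32-bit scalar xors: 0x3039 against the low half and 0xf237b against the
; high half.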
; FUNC-LABEL: {{^}}scalar_xor_literal_i64:
; SI: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI-DAG: s_xor_b32 s[[RES_HI:[0-9]+]], s{{[0-9]+}}, 0xf237b
; SI-DAG: s_xor_b32 s[[RES_LO:[0-9]+]], s{{[0-9]+}}, 0x3039
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_LO]]
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_HI]]
define amdgpu_kernel void @scalar_xor_literal_i64(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, 4261135838621753
  store i64 %or, i64 addrspace(1)* %out
  ret void
}
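; When the literal has a second, non-xor use, it is materialized once
; into an SGPR pair (s_movk_i32 / s_mov_b32), the xor uses the full-width
; s_xor_b64, and the add reuses the same registers.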
; FUNC-LABEL: {{^}}scalar_xor_literal_multi_use_i64:
; SI: s_load_dwordx4 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0xf237b
; SI-DAG: s_movk_i32 s[[K_LO:[0-9]+]], 0x3039
; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}

; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_LO]]
; SI: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, s[[K_HI]]
define amdgpu_kernel void @scalar_xor_literal_multi_use_i64(i64 addrspace(1)* %out, [8 x i32], i64 %a, i64 %b) {
  %or = xor i64 %a, 4261135838621753
  store i64 %or, i64 addrspace(1)* %out

  %foo = add i64 %b, 4261135838621753
  store volatile i64 %foo, i64 addrspace(1)* undef
  ret void
}
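; 63 is an inline immediate; only the low half needs an xor, since xor of
; the high half with 0 is folded away (enforced by the SI-NOT lines).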
; FUNC-LABEL: {{^}}scalar_xor_inline_imm_i64:
; SI: s_load_dwordx2 s{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI-NOT: xor_b32
; SI: s_xor_b32 s[[VAL_LO]], s{{[0-9]+}}, 63
; SI-NOT: xor_b32
; SI: v_mov_b32_e32 v[[VLO:[0-9]+]], s{{[0-9]+}}
; SI-NOT: xor_b32
; SI: v_mov_b32_e32 v[[VHI:[0-9]+]], s{{[0-9]+}}
; SI-NOT: xor_b32
; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @scalar_xor_inline_imm_i64(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, 63
  store i64 %or, i64 addrspace(1)* %out
  ret void
}
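; -8 sign-extends to a 64-bit inline immediate, so a single full-width
; s_xor_b64 suffices.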
; FUNC-LABEL: {{^}}scalar_xor_neg_inline_imm_i64:
; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, [[VAL]], -8
define amdgpu_kernel void @scalar_xor_neg_inline_imm_i64(i64 addrspace(1)* %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, -8
  store i64 %or, i64 addrspace(1)* %out
  ret void
}
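; The VALU has no 64-bit xor, so the constant is applied in halves: -8 to
; the low dword and -1 (its sign extension) to the high dword.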
; FUNC-LABEL: {{^}}vector_xor_i64_neg_inline_imm:
; SI: buffer_load_dwordx2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
; SI: v_xor_b32_e32 {{v[0-9]+}}, -8, v[[LO_VREG]]
; SI: v_xor_b32_e32 {{v[0-9]+}}, -1, {{.*}}
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_i64_neg_inline_imm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
  %loada = load i64, i64 addrspace(1)* %a, align 8
  %or = xor i64 %loada, -8
  store i64 %or, i64 addrspace(1)* %out
  ret void
}
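; A non-inline 64-bit literal on the VALU path likewise splits into two
; 32-bit xors: 0xdf77987f on the low half and 0x146f on the high half.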
; FUNC-LABEL: {{^}}vector_xor_literal_i64:
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0xdf77987f, v[[LO_VREG]]
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0x146f, v[[HI_VREG]]
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_literal_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
  %loada = load i64, i64 addrspace(1)* %a, align 8
  %or = xor i64 %loada, 22470723082367
  store i64 %or, i64 addrspace(1)* %out
  ret void
}