; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-OPT,PREGFX10,PREGFX10-OPT %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-NOOPT,PREGFX10,PREGFX10-NOOPT %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-OPT %s

; FIXME: The register allocator / scheduler should be able to avoid these hazards.

; VI-LABEL: {{^}}dpp_test:
; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
; VI-NOOPT: v_mov_b32_e32 v1, s{{[0-9]+}}
; PREGFX10: s_nop 1
; VI-OPT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
; VI-NOOPT: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
; Basic single v_mov_b32_dpp: quad_perm:[1,0,0,0], row_mask = bank_mask = 0x1,
; bound_ctrl = true (i1 1).
define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; VI-LABEL: {{^}}dpp_wait_states:
; VI-NOOPT: v_mov_b32_e32 [[VGPR1:v[0-9]+]], s{{[0-9]+}}
; VI: v_mov_b32_e32 [[VGPR0:v[0-9]+]], s{{[0-9]+}}
; PREGFX10: s_nop 1
; VI-OPT: v_mov_b32_dpp [[VGPR0]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; PREGFX10: s_nop 1
; VI-OPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI-NOOPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; Two chained DPP moves; the CHECK lines above verify an s_nop is inserted
; before each DPP on pre-gfx10 targets (VALU-write-then-DPP-read hazard).
define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  %tmp1 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp0, i32 1, i32 1, i32 1, i1 1) #0
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; VI-LABEL: {{^}}dpp_first_in_bb:
; VI: ; %endif
; PREGFX10-NOOPT: s_waitcnt
; PREGFX10-NOOPT: v_mov_b32_e32
; VI: v_mov_b32_dpp [[VGPR0:v[0-9]+]], v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; PREGFX10-OPT: s_mov_b32
; PREGFX10-OPT: s_mov_b32
; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; PREGFX10: s_nop 1
; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; A chain of DPP moves placed at the head of a join block (%endif), so the
; first DPP is the first instruction after control flow merges.
define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
  %cmp = fcmp oeq float %cond, 0.0
  br i1 %cmp, label %if, label %else

if:
  %out_val = load float, float addrspace(1)* %out
  %if_val = fadd float %a, %out_val
  br label %endif

else:
  %in_val = load float, float addrspace(1)* %in
  %else_val = fadd float %b, %in_val
  br label %endif

endif:
  ; Three chained DPP moves on the phi result; the CHECK lines above verify
  ; the expected nop insertion between them on pre-gfx10 targets.
  %val = phi float [%if_val, %if], [%else_val, %else]
  %val_i32 = bitcast float %val to i32
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %val_i32, i32 1, i32 1, i32 1, i1 1) #0
  %tmp1 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp0, i32 1, i32 1, i32 1, i1 1) #0
  %tmp2 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp1, i32 1, i32 1, i32 1, i1 1) #0
  %tmp_float = bitcast i32 %tmp2 to float
  store float %tmp_float, float addrspace(1)* %out
  ret void
}

; VI-LABEL: {{^}}mov_dpp64_test:
; VI: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; 64-bit DPP move: the CHECK lines above verify it is split into two 32-bit
; v_mov_b32_dpp instructions. bound_ctrl is false (i1 0) here.
define amdgpu_kernel void @mov_dpp64_test(i64 addrspace(1)* %out, i64 %in1) {
  %tmp0 = call i64 @llvm.amdgcn.mov.dpp.i64(i64 %in1, i32 1, i32 1, i32 1, i1 0) #0
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

; VI-LABEL: {{^}}mov_dpp64_imm_test:
; VI-OPT-DAG: s_mov_b32 s[[SOLD_LO:[0-9]+]], 0x3afaedd9
; VI-OPT-DAG: s_movk_i32 s[[SOLD_HI:[0-9]+]], 0x7047
; VI-OPT-DAG: v_mov_b32_e32 v[[OLD_LO:[0-9]+]], s[[SOLD_LO]]
; VI-OPT-DAG: v_mov_b32_e32 v[[OLD_HI:[0-9]+]], s[[SOLD_HI]]
; VI-OPT-DAG: v_mov_b32_dpp v[[OLD_LO]], v[[OLD_LO]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI-OPT-DAG: v_mov_b32_dpp v[[OLD_HI]], v[[OLD_HI]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI-NOOPT-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; 64-bit DPP move of an immediate (123451234512345 = 0x7047_3afaedd9); the
; CHECK lines above verify the constant is materialized as two 32-bit halves
; and moved with two v_mov_b32_dpp instructions.
define amdgpu_kernel void @mov_dpp64_imm_test(i64 addrspace(1)* %out) {
  %tmp0 = call i64 @llvm.amdgcn.mov.dpp.i64(i64 123451234512345, i32 1, i32 1, i32 1, i1 0) #0
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}
92
; Intrinsic signatures: (src, dpp_ctrl, row_mask, bank_mask, bound_ctrl).
declare i32 @llvm.amdgcn.mov.dpp.i32(i32, i32, i32, i32, i1) #0
declare i64 @llvm.amdgcn.mov.dpp.i64(i64, i32, i32, i32, i1) #0

attributes #0 = { nounwind readnone convergent }