; Lit regression test for the llvm.amdgcn.mov.dpp.{i32,i64} intrinsics:
; verifies DPP instruction selection, MC encodings, and the s_nop wait-state
; hazard mitigation inserted between DPP producers/consumers on pre-GFX10.
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-OPT,PREGFX10,PREGFX10-OPT %s
; RUN: llc -O0 -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-NOOPT,PREGFX10,PREGFX10-NOOPT %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefixes=VI,VI-OPT %s

; FIXME: The register allocator / scheduler should be able to avoid these hazards.

; VI-LABEL: {{^}}dpp_test:
; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
; VI-NOOPT: v_mov_b32_e32 v1, s{{[0-9]+}}
; PREGFX10-OPT: s_nop 1
; PREGFX10-NOOPT: s_nop 0
; PREGFX10-NOOPT: s_nop 0
; VI-OPT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
; VI-NOOPT: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}

; Back-to-back DPP ops: pre-GFX10 targets must insert s_nop wait states
; between the v_mov writes and the dependent DPP reads.
; VI-LABEL: {{^}}dpp_wait_states:
; VI-NOOPT: v_mov_b32_e32 [[VGPR1:v[0-9]+]], s{{[0-9]+}}
; VI: v_mov_b32_e32 [[VGPR0:v[0-9]+]], s{{[0-9]+}}
; PREGFX10-OPT: s_nop 1
; PREGFX10-NOOPT: s_nop 0
; PREGFX10-NOOPT: s_nop 0
; VI-OPT: v_mov_b32_dpp [[VGPR0]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:
; PREGFX10-OPT: s_nop 1
; PREGFX10-NOOPT: s_nop 0
; PREGFX10-NOOPT: s_nop 0
; VI-OPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; VI-NOOPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 1) #0
  %tmp1 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp0, i32 1, i32 1, i32 1, i1 1) #0
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; DPP as the first instruction of a basic block (after a phi): wait states
; must still be honored across the block boundary.
; VI-LABEL: {{^}}dpp_first_in_bb:
; VI: ; %endif
; PREGFX10-OPT: s_mov_b32
; PREGFX10-OPT: s_mov_b32
; PREGFX10-NOOPT: s_waitcnt
; PREGFX10-NOOPT: v_mov_b32_e32
; PREGFX10-NOOPT-NEXT: s_nop 0
; VI: v_mov_b32_dpp [[VGPR0:v[0-9]+]], v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; PREGFX10-OPT: s_nop 1
; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
; PREGFX10-OPT: s_nop 1
; PREGFX10-NOOPT: s_nop 0
; PREGFX10-NOOPT: s_nop 0
; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
  %cmp = fcmp oeq float %cond, 0.0
  br i1 %cmp, label %if, label %else

if:
  %out_val = load float, float addrspace(1)* %out
  %if_val = fadd float %a, %out_val
  br label %endif

else:
  %in_val = load float, float addrspace(1)* %in
  %else_val = fadd float %b, %in_val
  br label %endif

endif:
  %val = phi float [%if_val, %if], [%else_val, %else]
  %val_i32 = bitcast float %val to i32
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %val_i32, i32 1, i32 1, i32 1, i1 1) #0
  %tmp1 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp0, i32 1, i32 1, i32 1, i1 1) #0
  %tmp2 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %tmp1, i32 1, i32 1, i32 1, i1 1) #0
  %tmp_float = bitcast i32 %tmp2 to float
  store float %tmp_float, float addrspace(1)* %out
  ret void
}

; 64-bit DPP is legalized as two 32-bit v_mov_b32_dpp ops.
; VI-LABEL: {{^}}mov_dpp64_test:
; VI: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @mov_dpp64_test(i64 addrspace(1)* %out, i64 %in1) {
  %tmp0 = call i64 @llvm.amdgcn.mov.dpp.i64(i64 %in1, i32 1, i32 1, i32 1, i1 0) #0
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

; 64-bit DPP with an immediate old value: the constant 123451234512345
; (0x7047_3afaedd9) is materialized halfwise before the split DPP moves.
; VI-LABEL: {{^}}mov_dpp64_imm_test:
; VI-OPT-DAG: s_mov_b32 s[[SOLD_LO:[0-9]+]], 0x3afaedd9
; VI-OPT-DAG: s_movk_i32 s[[SOLD_HI:[0-9]+]], 0x7047
; VI-OPT-DAG: v_mov_b32_e32 v[[OLD_LO:[0-9]+]], s[[SOLD_LO]]
; VI-OPT-DAG: v_mov_b32_e32 v[[OLD_HI:[0-9]+]], s[[SOLD_HI]]
; VI-OPT-DAG: v_mov_b32_dpp v[[OLD_LO]], v[[OLD_LO]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI-OPT-DAG: v_mov_b32_dpp v[[OLD_HI]], v[[OLD_HI]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; VI-NOOPT-COUNT-2: v_mov_b32_dpp v{{[0-9]+}}, v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
define amdgpu_kernel void @mov_dpp64_imm_test(i64 addrspace(1)* %out) {
  %tmp0 = call i64 @llvm.amdgcn.mov.dpp.i64(i64 123451234512345, i32 1, i32 1, i32 1, i1 0) #0
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.mov.dpp.i32(i32, i32, i32, i32, i1) #0
declare i64 @llvm.amdgcn.mov.dpp.i64(i64, i32, i32, i32, i1) #0

attributes #0 = { nounwind readnone convergent }