; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GFX8 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -show-mc-encoding < %s | FileCheck -check-prefix=GFX10 %s

; FIXME: Merge with DAG test

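; Check that a call to llvm.amdgcn.mov.dpp.i32 with dpp_ctrl=1, row_mask=1,
; bank_mask=1 and bound_ctrl set selects a v_mov_b32_dpp with
; quad_perm:[1,0,0,0].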
define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
; GFX8-LABEL: dpp_test:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX8-NEXT:    s_load_dword s0, s[0:1], 0x2c
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v0, s2
; GFX8-NEXT:    v_mov_b32_e32 v2, s0
; GFX8-NEXT:    v_mov_b32_e32 v1, s3
; GFX8-NEXT:    s_nop 0
; GFX8-NEXT:    v_mov_b32_dpp v2, v2 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:1
; GFX8-NEXT:    flat_store_dword v[0:1], v2
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: dpp_test:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_clause 0x1 ; encoding: [0x01,0x00,0xa1,0xbf]
; GFX10-NEXT:    s_load_dword s4, s[0:1], 0x2c ; encoding: [0x00,0x01,0x00,0xf4,0x2c,0x00,0x00,0xfa]
; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24 ; encoding: [0x80,0x00,0x04,0xf4,0x24,0x00,0x00,0xfa]
; GFX10-NEXT:    v_mov_b32_e32 v1, 0 ; encoding: [0x80,0x02,0x02,0x7e]
; GFX10-NEXT:    s_waitcnt lgkmcnt(0) ; encoding: [0x7f,0xc0,0x8c,0xbf]
; GFX10-NEXT:    v_mov_b32_e32 v0, s4 ; encoding: [0x04,0x02,0x00,0x7e]
; GFX10-NEXT:    v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:1 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
; GFX10-NEXT:    global_store_dword v1, v0, s[2:3] ; encoding: [0x00,0x80,0x70,0xdc,0x01,0x00,0x02,0x00]
; GFX10-NEXT:    s_endpgm ; encoding: [0x00,0x00,0x81,0xbf]
  %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 true) #0
  store i32 %tmp0, i32 addrspace(1)* %out
  ret void
}
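
; The 64-bit variant is split into two 32-bit v_mov_b32_dpp instructions, one
; per half of the value, as the checks below show.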
define amdgpu_kernel void @mov_dpp64_test(i64 addrspace(1)* %out, i64 %in1) {
; GFX8-LABEL: mov_dpp64_test:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v0, s2
; GFX8-NEXT:    v_mov_b32_e32 v1, s3
; GFX8-NEXT:    v_mov_b32_e32 v3, s1
; GFX8-NEXT:    v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX8-NEXT:    v_mov_b32_dpp v1, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1
; GFX8-NEXT:    v_mov_b32_e32 v2, s0
; GFX8-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT:    s_endpgm
;
; GFX10-LABEL: mov_dpp64_test:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24 ; encoding: [0x00,0x00,0x08,0xf4,0x24,0x00,0x00,0xfa]
; GFX10-NEXT:    v_mov_b32_e32 v2, 0 ; encoding: [0x80,0x02,0x04,0x7e]
; GFX10-NEXT:    s_waitcnt lgkmcnt(0) ; encoding: [0x7f,0xc0,0x8c,0xbf]
; GFX10-NEXT:    v_mov_b32_e32 v0, s2 ; encoding: [0x02,0x02,0x00,0x7e]
; GFX10-NEXT:    v_mov_b32_e32 v1, s3 ; encoding: [0x03,0x02,0x02,0x7e]
; GFX10-NEXT:    v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x00,0x11]
; GFX10-NEXT:    v_mov_b32_dpp v1, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 ; encoding: [0xfa,0x02,0x02,0x7e,0x01,0x01,0x00,0x11]
; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1] ; encoding: [0x00,0x80,0x74,0xdc,0x02,0x00,0x00,0x00]
; GFX10-NEXT:    s_endpgm ; encoding: [0x00,0x00,0x81,0xbf]
  %tmp0 = call i64 @llvm.amdgcn.mov.dpp.i64(i64 %in1, i32 1, i32 1, i32 1, i1 false) #0
  store i64 %tmp0, i64 addrspace(1)* %out
  ret void
}
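
; Intrinsic operands are <src, dpp_ctrl, row_mask, bank_mask, bound_ctrl>;
; dpp_ctrl 1 corresponds to quad_perm:[1,0,0,0] in the checks above.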
declare i32 @llvm.amdgcn.mov.dpp.i32(i32, i32 immarg, i32 immarg, i32 immarg, i1 immarg) #0
declare i64 @llvm.amdgcn.mov.dpp.i64(i64, i32 immarg, i32 immarg, i32 immarg, i1 immarg) #0

attributes #0 = { convergent nounwind readnone }