1; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
2; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
3; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
4
; FUNC-LABEL: {{^}}rotl_i32:
; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
; R600-NEXT: 32
; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}

; SI: s_sub_i32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
; SI: v_mov_b32_e32 [[VDST:v[0-9]+]], [[SDST]]
; SI: v_alignbit_b32 {{v[0-9]+, [s][0-9]+, s[0-9]+}}, [[VDST]]
define amdgpu_kernel void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
  ; rotl(x, y) = (x << y) | (x >> (32 - y)); expected to lower to alignbit.
  %hi = shl i32 %x, %y
  %amt = sub i32 32, %y
  %lo = lshr i32 %x, %amt
  %rot = or i32 %hi, %lo
  store i32 %rot, i32 addrspace(1)* %in
  ret void
}
22
; FUNC-LABEL: {{^}}rotl_v2i32:
; SI-DAG: s_sub_i32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
define amdgpu_kernel void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
entry:
  ; Element-wise rotate left: (x << y) | (x >> (32 - y)) per lane.
  %hi = shl <2 x i32> %x, %y
  %amt = sub <2 x i32> <i32 32, i32 32>, %y
  %lo = lshr <2 x i32> %x, %amt
  %rot = or <2 x i32> %hi, %lo
  store <2 x i32> %rot, <2 x i32> addrspace(1)* %in
  ret void
}
38
; FUNC-LABEL: {{^}}rotl_v4i32:
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI-DAG: s_sub_i32
; SI-DAG: v_alignbit_b32
; SI: s_endpgm
define amdgpu_kernel void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
  ; Element-wise rotate left: (x << y) | (x >> (32 - y)) per lane.
  %hi = shl <4 x i32> %x, %y
  %amt = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
  %lo = lshr <4 x i32> %x, %amt
  %rot = or <4 x i32> %hi, %lo
  store <4 x i32> %rot, <4 x i32> addrspace(1)* %in
  ret void
}
58
; NOTE(review): no RUN line in this file registers "GCN" as a check prefix
; (only SI, R600, and FUNC are declared above), so FileCheck never evaluates
; any of the GCN lines below — they are dead checks that silently pass.
; The expected mnemonics (global_load_ushort, v_sub_nc_u16_e64) also look
; like GFX9+/GFX10 output rather than SI/VI — TODO: confirm the intended
; target and either add a matching RUN line or remove these checks.
; GCN-LABEL: @test_rotl_i16
; GCN: global_load_ushort [[X:v[0-9]+]]
; GCN: global_load_ushort [[D:v[0-9]+]]
; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
; GCN: v_lshlrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
; GCN: v_lshrrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]

; 16-bit funnel shift left; with both data operands equal it is a rotate left.
declare i16 @llvm.fshl.i16(i16, i16, i16)

; Loads a 16-bit value and a shift amount from fixed array offsets, rotates
; the value left via llvm.fshl (shift amount implicitly masked to 4 bits for
; i16), and stores the result.
define void @test_rotl_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
entry:
  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
  %a = load i16, i16 addrspace(1)* %arrayidx
  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
  %b = load i16, i16 addrspace(1)* %arrayidx2
  %c = tail call i16 @llvm.fshl.i16(i16 %a, i16 %a, i16 %b)  ; rotl(%a, %b)
  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
  store i16 %c, i16 addrspace(1)* %arrayidx5
  ret void
}
83