; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
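; The GCN checks on test_rotr_i16 below are not enabled by any of the run
; lines above, and they expect GFX10 encodings (global_load_ushort,
; v_sub_nc_u16), so a gfx1010 run line is assumed here.
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s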

; FUNC-LABEL: {{^}}rotr_i32:
; R600: BIT_ALIGN_INT

; SI: v_alignbit_b32
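; The sub-from-32 / shl / lshr / or sequence below is the canonical
; rotate-right pattern: the DAG combiner folds it to ROTR, which selects to
; BIT_ALIGN_INT on R600 and v_alignbit_b32 on GCN.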
define amdgpu_kernel void @rotr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
  %tmp0 = sub i32 32, %y
  %tmp1 = shl i32 %x, %tmp0
  %tmp2 = lshr i32 %x, %y
  %tmp3 = or i32 %tmp1, %tmp2
  store i32 %tmp3, i32 addrspace(1)* %in
  ret void
}

; FUNC-LABEL: {{^}}rotr_v2i32:
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT

; SI: v_alignbit_b32
; SI: v_alignbit_b32
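; The vector cases are split into 32-bit lanes, so the same combine fires once
; per element: two rotate instructions here and four in rotr_v4i32 below.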
define amdgpu_kernel void @rotr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
entry:
  %tmp0 = sub <2 x i32> <i32 32, i32 32>, %y
  %tmp1 = shl <2 x i32> %x, %tmp0
  %tmp2 = lshr <2 x i32> %x, %y
  %tmp3 = or <2 x i32> %tmp1, %tmp2
  store <2 x i32> %tmp3, <2 x i32> addrspace(1)* %in
  ret void
}

; FUNC-LABEL: {{^}}rotr_v4i32:
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT
; R600: BIT_ALIGN_INT

; SI: v_alignbit_b32
; SI: v_alignbit_b32
; SI: v_alignbit_b32
; SI: v_alignbit_b32
define amdgpu_kernel void @rotr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
entry:
  %tmp0 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
  %tmp1 = shl <4 x i32> %x, %tmp0
  %tmp2 = lshr <4 x i32> %x, %y
  %tmp3 = or <4 x i32> %tmp1, %tmp2
  store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
  ret void
}

; GCN-LABEL: {{^}}test_rotr_i16:
; GCN: global_load_ushort [[X:v[0-9]+]]
; GCN: global_load_ushort [[D:v[0-9]+]]
; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
; GCN: v_lshrrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
; GCN: v_lshlrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
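; fshr(%a, %a, %b) is a rotate right, but there is no 16-bit alignbit
; instruction, so the i16 rotate is expanded as checked above: the shift
; amount and its negation are masked to 4 bits and the two shifted halves
; are or'ed together.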

declare i16 @llvm.fshr.i16(i16, i16, i16)

define void @test_rotr_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
entry:
  %arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
  %a = load i16, i16 addrspace(1)* %arrayidx
  %arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
  %b = load i16, i16 addrspace(1)* %arrayidx2
  %c = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 %b)
  %arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
  store i16 %c, i16 addrspace(1)* %arrayidx5
  ret void
}