1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK
3
; No-return selection of the raw-buffer FP atomic add: the intrinsic's result
; is dropped, so llc should pick the non-returning buffer_atomic_add_f32 form
; (no glc modifier). %val is a VGPR, %rsrc/%soffset are SGPRs (inreg), and
; %voffset is a VGPR, giving "offen" addressing.
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, v1, s[8:11], s6 offen
; CHECK-NEXT:    s_endpgm
  ; NOTE(review): cachepolicy immediate is 24, yet the expected instruction
  ; carries no slc/glc modifier -- confirm this odd value is intentional
  ; (sibling tests use 0 or 2).
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 24)
  ret void
}
16
; Same no-return f32 atomic add, but with a constant-zero voffset: no VGPR
; offset operand is needed, so the expected instruction uses "off" addressing
; instead of "offen". cachepolicy is 0 (no slc/glc).
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, off, s[8:11], s6
; CHECK-NEXT:    s_endpgm
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
  ret void
}
29
; Packed-half variant: a dropped <2 x half> fadd result should select the
; no-return buffer_atomic_pk_add_f16. Register/addressing setup mirrors the
; f32 offen test above (VGPR val + voffset, SGPR rsrc + soffset).
define amdgpu_ps void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_pk_add_f16 v0, v1, s[8:11], s6 offen
; CHECK-NEXT:    s_endpgm
  %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret void
}
42
; Packed-half variant with a constant voffset of 92: the immediate should fold
; into the instruction's offset field ("off, ... offset:92") rather than
; occupying a VGPR.
; NOTE(review): %voffset is declared but never used (the call passes i32 92),
; which is inconsistent with the f32 0_voffset test above that drops the
; parameter entirely -- consider removing it and regenerating the checks.
define amdgpu_ps void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_pk_add_f16 v0, off, s[8:11], s6 offset:92
; CHECK-NEXT:    s_endpgm
  %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 92, i32 %soffset, i32 0)
  ret void
}
55
; Cache-policy test: cachepolicy immediate 2 on the intrinsic should surface
; as the "slc" modifier on the emitted buffer_atomic_add_f32. Otherwise
; identical to the first offen test.
define amdgpu_ps void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_mov_b32 s11, s5
; CHECK-NEXT:    s_mov_b32 s10, s4
; CHECK-NEXT:    s_mov_b32 s9, s3
; CHECK-NEXT:    s_mov_b32 s8, s2
; CHECK-NEXT:    buffer_atomic_add_f32 v0, v1, s[8:11], s6 offen slc
; CHECK-NEXT:    s_endpgm
  %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
  ret void
}
68
; Intrinsic declarations: raw buffer FP atomic add for f32 and packed f16.
; Operands: (val, rsrc, voffset, soffset, cachepolicy immarg).
declare float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float, <4 x i32>, i32, i32, i32 immarg) #0
declare <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half>, <4 x i32>, i32, i32, i32 immarg) #0

attributes #0 = { nounwind }
73