# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=regbankselect -regbankselect-fast -o - %s | FileCheck -check-prefix=FAST %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=regbankselect -regbankselect-greedy -o - %s | FileCheck -check-prefix=GREEDY %s

# We see the offset is a VGPR, but this is due to a constant for some
# reason ending up in a VGPR. This shouldn't really ever happen, but
# make sure this doesn't break when looking through copies for the add
# operands.
---
# Constant add operand pre-assigned to a VGPR: regbankselect must look
# through the cross-bank copies when matching the add for the s.buffer.load
# offset folding.
name:            s_buffer_load_f32_vgpr_offset_cross_bank_copy_add_offset
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr0

    ; FAST-LABEL: name: s_buffer_load_f32_vgpr_offset_cross_bank_copy_add_offset
    ; FAST: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr0
    ; FAST: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 256
    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; FAST: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY3]], [[COPY2]]
    ; FAST: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; FAST: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; FAST: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[COPY]](<4 x s32>), [[C2]](s32), [[COPY3]], [[C1]], 256, 0, 0 :: (dereferenceable invariant load (s32))
    ; FAST: S_ENDPGM 0, implicit [[AMDGPU_BUFFER_LOAD]](s32)
    ; GREEDY-LABEL: name: s_buffer_load_f32_vgpr_offset_cross_bank_copy_add_offset
    ; GREEDY: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr0
    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; GREEDY: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 256
    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; GREEDY: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY2]], [[C]]
    ; GREEDY: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; GREEDY: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[COPY]](<4 x s32>), [[C2]](s32), [[COPY2]], [[C1]], 256, 0, 0 :: (dereferenceable invariant load (s32))
    ; GREEDY: S_ENDPGM 0, implicit [[AMDGPU_BUFFER_LOAD]](s32)
    %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    %1:_(s32) = COPY $sgpr0
    %2:vgpr(s32) = G_CONSTANT i32 256
    %3:_(s32) = G_ADD %1, %2
    %4:_(s32) = G_AMDGPU_S_BUFFER_LOAD %0, %3, 0
    S_ENDPGM 0, implicit %4

...
---
# Negative offset: the add must be kept (not folded into the immediate
# offset field, which is unsigned), so the VGPR sum feeds the buffer load.
name:            s_buffer_load_negative_offset
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0

    ; FAST-LABEL: name: s_buffer_load_negative_offset
    ; FAST: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
    ; FAST: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    ; FAST: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -60
    ; FAST: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; FAST: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY1]], [[COPY2]]
    ; FAST: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; FAST: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; FAST: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[COPY]](<4 x s32>), [[C2]](s32), [[ADD]], [[C1]], 0, 0, 0 :: (dereferenceable invariant load (s32))
    ; FAST: S_ENDPGM 0, implicit [[AMDGPU_BUFFER_LOAD]](s32)
    ; GREEDY-LABEL: name: s_buffer_load_negative_offset
    ; GREEDY: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
    ; GREEDY: [[COPY:%[0-9]+]]:sgpr(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    ; GREEDY: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -60
    ; GREEDY: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; GREEDY: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY1]], [[COPY2]]
    ; GREEDY: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
    ; GREEDY: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; GREEDY: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[COPY]](<4 x s32>), [[C2]](s32), [[ADD]], [[C1]], 0, 0, 0 :: (dereferenceable invariant load (s32))
    ; GREEDY: S_ENDPGM 0, implicit [[AMDGPU_BUFFER_LOAD]](s32)
    %0:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = G_CONSTANT i32 -60
    %3:_(s32) = G_ADD %1, %2
    %4:_(s32) = G_AMDGPU_S_BUFFER_LOAD %0, %3, 0
    S_ENDPGM 0, implicit %4

...