# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX8
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx906 -O0 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=GFX9

# Legal-width s32 G_SMULO: expanded to G_SMULH + G_MUL; overflow bit is
# (high half != mul >> 31), then sign-extended by the user G_SEXT.
---
name: test_smulo_s32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; GFX8-LABEL: name: test_smulo_s32
    ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX8: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[COPY]], [[COPY1]]
    ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
    ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32)
    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]]
    ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; GFX8: $vgpr0 = COPY [[MUL]](s32)
    ; GFX8: $vgpr1 = COPY [[SEXT]](s32)
    ; GFX9-LABEL: name: test_smulo_s32
    ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GFX9: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[COPY]], [[COPY1]]
    ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
    ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32)
    ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]]
    ; GFX9: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1)
    ; GFX9: $vgpr0 = COPY [[MUL]](s32)
    ; GFX9: $vgpr1 = COPY [[SEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32), %3:_(s1) = G_SMULO %0, %1
    %4:_(s32) = G_SEXT %3
    $vgpr0 = COPY %2
    $vgpr1 = COPY %4
...
40 41--- 42name: test_smulo_v2s32 43body: | 44 bb.0: 45 liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 46 47 ; GFX8-LABEL: name: test_smulo_v2s32 48 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 49 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 50 ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) 51 ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>) 52 ; GFX8: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[UV]], [[UV2]] 53 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]] 54 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 55 ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32) 56 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]] 57 ; GFX8: [[SMULH1:%[0-9]+]]:_(s32) = G_SMULH [[UV1]], [[UV3]] 58 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]] 59 ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[MUL1]], [[C]](s32) 60 ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH1]](s32), [[ASHR1]] 61 ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32) 62 ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 63 ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 64 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1 65 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1 66 ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32) 67 ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) 68 ; GFX8: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>) 69 ; GFX9-LABEL: name: test_smulo_v2s32 70 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 71 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 72 ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) 73 ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>) 74 ; GFX9: 
[[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[UV]], [[UV2]] 75 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]] 76 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 77 ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32) 78 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]] 79 ; GFX9: [[SMULH1:%[0-9]+]]:_(s32) = G_SMULH [[UV1]], [[UV3]] 80 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]] 81 ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[MUL1]], [[C]](s32) 82 ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH1]](s32), [[ASHR1]] 83 ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32) 84 ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 85 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1 86 ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 87 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1 88 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32) 89 ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) 90 ; GFX9: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>) 91 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1 92 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3 93 %2:_(<2 x s32>), %3:_(<2 x s1>) = G_SMULO %0, %1 94 %4:_(<2 x s32>) = G_SEXT %3 95 $vgpr0_vgpr1 = COPY %2 96 $vgpr2_vgpr3 = COPY %4 97... 
98 99--- 100name: test_smulo_s16 101body: | 102 bb.0: 103 liveins: $vgpr0, $vgpr1 104 105 ; GFX8-LABEL: name: test_smulo_s16 106 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 107 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 108 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16 109 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 16 110 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 111 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 112 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 113 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 114 ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1) 115 ; GFX8: $vgpr0 = COPY [[SEXT_INREG3]](s32) 116 ; GFX8: $vgpr1 = COPY [[SEXT]](s32) 117 ; GFX9-LABEL: name: test_smulo_s16 118 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 119 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 120 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 16 121 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 16 122 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 123 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 124 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 125 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 126 ; GFX9: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1) 127 ; GFX9: $vgpr0 = COPY [[SEXT_INREG3]](s32) 128 ; GFX9: $vgpr1 = COPY [[SEXT]](s32) 129 %0:_(s32) = COPY $vgpr0 130 %1:_(s32) = COPY $vgpr1 131 %2:_(s16) = G_TRUNC %0 132 %3:_(s16) = G_TRUNC %1 133 %4:_(s16), %6:_(s1) = G_SMULO %2, %3 134 %5:_(s32) = G_SEXT %4 135 %7:_(s32) = G_SEXT %6 136 $vgpr0 = COPY %5 137 $vgpr1 = COPY %7 138... 
139 140--- 141name: test_smulo_s8 142body: | 143 bb.0: 144 liveins: $vgpr0, $vgpr1 145 146 ; GFX8-LABEL: name: test_smulo_s8 147 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 148 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 149 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 150 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 151 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 152 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 153 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 154 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 155 ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1) 156 ; GFX8: $vgpr0 = COPY [[SEXT_INREG3]](s32) 157 ; GFX8: $vgpr1 = COPY [[SEXT]](s32) 158 ; GFX9-LABEL: name: test_smulo_s8 159 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 160 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 161 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 162 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 163 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 164 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 165 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 166 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 167 ; GFX9: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ICMP]](s1) 168 ; GFX9: $vgpr0 = COPY [[SEXT_INREG3]](s32) 169 ; GFX9: $vgpr1 = COPY [[SEXT]](s32) 170 %0:_(s32) = COPY $vgpr0 171 %1:_(s32) = COPY $vgpr1 172 %2:_(s8) = G_TRUNC %0 173 %3:_(s8) = G_TRUNC %1 174 %4:_(s8), %6:_(s1) = G_SMULO %2, %3 175 %5:_(s32) = G_SEXT %4 176 %7:_(s32) = G_SEXT %6 177 $vgpr0 = COPY %5 178 $vgpr1 = COPY %7 179... 
180 181--- 182name: test_smulo_v2s16 183body: | 184 bb.0: 185 liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 186 ; GFX8-LABEL: name: test_smulo_v2s16 187 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 188 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 189 ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) 190 ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>) 191 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16 192 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16 193 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 194 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 195 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 196 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16 197 ; GFX8: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV3]], 16 198 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 199 ; GFX8: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL1]], 16 200 ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[SEXT_INREG5]] 201 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 202 ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]] 203 ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C]] 204 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 205 ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32) 206 ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] 207 ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) 208 ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 209 ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 210 ; GFX8: [[SEXT_INREG6:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1 211 ; GFX8: [[SEXT_INREG7:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1 212 ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG6]](s32), 
[[SEXT_INREG7]](s32) 213 ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>) 214 ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C1]](s32) 215 ; GFX8: [[SEXT_INREG8:%[0-9]+]]:_(s32) = G_SEXT_INREG [[BITCAST1]], 16 216 ; GFX8: [[SEXT_INREG9:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR]], 16 217 ; GFX8: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG8]](s32), [[SEXT_INREG9]](s32) 218 ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR1]](<2 x s32>) 219 ; GFX8: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s32>) 220 ; GFX9-LABEL: name: test_smulo_v2s16 221 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 222 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 223 ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) 224 ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>) 225 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16 226 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16 227 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 228 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 16 229 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 230 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16 231 ; GFX9: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV3]], 16 232 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 233 ; GFX9: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL1]], 16 234 ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[SEXT_INREG5]] 235 ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[MUL]](s32), [[MUL1]](s32) 236 ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 237 ; GFX9: [[SEXT_INREG6:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT]], 1 238 ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 239 ; GFX9: 
[[SEXT_INREG7:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ANYEXT1]], 1 240 ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG6]](s32), [[SEXT_INREG7]](s32) 241 ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[BUILD_VECTOR_TRUNC]](<2 x s16>) 242 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 243 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) 244 ; GFX9: [[SEXT_INREG8:%[0-9]+]]:_(s32) = G_SEXT_INREG [[BITCAST]], 16 245 ; GFX9: [[SEXT_INREG9:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR]], 16 246 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG8]](s32), [[SEXT_INREG9]](s32) 247 ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR1]](<2 x s32>) 248 ; GFX9: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s32>) 249 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1 250 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3 251 %2:_(<2 x s16>) = G_TRUNC %0 252 %3:_(<2 x s16>) = G_TRUNC %1 253 %4:_(<2 x s16>), %6:_(<2 x s1>) = G_SMULO %2, %3 254 %7:_(<2 x s32>) = G_SEXT %6 255 %5:_(<2 x s32>) = G_SEXT %4 256 $vgpr0_vgpr1 = COPY %5 257 $vgpr2_vgpr3 = COPY %7 258... 
259 260 261--- 262name: test_smulo_v2s8 263body: | 264 bb.0: 265 liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 266 ; GFX8-LABEL: name: test_smulo_v2s8 267 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 268 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 269 ; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 270 ; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 271 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 272 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 8 273 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 274 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 275 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 276 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 277 ; GFX8: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY3]], 8 278 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 279 ; GFX8: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL1]], 8 280 ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[SEXT_INREG5]] 281 ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 282 ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[MUL]](s32) 283 ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]] 284 ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[MUL1]](s32) 285 ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] 286 ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 287 ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16) 288 ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] 289 ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) 290 ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 291 ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 292 ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) 293 ; GFX8: $vgpr1 = COPY [[ANYEXT1]](s32) 294 ; GFX8: $vgpr2 = COPY [[ANYEXT2]](s32) 295 ; GFX9-LABEL: name: test_smulo_v2s8 296 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 297 ; GFX9: 
[[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 298 ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 299 ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 300 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 301 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY2]], 8 302 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 303 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 304 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 305 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 306 ; GFX9: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY3]], 8 307 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 308 ; GFX9: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL1]], 8 309 ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL1]](s32), [[SEXT_INREG5]] 310 ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 311 ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[MUL]](s32) 312 ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]] 313 ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[MUL1]](s32) 314 ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] 315 ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 316 ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16) 317 ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]] 318 ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) 319 ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 320 ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1) 321 ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) 322 ; GFX9: $vgpr1 = COPY [[ANYEXT1]](s32) 323 ; GFX9: $vgpr2 = COPY [[ANYEXT2]](s32) 324 %0:_(s32) = COPY $vgpr0 325 %1:_(s32) = COPY $vgpr1 326 %2:_(s32) = COPY $vgpr2 327 %3:_(s32) = COPY $vgpr3 328 %5:_(s8) = G_TRUNC %0 329 %6:_(s8) = G_TRUNC %1 330 %7:_(s8) = G_TRUNC %2 331 %8:_(s8) = G_TRUNC %3 332 %11:_(<2 x s8>) = G_BUILD_VECTOR %5, %6 333 %12:_(<2 x s8>) = G_BUILD_VECTOR %7, %8 334 %13:_(<2 x 
s8>), %19:_(<2 x s1>) = G_SMULO %11, %12 335 %20:_(<2 x s32>) = G_SEXT %19 336 %14:_(s8), %15:_(s8) = G_UNMERGE_VALUES %13 337 %21:_(s1), %22:_(s1) = G_UNMERGE_VALUES %19 338 %17:_(s16) = G_MERGE_VALUES %14, %15 339 %18:_(s32) = G_ANYEXT %17 340 %23:_(s32) = G_ANYEXT %21 341 %24:_(s32) = G_ANYEXT %22 342 $vgpr0 = COPY %18 343 $vgpr1 = COPY %23 344 $vgpr2 = COPY %24 345... 346 347--- 348name: test_smulo_v4s8 349body: | 350 bb.0: 351 liveins: $vgpr0, $vgpr1 352 ; GFX8-LABEL: name: test_smulo_v4s8 353 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 354 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 355 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 356 ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) 357 ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 358 ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) 359 ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 360 ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) 361 ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) 362 ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32) 363 ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) 364 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 365 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 366 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 367 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 8 368 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 369 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR]], 8 370 ; GFX8: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR3]], 8 371 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 372 ; GFX8: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR1]], 8 373 ; GFX8: [[SEXT_INREG6:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR4]], 8 374 ; GFX8: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG5]], [[SEXT_INREG6]] 375 ; GFX8: 
[[SEXT_INREG7:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR2]], 8 376 ; GFX8: [[SEXT_INREG8:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR5]], 8 377 ; GFX8: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG7]], [[SEXT_INREG8]] 378 ; GFX8: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 379 ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]] 380 ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C3]] 381 ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32) 382 ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] 383 ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL2]], [[C3]] 384 ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32) 385 ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]] 386 ; GFX8: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL3]], [[C3]] 387 ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32) 388 ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] 389 ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 390 ; GFX8: $vgpr0 = COPY [[OR2]](s32) 391 ; GFX8: $vgpr1 = COPY [[ANYEXT]](s32) 392 ; GFX9-LABEL: name: test_smulo_v4s8 393 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 394 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 395 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 396 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) 397 ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 398 ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) 399 ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 400 ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) 401 ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) 402 ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32) 403 ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) 404 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 8 405 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8 406 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 407 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = 
G_SEXT_INREG [[MUL]], 8 408 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 409 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR]], 8 410 ; GFX9: [[SEXT_INREG4:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR3]], 8 411 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG3]], [[SEXT_INREG4]] 412 ; GFX9: [[SEXT_INREG5:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR1]], 8 413 ; GFX9: [[SEXT_INREG6:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR4]], 8 414 ; GFX9: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG5]], [[SEXT_INREG6]] 415 ; GFX9: [[SEXT_INREG7:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR2]], 8 416 ; GFX9: [[SEXT_INREG8:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LSHR5]], 8 417 ; GFX9: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG7]], [[SEXT_INREG8]] 418 ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 419 ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C3]] 420 ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[MUL1]], [[C3]] 421 ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32) 422 ; GFX9: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] 423 ; GFX9: [[AND2:%[0-9]+]]:_(s32) = G_AND [[MUL2]], [[C3]] 424 ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32) 425 ; GFX9: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]] 426 ; GFX9: [[AND3:%[0-9]+]]:_(s32) = G_AND [[MUL3]], [[C3]] 427 ; GFX9: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32) 428 ; GFX9: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] 429 ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1) 430 ; GFX9: $vgpr0 = COPY [[OR2]](s32) 431 ; GFX9: $vgpr1 = COPY [[ANYEXT]](s32) 432 %0:_(s32) = COPY $vgpr0 433 %1:_(s32) = COPY $vgpr1 434 %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0 435 %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8) = G_UNMERGE_VALUES %1 436 %10:_(<4 x s8>) = G_BUILD_VECTOR %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) 437 %11:_(<4 x s8>) = G_BUILD_VECTOR %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8) 438 %12:_(<4 x s8>), %18:_(<4 x s1>) = G_SMULO %10:_, %11:_ 439 %13:_(s8), 
%14:_(s8), %15:_(s8), %16:_(s8) = G_UNMERGE_VALUES %12:_(<4 x s8>) 440 %19:_(s1), %20:_(s1), %21:_(s1), %22:_(s1) = G_UNMERGE_VALUES %18:_(<4 x s1>) 441 %17:_(s32) = G_MERGE_VALUES %13, %14, %15, %16 442 %23:_(s32) = G_ANYEXT %19 443 $vgpr0 = COPY %17 444 $vgpr1 = COPY %23 445... 446--- 447name: test_smulo_s24 448body: | 449 bb.0: 450 liveins: $vgpr0, $vgpr1 451 452 ; GFX8-LABEL: name: test_smulo_s24 453 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 454 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 455 ; GFX8: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 24 456 ; GFX8: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 24 457 ; GFX8: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[SEXT_INREG]], [[SEXT_INREG1]] 458 ; GFX8: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 459 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 460 ; GFX8: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32) 461 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]] 462 ; GFX8: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 24 463 ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 464 ; GFX8: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]] 465 ; GFX8: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 24 466 ; GFX8: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[OR]](s1) 467 ; GFX8: $vgpr0 = COPY [[SEXT_INREG3]](s32) 468 ; GFX8: $vgpr1 = COPY [[SEXT]](s32) 469 ; GFX9-LABEL: name: test_smulo_s24 470 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 471 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 472 ; GFX9: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 24 473 ; GFX9: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 24 474 ; GFX9: [[SMULH:%[0-9]+]]:_(s32) = G_SMULH [[SEXT_INREG]], [[SEXT_INREG1]] 475 ; GFX9: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG]], [[SEXT_INREG1]] 476 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 477 ; GFX9: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[MUL]], [[C]](s32) 478 ; GFX9: 
[[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SMULH]](s32), [[ASHR]] 479 ; GFX9: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 24 480 ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[MUL]](s32), [[SEXT_INREG2]] 481 ; GFX9: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]] 482 ; GFX9: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[MUL]], 24 483 ; GFX9: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[OR]](s1) 484 ; GFX9: $vgpr0 = COPY [[SEXT_INREG3]](s32) 485 ; GFX9: $vgpr1 = COPY [[SEXT]](s32) 486 %0:_(s32) = COPY $vgpr0 487 %1:_(s32) = COPY $vgpr1 488 %2:_(s24) = G_TRUNC %0 489 %3:_(s24) = G_TRUNC %1 490 %4:_(s24), %6:_(s1) = G_SMULO %2, %3 491 %5:_(s32) = G_SEXT %4 492 %7:_(s32) = G_SEXT %6 493 $vgpr0 = COPY %5 494 $vgpr1 = COPY %7 495... 496 497