# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s

# Legalization of G_USUBE (subtract with borrow-in/borrow-out) with plain s32
# operands: the operation is already legal, so only the s1 borrow-out needs a
# G_ZEXT to s32 before being copied out.
---
name: test_usube_s32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2

    ; CHECK-LABEL: name: test_usube_s32
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
    ; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]]
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[USUBE1]](s1)
    ; CHECK: $vgpr0 = COPY [[USUBE]](s32)
    ; CHECK: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = G_CONSTANT i32 0
    %4:_(s1) = G_ICMP intpred(eq), %2, %3
    %5:_(s32), %6:_(s1) = G_USUBE %0, %1, %4
    %7:_(s32) = G_ZEXT %6
    $vgpr0 = COPY %5
    $vgpr1 = COPY %7
...

# Legalization of G_USUBE on <2 x s32>: the legalizer scalarizes the vector
# operation into per-lane G_SUB pairs (subtract, then subtract the zero-extended
# borrow-in) and computes each lane's borrow-out via eq/ult compares combined
# with a G_SELECT, before re-packing both results with G_BUILD_VECTOR.
---
name: test_usube_v2s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5

    ; CHECK-LABEL: name: test_usube_v2s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<2 x s32>)
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C]]
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[UV4]]
    ; CHECK: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[UV5]]
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
    ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP1]](s1)
    ; CHECK: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[ZEXT]]
    ; CHECK: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[ZEXT1]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB2]](s32), [[SUB3]](s32)
    ; CHECK: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV6]](s32), [[UV8]]
    ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV7]](s32), [[UV9]]
    ; CHECK: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
    ; CHECK: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV10]](s32), [[UV12]]
    ; CHECK: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV11]](s32), [[UV13]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP4]](s1)
    ; CHECK: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[ANYEXT]], [[ANYEXT1]]
    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP5]](s1)
    ; CHECK: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[ANYEXT2]], [[ANYEXT3]]
    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT4]], [[C1]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT5]], [[C1]]
    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    ; CHECK: $vgpr2_vgpr3 = COPY [[BUILD_VECTOR1]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
    %2:_(<2 x s32>) = COPY $vgpr4_vgpr5
    %3:_(s32) = G_CONSTANT i32 0
    %4:_(<2 x s32>) = G_BUILD_VECTOR %3, %3
    %5:_(<2 x s1>) = G_ICMP intpred(eq), %2, %4
    %6:_(<2 x s32>), %7:_(<2 x s1>) = G_USUBE %0, %1, %5
    %8:_(<2 x s32>) = G_ZEXT %7
    $vgpr0_vgpr1 = COPY %6
    $vgpr2_vgpr3 = COPY %8
...

# Legalization of G_USUBE on s16: widened to s32 by masking both operands with
# 0xffff, doing a 32-bit G_USUBE, and recomputing the borrow-out by comparing
# the wide result against its 16-bit-masked value.
#
# FIX: the original input read `G_USUBE %6, %7, %4`, using %7 as an operand of
# its own definition and leaving %5 (G_TRUNC of %0) dead. The intended operands
# are the two truncated values %5 and %6. CHECK lines updated to match (the old
# autogenerated output showed the fallout: no COPY of $vgpr0 was captured and a
# raw unnamed `%13` leaked into `G_AND %13, [[C1]]`).
---
name: test_usube_s16
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2

    ; CHECK-LABEL: name: test_usube_s16
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
    ; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[AND]], [[AND1]], [[ICMP]]
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[USUBE]], [[C1]]
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[USUBE]](s32), [[AND2]]
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP1]](s1)
    ; CHECK: $vgpr0 = COPY [[USUBE]](s32)
    ; CHECK: $vgpr1 = COPY [[ZEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = G_CONSTANT i32 0
    %4:_(s1) = G_ICMP intpred(eq), %2, %3
    %5:_(s16) = G_TRUNC %0
    %6:_(s16) = G_TRUNC %1
    %7:_(s16), %8:_(s1) = G_USUBE %5, %6, %4
    %9:_(s32) = G_ANYEXT %7
    %10:_(s32) = G_ZEXT %8
    $vgpr0 = COPY %9
    $vgpr1 = COPY %10
...

# Legalization of G_USUBE on s64: split into a G_USUBO/G_USUBE pair over the
# 32-bit halves, then the zero-extended borrow-in is subtracted with a second
# G_USUBO/G_USUBE pair; the s1 borrow-out is rebuilt from wide eq/ult compares
# combined with a G_SELECT.
---
name: test_usube_s64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4

    ; CHECK-LABEL: name: test_usube_s64
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr4
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]]
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
    ; CHECK: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
    ; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ICMP]](s1)
    ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
    ; CHECK: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[USUBO]], [[UV4]]
    ; CHECK: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[USUBE]], [[UV5]], [[USUBO3]]
    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
    ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP2]](s1)
    ; CHECK: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[ANYEXT]], [[ANYEXT1]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C1]]
    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
    ; CHECK: $vgpr2 = COPY [[AND]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s64) = COPY $vgpr2_vgpr3
    %2:_(s32) = COPY $vgpr4
    %3:_(s32) = G_CONSTANT i32 0
    %4:_(s1) = G_ICMP intpred(eq), %2, %3
    %5:_(s64), %6:_(s1) = G_USUBE %0, %1, %4
    %7:_(s32) = G_ZEXT %6
    $vgpr0_vgpr1 = COPY %5
    $vgpr2 = COPY %7
...