; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s

; Tests for folding sign-bit comparisons (x > -1 / x < 0) fed into
; zext/sext/select/add into shift-based sequences.

; If positive...

; zext(x > -1): expect sign bit of ~x, i.e. nor + srwi 31.
define i32 @zext_ifpos(i32 %x) {
; CHECK-LABEL: zext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nor 3, 3, 3
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = zext i1 %c to i32
  ret i32 %e
}

; zext(x > -1) + 41 folds to ashr(x, 31) + 42.
define i32 @add_zext_ifpos(i32 %x) {
; CHECK-LABEL: add_zext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = zext i1 %c to i32
  %r = add i32 %e, 41
  ret i32 %r
}

; Splat-vector variant of add_zext_ifpos.
define <4 x i32> @add_zext_ifpos_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_zext_ifpos_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xxleqv 35, 35, 35
; CHECK-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI2_0@toc@l
; CHECK-NEXT:    vcmpgtsw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vsubuwm 2, 3, 2
; CHECK-NEXT:    blr
  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = zext <4 x i1> %c to <4 x i32>
  %r = add <4 x i32> %e, <i32 41, i32 41, i32 41, i32 41>
  ret <4 x i32> %r
}

; Plain select on the sign-bit compare; expect cmpwi + isel.
define i32 @sel_ifpos_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_tval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 41
; CHECK-NEXT:    cmpwi 0, 3, -1
; CHECK-NEXT:    li 3, 42
; CHECK-NEXT:    isel 3, 3, 4, 1
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %r = select i1 %c, i32 42, i32 41
  ret i32 %r
}

; sext(x > -1): expect ashr(~x, 31).
define i32 @sext_ifpos(i32 %x) {
; CHECK-LABEL: sext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nor 3, 3, 3
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = sext i1 %c to i32
  ret i32 %e
}

; sext(x > -1) + 42 folds to lshr(x, 31) + 41.
define i32 @add_sext_ifpos(i32 %x) {
; CHECK-LABEL: add_sext_ifpos:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 41
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %e = sext i1 %c to i32
  %r = add i32 %e, 42
  ret i32 %r
}

; Splat-vector variant of add_sext_ifpos.
define <4 x i32> @add_sext_ifpos_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_sext_ifpos_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xxleqv 35, 35, 35
; CHECK-NEXT:    addis 3, 2, .LCPI6_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI6_0@toc@l
; CHECK-NEXT:    vcmpgtsw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = sext <4 x i1> %c to <4 x i32>
  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
  ret <4 x i32> %r
}

; Select with the larger constant on the false arm.
define i32 @sel_ifpos_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_fval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 42
; CHECK-NEXT:    cmpwi 0, 3, -1
; CHECK-NEXT:    li 3, 41
; CHECK-NEXT:    isel 3, 3, 4, 1
; CHECK-NEXT:    blr
  %c = icmp sgt i32 %x, -1
  %r = select i1 %c, i32 41, i32 42
  ret i32 %r
}

; If negative...

; zext(x < 0) is just the sign bit: srwi 31.
define i32 @zext_ifneg(i32 %x) {
; CHECK-LABEL: zext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = zext i1 %c to i32
  ret i32 %r
}

; zext(x < 0) + 41: lshr(x, 31) + 41.
define i32 @add_zext_ifneg(i32 %x) {
; CHECK-LABEL: add_zext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 41
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %e = zext i1 %c to i32
  %r = add i32 %e, 41
  ret i32 %r
}

; Select on x < 0 with the larger constant on the true arm.
define i32 @sel_ifneg_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_tval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 41
; CHECK-NEXT:    cmpwi 0, 3, 0
; CHECK-NEXT:    li 3, 42
; CHECK-NEXT:    isel 3, 3, 4, 0
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = select i1 %c, i32 42, i32 41
  ret i32 %r
}

; sext(x < 0) is ashr(x, 31).
define i32 @sext_ifneg(i32 %x) {
; CHECK-LABEL: sext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = sext i1 %c to i32
  ret i32 %r
}
; sext(x < 0) + 42: ashr(x, 31) + 42.
define i32 @add_sext_ifneg(i32 %x) {
; CHECK-LABEL: add_sext_ifneg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %e = sext i1 %c to i32
  %r = add i32 %e, 42
  ret i32 %r
}

; Select on x < 0 with the larger constant on the false arm.
define i32 @sel_ifneg_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_fval_bigger:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li 4, 42
; CHECK-NEXT:    cmpwi 0, 3, 0
; CHECK-NEXT:    li 3, 41
; CHECK-NEXT:    isel 3, 3, 4, 0
; CHECK-NEXT:    blr
  %c = icmp slt i32 %x, 0
  %r = select i1 %c, i32 41, i32 42
  ret i32 %r
}

; lshr(~x, 31) + 41 folds to ashr(x, 31) + 42.
define i32 @add_lshr_not(i32 %x) {
; CHECK-LABEL: add_lshr_not:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 42
; CHECK-NEXT:    blr
  %not = xor i32 %x, -1
  %sh = lshr i32 %not, 31
  %r = add i32 %sh, 41
  ret i32 %r
}

; Splat-vector variant of add_lshr_not.
define <4 x i32> @add_lshr_not_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: add_lshr_not_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI15_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI15_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsraw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
  %r = add <4 x i32> %e, <i32 42, i32 42, i32 42, i32 42>
  ret <4 x i32> %r
}

; 43 - lshr(~x, 31) folds to lshr(x, 31) | 42.
define i32 @sub_lshr_not(i32 %x) {
; CHECK-LABEL: sub_lshr_not:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srwi 3, 3, 31
; CHECK-NEXT:    ori 3, 3, 42
; CHECK-NEXT:    blr
  %not = xor i32 %x, -1
  %sh = lshr i32 %not, 31
  %r = sub i32 43, %sh
  ret i32 %r
}

; Splat-vector variant: 42 - lshr(~x, 31).
define <4 x i32> @sub_lshr_not_vec_splat(<4 x i32> %x) {
; CHECK-LABEL: sub_lshr_not_vec_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI17_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI17_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsrw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %c = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %e = lshr <4 x i32> %c, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %e
  ret <4 x i32> %r
}

; y - lshr(x, 31) folds to y + ashr(x, 31).
define i32 @sub_lshr(i32 %x, i32 %y) {
; CHECK-LABEL: sub_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    add 3, 4, 3
; CHECK-NEXT:    blr
  %sh = lshr i32 %x, 31
  %r = sub i32 %y, %sh
  ret i32 %r
}

; Vector variant of sub_lshr.
define <4 x i32> @sub_lshr_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: sub_lshr_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 4, -16
; CHECK-NEXT:    vspltisw 5, 15
; CHECK-NEXT:    vsubuwm 4, 5, 4
; CHECK-NEXT:    vsraw 2, 2, 4
; CHECK-NEXT:    vadduwm 2, 3, 2
; CHECK-NEXT:    blr
  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> %y, %sh
  ret <4 x i32> %r
}

; 43 - lshr(x, 31) folds to ashr(x, 31) + 43.
define i32 @sub_const_op_lshr(i32 %x) {
; CHECK-LABEL: sub_const_op_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srawi 3, 3, 31
; CHECK-NEXT:    addi 3, 3, 43
; CHECK-NEXT:    blr
  %sh = lshr i32 %x, 31
  %r = sub i32 43, %sh
  ret i32 %r
}

; Splat-vector variant: 42 - lshr(x, 31).
define <4 x i32> @sub_const_op_lshr_vec(<4 x i32> %x) {
; CHECK-LABEL: sub_const_op_lshr_vec:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vspltisw 3, -16
; CHECK-NEXT:    vspltisw 4, 15
; CHECK-NEXT:    addis 3, 2, .LCPI21_0@toc@ha
; CHECK-NEXT:    addi 3, 3, .LCPI21_0@toc@l
; CHECK-NEXT:    vsubuwm 3, 4, 3
; CHECK-NEXT:    vsraw 2, 2, 3
; CHECK-NEXT:    lvx 3, 0, 3
; CHECK-NEXT:    vadduwm 2, 2, 3
; CHECK-NEXT:    blr
  %sh = lshr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %r = sub <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %sh
  ret <4 x i32> %r
}