; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1

; Fold: (size == 0) | !smul.ov(size, nmemb)  -->  !smul.ov(size, nmemb)
; (if size is zero the multiply cannot overflow, so the icmp is redundant).
define i1 @t0_umul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_umul(
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    ret i1 [[PHITMP]]
;
  %cmp = icmp eq i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = or i1 %cmp, %phitmp
  ret i1 %or
}

; Same fold with the 'or' operands swapped.
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    ret i1 [[PHITMP]]
;
  %cmp = icmp eq i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = or i1 %phitmp, %cmp ; swapped
  ret i1 %or
}

; Negative test: the icmp compares a different value than the multiply's
; operand, so the fold must not fire.
define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
; CHECK-LABEL: @n2_wrong_size(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %cmp = icmp eq i4 %size1, 0 ; not %size0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = or i1 %cmp, %phitmp
  ret i1 %or
}

; Negative test: wrong icmp predicate ('ne' instead of 'eq').
define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %cmp = icmp ne i4 %size, 0 ; not 'eq'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = or i1 %cmp, %phitmp
  ret i1 %or
}

; Negative test: 'and' instead of 'or' joins the two conditions.
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %cmp = icmp eq i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = and i1 %cmp, %phitmp ; not 'or'
  ret i1 %or
}

; Negative test: comparison against 1 rather than 0.
define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n5_not_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %cmp = icmp eq i4 %size, 1 ; should be '0'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %phitmp = xor i1 %smul.ov, true
  %or = or i1 %cmp, %phitmp
  ret i1 %or
}