; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=neon | FileCheck %s

; These tests check the fold (sext i1 X) + 1 --> zext (not X), in scalar and
; vector form: the scalar case lowers to a single `eor` (xor with 1), and the
; vector cases lower to a compare/negate followed by a mask with #0x1 instead
; of a full sign-extend + add sequence.

define i32 @sext_inc(i1 zeroext %x) {
; CHECK-LABEL: sext_inc:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    eor r0, r0, #1
; CHECK-NEXT:    mov pc, lr
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}

define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i16 d16, #0x1
; CHECK-NEXT:    vmov d17, r0, r1
; CHECK-NEXT:    veor d16, d17, d16
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vmovl.u16 q8, d16
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vcge.s32 q8, q9, q8
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    mov r12, sp
; CHECK-NEXT:    vld1.64 {d18, d19}, [r12]
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vceq.i32 q8, q8, q9
; CHECK-NEXT:    vmov.i32 q9, #0x1
; CHECK-NEXT:    vand q8, q8, q9
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}