; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s

; Check that instruction selection prefers the instruction named in each
; function below for the corresponding IR pattern.

@data1 = external global [2 x [31 x i8]], align 8
@data2 = external global [2 x [91 x i8]], align 8

; (%a0 & ~%a1) | (%a2 << 5) should select "r2 |= and(r0,~r1)".
define i32 @Prefer_M4_or_andn(i32 %a0, i32 %a1, i32 %a2) #0 {
; CHECK-LABEL: Prefer_M4_or_andn:
; CHECK:       // %bb.0: // %b3
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 = asl(r2,#5)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r2 |= and(r0,~r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = r2
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b3:
  %v4 = xor i32 %a1, -1
  %v5 = shl i32 %a2, 5
  %v6 = and i32 %a0, %v4
  %v7 = or i32 %v6, %v5
  ret i32 %v7
}

; The address of data1[%a0] should fold into "add(##data1,mpyi(r0,#31))".
define i32 @Prefer_M4_mpyri_addi(i32 %a0) #0 {
; CHECK-LABEL: Prefer_M4_mpyri_addi:
; CHECK:       // %bb.0: // %b1
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = add(##data1,mpyi(r0,#31))
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b1:
  %v2 = getelementptr inbounds [2 x [31 x i8]], [2 x [31 x i8]]* @data1, i32 0, i32 %a0
  %v3 = ptrtoint [31 x i8]* %v2 to i32
  ret i32 %v3
}

; The address of data2[%a0] should use the register form: r1 = #91, then
; "add(##data2,mpyi(r0,r1))".
define i32 @Prefer_M4_mpyrr_addi(i32 %a0) #0 {
; CHECK-LABEL: Prefer_M4_mpyrr_addi:
; CHECK:       // %bb.0: // %b1
; CHECK-NEXT:    {
; CHECK-NEXT:     r1 = #91
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = add(##data2,mpyi(r0,r1))
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b1:
  %v2 = getelementptr inbounds [2 x [91 x i8]], [2 x [91 x i8]]* @data2, i32 0, i32 %a0
  %v3 = ptrtoint [91 x i8]* %v2 to i32
  ret i32 %v3
}

; Testing bit %a1 of %a0 should select "tstbit(r0,r1)".
define i32 @Prefer_S2_tstbit_r(i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: Prefer_S2_tstbit_r:
; CHECK:       // %bb.0: // %b2
; CHECK-NEXT:    {
; CHECK-NEXT:     p0 = tstbit(r0,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = mux(p0,#1,#0)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b2:
  %v3 = shl i32 1, %a1
  %v4 = and i32 %a0, %v3
  %v5 = icmp ne i32 %v4, 0
  %v6 = zext i1 %v5 to i32
  ret i32 %v6
}

; Testing that bit %a1 of %a0 is clear should select "!tstbit(r0,r1)".
define i32 @Prefer_S4_ntstbit_r(i32 %a0, i32 %a1) #0 {
; CHECK-LABEL: Prefer_S4_ntstbit_r:
; CHECK:       // %bb.0: // %b2
; CHECK-NEXT:    {
; CHECK-NEXT:     p0 = !tstbit(r0,r1)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = mux(p0,#1,#0)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b2:
  %v3 = shl i32 1, %a1
  %v4 = and i32 %a0, %v3
  %v5 = icmp eq i32 %v4, 0
  %v6 = zext i1 %v5 to i32
  ret i32 %v6
}

; A zero-extending byte load at offset 65 should select "memub(r0+#65)".
define i64 @Prefer_L2_loadrub_io(i8* %a0) #0 {
; CHECK-LABEL: Prefer_L2_loadrub_io:
; CHECK:       // %bb.0: // %b1
; CHECK-NEXT:    {
; CHECK-NEXT:     r0 = memub(r0+#65)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     r1:0 = combine(#0,r0)
; CHECK-NEXT:    }
; CHECK-NEXT:    {
; CHECK-NEXT:     jumpr r31
; CHECK-NEXT:    }
b1:
  %v2 = getelementptr i8, i8* %a0, i32 65
  %v3 = load i8, i8* %v2
  %v4 = zext i8 %v3 to i64
  ret i64 %v4
}

attributes #0 = { optnone noinline nounwind readnone }