; RUN: llc < %s -march=arm | FileCheck -check-prefix=ARM %s
; RUN: llc < %s -march=thumb | FileCheck -check-prefix=THUMB %s
; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck -check-prefix=T2 %s
; RUN: llc < %s -mtriple=thumbv8 | FileCheck -check-prefix=V8 %s

; Exercises the ARM AND/TST peephole: the checks below expect the AND+compare
; pairs in this IR to be folded into flag-setting ANDS instructions.

; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.

%struct.Foo = type { i8* }

; ARM: foo
; THUMB: foo
; T2: foo
define %struct.Foo* @foo(%struct.Foo* %this, i32 %acc) nounwind readonly align 2 {
entry:
  %scevgep = getelementptr %struct.Foo* %this, i32 1
  br label %tailrecurse

tailrecurse:                                      ; preds = %sw.bb, %entry
  %lsr.iv2 = phi %struct.Foo* [ %scevgep3, %sw.bb ], [ %scevgep, %entry ]
  %lsr.iv = phi i32 [ %lsr.iv.next, %sw.bb ], [ 1, %entry ]
  %acc.tr = phi i32 [ %or, %sw.bb ], [ %acc, %entry ]
  %lsr.iv24 = bitcast %struct.Foo* %lsr.iv2 to i8**
  %scevgep5 = getelementptr i8** %lsr.iv24, i32 -1
  %tmp2 = load i8** %scevgep5
  %0 = ptrtoint i8* %tmp2 to i32

; The `and`+`icmp eq 0` below should fold into a single flag-setting ANDS on
; ARM and Thumb2; Thumb1 has no immediate form, so it materializes #3 first.

; ARM: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
; ARM-NEXT: beq

; THUMB: movs r[[R0:[0-9]+]], #3
; THUMB-NEXT: ands r[[R0]], r
; THUMB-NEXT: cmp r[[R0]], #0
; THUMB-NEXT: beq

; T2: ands {{r[0-9]+}}, {{r[0-9]+}}, #3
; T2-NEXT: beq

  %and = and i32 %0, 3
  %tst = icmp eq i32 %and, 0
  br i1 %tst, label %sw.bb, label %tailrecurse.switch

tailrecurse.switch:                               ; preds = %tailrecurse
; V8-LABEL: %tailrecurse.switch
; V8: cmp
; V8-NEXT: beq
; V8-NEXT: %tailrecurse.switch
; V8: cmp
; V8-NEXT: beq
; V8-NEXT: %tailrecurse.switch
; V8: cmp
; V8-NEXT: beq
; V8-NEXT: b 
; The trailing space in the last line checks that the branch is unconditional
  switch i32 %and, label %sw.epilog [
    i32 1, label %sw.bb
    i32 3, label %sw.bb6
    i32 2, label %sw.bb8
  ]

sw.bb:                                            ; preds = %tailrecurse.switch, %tailrecurse
  %shl = shl i32 %acc.tr, 1
  %or = or i32 %and, %shl
  %lsr.iv.next = add i32 %lsr.iv, 1
  %scevgep3 = getelementptr %struct.Foo* %lsr.iv2, i32 1
  br label %tailrecurse

sw.bb6:                                           ; preds = %tailrecurse.switch
  ret %struct.Foo* %lsr.iv2

sw.bb8:                                           ; preds = %tailrecurse.switch
  %tmp1 = add i32 %acc.tr, %lsr.iv
  %add.ptr11 = getelementptr inbounds %struct.Foo* %this, i32 %tmp1
  ret %struct.Foo* %add.ptr11

sw.epilog:                                        ; preds = %tailrecurse.switch
  ret %struct.Foo* undef
}

; Another test that exercises the AND/TST peephole optimization and also
; generates a predicated ANDS instruction. Check that the predicate is printed
; after the "S" modifier on the instruction.

%struct.S = type { i8* (i8*)*, [1 x i8] }

; ARM: bar
; THUMB: bar
; T2: bar
; V8-LABEL: bar:
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
entry:
  %0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
  %1 = load i8* %0, align 1
  %2 = zext i8 %1 to i32
; ARM: ands
; THUMB: ands
; T2: ands
; V8: ands
; V8-NEXT: beq
  %3 = and i32 %2, 112
  %4 = icmp eq i32 %3, 0
  br i1 %4, label %return, label %bb

bb:                                               ; preds = %entry
; V8-NEXT: %bb
  %5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
  %6 = load i8* %5, align 1
  %7 = zext i8 %6 to i32
; This second AND sits on the not-taken path of the first branch, so on ARM
; and Thumb2 it is expected to be emitted as a predicated ANDS ("andsne").
; ARM: andsne
; THUMB: ands
; T2: andsne
; V8: ands
; V8-NEXT: beq
  %8 = and i32 %7, 112
  %9 = icmp eq i32 %8, 0
  br i1 %9, label %return, label %bb2

bb2:                                              ; preds = %bb
; V8-NEXT: %bb2
; V8-NEXT: cmp
; V8-NEXT: it ne
; V8-NEXT: cmpne
; V8-NEXT: bne
  %10 = icmp eq i32 %3, 16
  %11 = icmp eq i32 %8, 16
  %or.cond = or i1 %10, %11
  br i1 %or.cond, label %bb4, label %return

bb4:                                              ; preds = %bb2
  %12 = ptrtoint %struct.S* %x to i32
  %phitmp = trunc i32 %12 to i8
  ret i8 %phitmp

return:                                           ; preds = %bb2, %bb, %entry
  ret i8 1
}