; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-T1

; Exercise 32-bit atomicrmw lowering on ARM/Thumb2 (expected to expand to
; ldrex/op/strex loops) and on Thumb1 (no ldrex/strex, so expected to call the
; ___sync_* runtime helpers instead).
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
  %argc.addr = alloca i32                         ; <i32*> [#uses=1]
  %argv.addr = alloca i8**                        ; <i8***> [#uses=1]
  %val1 = alloca i32                              ; <i32*> [#uses=2]
  %val2 = alloca i32                              ; <i32*> [#uses=15]
  %andt = alloca i32                              ; <i32*> [#uses=2]
  %ort = alloca i32                               ; <i32*> [#uses=2]
  %xort = alloca i32                              ; <i32*> [#uses=2]
  %old = alloca i32                               ; <i32*> [#uses=18]
  %temp = alloca i32                              ; <i32*> [#uses=2]
  store i32 %argc, i32* %argc.addr
  store i8** %argv, i8*** %argv.addr
  store i32 0, i32* %val1
  store i32 31, i32* %val2
  store i32 3855, i32* %andt
  store i32 3855, i32* %ort
  store i32 3855, i32* %xort
  store i32 4, i32* %temp
  %tmp = load i32* %temp
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
  store i32 %0, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  %1 = atomicrmw sub i32* %val2, i32 30 monotonic
  store i32 %1, i32* %old
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  %2 = atomicrmw add i32* %val2, i32 1 monotonic
  store i32 %2, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  %3 = atomicrmw sub i32* %val2, i32 1 monotonic
  store i32 %3, i32* %old
  ; CHECK: ldrex
  ; CHECK: and
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_and_4
  %4 = atomicrmw and i32* %andt, i32 4080 monotonic
  store i32 %4, i32* %old
  ; CHECK: ldrex
  ; CHECK: or
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_or_4
  %5 = atomicrmw or i32* %ort, i32 4080 monotonic
  store i32 %5, i32* %old
  ; CHECK: ldrex
  ; CHECK: eor
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_xor_4
  %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
  store i32 %6, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  %7 = atomicrmw min i32* %val2, i32 16 monotonic
  store i32 %7, i32* %old
  %neg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  %8 = atomicrmw min i32* %val2, i32 %neg monotonic
  store i32 %8, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  %9 = atomicrmw max i32* %val2, i32 1 monotonic
  store i32 %9, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  %10 = atomicrmw max i32* %val2, i32 0 monotonic
  store i32 %10, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  %11 = atomicrmw umin i32* %val2, i32 16 monotonic
  store i32 %11, i32* %old
  %uneg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  %12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
  store i32 %12, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  %13 = atomicrmw umax i32* %val2, i32 1 monotonic
  store i32 %13, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  %14 = atomicrmw umax i32* %val2, i32 0 monotonic
  store i32 %14, i32* %old

  ret void
}

; Same checks for 16-bit (halfword) unsigned min/max atomics: ldrex/strex
; loops on v7, ___sync_*_2 helper calls on Thumb1.
define void @func2() nounwind {
entry:
  %val = alloca i16
  %old = alloca i16
  store i16 31, i16* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  %0 = atomicrmw umin i16* %val, i16 16 monotonic
  store i16 %0, i16* %old
  %uneg = sub i16 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  %1 = atomicrmw umin i16* %val, i16 %uneg monotonic
  store i16 %1, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  %2 = atomicrmw umax i16* %val, i16 1 monotonic
  store i16 %2, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  %3 = atomicrmw umax i16* %val, i16 0 monotonic
  store i16 %3, i16* %old
  ret void
}

; Same checks for 8-bit (byte) unsigned min/max atomics: ldrex/strex loops on
; v7, ___sync_*_1 helper calls on Thumb1.
define void @func3() nounwind {
entry:
  %val = alloca i8
  %old = alloca i8
  store i8 31, i8* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  %0 = atomicrmw umin i8* %val, i8 16 monotonic
  store i8 %0, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  %uneg = sub i8 0, 1
  %1 = atomicrmw umin i8* %val, i8 %uneg monotonic
  store i8 %1, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  %2 = atomicrmw umax i8* %val, i8 1 monotonic
  store i8 %2, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  %3 = atomicrmw umax i8* %val, i8 0 monotonic
  store i8 %3, i8* %old
  ret void
}

; CHECK: func4
; This function should not need to use callee-saved registers.
; rdar://problem/12203728
; CHECK-NOT: r4
define i32 @func4(i32* %p) nounwind optsize ssp {
entry:
  %0 = atomicrmw add i32* %p, i32 1 monotonic
  ret i32 %0
}