; Make sure short memsets on ARM lower to stores, even when optimizing for size.
; RUN: llc -march=arm < %s | FileCheck %s -check-prefix=CHECK-GENERIC
; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s -check-prefix=CHECK-UNALIGNED

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios5.0.0"

; CHECK-GENERIC:      strb
; CHECK-GENERIC-NEXT: strb
; CHECK-GENERIC-NEXT: strb
; CHECK-GENERIC-NEXT: strb
; CHECK-GENERIC-NEXT: strb
; CHECK-UNALIGNED:    strb
; CHECK-UNALIGNED:    str
define void @foo(i8* nocapture %c) nounwind optsize {
entry:
  call void @llvm.memset.p0i8.i64(i8* %c, i8 -1, i64 5, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
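
; For reference, the memset call above is roughly the IR produced for a
; C snippet along these lines when built at -Os (which adds optsize).
; This is a sketch of the source-level equivalent, not verified compiler
; output:
;
;   void foo(char *c) { __builtin_memset(c, 0xff, 5); }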