; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst2i8:
;Check the alignment value.  Max for this instruction is 128 bits:
;CHECK: vst2.8 {d16, d17}, [r0:64]
	%tmp1 = load <8 x i8>* %B
	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
	ret void
}

;Check for a post-increment updating store with register increment.
define void @vst2i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vst2i8_update:
;CHECK: vst2.8 {d16, d17}, [r1], r2
	%A = load i8** %ptr
	%tmp1 = load <8 x i8>* %B
	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
	%tmp2 = getelementptr i8* %A, i32 %inc
	store i8* %tmp2, i8** %ptr
	ret void
}

define void @vst2i16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vst2i16:
;Check the alignment value.  Max for this instruction is 128 bits:
;CHECK: vst2.16 {d16, d17}, [r0:128]
	%tmp0 = bitcast i16* %A to i8*
	%tmp1 = load <4 x i16>* %B
	call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
	ret void
}

define void @vst2i32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vst2i32:
;CHECK: vst2.32
	%tmp0 = bitcast i32* %A to i8*
	%tmp1 = load <2 x i32>* %B
	call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
	ret void
}

define void @vst2f(float* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vst2f:
;CHECK: vst2.32
	%tmp0 = bitcast float* %A to i8*
	%tmp1 = load <2 x float>* %B
	call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
	ret void
}

define void @vst2i64(i64* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vst2i64:
;Check the alignment value.  Max for this instruction is 128 bits:
;CHECK: vst1.64 {d16, d17}, [r0:128]
	%tmp0 = bitcast i64* %A to i8*
	%tmp1 = load <1 x i64>* %B
	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 32)
	ret void
}

;Check for a post-increment updating store.
define void @vst2i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vst2i64_update:
;CHECK: vst1.64 {d16, d17}, [r1:64]!
	%A = load i64** %ptr
	%tmp0 = bitcast i64* %A to i8*
	%tmp1 = load <1 x i64>* %B
	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 8)
	%tmp2 = getelementptr i64* %A, i32 2
	store i64* %tmp2, i64** %ptr
	ret void
}

define void @vst2Qi8(i8* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vst2Qi8:
;Check the alignment value.  Max for this instruction is 256 bits:
;CHECK: vst2.8 {d16, d17, d18, d19}, [r0:64]
	%tmp1 = load <16 x i8>* %B
	call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 8)
	ret void
}

define void @vst2Qi16(i16* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vst2Qi16:
;Check the alignment value.  Max for this instruction is 256 bits:
;CHECK: vst2.16 {d16, d17, d18, d19}, [r0:128]
	%tmp0 = bitcast i16* %A to i8*
	%tmp1 = load <8 x i16>* %B
	call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 16)
	ret void
}

define void @vst2Qi32(i32* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vst2Qi32:
;Check the alignment value.  Max for this instruction is 256 bits:
;CHECK: vst2.32 {d16, d17, d18, d19}, [r0:256]
	%tmp0 = bitcast i32* %A to i8*
	%tmp1 = load <4 x i32>* %B
	call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 64)
	ret void
}

define void @vst2Qf(float* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vst2Qf:
;CHECK: vst2.32
	%tmp0 = bitcast float* %A to i8*
	%tmp1 = load <4 x float>* %B
	call void @llvm.arm.neon.vst2.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
	ret void
}

define i8* @vst2update(i8* %out, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vst2update:
;CHECK: vst2.16 {d16, d17}, [r0]!
	%tmp1 = load <4 x i16>* %B
	tail call void @llvm.arm.neon.vst2.v4i16(i8* %out, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 2)
	%t5 = getelementptr inbounds i8* %out, i32 16
	ret i8* %t5
}

define i8* @vst2update2(i8 * %out, <4 x float> * %this) nounwind optsize ssp align 2 {
;CHECK-LABEL: vst2update2:
;CHECK: vst2.32 {d16, d17, d18, d19}, [r0]!
  %tmp1 = load <4 x float>* %this
  call void @llvm.arm.neon.vst2.v4f32(i8* %out, <4 x float> %tmp1, <4 x float> %tmp1, i32 4) nounwind
  %tmp2 = getelementptr inbounds i8* %out, i32  32
  ret i8* %tmp2
}

declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32) nounwind

declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32) nounwind
declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind