; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon %s -o - | FileCheck %s

; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -regalloc=basic %s -o - \
; RUN:	| FileCheck %s
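
; Test codegen for the llvm.arm.neon.vld1 intrinsics.  The same checks are run
; with the default and the basic register allocators, covering the alignment
; annotations on the loads and the post-increment (writeback) addressing forms.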

define <8 x i8> @vld1i8(i8* %A) nounwind {
;CHECK-LABEL: vld1i8:
;Check the alignment value.  Max for this instruction is 64 bits:
;CHECK: vld1.8 {d16}, [r0:64]
	%tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8.p0i8(i8* %A, i32 16)
	ret <8 x i8> %tmp1
}

define <4 x i16> @vld1i16(i16* %A) nounwind {
;CHECK-LABEL: vld1i16:
;CHECK: vld1.16
	%tmp0 = bitcast i16* %A to i8*
	%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* %tmp0, i32 1)
	ret <4 x i16> %tmp1
}

;Check for a post-increment updating load.
define <4 x i16> @vld1i16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld1i16_update:
;CHECK: vld1.16 {d16}, [{{r[0-9]+}}]!
	%A = load i16*, i16** %ptr
	%tmp0 = bitcast i16* %A to i8*
	%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8* %tmp0, i32 1)
	%tmp2 = getelementptr i16, i16* %A, i32 4
	store i16* %tmp2, i16** %ptr
	ret <4 x i16> %tmp1
}

define <2 x i32> @vld1i32(i32* %A) nounwind {
;CHECK-LABEL: vld1i32:
;CHECK: vld1.32
	%tmp0 = bitcast i32* %A to i8*
	%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %tmp0, i32 1)
	ret <2 x i32> %tmp1
}

;Check for a post-increment updating load with register increment.
define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld1i32_update:
;CHECK: vld1.32 {d16}, [{{r[0-9]+}}], {{r[0-9]+}}
	%A = load i32*, i32** %ptr
	%tmp0 = bitcast i32* %A to i8*
	%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8* %tmp0, i32 1)
	%tmp2 = getelementptr i32, i32* %A, i32 %inc
	store i32* %tmp2, i32** %ptr
	ret <2 x i32> %tmp1
}

define <2 x float> @vld1f(float* %A) nounwind {
;CHECK-LABEL: vld1f:
;CHECK: vld1.32
	%tmp0 = bitcast float* %A to i8*
	%tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32.p0i8(i8* %tmp0, i32 1)
	ret <2 x float> %tmp1
}

define <1 x i64> @vld1i64(i64* %A) nounwind {
;CHECK-LABEL: vld1i64:
;CHECK: vld1.64
	%tmp0 = bitcast i64* %A to i8*
	%tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8* %tmp0, i32 1)
	ret <1 x i64> %tmp1
}

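;The following tests load 128-bit vectors, so the result is returned in a
;Q register, i.e. a d-register pair such as {d16, d17}.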
define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld1Qi8:
;Check the alignment value.  Max for this instruction is 128 bits:
;CHECK: vld1.8 {d16, d17}, [r0:64]
	%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %A, i32 8)
	ret <16 x i8> %tmp1
}

;Check for a post-increment updating load.
define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
;CHECK-LABEL: vld1Qi8_update:
;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+|lr}}:64]!
	%A = load i8*, i8** %ptr
	%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %A, i32 8)
	%tmp2 = getelementptr i8, i8* %A, i32 16
	store i8* %tmp2, i8** %ptr
	ret <16 x i8> %tmp1
}

define <8 x i16> @vld1Qi16(i16* %A) nounwind {
;CHECK-LABEL: vld1Qi16:
;Check the alignment value.  Max for this instruction is 128 bits:
;CHECK: vld1.16 {d16, d17}, [r0:128]
	%tmp0 = bitcast i16* %A to i8*
	%tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %tmp0, i32 32)
	ret <8 x i16> %tmp1
}

define <4 x i32> @vld1Qi32(i32* %A) nounwind {
;CHECK-LABEL: vld1Qi32:
;CHECK: vld1.32
	%tmp0 = bitcast i32* %A to i8*
	%tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %tmp0, i32 1)
	ret <4 x i32> %tmp1
}

define <4 x float> @vld1Qf(float* %A) nounwind {
;CHECK-LABEL: vld1Qf:
;CHECK: vld1.32
	%tmp0 = bitcast float* %A to i8*
	%tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* %tmp0, i32 1)
	ret <4 x float> %tmp1
}

define <2 x i64> @vld1Qi64(i64* %A) nounwind {
;CHECK-LABEL: vld1Qi64:
;CHECK: vld1.64
	%tmp0 = bitcast i64* %A to i8*
	%tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8* %tmp0, i32 1)
	ret <2 x i64> %tmp1
}

define <2 x double> @vld1Qf64(double* %A) nounwind {
;CHECK-LABEL: vld1Qf64:
;CHECK: vld1.64
	%tmp0 = bitcast double* %A to i8*
	%tmp1 = call <2 x double> @llvm.arm.neon.vld1.v2f64.p0i8(i8* %tmp0, i32 1)
	ret <2 x double> %tmp1
}

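; Declarations of the vld1 intrinsics exercised above.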
declare <8 x i8>  @llvm.arm.neon.vld1.v8i8.p0i8(i8*, i32) nounwind readonly
declare <4 x i16> @llvm.arm.neon.vld1.v4i16.p0i8(i8*, i32) nounwind readonly
declare <2 x i32> @llvm.arm.neon.vld1.v2i32.p0i8(i8*, i32) nounwind readonly
declare <2 x float> @llvm.arm.neon.vld1.v2f32.p0i8(i8*, i32) nounwind readonly
declare <1 x i64> @llvm.arm.neon.vld1.v1i64.p0i8(i8*, i32) nounwind readonly

declare <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8*, i32) nounwind readonly
declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
declare <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8*, i32) nounwind readonly
declare <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8*, i32) nounwind readonly
declare <2 x i64> @llvm.arm.neon.vld1.v2i64.p0i8(i8*, i32) nounwind readonly
declare <2 x double> @llvm.arm.neon.vld1.v2f64.p0i8(i8*, i32) nounwind readonly

; Radar 8355607
; Do not crash if the vld1 result is not used.
define void @unused_vld1_result() {
entry:
  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1)
  call void @llvm.trap()
  unreachable
}

declare void @llvm.trap() nounwind