; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "E-p:64:64:64-p1:32:32:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"

; Instcombine should be able to prove vector alignment in the
; presence of a few mild address computation tricks.
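; (In @test0 below, the base pointer is masked with -16, so %e is 16-byte
; aligned, and the index %h is always even, so the <2 x double> store offset
; is a multiple of 16 bytes; the store's alignment should be raised from 8 to 16.)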

; CHECK-LABEL: @test0(
; CHECK: align 16

define void @test0(i8* %b, i64 %n, i64 %u, i64 %y) nounwind  {
entry:
  %c = ptrtoint i8* %b to i64
  %d = and i64 %c, -16
  %e = inttoptr i64 %d to double*
  %v = mul i64 %u, 2
  %z = and i64 %y, -2
  %t1421 = icmp eq i64 %n, 0
  br i1 %t1421, label %return, label %bb

bb:
  %i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
  %j = mul i64 %i, %v
  %h = add i64 %j, %z
  %t8 = getelementptr double* %e, i64 %h
  %p = bitcast double* %t8 to <2 x double>*
  store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
  %indvar.next = add i64 %i, 1
  %exitcond = icmp eq i64 %indvar.next, %n
  br i1 %exitcond, label %return, label %bb

return:
  ret void
}

; When we see an unaligned load from an insufficiently aligned global or
; alloca, increase the alignment of the load, turning it into an aligned load.
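; (None of the globals below specify an explicit alignment, so the `align 1`
; vector loads in @test1, @test1_as1, and @test1_as1_gep should all end up
; as `align 16` loads, per the CHECK lines.)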

; CHECK-LABEL: @test1(
; CHECK: tmp = load
; CHECK: GLOBAL{{.*}}align 16

@GLOBAL = internal global [4 x i32] zeroinitializer

define <16 x i8> @test1(<2 x i64> %x) {
entry:
  %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
  ret <16 x i8> %tmp
}

@GLOBAL_as1 = internal addrspace(1) global [4 x i32] zeroinitializer

define <16 x i8> @test1_as1(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1(
; CHECK: tmp = load
; CHECK: GLOBAL_as1{{.*}}align 16
  %tmp = load <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}

@GLOBAL_as1_gep = internal addrspace(1) global [8 x i32] zeroinitializer

define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
; CHECK: tmp = load
; CHECK: GLOBAL_as1_gep{{.*}}align 16
  %tmp = load <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
  ret <16 x i8> %tmp
}


; When a load or store lacks an explicit alignment, add one.
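; (The datalayout above gives double an ABI alignment of 8 (f64:64:64), so the
; unannotated load and store in @test2 should become `align 8`.)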

; CHECK-LABEL: @test2(
; CHECK: load double* %p, align 8
; CHECK: store double %n, double* %p, align 8

define double @test2(double* %p, double %n) nounwind {
  %t = load double* %p
  store double %n, double* %p
  ret double %t
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

declare void @use(i8*)

%struct.s = type { i32, i32, i32, i32 }

define void @test3(%struct.s* sret %a4) {
; Check that the alignment is bumped up to the alignment of the sret type.
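; (%struct.s is four i32s with 4-byte alignment under this datalayout, so the
; memset's alignment argument should be raised from 1 to 4.)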
; CHECK-LABEL: @test3(
  %a4.cast = bitcast %struct.s* %a4 to i8*
  call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i32 1, i1 false)
; CHECK: call void @llvm.memset.p0i8.i64(i8* %a4.cast, i8 0, i64 16, i32 4, i1 false)
  call void @use(i8* %a4.cast)
  ret void
}