// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s

__INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));

// Condition where parameter to m1 is not size_t.
// CHECK-LABEL: define {{[^@]+}}@test1
// CHECK-SAME: (i32 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test1(__INT32_TYPE__ a) {
  return *m1(a);
}
22 // Condition where test2 param needs casting.
23 // CHECK-LABEL: define {{[^@]+}}@test2
24 // CHECK-SAME: (i64 [[A:%.*]]) #0
25 // CHECK-NEXT:  entry:
26 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
27 // CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
28 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
29 // CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
30 // CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
31 // CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
32 // CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
33 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
34 // CHECK-NEXT:    ret i32 [[TMP1]]
35 //
test2(__SIZE_TYPE__ a)36 __INT32_TYPE__ test2(__SIZE_TYPE__ a) {
37   return *m1(a);
38 }
__INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));

// test3 param needs casting, but 'm2' is correct.
// CHECK-LABEL: define {{[^@]+}}@test3
// CHECK-SAME: (i32 [[A:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP1]]
//
__INT32_TYPE__ test3(__INT32_TYPE__ a) {
  return *m2(a);
}
58 // Every type matches, canonical example.
59 // CHECK-LABEL: define {{[^@]+}}@test4
60 // CHECK-SAME: (i64 [[A:%.*]]) #0
61 // CHECK-NEXT:  entry:
62 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
63 // CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
64 // CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
65 // CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
66 // CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
67 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
68 // CHECK-NEXT:    ret i32 [[TMP1]]
69 //
test4(__SIZE_TYPE__ a)70 __INT32_TYPE__ test4(__SIZE_TYPE__ a) {
71   return *m2(a);
72 }


struct Empty {};
struct MultiArgs { __INT64_TYPE__ a, b;};
// Struct parameter doesn't take up an IR parameter, 'i' takes up 2.
// Truncation to i64 is permissible, since alignments of greater than 2^64 are insane.
__INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
// CHECK-LABEL: define {{[^@]+}}@test5
// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
// CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
// CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
// CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
// CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
// CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
// CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 16
// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
// CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
// CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT:    ret i32 [[TMP9]]
//
__INT32_TYPE__ test5(__int128_t a) {
  struct Empty e;
  return *m3(e, a);
}
111 // Struct parameter takes up 2 parameters, 'i' takes up 2.
112 __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
113 // CHECK-LABEL: define {{[^@]+}}@test6
114 // CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
115 // CHECK-NEXT:  entry:
116 // CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
117 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
118 // CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
119 // CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
120 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
121 // CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
122 // CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
123 // CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
124 // CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
125 // CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
126 // CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
127 // CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
128 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast %struct.MultiArgs* [[E]] to { i64, i64 }*
129 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
130 // CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
131 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
132 // CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
133 // CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
134 // CHECK-NEXT:    [[TMP9:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
135 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 0
136 // CHECK-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 16
137 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
138 // CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
139 // CHECK-NEXT:    [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
140 // CHECK-NEXT:    [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
141 // CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
142 // CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
143 // CHECK-NEXT:    ret i32 [[TMP14]]
144 //
test6(__int128_t a)145 __INT32_TYPE__ test6(__int128_t a) {
146   struct MultiArgs e;
147   return *m4(e, a);
148 }