// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
// RUN: %cheri_purecap_cc1 -DCODEGEN -Wno-tautological-compare -o - -O2 -emit-llvm %s -cheri-uintcap=offset | FileCheck %s -check-prefix=PURECAP
// RUN: %cheri_cc1 -DCODEGEN -Wno-tautological-compare -o - -O2 -emit-llvm %s -cheri-uintcap=offset | FileCheck %s -check-prefix=HYBRID

typedef long vaddr_t;
typedef __UINTPTR_TYPE__ uintptr_t;

#define __static_assert_power_of_two(val)                          \
  _Static_assert(__builtin_choose_expr(__builtin_constant_p(val),  \
                                       (val & (val - 1)) == 0, 1), \
                 "Alignment must be a power-of-two");
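// Power-of-two check: a power of two has exactly one bit set, so clearing its
// lowest set bit yields zero, i.e. (val & (val - 1)) == 0 (e.g. 8 & 7 == 0,
// but 6 & 5 == 4). The __builtin_constant_p/__builtin_choose_expr wrapper makes
// the assertion trivially true for non-constant alignments so they still compile.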

#define __macro_is_aligned_array(addr, align) ({                               \
  extern char __check_align_is_power_of_two[__builtin_choose_expr(             \
      __builtin_constant_p(align), ((align & (align - 1)) == 0 ? 1 : -1), 1)]; \
  _Bool result = ((vaddr_t)addr & ((vaddr_t)(align)-1)) == 0;                  \
  result;                                                                      \
})
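// Pre-C11 variant of the same check: declaring an array with a negative size is
// a hard error, so a non-power-of-two constant alignment triggers the
// "negative size" diagnostic exercised in bad_align_macro() below.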

#define __macro_is_aligned(addr, align) ({                        \
  __static_assert_power_of_two(align)                             \
      _Bool result = ((vaddr_t)addr & ((vaddr_t)(align)-1)) == 0; \
  result;                                                         \
})
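// An address is aligned to a power-of-two boundary iff none of the low
// log2(align) bits are set, hence the (addr & (align - 1)) == 0 test
// (e.g. 0x1000 & 0xf == 0, while 0x1004 & 0xf == 4).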

// PURECAP-LABEL: define {{[^@]+}}@is_aligned_macro
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i32 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #0
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[CONV:%.*]] = sext i32 [[ALIGN]] to i64
// PURECAP-NEXT:    [[SUB:%.*]] = add nsw i64 [[CONV]], -1
// PURECAP-NEXT:    [[AND:%.*]] = and i64 [[TMP1]], [[SUB]]
// PURECAP-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// PURECAP-NEXT:    ret i1 [[CMP]]
//
// HYBRID-LABEL: define {{[^@]+}}@is_aligned_macro
// HYBRID-SAME: (i32* [[PTR:%.*]], i32 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[CONV:%.*]] = sext i32 [[ALIGN]] to i64
// HYBRID-NEXT:    [[SUB:%.*]] = add nsw i64 [[CONV]], -1
// HYBRID-NEXT:    [[AND:%.*]] = and i64 [[SUB]], [[TMP0]]
// HYBRID-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// HYBRID-NEXT:    ret i1 [[CMP]]
//
_Bool is_aligned_macro(int *ptr, int align) {
  return __macro_is_aligned(ptr, align);
}

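// __builtin_is_aligned lowers to the same address-mask-and-compare sequence as
// the macro above; the only visible difference in the checks below is that the
// builtin treats the alignment as unsigned (zext) where the macro's vaddr_t
// cast sign-extends it (sext).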
// PURECAP-LABEL: define {{[^@]+}}@is_aligned_builtin
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i32 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #0
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// PURECAP-NEXT:    [[MASK:%.*]] = add nsw i64 [[ALIGNMENT]], -1
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[PTRADDR:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[SET_BITS:%.*]] = and i64 [[PTRADDR]], [[MASK]]
// PURECAP-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// PURECAP-NEXT:    ret i1 [[IS_ALIGNED]]
//
// HYBRID-LABEL: define {{[^@]+}}@is_aligned_builtin
// HYBRID-SAME: (i32* [[PTR:%.*]], i32 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
// HYBRID-NEXT:    [[MASK:%.*]] = add nsw i64 [[ALIGNMENT]], -1
// HYBRID-NEXT:    [[INTPTR:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[SET_BITS:%.*]] = and i64 [[MASK]], [[INTPTR]]
// HYBRID-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
// HYBRID-NEXT:    ret i1 [[IS_ALIGNED]]
//
_Bool is_aligned_builtin(int *ptr, int align) {
  return __builtin_is_aligned(ptr, align);
}

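// Open-coded round-up: compute the misalignment and, when it is non-zero, add
// (align - misalignment) to reach the next boundary. In purecap mode the checks
// below show the address being read with llvm.cheri.cap.address.get and the
// adjustment applied as a GEP on the original pointer, so the result stays
// derived from the incoming capability.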
// PURECAP-LABEL: define {{[^@]+}}@align_up_inline
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #0
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[AND:%.*]] = and i64 [[TMP1]], [[SUB]]
// PURECAP-NEXT:    [[CMP_NOT:%.*]] = icmp eq i64 [[AND]], 0
// PURECAP-NEXT:    [[SUB1:%.*]] = sub nsw i64 [[ALIGN]], [[AND]]
// PURECAP-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8 addrspace(200)* [[TMP0]], i64 [[SUB1]]
// PURECAP-NEXT:    [[RESULT_0:%.*]] = select i1 [[CMP_NOT]], i8 addrspace(200)* [[TMP0]], i8 addrspace(200)* [[ADD_PTR]]
// PURECAP-NEXT:    [[TMP2:%.*]] = bitcast i8 addrspace(200)* [[RESULT_0]] to i32 addrspace(200)*
// PURECAP-NEXT:    ret i32 addrspace(200)* [[TMP2]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_inline
// HYBRID-SAME: (i32* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[TMP0:%.*]] = bitcast i32* [[PTR]] to i8*
// HYBRID-NEXT:    [[TMP1:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// HYBRID-NEXT:    [[AND:%.*]] = and i64 [[SUB]], [[TMP1]]
// HYBRID-NEXT:    [[CMP_NOT:%.*]] = icmp eq i64 [[AND]], 0
// HYBRID-NEXT:    [[SUB1:%.*]] = sub nsw i64 [[ALIGN]], [[AND]]
// HYBRID-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[SUB1]]
// HYBRID-NEXT:    [[RESULT_0:%.*]] = select i1 [[CMP_NOT]], i8* [[TMP0]], i8* [[ADD_PTR]]
// HYBRID-NEXT:    [[TMP2:%.*]] = bitcast i8* [[RESULT_0]] to i32*
// HYBRID-NEXT:    ret i32* [[TMP2]]
//
int *align_up_inline(int *ptr, vaddr_t align) {
  char *result = (char *)ptr;
  vaddr_t unaligned = (vaddr_t)ptr & (align - 1);
  if (unaligned != 0) {
    result += align - unaligned;
  }
  return (int *)result;
}

#define __macro_align_up(addr, align) ({                                                       \
  __static_assert_power_of_two(align);                                                         \
  vaddr_t unaligned_bits = (vaddr_t)addr & (align - 1);                                        \
  unaligned_bits == 0 ? addr : (__typeof__(addr))((uintptr_t)addr + (align - unaligned_bits)); \
})
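// Same round-up arithmetic as align_up_inline(), but generic over the argument
// type via __typeof__ and routed through uintptr_t. For example, rounding
// 0x1003 up to a 16-byte boundary gives 0x1003 + (0x10 - 0x3) = 0x1010. With
// -cheri-uintcap=offset the purecap checks below show the addition done via
// llvm.cheri.cap.offset.get/offset.set, i.e. the uintptr_t arithmetic updates
// the capability's offset rather than producing a bare integer.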

// PURECAP-LABEL: define {{[^@]+}}@align_up_macro
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #0
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[AND:%.*]] = and i64 [[TMP1]], [[SUB]]
// PURECAP-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// PURECAP-NEXT:    br i1 [[CMP]], label [[COND_END:%.*]], label [[COND_FALSE:%.*]]
// PURECAP:       cond.false:
// PURECAP-NEXT:    [[SUB1:%.*]] = sub i64 [[ALIGN]], [[AND]]
// PURECAP-NEXT:    [[TMP2:%.*]] = tail call i64 @llvm.cheri.cap.offset.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[ADD:%.*]] = add i64 [[SUB1]], [[TMP2]]
// PURECAP-NEXT:    [[TMP3:%.*]] = tail call i8 addrspace(200)* @llvm.cheri.cap.offset.set.i64(i8 addrspace(200)* [[TMP0]], i64 [[ADD]])
// PURECAP-NEXT:    [[TMP4:%.*]] = bitcast i8 addrspace(200)* [[TMP3]] to i32 addrspace(200)*
// PURECAP-NEXT:    br label [[COND_END]]
// PURECAP:       cond.end:
// PURECAP-NEXT:    [[COND:%.*]] = phi i32 addrspace(200)* [ [[TMP4]], [[COND_FALSE]] ], [ [[PTR]], [[ENTRY:%.*]] ]
// PURECAP-NEXT:    ret i32 addrspace(200)* [[COND]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_macro
// HYBRID-SAME: (i32* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// HYBRID-NEXT:    [[AND:%.*]] = and i64 [[SUB]], [[TMP0]]
// HYBRID-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// HYBRID-NEXT:    [[SUB1:%.*]] = add i64 [[TMP0]], [[ALIGN]]
// HYBRID-NEXT:    [[ADD:%.*]] = sub i64 [[SUB1]], [[AND]]
// HYBRID-NEXT:    [[TMP1:%.*]] = inttoptr i64 [[ADD]] to i32*
// HYBRID-NEXT:    [[COND:%.*]] = select i1 [[CMP]], i32* [[PTR]], i32* [[TMP1]]
// HYBRID-NEXT:    ret i32* [[COND]]
//
int *align_up_macro(int *ptr, vaddr_t align) {
  return __macro_align_up(ptr, align);
}

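// __builtin_align_up avoids the branch/select entirely: it lowers to the
// classic mask form (addr + (align - 1)) & -align, applies the adjustment as a
// byte offset from the original pointer, and emits an llvm.assume "align"
// bundle so later passes can rely on the result's alignment.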
// PURECAP-LABEL: define {{[^@]+}}@align_up_builtin
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #2
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[MASK:%.*]] = add i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[PTRADDR:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[MASK]], [[PTRADDR]]
// PURECAP-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// PURECAP-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// PURECAP-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[PTRADDR]]
// PURECAP-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8 addrspace(200)* [[TMP0]], i64 [[DIFF]]
// PURECAP-NEXT:    [[TMP1:%.*]] = bitcast i8 addrspace(200)* [[ALIGNED_RESULT]] to i32 addrspace(200)*
// PURECAP-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32 addrspace(200)* [[TMP1]], i64 [[ALIGN]]) ]
// PURECAP-NEXT:    ret i32 addrspace(200)* [[TMP1]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_builtin
// HYBRID-SAME: (i32* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #1
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[MASK:%.*]] = add i64 [[ALIGN]], -1
// HYBRID-NEXT:    [[INTPTR:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[MASK]], [[INTPTR]]
// HYBRID-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// HYBRID-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// HYBRID-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
// HYBRID-NEXT:    [[TMP0:%.*]] = bitcast i32* [[PTR]] to i8*
// HYBRID-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
// HYBRID-NEXT:    [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to i32*
// HYBRID-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP1]], i64 [[ALIGN]]) ]
// HYBRID-NEXT:    ret i32* [[TMP1]]
//
int *align_up_builtin(int *ptr, vaddr_t align) {
  return __builtin_align_up(ptr, align);
}

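// Both the macro and the builtin also accept plain integer addresses; in that
// case purecap and hybrid produce identical integer-only IR, with no capability
// intrinsics involved.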
// PURECAP-LABEL: define {{[^@]+}}@align_up_macro_int_type
// PURECAP-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[AND:%.*]] = and i64 [[SUB]], [[ADDR]]
// PURECAP-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// PURECAP-NEXT:    [[SUB1:%.*]] = sub nsw i64 [[ALIGN]], [[AND]]
// PURECAP-NEXT:    [[ADD:%.*]] = select i1 [[CMP]], i64 0, i64 [[SUB1]]
// PURECAP-NEXT:    [[COND:%.*]] = add i64 [[ADD]], [[ADDR]]
// PURECAP-NEXT:    ret i64 [[COND]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_macro_int_type
// HYBRID-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// HYBRID-NEXT:    [[AND:%.*]] = and i64 [[SUB]], [[ADDR]]
// HYBRID-NEXT:    [[CMP:%.*]] = icmp eq i64 [[AND]], 0
// HYBRID-NEXT:    [[SUB1:%.*]] = sub nsw i64 [[ALIGN]], [[AND]]
// HYBRID-NEXT:    [[ADD:%.*]] = select i1 [[CMP]], i64 0, i64 [[SUB1]]
// HYBRID-NEXT:    [[COND:%.*]] = add i64 [[ADD]], [[ADDR]]
// HYBRID-NEXT:    ret i64 [[COND]]
//
vaddr_t align_up_macro_int_type(vaddr_t addr, vaddr_t align) {
  return __macro_align_up(addr, align);
}

// PURECAP-LABEL: define {{[^@]+}}@align_up_builtin_int_type
// PURECAP-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[MASK:%.*]] = add i64 [[ADDR]], -1
// PURECAP-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[MASK]], [[ALIGN]]
// PURECAP-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// PURECAP-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// PURECAP-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_builtin_int_type
// HYBRID-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[MASK:%.*]] = add i64 [[ADDR]], -1
// HYBRID-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[MASK]], [[ALIGN]]
// HYBRID-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// HYBRID-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
// HYBRID-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
vaddr_t align_up_builtin_int_type(vaddr_t addr, vaddr_t align) {
  return __builtin_align_up(addr, align);
}

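// With constant arguments everything folds at compile time:
// align_up(31, 64) == 64 and align_down(65, 32) == 64, so each of the *_const
// functions simply returns 64.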
// PURECAP-LABEL: define {{[^@]+}}@align_up_macro_const() local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    ret i32 64
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_macro_const() local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    ret i32 64
//
int align_up_macro_const() {
  return __macro_align_up(31, 64);
}

// PURECAP-LABEL: define {{[^@]+}}@align_up_builtin_const() local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    ret i32 64
//
// HYBRID-LABEL: define {{[^@]+}}@align_up_builtin_const() local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    ret i32 64
//
int align_up_builtin_const() {
  return __builtin_align_up(31, 64);
}

#define __macro_align_down(addr, align) ({              \
  __static_assert_power_of_two(align);                  \
  vaddr_t unaligned_bits = (vaddr_t)addr & (align - 1); \
  (__typeof__(addr))((uintptr_t)addr - unaligned_bits); \
})
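// Rounding down only needs to clear the low bits: subtracting the misaligned
// bits is equivalent to addr & ~(align - 1), which is exactly what the hybrid
// IR for align_down_macro() simplifies to (and i64 %addr, -align).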

// PURECAP-LABEL: define {{[^@]+}}@align_down_macro
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #0
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[SUB:%.*]] = add nsw i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[AND:%.*]] = and i64 [[TMP1]], [[SUB]]
// PURECAP-NEXT:    [[TMP2:%.*]] = tail call i64 @llvm.cheri.cap.offset.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[SUB1:%.*]] = sub i64 [[TMP2]], [[AND]]
// PURECAP-NEXT:    [[TMP3:%.*]] = tail call i8 addrspace(200)* @llvm.cheri.cap.offset.set.i64(i8 addrspace(200)* [[TMP0]], i64 [[SUB1]])
// PURECAP-NEXT:    [[TMP4:%.*]] = bitcast i8 addrspace(200)* [[TMP3]] to i32 addrspace(200)*
// PURECAP-NEXT:    ret i32 addrspace(200)* [[TMP4]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_macro
// HYBRID-SAME: (i32* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[TMP0:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[SUB_NOT:%.*]] = sub i64 0, [[ALIGN]]
// HYBRID-NEXT:    [[SUB1:%.*]] = and i64 [[TMP0]], [[SUB_NOT]]
// HYBRID-NEXT:    [[TMP1:%.*]] = inttoptr i64 [[SUB1]] to i32*
// HYBRID-NEXT:    ret i32* [[TMP1]]
//
int *align_down_macro(int *ptr, vaddr_t align) {
  return __macro_align_down(ptr, align);
}

// PURECAP-LABEL: define {{[^@]+}}@align_down_builtin
// PURECAP-SAME: (i32 addrspace(200)* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #2
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[TMP0:%.*]] = bitcast i32 addrspace(200)* [[PTR]] to i8 addrspace(200)*
// PURECAP-NEXT:    [[PTRADDR:%.*]] = tail call i64 @llvm.cheri.cap.address.get.i64(i8 addrspace(200)* [[TMP0]])
// PURECAP-NEXT:    [[TMP1:%.*]] = add i64 [[ALIGN]], -1
// PURECAP-NEXT:    [[TMP2:%.*]] = and i64 [[PTRADDR]], [[TMP1]]
// PURECAP-NEXT:    [[DIFF:%.*]] = sub i64 0, [[TMP2]]
// PURECAP-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8 addrspace(200)* [[TMP0]], i64 [[DIFF]]
// PURECAP-NEXT:    [[TMP3:%.*]] = bitcast i8 addrspace(200)* [[ALIGNED_RESULT]] to i32 addrspace(200)*
// PURECAP-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32 addrspace(200)* [[TMP3]], i64 [[ALIGN]]) ]
// PURECAP-NEXT:    ret i32 addrspace(200)* [[TMP3]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_builtin
// HYBRID-SAME: (i32* [[PTR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #1
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[INTPTR:%.*]] = ptrtoint i32* [[PTR]] to i64
// HYBRID-NEXT:    [[TMP0:%.*]] = add i64 [[ALIGN]], -1
// HYBRID-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], [[INTPTR]]
// HYBRID-NEXT:    [[DIFF:%.*]] = sub i64 0, [[TMP1]]
// HYBRID-NEXT:    [[TMP2:%.*]] = bitcast i32* [[PTR]] to i8*
// HYBRID-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP2]], i64 [[DIFF]]
// HYBRID-NEXT:    [[TMP3:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to i32*
// HYBRID-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP3]], i64 [[ALIGN]]) ]
// HYBRID-NEXT:    ret i32* [[TMP3]]
//
int *align_down_builtin(int *ptr, vaddr_t align) {
  return __builtin_align_down(ptr, align);
}

// PURECAP-LABEL: define {{[^@]+}}@align_down_macro_int_type
// PURECAP-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[SUB_NOT:%.*]] = sub i64 0, [[ALIGN]]
// PURECAP-NEXT:    [[SUB1:%.*]] = and i64 [[SUB_NOT]], [[ADDR]]
// PURECAP-NEXT:    ret i64 [[SUB1]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_macro_int_type
// HYBRID-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[SUB_NOT:%.*]] = sub i64 0, [[ALIGN]]
// HYBRID-NEXT:    [[SUB1:%.*]] = and i64 [[SUB_NOT]], [[ADDR]]
// HYBRID-NEXT:    ret i64 [[SUB1]]
//
vaddr_t align_down_macro_int_type(vaddr_t addr, vaddr_t align) {
  return __macro_align_down(addr, align);
}

// PURECAP-LABEL: define {{[^@]+}}@align_down_builtin_int_type
// PURECAP-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// PURECAP-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[INVERTED_MASK]], [[ADDR]]
// PURECAP-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_builtin_int_type
// HYBRID-SAME: (i64 signext [[ADDR:%.*]], i64 signext [[ALIGN:%.*]]) local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    [[INVERTED_MASK:%.*]] = sub i64 0, [[ALIGN]]
// HYBRID-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[INVERTED_MASK]], [[ADDR]]
// HYBRID-NEXT:    ret i64 [[ALIGNED_RESULT]]
//
vaddr_t align_down_builtin_int_type(vaddr_t addr, vaddr_t align) {
  return __builtin_align_down(addr, align);
}

// PURECAP-LABEL: define {{[^@]+}}@align_down_macro_const() local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    ret i32 64
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_macro_const() local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    ret i32 64
//
int align_down_macro_const() {
  return __macro_align_down(65, 32);
}

// PURECAP-LABEL: define {{[^@]+}}@align_down_builtin_const() local_unnamed_addr addrspace(200) #4
// PURECAP-NEXT:  entry:
// PURECAP-NEXT:    ret i32 64
//
// HYBRID-LABEL: define {{[^@]+}}@align_down_builtin_const() local_unnamed_addr #0
// HYBRID-NEXT:  entry:
// HYBRID-NEXT:    ret i32 64
//
int align_down_builtin_const() {
  return __builtin_align_down(65, 32);
}

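// When CODEGEN is not defined, the same helpers are exercised with a
// non-power-of-two alignment to check the diagnostics: the array trick reports
// a negative array size, the _Static_assert variants report the failed
// requirement, and the builtins reject the alignment outright.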
#ifndef CODEGEN
void bad_align_macro(int *ptr) {
  (void)__macro_is_aligned_array(ptr, 7); // expected-error {{'__check_align_is_power_of_two' declared as an array with a negative size}}
  (void)__macro_is_aligned(ptr, 7);       // expected-error {{static_assert failed due to requirement '(7 & (7 - 1)) == 0' "Alignment must be a power-of-two"}}
  (void)__macro_align_up(ptr, 7);         // expected-error {{static_assert failed due to requirement '(7 & (7 - 1)) == 0' "Alignment must be a power-of-two"}}
  (void)__macro_align_down(ptr, 7);       // expected-error {{static_assert failed due to requirement '(7 & (7 - 1)) == 0' "Alignment must be a power-of-two"}}
}

_Bool bad_align_builtin(int *ptr) {
  (void)__builtin_is_aligned(ptr, 7); // expected-error {{requested alignment is not a power of 2}}
  (void)__builtin_align_up(ptr, 7);   // expected-error {{requested alignment is not a power of 2}}
  (void)__builtin_align_down(ptr, 7); // expected-error {{requested alignment is not a power of 2}}
}
#endif