// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED

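// The SIGNED prefix covers the default ABI, where unsigned fixed-point types
// use all of their bits for the value; the UNSIGNED prefix covers
// -fpadding-on-unsigned-fixed-point, where unsigned types reserve a padding
// bit and so share the scale of their signed counterparts.
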
short _Accum sa;
_Accum a, a2, a3, a4;
long _Accum la;
unsigned short _Accum usa;
unsigned _Accum ua;
unsigned long _Accum ula;

short _Fract sf;
_Fract f;
long _Fract lf;
unsigned short _Fract usf;
unsigned _Fract uf;
unsigned long _Fract ulf;

_Sat short _Accum sa_sat;
_Sat _Accum a_sat;
_Sat long _Accum la_sat;
_Sat unsigned short _Accum usa_sat;
_Sat unsigned _Accum ua_sat;
_Sat unsigned long _Accum ula_sat;
_Sat unsigned _Fract uf_sat;

int i;
unsigned int ui;
_Bool b;

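// Test function names encode the result and operand types using the same
// abbreviations as the globals above, e.g. sadd_asaa performs a = sa + a and
// sat_sassasi performs sa_sat = sa_sat + i.
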
// CHECK-LABEL: @sadd_sasasa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[TMP1]]
// CHECK-NEXT: store i16 [[TMP2]], i16* @sa, align 2
// CHECK-NEXT: ret void
//
void sadd_sasasa() {
  sa = sa + sa;
}

// CHECK-LABEL: @sadd_asaa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[UPSCALE]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP2]], i32* @a, align 4
// CHECK-NEXT: ret void
//
void sadd_asaa() {
  a = sa + a;
}

// CHECK-LABEL: @sadd_sasasf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @sf, align 1
// CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[RESIZE]]
// CHECK-NEXT: store i16 [[TMP2]], i16* @sa, align 2
// CHECK-NEXT: ret void
//
void sadd_sasasf() {
  sa = sa + sf;
}

// CHECK-LABEL: @sadd_sasaf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @f, align 2
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i24
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i24
// CHECK-NEXT: [[TMP2:%.*]] = add i24 [[UPSCALE]], [[RESIZE1]]
// CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i24 [[TMP2]], 8
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT: ret void
//
void sadd_sasaf() {
  sa = sa + f;
}

// CHECK-LABEL: @sadd_aasf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @sf, align 1
// CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], [[UPSCALE]]
// CHECK-NEXT: store i32 [[TMP2]], i32* @a, align 4
// CHECK-NEXT: ret void
//
void sadd_aasf() {
  a = a + sf;
}

// CHECK-LABEL: @sadd_aalf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @lf, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i48
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i48 [[RESIZE]], 16
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i48
// CHECK-NEXT: [[TMP2:%.*]] = add i48 [[UPSCALE]], [[RESIZE1]]
// CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i48 [[TMP2]], 16
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i48 [[DOWNSCALE]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
// CHECK-NEXT: ret void
//
void sadd_aalf() {
  a = a + lf;
}

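// Mixing signed and unsigned operands produces different IR depending on
// whether unsigned fixed-point types carry a padding bit, so the cases below
// have separate SIGNED and UNSIGNED check lines.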
// SIGNED-LABEL: @sadd_sasausa(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i17
// SIGNED-NEXT: [[TMP2:%.*]] = add i17 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sadd_sasausa(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[TMP1]]
// UNSIGNED-NEXT: store i16 [[TMP2]], i16* @sa, align 2
// UNSIGNED-NEXT: ret void
//
void sadd_sasausa() {
  sa = sa + usa;
}

// SIGNED-LABEL: @sadd_asaua(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i33
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 9
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i33
// SIGNED-NEXT: [[TMP2:%.*]] = add i33 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sadd_asaua(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i32 [[UPSCALE]], [[TMP1]]
// UNSIGNED-NEXT: store i32 [[TMP2]], i32* @a, align 4
// UNSIGNED-NEXT: ret void
//
void sadd_asaua() {
  a = sa + ua;
}

// SIGNED-LABEL: @sadd_sasausf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i8 [[TMP1]] to i17
// SIGNED-NEXT: [[TMP2:%.*]] = add i17 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sadd_sasausf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[RESIZE]]
// UNSIGNED-NEXT: store i16 [[TMP2]], i16* @sa, align 2
// UNSIGNED-NEXT: ret void
//
void sadd_sasausf() {
  sa = sa + usf;
}

// SIGNED-LABEL: @sadd_sasaulf(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ulf, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i41
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i41 [[RESIZE]], 25
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i41
// SIGNED-NEXT: [[TMP2:%.*]] = add i41 [[UPSCALE]], [[RESIZE1]]
// SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i41 [[TMP2]], 25
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i41 [[DOWNSCALE]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sadd_sasaulf(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ulf, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 24
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i40 [[UPSCALE]], [[RESIZE1]]
// UNSIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i40 [[TMP2]], 24
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[DOWNSCALE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// UNSIGNED-NEXT: ret void
//
void sadd_sasaulf() {
  sa = sa + ulf;
}

// CHECK-LABEL: @sadd_aaaaa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @a2, align 4
// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP0]], [[TMP1]]
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* @a3, align 4
// CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP2]], [[TMP3]]
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* @a4, align 4
// CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP4]], [[TMP5]]
// CHECK-NEXT: store i32 [[TMP6]], i32* @a, align 4
// CHECK-NEXT: ret void
//
void sadd_aaaaa() {
  a = a + a2 + a3 + a4;
}

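// The uadd_* cases use only unsigned operands; conversions between them use
// zero extension and logical shifts.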
// CHECK-LABEL: @uadd_usausausa(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
// CHECK-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[TMP1]]
// CHECK-NEXT: store i16 [[TMP2]], i16* @usa, align 2
// CHECK-NEXT: ret void
//
void uadd_usausausa() {
  usa = usa + usa;
}

// CHECK-LABEL: @uadd_uausaua(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT: [[TMP2:%.*]] = add i32 [[UPSCALE]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP2]], i32* @ua, align 4
// CHECK-NEXT: ret void
//
void uadd_uausaua() {
  ua = usa + ua;
}

// CHECK-LABEL: @uadd_usausausf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
// CHECK-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// CHECK-NEXT: [[TMP2:%.*]] = add i16 [[TMP0]], [[RESIZE]]
// CHECK-NEXT: store i16 [[TMP2]], i16* @usa, align 2
// CHECK-NEXT: ret void
//
void uadd_usausausf() {
  usa = usa + usf;
}

// CHECK-LABEL: @uadd_usausauf(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @uf, align 2
// CHECK-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
// CHECK-NEXT: [[TMP2:%.*]] = add i24 [[UPSCALE]], [[RESIZE1]]
// CHECK-NEXT: [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
// CHECK-NEXT: ret void
//
void uadd_usausauf() {
  usa = usa + uf;
}

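// The int_* cases add an integer (or _Bool) operand. The integer is brought to
// the fixed-point operand's scale by shifting it left by the number of
// fractional bits before the addition is performed.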
// CHECK-LABEL: @int_sasai(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = add i39 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT: ret void
//
void int_sasai() {
  sa = sa + i;
}

// CHECK-LABEL: @int_sasaui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = add i40 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT: ret void
//
void int_sasaui() {
  sa = sa + ui;
}

// SIGNED-LABEL: @int_usausai(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = add i40 [[RESIZE]], [[UPSCALE]]
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @int_usausai(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i39 [[RESIZE]], [[UPSCALE]]
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT: ret void
//
void int_usausai() {
  usa = usa + i;
}

// SIGNED-LABEL: @int_usausaui(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = add i40 [[RESIZE]], [[UPSCALE]]
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @int_usausaui(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = add i39 [[RESIZE]], [[UPSCALE]]
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT: ret void
//
void int_usausaui() {
  usa = usa + ui;
}

// CHECK-LABEL: @int_lflfui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @lf, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i64 [[RESIZE1]], 31
// CHECK-NEXT: [[TMP2:%.*]] = add i64 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], i32* @lf, align 4
// CHECK-NEXT: ret void
//
void int_lflfui() {
  lf = lf + ui;
}

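// A _Bool operand is first converted to int and then handled like the integer
// cases above.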
// CHECK-LABEL: @int_aab(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @b, align 1
// CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP1]] to i1
// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
// CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT: [[TMP2:%.*]] = add i47 [[RESIZE]], [[UPSCALE]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
// CHECK-NEXT: ret void
//
void int_aab() {
  a = a + b;
}

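// The sat_* cases target a saturating result type. The addition itself uses
// llvm.sadd.sat/llvm.uadd.sat; when the common type is wider than the result,
// the intrinsic result is additionally clamped to the result's range with
// explicit compare/select pairs before being truncated and stored.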
// CHECK-LABEL: @sat_sassasas(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
// CHECK-NEXT: store i16 [[TMP2]], i16* @sa_sat, align 2
// CHECK-NEXT: ret void
//
void sat_sassasas() {
  sa_sat = sa + sa_sat;
}

// SIGNED-LABEL: @sat_usasusausas(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
// SIGNED-NEXT: store i16 [[TMP2]], i16* @usa_sat, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sat_usasusausas(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE1]], i16* @usa_sat, align 2
// UNSIGNED-NEXT: ret void
//
void sat_usasusausas() {
  usa_sat = usa + usa_sat;
}

// SIGNED-LABEL: @sat_uasuausas(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ua, align 4
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]])
// SIGNED-NEXT: store i32 [[TMP2]], i32* @ua_sat, align 4
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sat_uasuausas(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ua, align 4
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]])
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
// UNSIGNED-NEXT: store i32 [[RESIZE2]], i32* @ua_sat, align 4
// UNSIGNED-NEXT: ret void
//
void sat_uasuausas() {
  ua_sat = ua + usa_sat;
}

// CHECK-LABEL: @sat_sassasi(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i39 @llvm.sadd.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]])
// CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], -32768
// CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 -32768, i39 [[SATMAX]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa_sat, align 2
// CHECK-NEXT: ret void
//
void sat_sassasi() {
  sa_sat = sa_sat + i;
}

// CHECK-LABEL: @sat_sassasui(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.sadd.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]])
// CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 32767
// CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 32767, i40 [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], -32768
// CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 -32768, i40 [[SATMAX]]
// CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa_sat, align 2
// CHECK-NEXT: ret void
//
void sat_sassasui() {
  sa_sat = sa_sat + ui;
}

// SIGNED-LABEL: @sat_ufsufsufs(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
// SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
// SIGNED-NEXT: store i16 [[TMP2]], i16* @uf_sat, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sat_ufsufsufs(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
// UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE1]], i16* @uf_sat, align 2
// UNSIGNED-NEXT: ret void
//
void sat_ufsufsufs() {
  uf_sat = uf_sat + uf_sat;
}

// SIGNED-LABEL: @sat_usasusasi(
// SIGNED-NEXT: entry:
// SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.sadd.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]])
// SIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 65535
// SIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 65535, i40 [[TMP2]]
// SIGNED-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], 0
// SIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 0, i40 [[SATMAX]]
// SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa_sat, align 2
// SIGNED-NEXT: ret void
//
// UNSIGNED-LABEL: @sat_usasusasi(
// UNSIGNED-NEXT: entry:
// UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
// UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.sadd.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]])
// UNSIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// UNSIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// UNSIGNED-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], 0
// UNSIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 0, i39 [[SATMAX]]
// UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa_sat, align 2
// UNSIGNED-NEXT: ret void
//
void sat_usasusasi() {
  usa_sat = usa_sat + i;
}