// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED
4
5 short _Accum sa;
6 _Accum a, a2, a3, a4;
7 long _Accum la;
8 unsigned short _Accum usa;
9 unsigned _Accum ua;
10 unsigned long _Accum ula;
11
12 short _Fract sf;
13 _Fract f;
14 long _Fract lf;
15 unsigned short _Fract usf;
16 unsigned _Fract uf;
17 unsigned long _Fract ulf;
18
19 _Sat short _Accum sa_sat;
20 _Sat _Accum a_sat;
21 _Sat long _Accum la_sat;
22 _Sat unsigned short _Accum usa_sat;
23 _Sat unsigned _Accum ua_sat;
24 _Sat unsigned long _Accum ula_sat;
25 _Sat unsigned _Fract uf_sat;
26
27 int i;
28 unsigned int ui;
29 _Bool b;
30
31 // CHECK-LABEL: @smul_sasasa(
32 // CHECK-NEXT: entry:
33 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
34 // CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @sa, align 2
35 // CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
36 // CHECK-NEXT: store i16 [[TMP2]], i16* @sa, align 2
37 // CHECK-NEXT: ret void
38 //
smul_sasasa()39 void smul_sasasa() {
40 sa = sa * sa;
41 }
42
43 // CHECK-LABEL: @smul_asaa(
44 // CHECK-NEXT: entry:
45 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
46 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4
47 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
48 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
49 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
50 // CHECK-NEXT: store i32 [[TMP2]], i32* @a, align 4
51 // CHECK-NEXT: ret void
52 //
smul_asaa()53 void smul_asaa() {
54 a = sa * a;
55 }
56
57 // CHECK-LABEL: @smul_sasasf(
58 // CHECK-NEXT: entry:
59 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
60 // CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @sf, align 1
61 // CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i16
62 // CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
63 // CHECK-NEXT: store i16 [[TMP2]], i16* @sa, align 2
64 // CHECK-NEXT: ret void
65 //
smul_sasasf()66 void smul_sasasf() {
67 sa = sa * sf;
68 }
69
70 // CHECK-LABEL: @smul_sasaf(
71 // CHECK-NEXT: entry:
72 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
73 // CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @f, align 2
74 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i24
75 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
76 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i24
77 // CHECK-NEXT: [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
78 // CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i24 [[TMP2]], 8
79 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
80 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
81 // CHECK-NEXT: ret void
82 //
smul_sasaf()83 void smul_sasaf() {
84 sa = sa * f;
85 }
86
87 // CHECK-LABEL: @smul_aasf(
88 // CHECK-NEXT: entry:
89 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
90 // CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @sf, align 1
91 // CHECK-NEXT: [[RESIZE:%.*]] = sext i8 [[TMP1]] to i32
92 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
93 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
94 // CHECK-NEXT: store i32 [[TMP2]], i32* @a, align 4
95 // CHECK-NEXT: ret void
96 //
smul_aasf()97 void smul_aasf() {
98 a = a * sf;
99 }
100
101 // CHECK-LABEL: @smul_aalf(
102 // CHECK-NEXT: entry:
103 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
104 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @lf, align 4
105 // CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i48
106 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i48 [[RESIZE]], 16
107 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i48
108 // CHECK-NEXT: [[TMP2:%.*]] = call i48 @llvm.smul.fix.i48(i48 [[UPSCALE]], i48 [[RESIZE1]], i32 31)
109 // CHECK-NEXT: [[DOWNSCALE:%.*]] = ashr i48 [[TMP2]], 16
110 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i48 [[DOWNSCALE]] to i32
111 // CHECK-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
112 // CHECK-NEXT: ret void
113 //
smul_aalf()114 void smul_aalf() {
115 a = a * lf;
116 }
117
118 // SIGNED-LABEL: @smul_sasausa(
119 // SIGNED-NEXT: entry:
120 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
121 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
122 // SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
123 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
124 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i17
125 // SIGNED-NEXT: [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
126 // SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
127 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
128 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
129 // SIGNED-NEXT: ret void
130 //
131 // UNSIGNED-LABEL: @smul_sasausa(
132 // UNSIGNED-NEXT: entry:
133 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
134 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
135 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
136 // UNSIGNED-NEXT: store i16 [[TMP2]], i16* @sa, align 2
137 // UNSIGNED-NEXT: ret void
138 //
smul_sasausa()139 void smul_sasausa() {
140 sa = sa * usa;
141 }
142
143 // SIGNED-LABEL: @smul_asaua(
144 // SIGNED-NEXT: entry:
145 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
146 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
147 // SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i33
148 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 9
149 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i33
150 // SIGNED-NEXT: [[TMP2:%.*]] = call i33 @llvm.smul.fix.i33(i33 [[UPSCALE]], i33 [[RESIZE1]], i32 16)
151 // SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
152 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
153 // SIGNED-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
154 // SIGNED-NEXT: ret void
155 //
156 // UNSIGNED-LABEL: @smul_asaua(
157 // UNSIGNED-NEXT: entry:
158 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
159 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
160 // UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
161 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
162 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
163 // UNSIGNED-NEXT: store i32 [[TMP2]], i32* @a, align 4
164 // UNSIGNED-NEXT: ret void
165 //
smul_asaua()166 void smul_asaua() {
167 a = sa * ua;
168 }
169
170 // SIGNED-LABEL: @smul_sasausf(
171 // SIGNED-NEXT: entry:
172 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
173 // SIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
174 // SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
175 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
176 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i8 [[TMP1]] to i17
177 // SIGNED-NEXT: [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
178 // SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
179 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
180 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
181 // SIGNED-NEXT: ret void
182 //
183 // UNSIGNED-LABEL: @smul_sasausf(
184 // UNSIGNED-NEXT: entry:
185 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
186 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
187 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
188 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
189 // UNSIGNED-NEXT: store i16 [[TMP2]], i16* @sa, align 2
190 // UNSIGNED-NEXT: ret void
191 //
smul_sasausf()192 void smul_sasausf() {
193 sa = sa * usf;
194 }
195
196 // SIGNED-LABEL: @smul_sasaulf(
197 // SIGNED-NEXT: entry:
198 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
199 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ulf, align 4
200 // SIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i41
201 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i41 [[RESIZE]], 25
202 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i41
203 // SIGNED-NEXT: [[TMP2:%.*]] = call i41 @llvm.smul.fix.i41(i41 [[UPSCALE]], i41 [[RESIZE1]], i32 32)
204 // SIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i41 [[TMP2]], 25
205 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i41 [[DOWNSCALE]] to i16
206 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
207 // SIGNED-NEXT: ret void
208 //
209 // UNSIGNED-LABEL: @smul_sasaulf(
210 // UNSIGNED-NEXT: entry:
211 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
212 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ulf, align 4
213 // UNSIGNED-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
214 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 24
215 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
216 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 31)
217 // UNSIGNED-NEXT: [[DOWNSCALE:%.*]] = ashr i40 [[TMP2]], 24
218 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[DOWNSCALE]] to i16
219 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
220 // UNSIGNED-NEXT: ret void
221 //
smul_sasaulf()222 void smul_sasaulf() {
223 sa = sa * ulf;
224 }
225
226 // CHECK-LABEL: @smul_aaaaa(
227 // CHECK-NEXT: entry:
228 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
229 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @a2, align 4
230 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[TMP1]], i32 15)
231 // CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* @a3, align 4
232 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP2]], i32 [[TMP3]], i32 15)
233 // CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* @a4, align 4
234 // CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP4]], i32 [[TMP5]], i32 15)
235 // CHECK-NEXT: store i32 [[TMP6]], i32* @a, align 4
236 // CHECK-NEXT: ret void
237 //
smul_aaaaa()238 void smul_aaaaa() {
239 a = a * a2 * a3 * a4;
240 }
241
242
243 // SIGNED-LABEL: @umul_usausausa(
244 // SIGNED-NEXT: entry:
245 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
246 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
247 // SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
248 // SIGNED-NEXT: store i16 [[TMP2]], i16* @usa, align 2
249 // SIGNED-NEXT: ret void
250 //
251 // UNSIGNED-LABEL: @umul_usausausa(
252 // UNSIGNED-NEXT: entry:
253 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
254 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
255 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
256 // UNSIGNED-NEXT: store i16 [[TMP2]], i16* @usa, align 2
257 // UNSIGNED-NEXT: ret void
258 //
umul_usausausa()259 void umul_usausausa() {
260 usa = usa * usa;
261 }
262
263 // SIGNED-LABEL: @umul_uausaua(
264 // SIGNED-NEXT: entry:
265 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
266 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
267 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
268 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
269 // SIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.umul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 16)
270 // SIGNED-NEXT: store i32 [[TMP2]], i32* @ua, align 4
271 // SIGNED-NEXT: ret void
272 //
273 // UNSIGNED-LABEL: @umul_uausaua(
274 // UNSIGNED-NEXT: entry:
275 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
276 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ua, align 4
277 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
278 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
279 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
280 // UNSIGNED-NEXT: store i32 [[TMP2]], i32* @ua, align 4
281 // UNSIGNED-NEXT: ret void
282 //
umul_uausaua()283 void umul_uausaua() {
284 ua = usa * ua;
285 }
286
287 // SIGNED-LABEL: @umul_usausausf(
288 // SIGNED-NEXT: entry:
289 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
290 // SIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
291 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
292 // SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 8)
293 // SIGNED-NEXT: store i16 [[TMP2]], i16* @usa, align 2
294 // SIGNED-NEXT: ret void
295 //
296 // UNSIGNED-LABEL: @umul_usausausf(
297 // UNSIGNED-NEXT: entry:
298 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
299 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i8, i8* @usf, align 1
300 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
301 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
302 // UNSIGNED-NEXT: store i16 [[TMP2]], i16* @usa, align 2
303 // UNSIGNED-NEXT: ret void
304 //
umul_usausausf()305 void umul_usausausf() {
306 usa = usa * usf;
307 }
308
309 // SIGNED-LABEL: @umul_usausauf(
310 // SIGNED-NEXT: entry:
311 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
312 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf, align 2
313 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
314 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
315 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
316 // SIGNED-NEXT: [[TMP2:%.*]] = call i24 @llvm.umul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 16)
317 // SIGNED-NEXT: [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
318 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
319 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
320 // SIGNED-NEXT: ret void
321 //
322 // UNSIGNED-LABEL: @umul_usausauf(
323 // UNSIGNED-NEXT: entry:
324 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
325 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf, align 2
326 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
327 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
328 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
329 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
330 // UNSIGNED-NEXT: [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
331 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
332 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
333 // UNSIGNED-NEXT: ret void
334 //
umul_usausauf()335 void umul_usausauf() {
336 usa = usa * uf;
337 }
338
339
340 // CHECK-LABEL: @int_sasai(
341 // CHECK-NEXT: entry:
342 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
343 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
344 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
345 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
346 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
347 // CHECK-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
348 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
349 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
350 // CHECK-NEXT: ret void
351 //
int_sasai()352 void int_sasai() {
353 sa = sa * i;
354 }
355
356 // CHECK-LABEL: @int_sasaui(
357 // CHECK-NEXT: entry:
358 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
359 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
360 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
361 // CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
362 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
363 // CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
364 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
365 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
366 // CHECK-NEXT: ret void
367 //
int_sasaui()368 void int_sasaui() {
369 sa = sa * ui;
370 }
371
372 // SIGNED-LABEL: @int_usausai(
373 // SIGNED-NEXT: entry:
374 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
375 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
376 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
377 // SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
378 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
379 // SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
380 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
381 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
382 // SIGNED-NEXT: ret void
383 //
384 // UNSIGNED-LABEL: @int_usausai(
385 // UNSIGNED-NEXT: entry:
386 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
387 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
388 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
389 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
390 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
391 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
392 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
393 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
394 // UNSIGNED-NEXT: ret void
395 //
int_usausai()396 void int_usausai() {
397 usa = usa * i;
398 }
399
400 // SIGNED-LABEL: @int_usausaui(
401 // SIGNED-NEXT: entry:
402 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
403 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
404 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
405 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
406 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
407 // SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
408 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
409 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
410 // SIGNED-NEXT: ret void
411 //
412 // UNSIGNED-LABEL: @int_usausaui(
413 // UNSIGNED-NEXT: entry:
414 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
415 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
416 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
417 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i39
418 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
419 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
420 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
421 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
422 // UNSIGNED-NEXT: ret void
423 //
int_usausaui()424 void int_usausaui() {
425 usa = usa * ui;
426 }
427
428 // CHECK-LABEL: @int_lflfui(
429 // CHECK-NEXT: entry:
430 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @lf, align 4
431 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
432 // CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i64
433 // CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i64
434 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i64 [[RESIZE1]], 31
435 // CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.smul.fix.i64(i64 [[RESIZE]], i64 [[UPSCALE]], i32 31)
436 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i64 [[TMP2]] to i32
437 // CHECK-NEXT: store i32 [[RESIZE2]], i32* @lf, align 4
438 // CHECK-NEXT: ret void
439 //
int_lflfui()440 void int_lflfui() {
441 lf = lf * ui;
442 }
443
444 // CHECK-LABEL: @int_aab(
445 // CHECK-NEXT: entry:
446 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @a, align 4
447 // CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* @b, align 1
448 // CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP1]] to i1
449 // CHECK-NEXT: [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
450 // CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
451 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[CONV]] to i47
452 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
453 // CHECK-NEXT: [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[RESIZE]], i47 [[UPSCALE]], i32 15)
454 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
455 // CHECK-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
456 // CHECK-NEXT: ret void
457 //
int_aab()458 void int_aab() {
459 a = a * b;
460 }
461
462 // CHECK-LABEL: @int_aia(
463 // CHECK-NEXT: entry:
464 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @i, align 4
465 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @a, align 4
466 // CHECK-NEXT: [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
467 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
468 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i47
469 // CHECK-NEXT: [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[UPSCALE]], i47 [[RESIZE1]], i32 15)
470 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
471 // CHECK-NEXT: store i32 [[RESIZE2]], i32* @a, align 4
472 // CHECK-NEXT: ret void
473 //
int_aia()474 void int_aia() {
475 a = i * a;
476 }
477
478 // SIGNED-LABEL: @int_usauiusa(
479 // SIGNED-NEXT: entry:
480 // SIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ui, align 4
481 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
482 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
483 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 8
484 // SIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i40
485 // SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 8)
486 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
487 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
488 // SIGNED-NEXT: ret void
489 //
490 // UNSIGNED-LABEL: @int_usauiusa(
491 // UNSIGNED-NEXT: entry:
492 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ui, align 4
493 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa, align 2
494 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i39
495 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE]], 7
496 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i39
497 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[UPSCALE]], i39 [[RESIZE1]], i32 7)
498 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
499 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa, align 2
500 // UNSIGNED-NEXT: ret void
501 //
int_usauiusa()502 void int_usauiusa() {
503 usa = ui * usa;
504 }
505
506 // CHECK-LABEL: @int_sauisa(
507 // CHECK-NEXT: entry:
508 // CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @ui, align 4
509 // CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @sa, align 2
510 // CHECK-NEXT: [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
511 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 7
512 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i40
513 // CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 7)
514 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
515 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa, align 2
516 // CHECK-NEXT: ret void
517 //
int_sauisa()518 void int_sauisa() {
519 sa = ui * sa;
520 }
521
522
523 // CHECK-LABEL: @sat_sassasas(
524 // CHECK-NEXT: entry:
525 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa, align 2
526 // CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* @sa_sat, align 2
527 // CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
528 // CHECK-NEXT: store i16 [[TMP2]], i16* @sa_sat, align 2
529 // CHECK-NEXT: ret void
530 //
sat_sassasas()531 void sat_sassasas() {
532 sa_sat = sa * sa_sat;
533 }
534
535 // SIGNED-LABEL: @sat_usasusausas(
536 // SIGNED-NEXT: entry:
537 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
538 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
539 // SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
540 // SIGNED-NEXT: store i16 [[TMP2]], i16* @usa_sat, align 2
541 // SIGNED-NEXT: ret void
542 //
543 // UNSIGNED-LABEL: @sat_usasusausas(
544 // UNSIGNED-NEXT: entry:
545 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa, align 2
546 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
547 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
548 // UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
549 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
550 // UNSIGNED-NEXT: store i16 [[RESIZE1]], i16* @usa_sat, align 2
551 // UNSIGNED-NEXT: ret void
552 //
sat_usasusausas()553 void sat_usasusausas() {
554 usa_sat = usa * usa_sat;
555 }
556
557 // SIGNED-LABEL: @sat_uasuausas(
558 // SIGNED-NEXT: entry:
559 // SIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ua, align 4
560 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
561 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
562 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
563 // SIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.umul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 16)
564 // SIGNED-NEXT: store i32 [[TMP2]], i32* @ua_sat, align 4
565 // SIGNED-NEXT: ret void
566 //
567 // UNSIGNED-LABEL: @sat_uasuausas(
568 // UNSIGNED-NEXT: entry:
569 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i32, i32* @ua, align 4
570 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
571 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
572 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
573 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i32 @llvm.smul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
574 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
575 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
576 // UNSIGNED-NEXT: store i32 [[RESIZE2]], i32* @ua_sat, align 4
577 // UNSIGNED-NEXT: ret void
578 //
sat_uasuausas()579 void sat_uasuausas() {
580 ua_sat = ua * usa_sat;
581 }
582
583 // CHECK-LABEL: @sat_sassasi(
584 // CHECK-NEXT: entry:
585 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
586 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
587 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
588 // CHECK-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
589 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
590 // CHECK-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
591 // CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
592 // CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
593 // CHECK-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], -32768
594 // CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 -32768, i39 [[SATMAX]]
595 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
596 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa_sat, align 2
597 // CHECK-NEXT: ret void
598 //
sat_sassasi()599 void sat_sassasi() {
600 sa_sat = sa_sat * i;
601 }
602
603 // CHECK-LABEL: @sat_sassasui(
604 // CHECK-NEXT: entry:
605 // CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
606 // CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4
607 // CHECK-NEXT: [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
608 // CHECK-NEXT: [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
609 // CHECK-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
610 // CHECK-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
611 // CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 32767
612 // CHECK-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 32767, i40 [[TMP2]]
613 // CHECK-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], -32768
614 // CHECK-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 -32768, i40 [[SATMAX]]
615 // CHECK-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
616 // CHECK-NEXT: store i16 [[RESIZE2]], i16* @sa_sat, align 2
617 // CHECK-NEXT: ret void
618 //
sat_sassasui()619 void sat_sassasui() {
620 sa_sat = sa_sat * ui;
621 }
622
623 // SIGNED-LABEL: @sat_ufsufsufs(
624 // SIGNED-NEXT: entry:
625 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
626 // SIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
627 // SIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 16)
628 // SIGNED-NEXT: store i16 [[TMP2]], i16* @uf_sat, align 2
629 // SIGNED-NEXT: ret void
630 //
631 // UNSIGNED-LABEL: @sat_ufsufsufs(
632 // UNSIGNED-NEXT: entry:
633 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
634 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
635 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 15)
636 // UNSIGNED-NEXT: [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
637 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
638 // UNSIGNED-NEXT: store i16 [[RESIZE1]], i16* @uf_sat, align 2
639 // UNSIGNED-NEXT: ret void
640 //
sat_ufsufsufs()641 void sat_ufsufsufs() {
642 uf_sat = uf_sat * uf_sat;
643 }
644
645 // SIGNED-LABEL: @sat_usasusasi(
646 // SIGNED-NEXT: entry:
647 // SIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
648 // SIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
649 // SIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
650 // SIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
651 // SIGNED-NEXT: [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
652 // SIGNED-NEXT: [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
653 // SIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 65535
654 // SIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i40 65535, i40 [[TMP2]]
655 // SIGNED-NEXT: [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], 0
656 // SIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i40 0, i40 [[SATMAX]]
657 // SIGNED-NEXT: [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
658 // SIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa_sat, align 2
659 // SIGNED-NEXT: ret void
660 //
661 // UNSIGNED-LABEL: @sat_usasusasi(
662 // UNSIGNED-NEXT: entry:
663 // UNSIGNED-NEXT: [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
664 // UNSIGNED-NEXT: [[TMP1:%.*]] = load i32, i32* @i, align 4
665 // UNSIGNED-NEXT: [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
666 // UNSIGNED-NEXT: [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
667 // UNSIGNED-NEXT: [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
668 // UNSIGNED-NEXT: [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
669 // UNSIGNED-NEXT: [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
670 // UNSIGNED-NEXT: [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
671 // UNSIGNED-NEXT: [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], 0
672 // UNSIGNED-NEXT: [[SATMIN:%.*]] = select i1 [[TMP4]], i39 0, i39 [[SATMAX]]
673 // UNSIGNED-NEXT: [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
674 // UNSIGNED-NEXT: store i16 [[RESIZE2]], i16* @usa_sat, align 2
675 // UNSIGNED-NEXT: ret void
676 //
sat_usasusasi()677 void sat_usasusasi() {
678 usa_sat = usa_sat * i;
679 }
680