; RUN: llc -O0 < %s | FileCheck %s

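; Test that operations the MSP430 backend cannot lower natively are turned
; into calls to the corresponding MSPABI (MSP430 EABI) runtime library
; functions.
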
target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
target triple = "msp430---elf"

@g_double = global double 123.0, align 8
@g_float = global float 123.0, align 8
@g_i32 = global i32 123, align 8
@g_i64 = global i64 456, align 8
@g_i16 = global i16 789, align 8

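; Conversions between float and double.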
define float @d2f() #0 {
entry:
; CHECK-LABEL: d2f:

; CHECK: call #__mspabi_cvtdf
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptrunc double %0 to float

  ret float %1
}

define double @f2d() #0 {
entry:
; CHECK-LABEL: f2d:

; CHECK: call #__mspabi_cvtfd
  %0 = load volatile float, float* @g_float, align 8
  %1 = fpext float %0 to double

  ret double %1
}

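; Conversions from floating point to integer.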
define i32 @d2l() #0 {
entry:
; CHECK-LABEL: d2l:

; CHECK: call #__mspabi_fixdli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i32

  ret i32 %1
}

define i64 @d2ll() #0 {
entry:
; CHECK-LABEL: d2ll:

; CHECK: call #__mspabi_fixdlli
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptosi double %0 to i64

  ret i64 %1
}

define i32 @d2ul() #0 {
entry:
; CHECK-LABEL: d2ul:

; CHECK: call #__mspabi_fixdul
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i32

  ret i32 %1
}

define i64 @d2ull() #0 {
entry:
; CHECK-LABEL: d2ull:

; CHECK: call #__mspabi_fixdull
  %0 = load volatile double, double* @g_double, align 8
  %1 = fptoui double %0 to i64

  ret i64 %1
}

define i32 @f2l() #0 {
entry:
; CHECK-LABEL: f2l:

; CHECK: call #__mspabi_fixfli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i32

  ret i32 %1
}

define i64 @f2ll() #0 {
entry:
; CHECK-LABEL: f2ll:

; CHECK: call #__mspabi_fixflli
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptosi float %0 to i64

  ret i64 %1
}

define i32 @f2ul() #0 {
entry:
; CHECK-LABEL: f2ul:

; CHECK: call #__mspabi_fixful
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i32

  ret i32 %1
}

define i64 @f2ull() #0 {
entry:
; CHECK-LABEL: f2ull:

; CHECK: call #__mspabi_fixfull
  %0 = load volatile float, float* @g_float, align 8
  %1 = fptoui float %0 to i64

  ret i64 %1
}

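; Conversions from integer to floating point.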
define double @l2d() #0 {
entry:
; CHECK-LABEL: l2d:

; CHECK: call #__mspabi_fltlid
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to double

  ret double %1
}

define double @ll2d() #0 {
entry:
; CHECK-LABEL: ll2d:

; CHECK: call #__mspabi_fltllid
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to double

  ret double %1
}

define double @ul2d() #0 {
entry:
; CHECK-LABEL: ul2d:

; CHECK: call #__mspabi_fltuld
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to double

  ret double %1
}

define double @ull2d() #0 {
entry:
; CHECK-LABEL: ull2d:

; CHECK: call #__mspabi_fltulld
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to double

  ret double %1
}

define float @l2f() #0 {
entry:
; CHECK-LABEL: l2f:

; CHECK: call #__mspabi_fltlif
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = sitofp i32 %0 to float

  ret float %1
}

define float @ll2f() #0 {
entry:
; CHECK-LABEL: ll2f:

; CHECK: call #__mspabi_fltllif
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = sitofp i64 %0 to float

  ret float %1
}

define float @ul2f() #0 {
entry:
; CHECK-LABEL: ul2f:

; CHECK: call #__mspabi_fltulf
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = uitofp i32 %0 to float

  ret float %1
}

define float @ull2f() #0 {
entry:
; CHECK-LABEL: ull2f:

; CHECK: call #__mspabi_fltullf
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = uitofp i64 %0 to float

  ret float %1
}

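; Floating-point comparisons. Every predicate is lowered to the same
; __mspabi_cmpd / __mspabi_cmpf helper; only the test of the returned value
; differs.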
define i1 @cmpd_oeq() #0 {
entry:
; CHECK-LABEL: cmpd_oeq:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oeq double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_une() #0 {
entry:
; CHECK-LABEL: cmpd_une:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp une double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_oge() #0 {
entry:
; CHECK-LABEL: cmpd_oge:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp oge double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_olt() #0 {
entry:
; CHECK-LABEL: cmpd_olt:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp olt double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_ole() #0 {
entry:
; CHECK-LABEL: cmpd_ole:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ole double %0, 123.0

  ret i1 %1
}

define i1 @cmpd_ogt() #0 {
entry:
; CHECK-LABEL: cmpd_ogt:

; CHECK: call #__mspabi_cmpd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fcmp ogt double %0, 123.0

  ret i1 %1
}

define i1 @cmpf_oeq() #0 {
entry:
; CHECK-LABEL: cmpf_oeq:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oeq float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_une() #0 {
entry:
; CHECK-LABEL: cmpf_une:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp une float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_oge() #0 {
entry:
; CHECK-LABEL: cmpf_oge:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp oge float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_olt() #0 {
entry:
; CHECK-LABEL: cmpf_olt:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp olt float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_ole() #0 {
entry:
; CHECK-LABEL: cmpf_ole:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ole float %0, 123.0

  ret i1 %1
}

define i1 @cmpf_ogt() #0 {
entry:
; CHECK-LABEL: cmpf_ogt:

; CHECK: call #__mspabi_cmpf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fcmp ogt float %0, 123.0

  ret i1 %1
}

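; Floating-point arithmetic.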
define double @addd() #0 {
entry:
; CHECK-LABEL: addd:

; CHECK: call #__mspabi_addd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fadd double %0, 123.0

  ret double %1
}

define float @addf() #0 {
entry:
; CHECK-LABEL: addf:

; CHECK: call #__mspabi_addf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fadd float %0, 123.0

  ret float %1
}

define double @divd() #0 {
entry:
; CHECK-LABEL: divd:

; CHECK: call #__mspabi_divd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fdiv double %0, 123.0

  ret double %1
}

define float @divf() #0 {
entry:
; CHECK-LABEL: divf:

; CHECK: call #__mspabi_divf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fdiv float %0, 123.0

  ret float %1
}

define double @mpyd() #0 {
entry:
; CHECK-LABEL: mpyd:

; CHECK: call #__mspabi_mpyd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fmul double %0, 123.0

  ret double %1
}

define float @mpyf() #0 {
entry:
; CHECK-LABEL: mpyf:

; CHECK: call #__mspabi_mpyf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fmul float %0, 123.0

  ret float %1
}

define double @subd() #0 {
entry:
; CHECK-LABEL: subd:

; CHECK: call #__mspabi_subd
  %0 = load volatile double, double* @g_double, align 8
  %1 = fsub double %0, %0

  ret double %1
}

define float @subf() #0 {
entry:
; CHECK-LABEL: subf:

; CHECK: call #__mspabi_subf
  %0 = load volatile float, float* @g_float, align 8
  %1 = fsub float %0, %0

  ret float %1
}

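; Signed and unsigned integer division.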
define i16 @divi() #0 {
entry:
; CHECK-LABEL: divi:

; CHECK: call #__mspabi_divi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = sdiv i16 %0, %1

  ret i16 %2
}

define i32 @divli() #0 {
entry:
; CHECK-LABEL: divli:

; CHECK: call #__mspabi_divli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = sdiv i32 %0, %1

  ret i32 %2
}

define i64 @divlli() #0 {
entry:
; CHECK-LABEL: divlli:

; CHECK: call #__mspabi_divlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = sdiv i64 %0, %1

  ret i64 %2
}

define i16 @divu() #0 {
entry:
; CHECK-LABEL: divu:

; CHECK: call #__mspabi_divu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = udiv i16 %0, %1

  ret i16 %2
}

define i32 @divul() #0 {
entry:
; CHECK-LABEL: divul:

; CHECK: call #__mspabi_divul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = udiv i32 %0, %1

  ret i32 %2
}

define i64 @divull() #0 {
entry:
; CHECK-LABEL: divull:

; CHECK: call #__mspabi_divull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = udiv i64 %0, %1

  ret i64 %2
}

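; Signed and unsigned integer remainder.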
define i16 @remi() #0 {
entry:
; CHECK-LABEL: remi:

; CHECK: call #__mspabi_remi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = srem i16 %0, %1

  ret i16 %2
}

define i32 @remli() #0 {
entry:
; CHECK-LABEL: remli:

; CHECK: call #__mspabi_remli
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = srem i32 %0, %1

  ret i32 %2
}

define i64 @remlli() #0 {
entry:
; CHECK-LABEL: remlli:

; CHECK: call #__mspabi_remlli
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = srem i64 %0, %1

  ret i64 %2
}

define i16 @remu() #0 {
entry:
; CHECK-LABEL: remu:

; CHECK: call #__mspabi_remu
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = load volatile i16, i16* @g_i16, align 8
  %2 = urem i16 %0, %1

  ret i16 %2
}

define i32 @remul() #0 {
entry:
; CHECK-LABEL: remul:

; CHECK: call #__mspabi_remul
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = load volatile i32, i32* @g_i32, align 8
  %2 = urem i32 %0, %1

  ret i32 %2
}

define i64 @remull() #0 {
entry:
; CHECK-LABEL: remull:

; CHECK: call #__mspabi_remull
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = load volatile i64, i64* @g_i64, align 8
  %2 = urem i64 %0, %1

  ret i64 %2
}

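; Integer multiplication. Note that the EABI names the 32-bit helper
; __mspabi_mpyl and the 64-bit helper __mspabi_mpyll, with no trailing 'i'.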
define i16 @mpyi() #0 {
entry:
; CHECK-LABEL: mpyi:

; CHECK: call #__mspabi_mpyi
  %0 = load volatile i16, i16* @g_i16, align 8
  %1 = mul i16 %0, %0

  ret i16 %1
}

define i32 @mpyli() #0 {
entry:
; CHECK-LABEL: mpyli:

; CHECK: call #__mspabi_mpyl
  %0 = load volatile i32, i32* @g_i32, align 8
  %1 = mul i32 %0, %0

  ret i32 %1
}

define i64 @mpylli() #0 {
entry:
; CHECK-LABEL: mpylli:

; CHECK: call #__mspabi_mpyll
  %0 = load volatile i64, i64* @g_i64, align 8
  %1 = mul i64 %0, %0

  ret i64 %1
}

@i = external global i32, align 2

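; 32-bit shifts by a non-constant amount.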
define i32 @srll() #0 {
entry:
; CHECK-LABEL: srll:
; CHECK: call #__mspabi_srll
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shr = lshr i32 %0, %1

  ret i32 %shr
}

define i32 @sral() #0 {
entry:
; CHECK-LABEL: sral:
; CHECK: call #__mspabi_sral
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shr = ashr i32 %0, %1

  ret i32 %shr
}

define i32 @slll() #0 {
entry:
; CHECK-LABEL: slll:
; CHECK: call #__mspabi_slll
  %0 = load volatile i32, i32* @g_i32, align 2
  %1 = load volatile i32, i32* @i, align 2
  %shl = shl i32 %0, %1

  ret i32 %shl
}

attributes #0 = { nounwind }