; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IF %s

; For RV64F, fcvt.l.s is semantically equivalent to fcvt.w.s in this case
; because fptosi will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_w_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    lui a1, %hi(.LCPI1_0)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI1_0)(a1)
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fle.s a0, ft1, ft0
; RV32IF-NEXT:    lui a1, 524288
; RV32IF-NEXT:    bnez a0, .LBB1_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    lui a0, 524288
; RV32IF-NEXT:    j .LBB1_3
; RV32IF-NEXT:  .LBB1_2:
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:  .LBB1_3: # %start
; RV32IF-NEXT:    lui a2, %hi(.LCPI1_1)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI1_1)(a2)
; RV32IF-NEXT:    flt.s a2, ft1, ft0
; RV32IF-NEXT:    bnez a2, .LBB1_6
; RV32IF-NEXT:  # %bb.4: # %start
; RV32IF-NEXT:    feq.s a1, ft0, ft0
; RV32IF-NEXT:    beqz a1, .LBB1_7
; RV32IF-NEXT:  .LBB1_5: # %start
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB1_6:
; RV32IF-NEXT:    addi a0, a1, -1
; RV32IF-NEXT:    feq.s a1, ft0, ft0
; RV32IF-NEXT:    bnez a1, .LBB1_5
; RV32IF-NEXT:  .LBB1_7: # %start
; RV32IF-NEXT:    mv a0, zero
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a1, %hi(.LCPI1_0)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI1_0)(a1)
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fle.s a0, ft1, ft0
; RV64IF-NEXT:    lui a1, 524288
; RV64IF-NEXT:    bnez a0, .LBB1_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    lui a0, 524288
; RV64IF-NEXT:    j .LBB1_3
; RV64IF-NEXT:  .LBB1_2:
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:  .LBB1_3: # %start
; RV64IF-NEXT:    lui a2, %hi(.LCPI1_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI1_1)(a2)
; RV64IF-NEXT:    flt.s a2, ft1, ft0
; RV64IF-NEXT:    bnez a2, .LBB1_6
; RV64IF-NEXT:  # %bb.4: # %start
; RV64IF-NEXT:    feq.s a1, ft0, ft0
; RV64IF-NEXT:    beqz a1, .LBB1_7
; RV64IF-NEXT:  .LBB1_5: # %start
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB1_6:
; RV64IF-NEXT:    addiw a0, a1, -1
; RV64IF-NEXT:    feq.s a1, ft0, ft0
; RV64IF-NEXT:    bnez a1, .LBB1_5
; RV64IF-NEXT:  .LBB1_7: # %start
; RV64IF-NEXT:    mv a0, zero
; RV64IF-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptosi.sat.i32.f32(float)

; For RV64F, fcvt.lu.s is semantically equivalent to fcvt.wu.s in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
; FIXME: We should not have an fcvt.wu.s and an fcvt.lu.s.
define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
; RV32IF-LABEL: fcvt_wu_s_multiple_use:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
; RV32IF-NEXT:    addi a0, zero, 1
; RV32IF-NEXT:    beqz a1, .LBB3_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB3_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_multiple_use:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.wu.s a1, ft0, rtz
; RV64IF-NEXT:    addi a0, zero, 1
; RV64IF-NEXT:    beqz a1, .LBB3_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB3_2:
; RV64IF-NEXT:    ret
  %a = fptoui float %x to i32
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fmv.w.x ft1, zero
; RV32IF-NEXT:    fle.s a0, ft1, ft0
; RV32IF-NEXT:    bnez a0, .LBB4_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    mv a1, zero
; RV32IF-NEXT:    j .LBB4_3
; RV32IF-NEXT:  .LBB4_2:
; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
; RV32IF-NEXT:  .LBB4_3: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI4_0)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI4_0)(a0)
; RV32IF-NEXT:    flt.s a2, ft1, ft0
; RV32IF-NEXT:    addi a0, zero, -1
; RV32IF-NEXT:    bnez a2, .LBB4_5
; RV32IF-NEXT:  # %bb.4: # %start
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB4_5: # %start
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fle.s a0, ft1, ft0
; RV64IF-NEXT:    bnez a0, .LBB4_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    mv a0, zero
; RV64IF-NEXT:    j .LBB4_3
; RV64IF-NEXT:  .LBB4_2:
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:  .LBB4_3: # %start
; RV64IF-NEXT:    lui a1, %hi(.LCPI4_0)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI4_0)(a1)
; RV64IF-NEXT:    flt.s a1, ft1, ft0
; RV64IF-NEXT:    beqz a1, .LBB4_5
; RV64IF-NEXT:  # %bb.4:
; RV64IF-NEXT:    addi a0, zero, -1
; RV64IF-NEXT:    srli a0, a0, 32
; RV64IF-NEXT:  .LBB4_5: # %start
; RV64IF-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptoui.sat.i32.f32(float)

define i32 @fmv_x_w(float %a, float %b) nounwind {
; RV32IF-LABEL: fmv_x_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a1
; RV32IF-NEXT:    fmv.w.x ft1, a0
; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_x_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a1
; RV64IF-NEXT:    fmv.w.x ft1, a0
; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
; Ensure fmv.x.w is generated even for a soft float calling convention
  %1 = fadd float %a, %b
  %2 = bitcast float %1 to i32
  ret i32 %2
}

define float @fcvt_s_w(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_w_load(i32* %p) nounwind {
; RV32IF-LABEL: fcvt_s_w_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lw a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %a = load i32, i32* %p
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_wu(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_wu_load(i32* %p) nounwind {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %a = load i32, i32* %p
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; RV32IF-LABEL: fmv_w_x:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fmv.w.x ft1, a1
; RV32IF-NEXT:    fadd.s ft0, ft0, ft1
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_w_x:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fmv.w.x ft1, a1
; RV64IF-NEXT:    fadd.s ft0, ft0, ft1
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
; Ensure fmv.w.x is generated even for a soft float calling convention
  %1 = bitcast i32 %a to float
  %2 = bitcast i32 %b to float
  %3 = fadd float %1, %2
  ret float %3
}

define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    lui a1, %hi(.LCPI12_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a1)
; RV32IF-NEXT:    fmv.w.x ft1, a0
; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fle.s s0, ft0, ft1
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    mv a2, a0
; RV32IF-NEXT:    bnez s0, .LBB12_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    mv a2, zero
; RV32IF-NEXT:  .LBB12_2: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flt.s a3, ft0, ft1
; RV32IF-NEXT:    fmv.s ft0, ft1
; RV32IF-NEXT:    addi a0, zero, -1
; RV32IF-NEXT:    beqz a3, .LBB12_9
; RV32IF-NEXT:  # %bb.3: # %start
; RV32IF-NEXT:    feq.s a2, ft0, ft0
; RV32IF-NEXT:    beqz a2, .LBB12_10
; RV32IF-NEXT:  .LBB12_4: # %start
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    beqz s0, .LBB12_11
; RV32IF-NEXT:  .LBB12_5: # %start
; RV32IF-NEXT:    bnez a3, .LBB12_12
; RV32IF-NEXT:  .LBB12_6: # %start
; RV32IF-NEXT:    bnez a2, .LBB12_8
; RV32IF-NEXT:  .LBB12_7: # %start
; RV32IF-NEXT:    mv a1, zero
; RV32IF-NEXT:  .LBB12_8: # %start
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB12_9: # %start
; RV32IF-NEXT:    mv a0, a2
; RV32IF-NEXT:    feq.s a2, ft0, ft0
; RV32IF-NEXT:    bnez a2, .LBB12_4
; RV32IF-NEXT:  .LBB12_10: # %start
; RV32IF-NEXT:    mv a0, zero
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    bnez s0, .LBB12_5
; RV32IF-NEXT:  .LBB12_11: # %start
; RV32IF-NEXT:    lui a1, 524288
; RV32IF-NEXT:    beqz a3, .LBB12_6
; RV32IF-NEXT:  .LBB12_12:
; RV32IF-NEXT:    addi a1, a4, -1
; RV32IF-NEXT:    beqz a2, .LBB12_7
; RV32IF-NEXT:    j .LBB12_8
;
; RV64IF-LABEL: fcvt_l_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a1, %hi(.LCPI12_0)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI12_0)(a1)
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fle.s a0, ft1, ft0
; RV64IF-NEXT:    addi a1, zero, -1
; RV64IF-NEXT:    bnez a0, .LBB12_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    slli a0, a1, 63
; RV64IF-NEXT:    j .LBB12_3
; RV64IF-NEXT:  .LBB12_2:
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:  .LBB12_3: # %start
; RV64IF-NEXT:    lui a2, %hi(.LCPI12_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI12_1)(a2)
; RV64IF-NEXT:    flt.s a2, ft1, ft0
; RV64IF-NEXT:    bnez a2, .LBB12_6
; RV64IF-NEXT:  # %bb.4: # %start
; RV64IF-NEXT:    feq.s a1, ft0, ft0
; RV64IF-NEXT:    beqz a1, .LBB12_7
; RV64IF-NEXT:  .LBB12_5: # %start
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB12_6:
; RV64IF-NEXT:    srli a0, a1, 1
; RV64IF-NEXT:    feq.s a1, ft0, ft0
; RV64IF-NEXT:    bnez a1, .LBB12_5
; RV64IF-NEXT:  .LBB12_7: # %start
; RV64IF-NEXT:    mv a0, zero
; RV64IF-NEXT:    ret
start:
  %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a)
  ret i64 %0
}
declare i64 @llvm.fptosi.sat.i64.f32(float)

define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fmv.w.x ft1, a0
; RV32IF-NEXT:    fmv.w.x ft0, zero
; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fle.s s0, ft0, ft1
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    mv a3, a0
; RV32IF-NEXT:    bnez s0, .LBB14_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    mv a3, zero
; RV32IF-NEXT:  .LBB14_2: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flt.s a4, ft0, ft1
; RV32IF-NEXT:    addi a2, zero, -1
; RV32IF-NEXT:    addi a0, zero, -1
; RV32IF-NEXT:    beqz a4, .LBB14_7
; RV32IF-NEXT:  # %bb.3: # %start
; RV32IF-NEXT:    beqz s0, .LBB14_8
; RV32IF-NEXT:  .LBB14_4: # %start
; RV32IF-NEXT:    bnez a4, .LBB14_6
; RV32IF-NEXT:  .LBB14_5: # %start
; RV32IF-NEXT:    mv a2, a1
; RV32IF-NEXT:  .LBB14_6: # %start
; RV32IF-NEXT:    mv a1, a2
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB14_7: # %start
; RV32IF-NEXT:    mv a0, a3
; RV32IF-NEXT:    bnez s0, .LBB14_4
; RV32IF-NEXT:  .LBB14_8: # %start
; RV32IF-NEXT:    mv a1, zero
; RV32IF-NEXT:    beqz a4, .LBB14_5
; RV32IF-NEXT:    j .LBB14_6
;
; RV64IF-LABEL: fcvt_lu_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fle.s a0, ft1, ft0
; RV64IF-NEXT:    bnez a0, .LBB14_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    mv a1, zero
; RV64IF-NEXT:    j .LBB14_3
; RV64IF-NEXT:  .LBB14_2:
; RV64IF-NEXT:    fcvt.lu.s a1, ft0, rtz
; RV64IF-NEXT:  .LBB14_3: # %start
; RV64IF-NEXT:    lui a0, %hi(.LCPI14_0)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI14_0)(a0)
; RV64IF-NEXT:    flt.s a2, ft1, ft0
; RV64IF-NEXT:    addi a0, zero, -1
; RV64IF-NEXT:    bnez a2, .LBB14_5
; RV64IF-NEXT:  # %bb.4: # %start
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB14_5: # %start
; RV64IF-NEXT:    ret
start:
  %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a)
  ret i64 %0
}
declare i64 @llvm.fptoui.sat.i64.f32(float)

define float @fcvt_s_l(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}

define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}
