1 /* Utility routines for data type conversion for GCC.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "target.h"
28 #include "tree.h"
29 #include "diagnostic-core.h"
30 #include "fold-const.h"
31 #include "stor-layout.h"
32 #include "convert.h"
33 #include "langhooks.h"
34 #include "builtins.h"
35 #include "ubsan.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38 #include "asan.h"
39
40 #define maybe_fold_build1_loc(FOLD_P, LOC, CODE, TYPE, EXPR) \
41 ((FOLD_P) ? fold_build1_loc (LOC, CODE, TYPE, EXPR) \
42 : build1_loc (LOC, CODE, TYPE, EXPR))
43 #define maybe_fold_build2_loc(FOLD_P, LOC, CODE, TYPE, EXPR1, EXPR2) \
44 ((FOLD_P) ? fold_build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2) \
45 : build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2))
46
47 /* Convert EXPR to some pointer or reference type TYPE.
48 EXPR must be pointer, reference, integer, enumeral, or literal zero;
49 in other cases error is called. If FOLD_P is true, try to fold the
50 expression. */
51
52 static tree
convert_to_pointer_1(tree type,tree expr,bool fold_p)53 convert_to_pointer_1 (tree type, tree expr, bool fold_p)
54 {
55 location_t loc = EXPR_LOCATION (expr);
56 if (TREE_TYPE (expr) == type)
57 return expr;
58
59 switch (TREE_CODE (TREE_TYPE (expr)))
60 {
61 case POINTER_TYPE:
62 case REFERENCE_TYPE:
63 {
64 /* If the pointers point to different address spaces, conversion needs
65 to be done via a ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR. */
66 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
67 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
68
69 if (to_as == from_as)
70 return maybe_fold_build1_loc (fold_p, loc, NOP_EXPR, type, expr);
71 else
72 return maybe_fold_build1_loc (fold_p, loc, ADDR_SPACE_CONVERT_EXPR,
73 type, expr);
74 }
75
76 case INTEGER_TYPE:
77 case ENUMERAL_TYPE:
78 case BOOLEAN_TYPE:
79 {
80 /* If the input precision differs from the target pointer type
81 precision, first convert the input expression to an integer type of
82 the target precision. Some targets, e.g. VMS, need several pointer
83 sizes to coexist so the latter isn't necessarily POINTER_SIZE. */
84 unsigned int pprec = TYPE_PRECISION (type);
85 unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
86
87 if (eprec != pprec)
88 expr
89 = maybe_fold_build1_loc (fold_p, loc, NOP_EXPR,
90 lang_hooks.types.type_for_size (pprec, 0),
91 expr);
92 }
93 return maybe_fold_build1_loc (fold_p, loc, CONVERT_EXPR, type, expr);
94
95 default:
96 error ("cannot convert to a pointer type");
97 return convert_to_pointer_1 (type, integer_zero_node, fold_p);
98 }
99 }
100
101 /* A wrapper around convert_to_pointer_1 that always folds the
102 expression. */
103
104 tree
convert_to_pointer(tree type,tree expr)105 convert_to_pointer (tree type, tree expr)
106 {
107 return convert_to_pointer_1 (type, expr, true);
108 }
109
110 /* A wrapper around convert_to_pointer_1 that only folds the
111 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
112
113 tree
convert_to_pointer_maybe_fold(tree type,tree expr,bool dofold)114 convert_to_pointer_maybe_fold (tree type, tree expr, bool dofold)
115 {
116 return convert_to_pointer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
117 }
118
119 /* Convert EXPR to some floating-point type TYPE.
120
121 EXPR must be float, fixed-point, integer, or enumeral;
122 in other cases error is called. If FOLD_P is true, try to fold
123 the expression. */
124
125 static tree
convert_to_real_1(tree type,tree expr,bool fold_p)126 convert_to_real_1 (tree type, tree expr, bool fold_p)
127 {
128 enum built_in_function fcode = builtin_mathfn_code (expr);
129 tree itype = TREE_TYPE (expr);
130 location_t loc = EXPR_LOCATION (expr);
131
132 if (TREE_CODE (expr) == COMPOUND_EXPR)
133 {
134 tree t = convert_to_real_1 (type, TREE_OPERAND (expr, 1), fold_p);
135 if (t == TREE_OPERAND (expr, 1))
136 return expr;
137 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
138 TREE_OPERAND (expr, 0), t);
139 }
140
141 /* Disable until we figure out how to decide whether the functions are
142 present in runtime. */
143 /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
144 if (optimize
145 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
146 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
147 {
148 switch (fcode)
149 {
150 #define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
151 CASE_MATHFN (COSH)
152 CASE_MATHFN (EXP)
153 CASE_MATHFN (EXP10)
154 CASE_MATHFN (EXP2)
155 CASE_MATHFN (EXPM1)
156 CASE_MATHFN (GAMMA)
157 CASE_MATHFN (J0)
158 CASE_MATHFN (J1)
159 CASE_MATHFN (LGAMMA)
160 CASE_MATHFN (POW10)
161 CASE_MATHFN (SINH)
162 CASE_MATHFN (TGAMMA)
163 CASE_MATHFN (Y0)
164 CASE_MATHFN (Y1)
165 /* The above functions may set errno differently with float
166 input or output so this transformation is not safe with
167 -fmath-errno. */
168 if (flag_errno_math)
169 break;
170 gcc_fallthrough ();
171 CASE_MATHFN (ACOS)
172 CASE_MATHFN (ACOSH)
173 CASE_MATHFN (ASIN)
174 CASE_MATHFN (ASINH)
175 CASE_MATHFN (ATAN)
176 CASE_MATHFN (ATANH)
177 CASE_MATHFN (CBRT)
178 CASE_MATHFN (COS)
179 CASE_MATHFN (ERF)
180 CASE_MATHFN (ERFC)
181 CASE_MATHFN (LOG)
182 CASE_MATHFN (LOG10)
183 CASE_MATHFN (LOG2)
184 CASE_MATHFN (LOG1P)
185 CASE_MATHFN (SIN)
186 CASE_MATHFN (TAN)
187 CASE_MATHFN (TANH)
188 /* The above functions are not safe to do this conversion. */
189 if (!flag_unsafe_math_optimizations)
190 break;
191 gcc_fallthrough ();
192 CASE_MATHFN (SQRT)
193 CASE_MATHFN (FABS)
194 CASE_MATHFN (LOGB)
195 #undef CASE_MATHFN
196 if (call_expr_nargs (expr) != 1
197 || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (CALL_EXPR_ARG (expr, 0))))
198 break;
199 {
200 tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
201 tree newtype = type;
202
203 /* We have (outertype)sqrt((innertype)x). Choose the wider mode
204 from the both as the safe type for operation. */
205 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
206 newtype = TREE_TYPE (arg0);
207
208 /* We consider to convert
209
210 (T1) sqrtT2 ((T2) exprT3)
211 to
212 (T1) sqrtT4 ((T4) exprT3)
213
214 , where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
215 and T4 is NEWTYPE. All those types are of floating point types.
216 T4 (NEWTYPE) should be narrower than T2 (ITYPE). This conversion
217 is safe only if P1 >= P2*2+2, where P1 and P2 are precisions of
218 T2 and T4. See the following URL for a reference:
219 http://stackoverflow.com/questions/9235456/determining-
220 floating-point-square-root
221 */
222 if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
223 && !flag_unsafe_math_optimizations)
224 {
225 /* The following conversion is unsafe even the precision condition
226 below is satisfied:
227
228 (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
229 */
230 if (TYPE_MODE (type) != TYPE_MODE (newtype))
231 break;
232
233 int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
234 int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
235 if (p1 < p2 * 2 + 2)
236 break;
237 }
238
239 /* Be careful about integer to fp conversions.
240 These may overflow still. */
241 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
242 && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
243 && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
244 || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
245 {
246 tree fn = mathfn_built_in (newtype, fcode);
247 if (fn)
248 {
249 tree arg = convert_to_real_1 (newtype, arg0, fold_p);
250 expr = build_call_expr (fn, 1, arg);
251 if (newtype == type)
252 return expr;
253 }
254 }
255 }
256 default:
257 break;
258 }
259 }
260
261 /* Propagate the cast into the operation. */
262 if (itype != type && FLOAT_TYPE_P (type))
263 switch (TREE_CODE (expr))
264 {
265 /* Convert (float)-x into -(float)x. This is safe for
266 round-to-nearest rounding mode when the inner type is float. */
267 case ABS_EXPR:
268 case NEGATE_EXPR:
269 if (!flag_rounding_math
270 && FLOAT_TYPE_P (itype)
271 && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
272 {
273 tree arg = convert_to_real_1 (type, TREE_OPERAND (expr, 0),
274 fold_p);
275 return build1 (TREE_CODE (expr), type, arg);
276 }
277 break;
278 /* Convert (outertype)((innertype0)a+(innertype1)b)
279 into ((newtype)a+(newtype)b) where newtype
280 is the widest mode from all of these. */
281 case PLUS_EXPR:
282 case MINUS_EXPR:
283 case MULT_EXPR:
284 case RDIV_EXPR:
285 {
286 tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
287 tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));
288
289 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
290 && FLOAT_TYPE_P (TREE_TYPE (arg1))
291 && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
292 {
293 tree newtype = type;
294
295 if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
296 || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
297 || TYPE_MODE (type) == SDmode)
298 newtype = dfloat32_type_node;
299 if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
300 || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
301 || TYPE_MODE (type) == DDmode)
302 newtype = dfloat64_type_node;
303 if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
304 || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
305 || TYPE_MODE (type) == TDmode)
306 newtype = dfloat128_type_node;
307 if (newtype == dfloat32_type_node
308 || newtype == dfloat64_type_node
309 || newtype == dfloat128_type_node)
310 {
311 expr = build2 (TREE_CODE (expr), newtype,
312 convert_to_real_1 (newtype, arg0,
313 fold_p),
314 convert_to_real_1 (newtype, arg1,
315 fold_p));
316 if (newtype == type)
317 return expr;
318 break;
319 }
320
321 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
322 newtype = TREE_TYPE (arg0);
323 if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
324 newtype = TREE_TYPE (arg1);
325 /* Sometimes this transformation is safe (cannot
326 change results through affecting double rounding
327 cases) and sometimes it is not. If NEWTYPE is
328 wider than TYPE, e.g. (float)((long double)double
329 + (long double)double) converted to
330 (float)(double + double), the transformation is
331 unsafe regardless of the details of the types
332 involved; double rounding can arise if the result
333 of NEWTYPE arithmetic is a NEWTYPE value half way
334 between two representable TYPE values but the
335 exact value is sufficiently different (in the
336 right direction) for this difference to be
337 visible in ITYPE arithmetic. If NEWTYPE is the
338 same as TYPE, however, the transformation may be
339 safe depending on the types involved: it is safe
340 if the ITYPE has strictly more than twice as many
341 mantissa bits as TYPE, can represent infinities
342 and NaNs if the TYPE can, and has sufficient
343 exponent range for the product or ratio of two
344 values representable in the TYPE to be within the
345 range of normal values of ITYPE. */
346 if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
347 && (flag_unsafe_math_optimizations
348 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
349 && real_can_shorten_arithmetic (TYPE_MODE (itype),
350 TYPE_MODE (type))
351 && !excess_precision_type (newtype))))
352 {
353 expr = build2 (TREE_CODE (expr), newtype,
354 convert_to_real_1 (newtype, arg0,
355 fold_p),
356 convert_to_real_1 (newtype, arg1,
357 fold_p));
358 if (newtype == type)
359 return expr;
360 }
361 }
362 }
363 break;
364 default:
365 break;
366 }
367
368 switch (TREE_CODE (TREE_TYPE (expr)))
369 {
370 case REAL_TYPE:
371 /* Ignore the conversion if we don't need to store intermediate
372 results and neither type is a decimal float. */
373 return build1_loc (loc,
374 (flag_float_store
375 || DECIMAL_FLOAT_TYPE_P (type)
376 || DECIMAL_FLOAT_TYPE_P (itype))
377 ? CONVERT_EXPR : NOP_EXPR, type, expr);
378
379 case INTEGER_TYPE:
380 case ENUMERAL_TYPE:
381 case BOOLEAN_TYPE:
382 return build1 (FLOAT_EXPR, type, expr);
383
384 case FIXED_POINT_TYPE:
385 return build1 (FIXED_CONVERT_EXPR, type, expr);
386
387 case COMPLEX_TYPE:
388 return convert (type,
389 maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
390 TREE_TYPE (TREE_TYPE (expr)),
391 expr));
392
393 case POINTER_TYPE:
394 case REFERENCE_TYPE:
395 error ("pointer value used where a floating point value was expected");
396 return convert_to_real_1 (type, integer_zero_node, fold_p);
397
398 default:
399 error ("aggregate value used where a float was expected");
400 return convert_to_real_1 (type, integer_zero_node, fold_p);
401 }
402 }
403
404 /* A wrapper around convert_to_real_1 that always folds the
405 expression. */
406
407 tree
convert_to_real(tree type,tree expr)408 convert_to_real (tree type, tree expr)
409 {
410 return convert_to_real_1 (type, expr, true);
411 }
412
413 /* A wrapper around convert_to_real_1 that only folds the
414 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
415
416 tree
convert_to_real_maybe_fold(tree type,tree expr,bool dofold)417 convert_to_real_maybe_fold (tree type, tree expr, bool dofold)
418 {
419 return convert_to_real_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
420 }
421
422 /* Try to narrow EX_FORM ARG0 ARG1 in narrowed arg types producing a
423 result in TYPE. */
424
425 static tree
do_narrow(location_t loc,enum tree_code ex_form,tree type,tree arg0,tree arg1,tree expr,unsigned inprec,unsigned outprec,bool dofold)426 do_narrow (location_t loc,
427 enum tree_code ex_form, tree type, tree arg0, tree arg1,
428 tree expr, unsigned inprec, unsigned outprec, bool dofold)
429 {
430 /* Do the arithmetic in type TYPEX,
431 then convert result to TYPE. */
432 tree typex = type;
433
434 /* Can't do arithmetic in enumeral types
435 so use an integer type that will hold the values. */
436 if (TREE_CODE (typex) == ENUMERAL_TYPE)
437 typex = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
438 TYPE_UNSIGNED (typex));
439
440 /* The type demotion below might cause doing unsigned arithmetic
441 instead of signed, and thus hide overflow bugs. */
442 if ((ex_form == PLUS_EXPR || ex_form == MINUS_EXPR)
443 && !TYPE_UNSIGNED (typex)
444 && sanitize_flags_p (SANITIZE_SI_OVERFLOW))
445 return NULL_TREE;
446
447 /* But now perhaps TYPEX is as wide as INPREC.
448 In that case, do nothing special here.
449 (Otherwise would recurse infinitely in convert. */
450 if (TYPE_PRECISION (typex) != inprec)
451 {
452 /* Don't do unsigned arithmetic where signed was wanted,
453 or vice versa.
454 Exception: if both of the original operands were
455 unsigned then we can safely do the work as unsigned.
456 Exception: shift operations take their type solely
457 from the first argument.
458 Exception: the LSHIFT_EXPR case above requires that
459 we perform this operation unsigned lest we produce
460 signed-overflow undefinedness.
461 And we may need to do it as unsigned
462 if we truncate to the original size. */
463 if (TYPE_UNSIGNED (TREE_TYPE (expr))
464 || (TYPE_UNSIGNED (TREE_TYPE (arg0))
465 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
466 || ex_form == LSHIFT_EXPR
467 || ex_form == RSHIFT_EXPR
468 || ex_form == LROTATE_EXPR
469 || ex_form == RROTATE_EXPR))
470 || ex_form == LSHIFT_EXPR
471 /* If we have !flag_wrapv, and either ARG0 or
472 ARG1 is of a signed type, we have to do
473 PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
474 type in case the operation in outprec precision
475 could overflow. Otherwise, we would introduce
476 signed-overflow undefinedness. */
477 || ((!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
478 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0)))
479 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
480 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
481 && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
482 > outprec)
483 || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
484 > outprec))
485 && (ex_form == PLUS_EXPR
486 || ex_form == MINUS_EXPR
487 || ex_form == MULT_EXPR)))
488 {
489 if (!TYPE_UNSIGNED (typex))
490 typex = unsigned_type_for (typex);
491 }
492 else
493 {
494 if (TYPE_UNSIGNED (typex))
495 typex = signed_type_for (typex);
496 }
497 /* We should do away with all this once we have a proper
498 type promotion/demotion pass, see PR45397. */
499 expr = maybe_fold_build2_loc (dofold, loc, ex_form, typex,
500 convert (typex, arg0),
501 convert (typex, arg1));
502 return convert (type, expr);
503 }
504
505 return NULL_TREE;
506 }
507
508 /* Convert EXPR to some integer (or enum) type TYPE.
509
510 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
511 fixed-point or vector; in other cases error is called.
512
513 If DOFOLD is TRUE, we try to simplify newly-created patterns by folding.
514
515 The result of this is always supposed to be a newly created tree node
516 not in use in any existing structure. */
517
518 static tree
convert_to_integer_1(tree type,tree expr,bool dofold)519 convert_to_integer_1 (tree type, tree expr, bool dofold)
520 {
521 enum tree_code ex_form = TREE_CODE (expr);
522 tree intype = TREE_TYPE (expr);
523 unsigned int inprec = element_precision (intype);
524 unsigned int outprec = element_precision (type);
525 location_t loc = EXPR_LOCATION (expr);
526
527 /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
528 be. Consider `enum E = { a, b = (enum E) 3 };'. */
529 if (!COMPLETE_TYPE_P (type))
530 {
531 error ("conversion to incomplete type");
532 return error_mark_node;
533 }
534
535 if (ex_form == COMPOUND_EXPR)
536 {
537 tree t = convert_to_integer_1 (type, TREE_OPERAND (expr, 1), dofold);
538 if (t == TREE_OPERAND (expr, 1))
539 return expr;
540 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
541 TREE_OPERAND (expr, 0), t);
542 }
543
544 /* Convert e.g. (long)round(d) -> lround(d). */
545 /* If we're converting to char, we may encounter differing behavior
546 between converting from double->char vs double->long->char.
547 We're in "undefined" territory but we prefer to be conservative,
548 so only proceed in "unsafe" math mode. */
549 if (optimize
550 && (flag_unsafe_math_optimizations
551 || (long_integer_type_node
552 && outprec >= TYPE_PRECISION (long_integer_type_node))))
553 {
554 tree s_expr = strip_float_extensions (expr);
555 tree s_intype = TREE_TYPE (s_expr);
556 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
557 tree fn = 0;
558
559 switch (fcode)
560 {
561 CASE_FLT_FN (BUILT_IN_CEIL):
562 CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL):
563 /* Only convert in ISO C99 mode. */
564 if (!targetm.libc_has_function (function_c99_misc))
565 break;
566 if (outprec < TYPE_PRECISION (integer_type_node)
567 || (outprec == TYPE_PRECISION (integer_type_node)
568 && !TYPE_UNSIGNED (type)))
569 fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
570 else if (outprec == TYPE_PRECISION (long_integer_type_node)
571 && !TYPE_UNSIGNED (type))
572 fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
573 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
574 && !TYPE_UNSIGNED (type))
575 fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
576 break;
577
578 CASE_FLT_FN (BUILT_IN_FLOOR):
579 CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR):
580 /* Only convert in ISO C99 mode. */
581 if (!targetm.libc_has_function (function_c99_misc))
582 break;
583 if (outprec < TYPE_PRECISION (integer_type_node)
584 || (outprec == TYPE_PRECISION (integer_type_node)
585 && !TYPE_UNSIGNED (type)))
586 fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
587 else if (outprec == TYPE_PRECISION (long_integer_type_node)
588 && !TYPE_UNSIGNED (type))
589 fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
590 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
591 && !TYPE_UNSIGNED (type))
592 fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
593 break;
594
595 CASE_FLT_FN (BUILT_IN_ROUND):
596 CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND):
597 /* Only convert in ISO C99 mode and with -fno-math-errno. */
598 if (!targetm.libc_has_function (function_c99_misc)
599 || flag_errno_math)
600 break;
601 if (outprec < TYPE_PRECISION (integer_type_node)
602 || (outprec == TYPE_PRECISION (integer_type_node)
603 && !TYPE_UNSIGNED (type)))
604 fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
605 else if (outprec == TYPE_PRECISION (long_integer_type_node)
606 && !TYPE_UNSIGNED (type))
607 fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
608 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
609 && !TYPE_UNSIGNED (type))
610 fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
611 break;
612
613 CASE_FLT_FN (BUILT_IN_NEARBYINT):
614 CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT):
615 /* Only convert nearbyint* if we can ignore math exceptions. */
616 if (flag_trapping_math)
617 break;
618 gcc_fallthrough ();
619 CASE_FLT_FN (BUILT_IN_RINT):
620 CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT):
621 /* Only convert in ISO C99 mode and with -fno-math-errno. */
622 if (!targetm.libc_has_function (function_c99_misc)
623 || flag_errno_math)
624 break;
625 if (outprec < TYPE_PRECISION (integer_type_node)
626 || (outprec == TYPE_PRECISION (integer_type_node)
627 && !TYPE_UNSIGNED (type)))
628 fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
629 else if (outprec == TYPE_PRECISION (long_integer_type_node)
630 && !TYPE_UNSIGNED (type))
631 fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
632 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
633 && !TYPE_UNSIGNED (type))
634 fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
635 break;
636
637 CASE_FLT_FN (BUILT_IN_TRUNC):
638 CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC):
639 if (call_expr_nargs (s_expr) != 1
640 || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (CALL_EXPR_ARG (s_expr, 0))))
641 break;
642 return convert_to_integer_1 (type, CALL_EXPR_ARG (s_expr, 0),
643 dofold);
644
645 default:
646 break;
647 }
648
649 if (fn
650 && call_expr_nargs (s_expr) == 1
651 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (CALL_EXPR_ARG (s_expr, 0))))
652 {
653 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
654 return convert_to_integer_1 (type, newexpr, dofold);
655 }
656 }
657
658 /* Convert (int)logb(d) -> ilogb(d). */
659 if (optimize
660 && flag_unsafe_math_optimizations
661 && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
662 && integer_type_node
663 && (outprec > TYPE_PRECISION (integer_type_node)
664 || (outprec == TYPE_PRECISION (integer_type_node)
665 && !TYPE_UNSIGNED (type))))
666 {
667 tree s_expr = strip_float_extensions (expr);
668 tree s_intype = TREE_TYPE (s_expr);
669 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
670 tree fn = 0;
671
672 switch (fcode)
673 {
674 CASE_FLT_FN (BUILT_IN_LOGB):
675 fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
676 break;
677
678 default:
679 break;
680 }
681
682 if (fn
683 && call_expr_nargs (s_expr) == 1
684 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (CALL_EXPR_ARG (s_expr, 0))))
685 {
686 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
687 return convert_to_integer_1 (type, newexpr, dofold);
688 }
689 }
690
691 switch (TREE_CODE (intype))
692 {
693 case POINTER_TYPE:
694 case REFERENCE_TYPE:
695 if (integer_zerop (expr) && !TREE_OVERFLOW (expr))
696 return build_int_cst (type, 0);
697
698 /* Convert to an unsigned integer of the correct width first, and from
699 there widen/truncate to the required type. Some targets support the
700 coexistence of multiple valid pointer sizes, so fetch the one we need
701 from the type. */
702 if (!dofold)
703 return build1 (CONVERT_EXPR, type, expr);
704 expr = fold_build1 (CONVERT_EXPR,
705 lang_hooks.types.type_for_size
706 (TYPE_PRECISION (intype), 0),
707 expr);
708 return fold_convert (type, expr);
709
710 case INTEGER_TYPE:
711 case ENUMERAL_TYPE:
712 case BOOLEAN_TYPE:
713 case OFFSET_TYPE:
714 /* If this is a logical operation, which just returns 0 or 1, we can
715 change the type of the expression. */
716
717 if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
718 {
719 expr = copy_node (expr);
720 TREE_TYPE (expr) = type;
721 return expr;
722 }
723
724 /* If we are widening the type, put in an explicit conversion.
725 Similarly if we are not changing the width. After this, we know
726 we are truncating EXPR. */
727
728 else if (outprec >= inprec)
729 {
730 enum tree_code code;
731
732 /* If the precision of the EXPR's type is K bits and the
733 destination mode has more bits, and the sign is changing,
734 it is not safe to use a NOP_EXPR. For example, suppose
735 that EXPR's type is a 3-bit unsigned integer type, the
736 TYPE is a 3-bit signed integer type, and the machine mode
737 for the types is 8-bit QImode. In that case, the
738 conversion necessitates an explicit sign-extension. In
739 the signed-to-unsigned case the high-order bits have to
740 be cleared. */
741 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
742 && !type_has_mode_precision_p (TREE_TYPE (expr)))
743 code = CONVERT_EXPR;
744 else
745 code = NOP_EXPR;
746
747 return maybe_fold_build1_loc (dofold, loc, code, type, expr);
748 }
749
750 /* If TYPE is an enumeral type or a type with a precision less
751 than the number of bits in its mode, do the conversion to the
752 type corresponding to its mode, then do a nop conversion
753 to TYPE. */
754 else if (TREE_CODE (type) == ENUMERAL_TYPE
755 || maybe_ne (outprec, GET_MODE_PRECISION (TYPE_MODE (type))))
756 {
757 expr
758 = convert_to_integer_1 (lang_hooks.types.type_for_mode
759 (TYPE_MODE (type), TYPE_UNSIGNED (type)),
760 expr, dofold);
761 return maybe_fold_build1_loc (dofold, loc, NOP_EXPR, type, expr);
762 }
763
764 /* Here detect when we can distribute the truncation down past some
765 arithmetic. For example, if adding two longs and converting to an
766 int, we can equally well convert both to ints and then add.
767 For the operations handled here, such truncation distribution
768 is always safe.
769 It is desirable in these cases:
770 1) when truncating down to full-word from a larger size
771 2) when truncating takes no work.
772 3) when at least one operand of the arithmetic has been extended
773 (as by C's default conversions). In this case we need two conversions
774 if we do the arithmetic as already requested, so we might as well
775 truncate both and then combine. Perhaps that way we need only one.
776
777 Note that in general we cannot do the arithmetic in a type
778 shorter than the desired result of conversion, even if the operands
779 are both extended from a shorter type, because they might overflow
780 if combined in that type. The exceptions to this--the times when
781 two narrow values can be combined in their narrow type even to
782 make a wider result--are handled by "shorten" in build_binary_op. */
783
784 if (dofold)
785 switch (ex_form)
786 {
787 case RSHIFT_EXPR:
788 /* We can pass truncation down through right shifting
789 when the shift count is a nonpositive constant. */
790 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
791 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
792 goto trunc1;
793 break;
794
795 case LSHIFT_EXPR:
796 /* We can pass truncation down through left shifting
797 when the shift count is a nonnegative constant and
798 the target type is unsigned. */
799 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
800 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
801 && TYPE_UNSIGNED (type)
802 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
803 {
804 /* If shift count is less than the width of the truncated type,
805 really shift. */
806 if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
807 /* In this case, shifting is like multiplication. */
808 goto trunc1;
809 else
810 {
811 /* If it is >= that width, result is zero.
812 Handling this with trunc1 would give the wrong result:
813 (int) ((long long) a << 32) is well defined (as 0)
814 but (int) a << 32 is undefined and would get a
815 warning. */
816
817 tree t = build_int_cst (type, 0);
818
819 /* If the original expression had side-effects, we must
820 preserve it. */
821 if (TREE_SIDE_EFFECTS (expr))
822 return build2 (COMPOUND_EXPR, type, expr, t);
823 else
824 return t;
825 }
826 }
827 break;
828
829 case TRUNC_DIV_EXPR:
830 {
831 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), NULL_TREE);
832 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), NULL_TREE);
833
834 /* Don't distribute unless the output precision is at least as
835 big as the actual inputs and it has the same signedness. */
836 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
837 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
838 /* If signedness of arg0 and arg1 don't match,
839 we can't necessarily find a type to compare them in. */
840 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
841 == TYPE_UNSIGNED (TREE_TYPE (arg1)))
842 /* Do not change the sign of the division. */
843 && (TYPE_UNSIGNED (TREE_TYPE (expr))
844 == TYPE_UNSIGNED (TREE_TYPE (arg0)))
845 /* Either require unsigned division or a division by
846 a constant that is not -1. */
847 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
848 || (TREE_CODE (arg1) == INTEGER_CST
849 && !integer_all_onesp (arg1))))
850 {
851 tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
852 expr, inprec, outprec, dofold);
853 if (tem)
854 return tem;
855 }
856 break;
857 }
858
859 case MAX_EXPR:
860 case MIN_EXPR:
861 case MULT_EXPR:
862 {
863 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
864 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
865
866 /* Don't distribute unless the output precision is at least as
867 big as the actual inputs. Otherwise, the comparison of the
868 truncated values will be wrong. */
869 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
870 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
871 /* If signedness of arg0 and arg1 don't match,
872 we can't necessarily find a type to compare them in. */
873 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
874 == TYPE_UNSIGNED (TREE_TYPE (arg1))))
875 goto trunc1;
876 break;
877 }
878
879 case PLUS_EXPR:
880 case MINUS_EXPR:
881 case BIT_AND_EXPR:
882 case BIT_IOR_EXPR:
883 case BIT_XOR_EXPR:
884 trunc1:
885 {
886 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
887 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
888
889 /* Do not try to narrow operands of pointer subtraction;
890 that will interfere with other folding. */
891 if (ex_form == MINUS_EXPR
892 && CONVERT_EXPR_P (arg0)
893 && CONVERT_EXPR_P (arg1)
894 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
895 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
896 break;
897
898 if (outprec >= BITS_PER_WORD
899 || targetm.truly_noop_truncation (outprec, inprec)
900 || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
901 || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
902 {
903 tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
904 expr, inprec, outprec, dofold);
905 if (tem)
906 return tem;
907 }
908 }
909 break;
910
911 case NEGATE_EXPR:
912 /* Using unsigned arithmetic for signed types may hide overflow
913 bugs. */
914 if (!TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (expr, 0)))
915 && sanitize_flags_p (SANITIZE_SI_OVERFLOW))
916 break;
917 /* Fall through. */
918 case BIT_NOT_EXPR:
919 /* This is not correct for ABS_EXPR,
920 since we must test the sign before truncation. */
921 {
922 /* Do the arithmetic in type TYPEX,
923 then convert result to TYPE. */
924 tree typex = type;
925
926 /* Can't do arithmetic in enumeral types
927 so use an integer type that will hold the values. */
928 if (TREE_CODE (typex) == ENUMERAL_TYPE)
929 typex
930 = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
931 TYPE_UNSIGNED (typex));
932
933 if (!TYPE_UNSIGNED (typex))
934 typex = unsigned_type_for (typex);
935 return convert (type,
936 fold_build1 (ex_form, typex,
937 convert (typex,
938 TREE_OPERAND (expr, 0))));
939 }
940
941 CASE_CONVERT:
942 {
943 tree argtype = TREE_TYPE (TREE_OPERAND (expr, 0));
944 /* Don't introduce a "can't convert between vector values
945 of different size" error. */
946 if (TREE_CODE (argtype) == VECTOR_TYPE
947 && maybe_ne (GET_MODE_SIZE (TYPE_MODE (argtype)),
948 GET_MODE_SIZE (TYPE_MODE (type))))
949 break;
950 }
951 /* If truncating after truncating, might as well do all at once.
952 If truncating after extending, we may get rid of wasted work. */
953 return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
954
955 case COND_EXPR:
956 /* It is sometimes worthwhile to push the narrowing down through
957 the conditional and never loses. A COND_EXPR may have a throw
958 as one operand, which then has void type. Just leave void
959 operands as they are. */
960 return
961 fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
962 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
963 ? TREE_OPERAND (expr, 1)
964 : convert (type, TREE_OPERAND (expr, 1)),
965 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
966 ? TREE_OPERAND (expr, 2)
967 : convert (type, TREE_OPERAND (expr, 2)));
968
969 default:
970 break;
971 }
972
973 /* When parsing long initializers, we might end up with a lot of casts.
974 Shortcut this. */
975 if (TREE_CODE (expr) == INTEGER_CST)
976 return fold_convert (type, expr);
977 return build1 (CONVERT_EXPR, type, expr);
978
979 case REAL_TYPE:
980 if (sanitize_flags_p (SANITIZE_FLOAT_CAST)
981 && current_function_decl != NULL_TREE)
982 {
983 expr = save_expr (expr);
984 tree check = ubsan_instrument_float_cast (loc, type, expr);
985 expr = build1 (FIX_TRUNC_EXPR, type, expr);
986 if (check == NULL_TREE)
987 return expr;
988 return maybe_fold_build2_loc (dofold, loc, COMPOUND_EXPR,
989 TREE_TYPE (expr), check, expr);
990 }
991 else
992 return build1 (FIX_TRUNC_EXPR, type, expr);
993
994 case FIXED_POINT_TYPE:
995 return build1 (FIXED_CONVERT_EXPR, type, expr);
996
997 case COMPLEX_TYPE:
998 expr = maybe_fold_build1_loc (dofold, loc, REALPART_EXPR,
999 TREE_TYPE (TREE_TYPE (expr)), expr);
1000 return convert (type, expr);
1001
1002 case VECTOR_TYPE:
1003 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
1004 {
1005 error ("can%'t convert a vector of type %qT"
1006 " to type %qT which has different size",
1007 TREE_TYPE (expr), type);
1008 return error_mark_node;
1009 }
1010 return build1 (VIEW_CONVERT_EXPR, type, expr);
1011
1012 default:
1013 error ("aggregate value used where an integer was expected");
1014 return convert (type, integer_zero_node);
1015 }
1016 }
1017
1018 /* Convert EXPR to some integer (or enum) type TYPE.
1019
1020 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
1021 fixed-point or vector; in other cases error is called.
1022
1023 The result of this is always supposed to be a newly created tree node
1024 not in use in any existing structure. */
1025
1026 tree
convert_to_integer(tree type,tree expr)1027 convert_to_integer (tree type, tree expr)
1028 {
1029 return convert_to_integer_1 (type, expr, true);
1030 }
1031
/* A wrapper around convert_to_integer_1 that only folds the
   expression if DOFOLD, or if it is CONSTANT_CLASS_P.  */
1034
1035 tree
convert_to_integer_maybe_fold(tree type,tree expr,bool dofold)1036 convert_to_integer_maybe_fold (tree type, tree expr, bool dofold)
1037 {
1038 return convert_to_integer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
1039 }
1040
1041 /* Convert EXPR to the complex type TYPE in the usual ways. If FOLD_P is
1042 true, try to fold the expression. */
1043
1044 static tree
convert_to_complex_1(tree type,tree expr,bool fold_p)1045 convert_to_complex_1 (tree type, tree expr, bool fold_p)
1046 {
1047 location_t loc = EXPR_LOCATION (expr);
1048 tree subtype = TREE_TYPE (type);
1049
1050 switch (TREE_CODE (TREE_TYPE (expr)))
1051 {
1052 case REAL_TYPE:
1053 case FIXED_POINT_TYPE:
1054 case INTEGER_TYPE:
1055 case ENUMERAL_TYPE:
1056 case BOOLEAN_TYPE:
1057 return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
1058 convert (subtype, integer_zero_node));
1059
1060 case COMPLEX_TYPE:
1061 {
1062 tree elt_type = TREE_TYPE (TREE_TYPE (expr));
1063
1064 if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
1065 return expr;
1066 else if (TREE_CODE (expr) == COMPOUND_EXPR)
1067 {
1068 tree t = convert_to_complex_1 (type, TREE_OPERAND (expr, 1),
1069 fold_p);
1070 if (t == TREE_OPERAND (expr, 1))
1071 return expr;
1072 return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR,
1073 TREE_TYPE (t), TREE_OPERAND (expr, 0), t);
1074 }
1075 else if (TREE_CODE (expr) == COMPLEX_EXPR)
1076 return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
1077 convert (subtype,
1078 TREE_OPERAND (expr, 0)),
1079 convert (subtype,
1080 TREE_OPERAND (expr, 1)));
1081 else
1082 {
1083 expr = save_expr (expr);
1084 tree realp = maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
1085 TREE_TYPE (TREE_TYPE (expr)),
1086 expr);
1087 tree imagp = maybe_fold_build1_loc (fold_p, loc, IMAGPART_EXPR,
1088 TREE_TYPE (TREE_TYPE (expr)),
1089 expr);
1090 return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
1091 convert (subtype, realp),
1092 convert (subtype, imagp));
1093 }
1094 }
1095
1096 case POINTER_TYPE:
1097 case REFERENCE_TYPE:
1098 error ("pointer value used where a complex was expected");
1099 return convert_to_complex_1 (type, integer_zero_node, fold_p);
1100
1101 default:
1102 error ("aggregate value used where a complex was expected");
1103 return convert_to_complex_1 (type, integer_zero_node, fold_p);
1104 }
1105 }
1106
1107 /* A wrapper around convert_to_complex_1 that always folds the
1108 expression. */
1109
1110 tree
convert_to_complex(tree type,tree expr)1111 convert_to_complex (tree type, tree expr)
1112 {
1113 return convert_to_complex_1 (type, expr, true);
1114 }
1115
1116 /* A wrapper around convert_to_complex_1 that only folds the
1117 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
1118
1119 tree
convert_to_complex_maybe_fold(tree type,tree expr,bool dofold)1120 convert_to_complex_maybe_fold (tree type, tree expr, bool dofold)
1121 {
1122 return convert_to_complex_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
1123 }
1124
1125 /* Convert EXPR to the vector type TYPE in the usual ways. */
1126
1127 tree
convert_to_vector(tree type,tree expr)1128 convert_to_vector (tree type, tree expr)
1129 {
1130 switch (TREE_CODE (TREE_TYPE (expr)))
1131 {
1132 case INTEGER_TYPE:
1133 case VECTOR_TYPE:
1134 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
1135 {
1136 error ("can%'t convert a value of type %qT"
1137 " to vector type %qT which has different size",
1138 TREE_TYPE (expr), type);
1139 return error_mark_node;
1140 }
1141 return build1 (VIEW_CONVERT_EXPR, type, expr);
1142
1143 default:
1144 error ("can%'t convert value to a vector");
1145 return error_mark_node;
1146 }
1147 }
1148
1149 /* Convert EXPR to some fixed-point type TYPE.
1150
1151 EXPR must be fixed-point, float, integer, or enumeral;
1152 in other cases error is called. */
1153
1154 tree
convert_to_fixed(tree type,tree expr)1155 convert_to_fixed (tree type, tree expr)
1156 {
1157 if (integer_zerop (expr))
1158 {
1159 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
1160 return fixed_zero_node;
1161 }
1162 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
1163 {
1164 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
1165 return fixed_one_node;
1166 }
1167
1168 switch (TREE_CODE (TREE_TYPE (expr)))
1169 {
1170 case FIXED_POINT_TYPE:
1171 case INTEGER_TYPE:
1172 case ENUMERAL_TYPE:
1173 case BOOLEAN_TYPE:
1174 case REAL_TYPE:
1175 return build1 (FIXED_CONVERT_EXPR, type, expr);
1176
1177 case COMPLEX_TYPE:
1178 return convert (type,
1179 fold_build1 (REALPART_EXPR,
1180 TREE_TYPE (TREE_TYPE (expr)), expr));
1181
1182 default:
1183 error ("aggregate value used where a fixed-point was expected");
1184 return error_mark_node;
1185 }
1186 }
1187