1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29
30 /* Include insn-config.h before expr.h so that HAVE_conditional_move
31 is properly defined. */
32 #include "insn-config.h"
33 #include "rtl.h"
34 #include "tree.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "except.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "libfuncs.h"
42 #include "recog.h"
43 #include "reload.h"
44 #include "ggc.h"
45 #include "real.h"
46 #include "basic-block.h"
47 #include "target.h"
48
49 /* Each optab contains info on how this target machine
50 can perform a particular operation
51 for all sizes and kinds of operands.
52
53 The operation to be performed is often specified
54 by passing one of these optabs as an argument.
55
56 See expr.h for documentation of these optabs. */
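/* A usage sketch: the handlers of an optab are indexed by machine mode,
   so code asking whether SImode addition can be open-coded checks
   add_optab->handlers[(int) SImode].insn_code against CODE_FOR_nothing,
   which corresponds to the named "addsi3" pattern when the target
   provides one.  */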
57
58 optab optab_table[OTI_MAX];
59
60 rtx libfunc_table[LTI_MAX];
61
62 /* Tables of patterns for converting one mode to another. */
63 convert_optab convert_optab_table[COI_MAX];
64
65 /* Contains the optab used for each rtx code. */
66 optab code_to_optab[NUM_RTX_CODE + 1];
67
68 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
69 gives the gen_function to make a branch to test that condition. */
70
71 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72
73 /* Indexed by the rtx-code for a conditional (e.g. EQ, LT, ...)
74 gives the insn code to make a store-condition insn
75 to test that condition. */
76
77 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78
79 #ifdef HAVE_conditional_move
80 /* Indexed by the machine mode, gives the insn code to make a conditional
81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82 setcc_gen_code to cut down on the number of named patterns. Consider a day
83 when a lot more rtx codes are conditional (e.g. for the ARM). */
84
85 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
86 #endif
87
88 /* Indexed by the machine mode, gives the insn code for vector conditional
89 operation. */
90
91 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93
94 /* The insn generating function cannot take an rtx_code argument.
95 TRAP_RTX is used as an rtx argument. Its code is replaced with
96 the code to be used in the trap insn and all other fields are ignored. */
97 static GTY(()) rtx trap_rtx;
98
99 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 int);
102 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103 enum machine_mode *, int *,
104 enum can_compare_purpose);
105 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 int *);
107 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108 static optab new_optab (void);
109 static convert_optab new_convert_optab (void);
110 static inline optab init_optab (enum rtx_code);
111 static inline optab init_optabv (enum rtx_code);
112 static inline convert_optab init_convert_optab (enum rtx_code);
113 static void init_libfuncs (optab, int, int, const char *, int);
114 static void init_integral_libfuncs (optab, const char *, int);
115 static void init_floating_libfuncs (optab, const char *, int);
116 static void init_interclass_conv_libfuncs (convert_optab, const char *,
117 enum mode_class, enum mode_class);
118 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119 enum mode_class, bool);
120 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121 enum rtx_code, int, rtx);
122 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123 enum machine_mode *, int *);
124 static rtx widen_clz (enum machine_mode, rtx, rtx);
125 static rtx expand_parity (enum machine_mode, rtx, rtx);
126 static enum rtx_code get_rtx_code (enum tree_code, bool);
127 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128
129 #ifndef HAVE_conditional_trap
130 #define HAVE_conditional_trap 0
131 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
132 #endif
133
134 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
135 the result of operation CODE applied to OP0 (and OP1 if it is a binary
136 operation).
137
138 If the last insn does not set TARGET, don't do anything, but return 1.
139
140 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
141 don't add the REG_EQUAL note but return 0. Our caller can then try
142 again, ensuring that TARGET is not one of the operands. */
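/* For example, if CODE is PLUS and TARGET has mode SImode, the note added
   below is (plus:SI OP0' OP1'), where OP0' and OP1' are copies of the
   operands, so later passes can see what value the whole sequence
   computes.  */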
143
144 static int
145 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 {
147 rtx last_insn, insn, set;
148 rtx note;
149
150 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151
152 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
153 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
154 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
155 && GET_RTX_CLASS (code) != RTX_COMPARE
156 && GET_RTX_CLASS (code) != RTX_UNARY)
157 return 1;
158
159 if (GET_CODE (target) == ZERO_EXTRACT)
160 return 1;
161
162 for (last_insn = insns;
163 NEXT_INSN (last_insn) != NULL_RTX;
164 last_insn = NEXT_INSN (last_insn))
165 ;
166
167 set = single_set (last_insn);
168 if (set == NULL_RTX)
169 return 1;
170
171 if (! rtx_equal_p (SET_DEST (set), target)
172 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
173 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
174 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
175 return 1;
176
177 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
178 besides the last insn. */
179 if (reg_overlap_mentioned_p (target, op0)
180 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 {
182 insn = PREV_INSN (last_insn);
183 while (insn != NULL_RTX)
184 {
185 if (reg_set_p (target, insn))
186 return 0;
187
188 insn = PREV_INSN (insn);
189 }
190 }
191
192 if (GET_RTX_CLASS (code) == RTX_UNARY)
193 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 else
195 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196
197 set_unique_reg_note (last_insn, REG_EQUAL, note);
198
199 return 1;
200 }
201
202 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
203 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
204 not actually do a sign-extend or zero-extend, but can leave the
205 higher-order bits of the result rtx undefined, for example, in the case
206 of logical operations, but not right shifts. */
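/* For example, widening a HImode operand of an AND to SImode with
   NO_EXTEND nonzero can simply return the paradoxical
   (subreg:SI (reg:HI ...) 0) built below (assuming SImode is no wider
   than a word); the undefined upper bits do not matter because the
   caller truncates the result back to HImode.  */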
207
208 static rtx
209 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
210 int unsignedp, int no_extend)
211 {
212 rtx result;
213
214 /* If we don't have to extend and this is a constant, return it. */
215 if (no_extend && GET_MODE (op) == VOIDmode)
216 return op;
217
218 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
219 extend since it will be more efficient to do so unless the signedness of
220 a promoted object differs from our extension. */
221 if (! no_extend
222 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
223 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
224 return convert_modes (mode, oldmode, op, unsignedp);
225
226 /* If MODE is no wider than a single word, we return a paradoxical
227 SUBREG. */
228 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
229 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230
231 /* Otherwise, get an object of MODE, clobber it, and set the low-order
232 part to OP. */
233
234 result = gen_reg_rtx (mode);
235 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
236 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
237 return result;
238 }
239
240 /* Return the optab used for computing the operation given by
241 the tree code, CODE. This function is not always usable (for
242 example, it cannot give complete results for multiplication
243 or division) but probably ought to be relied on more widely
244 throughout the expander. */
245 optab
246 optab_for_tree_code (enum tree_code code, tree type)
247 {
248 bool trapv;
249 switch (code)
250 {
251 case BIT_AND_EXPR:
252 return and_optab;
253
254 case BIT_IOR_EXPR:
255 return ior_optab;
256
257 case BIT_NOT_EXPR:
258 return one_cmpl_optab;
259
260 case BIT_XOR_EXPR:
261 return xor_optab;
262
263 case TRUNC_MOD_EXPR:
264 case CEIL_MOD_EXPR:
265 case FLOOR_MOD_EXPR:
266 case ROUND_MOD_EXPR:
267 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268
269 case RDIV_EXPR:
270 case TRUNC_DIV_EXPR:
271 case CEIL_DIV_EXPR:
272 case FLOOR_DIV_EXPR:
273 case ROUND_DIV_EXPR:
274 case EXACT_DIV_EXPR:
275 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276
277 case LSHIFT_EXPR:
278 return ashl_optab;
279
280 case RSHIFT_EXPR:
281 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282
283 case LROTATE_EXPR:
284 return rotl_optab;
285
286 case RROTATE_EXPR:
287 return rotr_optab;
288
289 case MAX_EXPR:
290 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291
292 case MIN_EXPR:
293 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294
295 case REALIGN_LOAD_EXPR:
296 return vec_realign_load_optab;
297
298 case WIDEN_SUM_EXPR:
299 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
300
301 case DOT_PROD_EXPR:
302 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
303
304 case REDUC_MAX_EXPR:
305 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
306
307 case REDUC_MIN_EXPR:
308 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309
310 case REDUC_PLUS_EXPR:
311 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312
313 case VEC_LSHIFT_EXPR:
314 return vec_shl_optab;
315
316 case VEC_RSHIFT_EXPR:
317 return vec_shr_optab;
318
319 default:
320 break;
321 }
322
323 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
324 switch (code)
325 {
326 case PLUS_EXPR:
327 return trapv ? addv_optab : add_optab;
328
329 case MINUS_EXPR:
330 return trapv ? subv_optab : sub_optab;
331
332 case MULT_EXPR:
333 return trapv ? smulv_optab : smul_optab;
334
335 case NEGATE_EXPR:
336 return trapv ? negv_optab : neg_optab;
337
338 case ABS_EXPR:
339 return trapv ? absv_optab : abs_optab;
340
341 default:
342 return NULL;
343 }
344 }
345
346
347 /* Expand vector widening operations.
348
349 There are two different classes of operations handled here:
350 1) Operations whose result is wider than all the arguments to the operation.
351 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
352 In this case OP0 and optionally OP1 would be initialized,
353 but WIDE_OP wouldn't (not relevant for this case).
354 2) Operations whose result is of the same size as the last argument to the
355 operation, but wider than all the other arguments to the operation.
356 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
357 In this case WIDE_OP, OP0 and optionally OP1 would be initialized.
358
359 E.g., when called to expand the following operations, this is how
360 the arguments will be initialized:
361 nops OP0 OP1 WIDE_OP
362 widening-sum 2 oprnd0 - oprnd1
363 widening-dot-product 3 oprnd0 oprnd1 oprnd2
364 widening-mult 2 oprnd0 oprnd1 -
365 type-promotion (vec-unpack) 1 oprnd0 - - */
366
367 rtx
368 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
369 int unsignedp)
370 {
371 tree oprnd0, oprnd1, oprnd2;
372 enum machine_mode wmode = 0, tmode0, tmode1 = 0;
373 optab widen_pattern_optab;
374 int icode;
375 enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
376 rtx temp;
377 rtx pat;
378 rtx xop0, xop1, wxop;
379 int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
380
381 oprnd0 = TREE_OPERAND (exp, 0);
382 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
383 widen_pattern_optab =
384 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
385 icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
386 gcc_assert (icode != CODE_FOR_nothing);
387 xmode0 = insn_data[icode].operand[1].mode;
388
389 if (nops >= 2)
390 {
391 oprnd1 = TREE_OPERAND (exp, 1);
392 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
393 xmode1 = insn_data[icode].operand[2].mode;
394 }
395
396 /* The last operand is of a wider mode than the rest of the operands. */
397 if (nops == 2)
398 {
399 wmode = tmode1;
400 wxmode = xmode1;
401 }
402 else if (nops == 3)
403 {
404 gcc_assert (tmode1 == tmode0);
405 gcc_assert (op1);
406 oprnd2 = TREE_OPERAND (exp, 2);
407 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
408 wxmode = insn_data[icode].operand[3].mode;
409 }
410
411 if (!wide_op)
412 wmode = wxmode = insn_data[icode].operand[0].mode;
413
414 if (!target
415 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
416 temp = gen_reg_rtx (wmode);
417 else
418 temp = target;
419
420 xop0 = op0;
421 xop1 = op1;
422 wxop = wide_op;
423
424 /* In case the insn wants input operands in modes different from
425 those of the actual operands, convert the operands. It would
426 seem that we don't need to convert CONST_INTs, but we do, so
427 that they're properly zero-extended, sign-extended or truncated
428 for their mode. */
429
430 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
431 xop0 = convert_modes (xmode0,
432 GET_MODE (op0) != VOIDmode
433 ? GET_MODE (op0)
434 : tmode0,
435 xop0, unsignedp);
436
437 if (op1)
438 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
439 xop1 = convert_modes (xmode1,
440 GET_MODE (op1) != VOIDmode
441 ? GET_MODE (op1)
442 : tmode1,
443 xop1, unsignedp);
444
445 if (wide_op)
446 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
447 wxop = convert_modes (wxmode,
448 GET_MODE (wide_op) != VOIDmode
449 ? GET_MODE (wide_op)
450 : wmode,
451 wxop, unsignedp);
452
453 /* Now, if insn's predicates don't allow our operands, put them into
454 pseudo regs. */
455
456 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
457 && xmode0 != VOIDmode)
458 xop0 = copy_to_mode_reg (xmode0, xop0);
459
460 if (op1)
461 {
462 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
463 && xmode1 != VOIDmode)
464 xop1 = copy_to_mode_reg (xmode1, xop1);
465
466 if (wide_op)
467 {
468 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
469 && wxmode != VOIDmode)
470 wxop = copy_to_mode_reg (wxmode, wxop);
471
472 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
473 }
474 else
475 pat = GEN_FCN (icode) (temp, xop0, xop1);
476 }
477 else
478 {
479 if (wide_op)
480 {
481 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
482 && wxmode != VOIDmode)
483 wxop = copy_to_mode_reg (wxmode, wxop);
484
485 pat = GEN_FCN (icode) (temp, xop0, wxop);
486 }
487 else
488 pat = GEN_FCN (icode) (temp, xop0);
489 }
490
491 emit_insn (pat);
492 return temp;
493 }
494
495 /* Generate code to perform an operation specified by TERNARY_OPTAB
496 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
497
498 UNSIGNEDP is for the case where we have to widen the operands
499 to perform the operation. It says to use zero-extension.
500
501 If TARGET is nonzero, the value
502 is generated there, if it is convenient to do so.
503 In all cases an rtx is returned for the locus of the value;
504 this may or may not be TARGET. */
505
506 rtx
507 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
508 rtx op1, rtx op2, rtx target, int unsignedp)
509 {
510 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
511 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
512 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
513 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
514 rtx temp;
515 rtx pat;
516 rtx xop0 = op0, xop1 = op1, xop2 = op2;
517
518 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
519 != CODE_FOR_nothing);
520
521 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
522 temp = gen_reg_rtx (mode);
523 else
524 temp = target;
525
526 /* In case the insn wants input operands in modes different from
527 those of the actual operands, convert the operands. It would
528 seem that we don't need to convert CONST_INTs, but we do, so
529 that they're properly zero-extended, sign-extended or truncated
530 for their mode. */
531
532 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
533 xop0 = convert_modes (mode0,
534 GET_MODE (op0) != VOIDmode
535 ? GET_MODE (op0)
536 : mode,
537 xop0, unsignedp);
538
539 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
540 xop1 = convert_modes (mode1,
541 GET_MODE (op1) != VOIDmode
542 ? GET_MODE (op1)
543 : mode,
544 xop1, unsignedp);
545
546 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
547 xop2 = convert_modes (mode2,
548 GET_MODE (op2) != VOIDmode
549 ? GET_MODE (op2)
550 : mode,
551 xop2, unsignedp);
552
553 /* Now, if insn's predicates don't allow our operands, put them into
554 pseudo regs. */
555
556 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
557 && mode0 != VOIDmode)
558 xop0 = copy_to_mode_reg (mode0, xop0);
559
560 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
561 && mode1 != VOIDmode)
562 xop1 = copy_to_mode_reg (mode1, xop1);
563
564 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
565 && mode2 != VOIDmode)
566 xop2 = copy_to_mode_reg (mode2, xop2);
567
568 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
569
570 emit_insn (pat);
571 return temp;
572 }
573
574
575 /* Like expand_binop, but return a constant rtx if the result can be
576 calculated at compile time. The arguments and return value are
577 otherwise the same as for expand_binop. */
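/* For example, simplify_expand_binop (SImode, add_optab, GEN_INT (2),
   GEN_INT (3), ...) folds to (const_int 5) via simplify_binary_operation
   and emits no insns; non-constant operands fall through to expand_binop
   as usual.  */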
578
579 static rtx
580 simplify_expand_binop (enum machine_mode mode, optab binoptab,
581 rtx op0, rtx op1, rtx target, int unsignedp,
582 enum optab_methods methods)
583 {
584 if (CONSTANT_P (op0) && CONSTANT_P (op1))
585 {
586 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
587
588 if (x)
589 return x;
590 }
591
592 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
593 }
594
595 /* Like simplify_expand_binop, but always put the result in TARGET.
596 Return true if the expansion succeeded. */
597
598 bool
599 force_expand_binop (enum machine_mode mode, optab binoptab,
600 rtx op0, rtx op1, rtx target, int unsignedp,
601 enum optab_methods methods)
602 {
603 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
604 target, unsignedp, methods);
605 if (x == 0)
606 return false;
607 if (x != target)
608 emit_move_insn (target, x);
609 return true;
610 }
611
612 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
613
614 rtx
615 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
616 {
617 enum insn_code icode;
618 rtx rtx_op1, rtx_op2;
619 enum machine_mode mode1;
620 enum machine_mode mode2;
621 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
622 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
623 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
624 optab shift_optab;
625 rtx pat;
626
627 switch (TREE_CODE (vec_shift_expr))
628 {
629 case VEC_RSHIFT_EXPR:
630 shift_optab = vec_shr_optab;
631 break;
632 case VEC_LSHIFT_EXPR:
633 shift_optab = vec_shl_optab;
634 break;
635 default:
636 gcc_unreachable ();
637 }
638
639 icode = (int) shift_optab->handlers[(int) mode].insn_code;
640 gcc_assert (icode != CODE_FOR_nothing);
641
642 mode1 = insn_data[icode].operand[1].mode;
643 mode2 = insn_data[icode].operand[2].mode;
644
645 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
646 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
647 && mode1 != VOIDmode)
648 rtx_op1 = force_reg (mode1, rtx_op1);
649
650 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
651 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
652 && mode2 != VOIDmode)
653 rtx_op2 = force_reg (mode2, rtx_op2);
654
655 if (!target
656 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
657 target = gen_reg_rtx (mode);
658
659 /* Emit the instruction. */
660 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
661 gcc_assert (pat);
662 emit_insn (pat);
663
664 return target;
665 }
666
667 /* This subroutine of expand_doubleword_shift handles the cases in which
668 the effective shift value is >= BITS_PER_WORD. The arguments and return
669 value are the same as for the parent routine, except that SUPERWORD_OP1
670 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
671 INTO_TARGET may be null if the caller has decided to calculate it. */
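/* For illustration, assuming BITS_PER_WORD == 32: for a DImode logical
   right shift with a count known to be >= 32, INTO_TARGET becomes
   OUTOF_INPUT >> SUPERWORD_OP1 and OUTOF_TARGET becomes zero; for an
   arithmetic right shift OUTOF_TARGET is instead filled with sign copies
   computed as OUTOF_INPUT >> 31.  */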
672
673 static bool
674 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
675 rtx outof_target, rtx into_target,
676 int unsignedp, enum optab_methods methods)
677 {
678 if (into_target != 0)
679 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
680 into_target, unsignedp, methods))
681 return false;
682
683 if (outof_target != 0)
684 {
685 /* For a signed right shift, we must fill OUTOF_TARGET with copies
686 of the sign bit, otherwise we must fill it with zeros. */
687 if (binoptab != ashr_optab)
688 emit_move_insn (outof_target, CONST0_RTX (word_mode));
689 else
690 if (!force_expand_binop (word_mode, binoptab,
691 outof_input, GEN_INT (BITS_PER_WORD - 1),
692 outof_target, unsignedp, methods))
693 return false;
694 }
695 return true;
696 }
697
698 /* This subroutine of expand_doubleword_shift handles the cases in which
699 the effective shift value is < BITS_PER_WORD. The arguments and return
700 value are the same as for the parent routine. */
701
702 static bool
703 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
704 rtx outof_input, rtx into_input, rtx op1,
705 rtx outof_target, rtx into_target,
706 int unsignedp, enum optab_methods methods,
707 unsigned HOST_WIDE_INT shift_mask)
708 {
709 optab reverse_unsigned_shift, unsigned_shift;
710 rtx tmp, carries;
711
712 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
713 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
714
715 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
716 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
717 the opposite direction to BINOPTAB. */
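/* For instance, with BITS_PER_WORD == 32 and a DImode left shift by
   OP1 == 10, INTO_TARGET is (INTO_INPUT << 10) | (OUTOF_INPUT >> 22),
   so the carried bits are OUTOF_INPUT shifted 32 - 10 = 22 bits the
   other way.  */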
718 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
719 {
720 carries = outof_input;
721 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
722 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
723 0, true, methods);
724 }
725 else
726 {
727 /* We must avoid shifting by BITS_PER_WORD bits since that is either
728 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
729 has unknown behavior. Do a single shift first, then shift by the
730 remainder. It's OK to use ~OP1 as the remainder if shift counts
731 are truncated to the mode size. */
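/* (~OP1 is congruent to BITS_PER_WORD - 1 - OP1 modulo BITS_PER_WORD,
   so when the machine masks shift counts with BITS_PER_WORD - 1 the two
   remainders select the same shift.)  */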
732 carries = expand_binop (word_mode, reverse_unsigned_shift,
733 outof_input, const1_rtx, 0, unsignedp, methods);
734 if (shift_mask == BITS_PER_WORD - 1)
735 {
736 tmp = immed_double_const (-1, -1, op1_mode);
737 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
738 0, true, methods);
739 }
740 else
741 {
742 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
743 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
744 0, true, methods);
745 }
746 }
747 if (tmp == 0 || carries == 0)
748 return false;
749 carries = expand_binop (word_mode, reverse_unsigned_shift,
750 carries, tmp, 0, unsignedp, methods);
751 if (carries == 0)
752 return false;
753
754 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
755 so the result can go directly into INTO_TARGET if convenient. */
756 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
757 into_target, unsignedp, methods);
758 if (tmp == 0)
759 return false;
760
761 /* Now OR in the bits carried over from OUTOF_INPUT. */
762 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
763 into_target, unsignedp, methods))
764 return false;
765
766 /* Use a standard word_mode shift for the out-of half. */
767 if (outof_target != 0)
768 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
769 outof_target, unsignedp, methods))
770 return false;
771
772 return true;
773 }
774
775
776 #ifdef HAVE_conditional_move
777 /* Try implementing expand_doubleword_shift using conditional moves.
778 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
779 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
780 are the shift counts to use in the former and latter case. All other
781 arguments are the same as the parent routine. */
782
783 static bool
784 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
785 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
786 rtx outof_input, rtx into_input,
787 rtx subword_op1, rtx superword_op1,
788 rtx outof_target, rtx into_target,
789 int unsignedp, enum optab_methods methods,
790 unsigned HOST_WIDE_INT shift_mask)
791 {
792 rtx outof_superword, into_superword;
793
794 /* Put the superword version of the output into OUTOF_SUPERWORD and
795 INTO_SUPERWORD. */
796 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
797 if (outof_target != 0 && subword_op1 == superword_op1)
798 {
799 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
800 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
801 into_superword = outof_target;
802 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
803 outof_superword, 0, unsignedp, methods))
804 return false;
805 }
806 else
807 {
808 into_superword = gen_reg_rtx (word_mode);
809 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
810 outof_superword, into_superword,
811 unsignedp, methods))
812 return false;
813 }
814
815 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
816 if (!expand_subword_shift (op1_mode, binoptab,
817 outof_input, into_input, subword_op1,
818 outof_target, into_target,
819 unsignedp, methods, shift_mask))
820 return false;
821
822 /* Select between them. Do the INTO half first because INTO_SUPERWORD
823 might be the current value of OUTOF_TARGET. */
824 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
825 into_target, into_superword, word_mode, false))
826 return false;
827
828 if (outof_target != 0)
829 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
830 outof_target, outof_superword,
831 word_mode, false))
832 return false;
833
834 return true;
835 }
836 #endif
837
838 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
839 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
840 input operand; the shift moves bits in the direction OUTOF_INPUT->
841 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
842 of the target. OP1 is the shift count and OP1_MODE is its mode.
843 If OP1 is constant, it will have been truncated as appropriate
844 and is known to be nonzero.
845
846 If SHIFT_MASK is zero, the result of word shifts is undefined when the
847 shift count is outside the range [0, BITS_PER_WORD). This routine must
848 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
849
850 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
851 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
852 fill with zeros or sign bits as appropriate.
853
854 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
855 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
856 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
857 In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
858 are undefined.
859
860 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
861 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
862 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
863 function wants to calculate it itself.
864
865 Return true if the shift could be successfully synthesized. */
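/* A worked sketch, assuming BITS_PER_WORD == 32 and a non-constant DImode
   shift count N: when SHIFT_MASK is 31 the code below tests (N & 32) == 0
   and reuses N itself as SUPERWORD_OP1, relying on the machine to mask
   away the extra bit; otherwise it computes N - 32, tests whether that is
   negative, and uses the difference as SUPERWORD_OP1.  The chosen test
   then selects between expand_subword_shift and expand_superword_shift,
   by conditional moves if available and by a branch otherwise.  */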
866
867 static bool
868 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
869 rtx outof_input, rtx into_input, rtx op1,
870 rtx outof_target, rtx into_target,
871 int unsignedp, enum optab_methods methods,
872 unsigned HOST_WIDE_INT shift_mask)
873 {
874 rtx superword_op1, tmp, cmp1, cmp2;
875 rtx subword_label, done_label;
876 enum rtx_code cmp_code;
877
878 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
879 fill the result with sign or zero bits as appropriate. If so, the value
880 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
881 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
882 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
883
884 This isn't worthwhile for constant shifts since the optimizers will
885 cope better with in-range shift counts. */
886 if (shift_mask >= BITS_PER_WORD
887 && outof_target != 0
888 && !CONSTANT_P (op1))
889 {
890 if (!expand_doubleword_shift (op1_mode, binoptab,
891 outof_input, into_input, op1,
892 0, into_target,
893 unsignedp, methods, shift_mask))
894 return false;
895 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
896 outof_target, unsignedp, methods))
897 return false;
898 return true;
899 }
900
901 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
902 is true when the effective shift value is less than BITS_PER_WORD.
903 Set SUPERWORD_OP1 to the shift count that should be used to shift
904 OUTOF_INPUT into INTO_TARGET when the condition is false. */
905 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
906 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
907 {
908 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
909 is a subword shift count. */
910 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
911 0, true, methods);
912 cmp2 = CONST0_RTX (op1_mode);
913 cmp_code = EQ;
914 superword_op1 = op1;
915 }
916 else
917 {
918 /* Set CMP1 to OP1 - BITS_PER_WORD. */
919 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
920 0, true, methods);
921 cmp2 = CONST0_RTX (op1_mode);
922 cmp_code = LT;
923 superword_op1 = cmp1;
924 }
925 if (cmp1 == 0)
926 return false;
927
928 /* If we can compute the condition at compile time, pick the
929 appropriate subroutine. */
930 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
931 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
932 {
933 if (tmp == const0_rtx)
934 return expand_superword_shift (binoptab, outof_input, superword_op1,
935 outof_target, into_target,
936 unsignedp, methods);
937 else
938 return expand_subword_shift (op1_mode, binoptab,
939 outof_input, into_input, op1,
940 outof_target, into_target,
941 unsignedp, methods, shift_mask);
942 }
943
944 #ifdef HAVE_conditional_move
945 /* Try using conditional moves to generate straight-line code. */
946 {
947 rtx start = get_last_insn ();
948 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
949 cmp_code, cmp1, cmp2,
950 outof_input, into_input,
951 op1, superword_op1,
952 outof_target, into_target,
953 unsignedp, methods, shift_mask))
954 return true;
955 delete_insns_since (start);
956 }
957 #endif
958
959 /* As a last resort, use branches to select the correct alternative. */
960 subword_label = gen_label_rtx ();
961 done_label = gen_label_rtx ();
962
963 NO_DEFER_POP;
964 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
965 0, 0, subword_label);
966 OK_DEFER_POP;
967
968 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
969 outof_target, into_target,
970 unsignedp, methods))
971 return false;
972
973 emit_jump_insn (gen_jump (done_label));
974 emit_barrier ();
975 emit_label (subword_label);
976
977 if (!expand_subword_shift (op1_mode, binoptab,
978 outof_input, into_input, op1,
979 outof_target, into_target,
980 unsignedp, methods, shift_mask))
981 return false;
982
983 emit_label (done_label);
984 return true;
985 }
986
987 /* Subroutine of expand_binop. Perform a double word multiplication of
988 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
989 as the target's word_mode. This function returns NULL_RTX if anything
990 goes wrong, in which case it may have already emitted instructions
991 which need to be deleted.
992
993 If we want to multiply two two-word values and have normal and widening
994 multiplies of single-word values, we can do this with three smaller
995 multiplications. Note that we do not make a REG_NO_CONFLICT block here
996 because we are not operating on one word at a time.
997
998 The multiplication proceeds as follows:
999                                _______________________
1000                             [__op0_high_|__op0_low__]
1001                              _______________________
1002                       *      [__op1_high_|__op1_low__]
1003      _______________________________________________
1004                              _______________________
1005  (1)                        [__op0_low__*__op1_low__]
1006                  _______________________
1007  (2a)           [__op0_low__*__op1_high_]
1008                  _______________________
1009  (2b)           [__op0_high_*__op1_low__]
1010       _______________________
1011  (3) [__op0_high_*__op1_high_]
1012
1013
1014 This gives a 4-word result. Since we are only interested in the
1015 lower 2 words, partial result (3) and the upper words of (2a) and
1016 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1017 calculated using non-widening multiplication.
1018
1019 (1), however, needs to be calculated with an unsigned widening
1020 multiplication. If this operation is not directly supported we
1021 try using a signed widening multiplication and adjust the result.
1022 This adjustment works as follows:
1023
1024 If both operands are positive then no adjustment is needed.
1025
1026 If the operands have different signs, for example op0_low < 0 and
1027 op1_low >= 0, the instruction treats the most significant bit of
1028 op0_low as a sign bit instead of a bit with significance
1029 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1030 with 2**BITS_PER_WORD - op0_low, and two's complements the
1031 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1032 the result.
1033
1034 Similarly, if both operands are negative, we need to add
1035 (op0_low + op1_low) * 2**BITS_PER_WORD.
1036
1037 We use a trick to adjust quickly. We logically shift op0_low right
1038 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1039 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1040 logical shift exists, we do an arithmetic right shift and subtract
1041 the 0 or -1. */
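/* A small numeric sketch of the adjustment, assuming BITS_PER_WORD == 8:
   if op0_low is 0xff (-1 as a signed word) and op1_low is 2, the unsigned
   widening product is 0x01fe while the signed widening product is 0xfffe,
   which is short by op1_low * 2**BITS_PER_WORD = 0x200.  Adding
   op0_low >> 7 (here 1) into op0_high before forming partial product (2b)
   feeds exactly that missing op1_low * 2**BITS_PER_WORD into the upper
   words of the result.  */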
1042
1043 static rtx
1044 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1045 bool umulp, enum optab_methods methods)
1046 {
1047 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1048 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1049 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1050 rtx product, adjust, product_high, temp;
1051
1052 rtx op0_high = operand_subword_force (op0, high, mode);
1053 rtx op0_low = operand_subword_force (op0, low, mode);
1054 rtx op1_high = operand_subword_force (op1, high, mode);
1055 rtx op1_low = operand_subword_force (op1, low, mode);
1056
1057 /* If we're using an unsigned multiply to directly compute the product
1058 of the low-order words of the operands and perform any required
1059 adjustments of the operands, we begin by trying two more multiplications
1060 and then computing the appropriate sum.
1061
1062 We have checked above that the required addition is provided.
1063 Full-word addition will normally always succeed, especially if
1064 it is provided at all, so we don't worry about its failure. The
1065 multiplication may well fail, however, so we do handle that. */
1066
1067 if (!umulp)
1068 {
1069 /* ??? This could be done with emit_store_flag where available. */
1070 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1071 NULL_RTX, 1, methods);
1072 if (temp)
1073 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1074 NULL_RTX, 0, OPTAB_DIRECT);
1075 else
1076 {
1077 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1078 NULL_RTX, 0, methods);
1079 if (!temp)
1080 return NULL_RTX;
1081 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1082 NULL_RTX, 0, OPTAB_DIRECT);
1083 }
1084
1085 if (!op0_high)
1086 return NULL_RTX;
1087 }
1088
1089 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1090 NULL_RTX, 0, OPTAB_DIRECT);
1091 if (!adjust)
1092 return NULL_RTX;
1093
1094 /* OP0_HIGH should now be dead. */
1095
1096 if (!umulp)
1097 {
1098 /* ??? This could be done with emit_store_flag where available. */
1099 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1100 NULL_RTX, 1, methods);
1101 if (temp)
1102 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1103 NULL_RTX, 0, OPTAB_DIRECT);
1104 else
1105 {
1106 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1107 NULL_RTX, 0, methods);
1108 if (!temp)
1109 return NULL_RTX;
1110 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1111 NULL_RTX, 0, OPTAB_DIRECT);
1112 }
1113
1114 if (!op1_high)
1115 return NULL_RTX;
1116 }
1117
1118 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1119 NULL_RTX, 0, OPTAB_DIRECT);
1120 if (!temp)
1121 return NULL_RTX;
1122
1123 /* OP1_HIGH should now be dead. */
1124
1125 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1126 adjust, 0, OPTAB_DIRECT);
1127
1128 if (target && !REG_P (target))
1129 target = NULL_RTX;
1130
1131 if (umulp)
1132 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1133 target, 1, OPTAB_DIRECT);
1134 else
1135 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1136 target, 1, OPTAB_DIRECT);
1137
1138 if (!product)
1139 return NULL_RTX;
1140
1141 product_high = operand_subword (product, high, 1, mode);
1142 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1143 REG_P (product_high) ? product_high : adjust,
1144 0, OPTAB_DIRECT);
1145 emit_move_insn (product_high, adjust);
1146 return product;
1147 }
1148
1149 /* Wrapper around expand_binop which takes an rtx code to specify
1150 the operation to perform, not an optab pointer. All other
1151 arguments are the same. */
1152 rtx
1153 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1154 rtx op1, rtx target, int unsignedp,
1155 enum optab_methods methods)
1156 {
1157 optab binop = code_to_optab[(int) code];
1158 gcc_assert (binop);
1159
1160 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1161 }
1162
1163 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1164 binop. Order them according to commutative_operand_precedence and, if
1165 possible, try to put TARGET or a pseudo first. */
1166 static bool
1167 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1168 {
1169 int op0_prec = commutative_operand_precedence (op0);
1170 int op1_prec = commutative_operand_precedence (op1);
1171
1172 if (op0_prec < op1_prec)
1173 return true;
1174
1175 if (op0_prec > op1_prec)
1176 return false;
1177
1178 /* With equal precedence, both orders are ok, but it is better if the
1179 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1180 if (target == 0 || REG_P (target))
1181 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1182 else
1183 return rtx_equal_p (op1, target);
1184 }
1185
1186
1187 /* Generate code to perform an operation specified by BINOPTAB
1188 on operands OP0 and OP1, with result having machine-mode MODE.
1189
1190 UNSIGNEDP is for the case where we have to widen the operands
1191 to perform the operation. It says to use zero-extension.
1192
1193 If TARGET is nonzero, the value
1194 is generated there, if it is convenient to do so.
1195 In all cases an rtx is returned for the locus of the value;
1196 this may or may not be TARGET. */
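/* For illustration: a call such as
   expand_binop (SImode, add_optab, x, y, target, 1, OPTAB_LIB_WIDEN)
   returns an rtx holding the SImode sum of x and y, emitting whatever
   insns, widening conversions or library calls that requires; the
   returned rtx may be TARGET but need not be.  */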
1197
1198 rtx
1199 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1200 rtx target, int unsignedp, enum optab_methods methods)
1201 {
1202 enum optab_methods next_methods
1203 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1204 ? OPTAB_WIDEN : methods);
1205 enum mode_class class;
1206 enum machine_mode wider_mode;
1207 rtx temp;
1208 int commutative_op = 0;
1209 int shift_op = (binoptab->code == ASHIFT
1210 || binoptab->code == ASHIFTRT
1211 || binoptab->code == LSHIFTRT
1212 || binoptab->code == ROTATE
1213 || binoptab->code == ROTATERT);
1214 rtx entry_last = get_last_insn ();
1215 rtx last;
1216 bool first_pass_p = true;
1217
1218 class = GET_MODE_CLASS (mode);
1219
1220 /* If subtracting an integer constant, convert this into an addition of
1221 the negated constant. */
1222
1223 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1224 {
1225 op1 = negate_rtx (mode, op1);
1226 binoptab = add_optab;
1227 }
1228
1229 /* If we are inside an appropriately-short loop and we are optimizing,
1230 force expensive constants into a register. */
1231 if (CONSTANT_P (op0) && optimize
1232 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1233 {
1234 if (GET_MODE (op0) != VOIDmode)
1235 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1236 op0 = force_reg (mode, op0);
1237 }
1238
1239 if (CONSTANT_P (op1) && optimize
1240 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1241 {
1242 if (GET_MODE (op1) != VOIDmode)
1243 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1244 op1 = force_reg (mode, op1);
1245 }
1246
1247 /* Record where to delete back to if we backtrack. */
1248 last = get_last_insn ();
1249
1250 /* If operation is commutative,
1251 try to make the first operand a register.
1252 Even better, try to make it the same as the target.
1253 Also try to make the last operand a constant. */
1254 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1255 || binoptab == smul_widen_optab
1256 || binoptab == umul_widen_optab
1257 || binoptab == smul_highpart_optab
1258 || binoptab == umul_highpart_optab)
1259 {
1260 commutative_op = 1;
1261
1262 if (swap_commutative_operands_with_target (target, op0, op1))
1263 {
1264 temp = op1;
1265 op1 = op0;
1266 op0 = temp;
1267 }
1268 }
1269
1270 retry:
1271
1272 /* If we can do it with a three-operand insn, do so. */
1273
1274 if (methods != OPTAB_MUST_WIDEN
1275 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1276 {
1277 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1278 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1279 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1280 rtx pat;
1281 rtx xop0 = op0, xop1 = op1;
1282
1283 if (target)
1284 temp = target;
1285 else
1286 temp = gen_reg_rtx (mode);
1287
1288 /* If it is a commutative operator and the modes would match
1289 if we would swap the operands, we can save the conversions. */
1290 if (commutative_op)
1291 {
1292 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1293 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1294 {
1295 rtx tmp;
1296
1297 tmp = op0; op0 = op1; op1 = tmp;
1298 tmp = xop0; xop0 = xop1; xop1 = tmp;
1299 }
1300 }
1301
1302 /* In case the insn wants input operands in modes different from
1303 those of the actual operands, convert the operands. It would
1304 seem that we don't need to convert CONST_INTs, but we do, so
1305 that they're properly zero-extended, sign-extended or truncated
1306 for their mode. */
1307
1308 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1309 xop0 = convert_modes (mode0,
1310 GET_MODE (op0) != VOIDmode
1311 ? GET_MODE (op0)
1312 : mode,
1313 xop0, unsignedp);
1314
1315 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1316 xop1 = convert_modes (mode1,
1317 GET_MODE (op1) != VOIDmode
1318 ? GET_MODE (op1)
1319 : mode,
1320 xop1, unsignedp);
1321
1322 /* Now, if insn's predicates don't allow our operands, put them into
1323 pseudo regs. */
1324
1325 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1326 && mode0 != VOIDmode)
1327 xop0 = copy_to_mode_reg (mode0, xop0);
1328
1329 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1330 && mode1 != VOIDmode)
1331 xop1 = copy_to_mode_reg (mode1, xop1);
1332
1333 if (!insn_data[icode].operand[0].predicate (temp, mode))
1334 temp = gen_reg_rtx (mode);
1335
1336 pat = GEN_FCN (icode) (temp, xop0, xop1);
1337 if (pat)
1338 {
1339 /* If PAT is composed of more than one insn, try to add an appropriate
1340 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1341 operand, call ourselves again, this time without a target. */
1342 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1343 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1344 {
1345 delete_insns_since (last);
1346 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1347 unsignedp, methods);
1348 }
1349
1350 emit_insn (pat);
1351 return temp;
1352 }
1353 else
1354 delete_insns_since (last);
1355 }
1356
1357 /* If we were trying to rotate by a constant value, and that didn't
1358 work, try rotating the other direction before falling back to
1359 shifts and bitwise-or. */
1360 if (first_pass_p
1361 && (binoptab == rotl_optab || binoptab == rotr_optab)
1362 && class == MODE_INT
1363 && GET_CODE (op1) == CONST_INT
1364 && INTVAL (op1) > 0
1365 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1366 {
1367 first_pass_p = false;
1368 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1369 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1370 goto retry;
1371 }
1372
1373 /* If this is a multiply, see if we can do a widening operation that
1374 takes operands of this mode and makes a wider mode. */
1375
1376 if (binoptab == smul_optab
1377 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1378 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1379 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1380 != CODE_FOR_nothing))
1381 {
1382 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1383 unsignedp ? umul_widen_optab : smul_widen_optab,
1384 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1385
1386 if (temp != 0)
1387 {
1388 if (GET_MODE_CLASS (mode) == MODE_INT
1389 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1390 GET_MODE_BITSIZE (GET_MODE (temp))))
1391 return gen_lowpart (mode, temp);
1392 else
1393 return convert_to_mode (mode, temp, unsignedp);
1394 }
1395 }
1396
1397 /* Look for a wider mode of the same class for which we think we
1398 can open-code the operation. Check for a widening multiply at the
1399 wider mode as well. */
1400
1401 if (CLASS_HAS_WIDER_MODES_P (class)
1402 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1403 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1404 wider_mode != VOIDmode;
1405 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1406 {
1407 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1408 || (binoptab == smul_optab
1409 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1410 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1411 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1412 != CODE_FOR_nothing)))
1413 {
1414 rtx xop0 = op0, xop1 = op1;
1415 int no_extend = 0;
1416
1417 /* For certain integer operations, we need not actually extend
1418 the narrow operands, as long as we will truncate
1419 the results to the same narrowness. */
1420
1421 if ((binoptab == ior_optab || binoptab == and_optab
1422 || binoptab == xor_optab
1423 || binoptab == add_optab || binoptab == sub_optab
1424 || binoptab == smul_optab || binoptab == ashl_optab)
1425 && class == MODE_INT)
1426 no_extend = 1;
1427
1428 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1429
1430 /* The second operand of a shift must always be extended. */
1431 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1432 no_extend && binoptab != ashl_optab);
1433
1434 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1435 unsignedp, OPTAB_DIRECT);
1436 if (temp)
1437 {
1438 if (class != MODE_INT
1439 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1440 GET_MODE_BITSIZE (wider_mode)))
1441 {
1442 if (target == 0)
1443 target = gen_reg_rtx (mode);
1444 convert_move (target, temp, 0);
1445 return target;
1446 }
1447 else
1448 return gen_lowpart (mode, temp);
1449 }
1450 else
1451 delete_insns_since (last);
1452 }
1453 }
1454
1455 /* These can be done a word at a time. */
1456 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1457 && class == MODE_INT
1458 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1459 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1460 {
1461 int i;
1462 rtx insns;
1463 rtx equiv_value;
1464
1465 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1466 won't be accurate, so use a new target. */
1467 if (target == 0 || target == op0 || target == op1)
1468 target = gen_reg_rtx (mode);
1469
1470 start_sequence ();
1471
1472 /* Do the actual arithmetic. */
1473 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1474 {
1475 rtx target_piece = operand_subword (target, i, 1, mode);
1476 rtx x = expand_binop (word_mode, binoptab,
1477 operand_subword_force (op0, i, mode),
1478 operand_subword_force (op1, i, mode),
1479 target_piece, unsignedp, next_methods);
1480
1481 if (x == 0)
1482 break;
1483
1484 if (target_piece != x)
1485 emit_move_insn (target_piece, x);
1486 }
1487
1488 insns = get_insns ();
1489 end_sequence ();
1490
1491 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1492 {
1493 if (binoptab->code != UNKNOWN)
1494 equiv_value
1495 = gen_rtx_fmt_ee (binoptab->code, mode,
1496 copy_rtx (op0), copy_rtx (op1));
1497 else
1498 equiv_value = 0;
1499
1500 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1501 return target;
1502 }
1503 }
1504
1505 /* Synthesize double word shifts from single word shifts. */
1506 if ((binoptab == lshr_optab || binoptab == ashl_optab
1507 || binoptab == ashr_optab)
1508 && class == MODE_INT
1509 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1510 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1511 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1512 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1513 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1514 {
1515 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1516 enum machine_mode op1_mode;
1517
1518 double_shift_mask = targetm.shift_truncation_mask (mode);
1519 shift_mask = targetm.shift_truncation_mask (word_mode);
1520 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1521
1522 /* Apply the truncation to constant shifts. */
1523 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1524 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1525
1526 if (op1 == CONST0_RTX (op1_mode))
1527 return op0;
1528
1529 /* Make sure that this is a combination that expand_doubleword_shift
1530 can handle. See the comments there for details. */
1531 if (double_shift_mask == 0
1532 || (shift_mask == BITS_PER_WORD - 1
1533 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1534 {
1535 rtx insns, equiv_value;
1536 rtx into_target, outof_target;
1537 rtx into_input, outof_input;
1538 int left_shift, outof_word;
1539
1540 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1541 won't be accurate, so use a new target. */
1542 if (target == 0 || target == op0 || target == op1)
1543 target = gen_reg_rtx (mode);
1544
1545 start_sequence ();
1546
1547 /* OUTOF_* is the word we are shifting bits away from, and
1548 INTO_* is the word that we are shifting bits towards, thus
1549 they differ depending on the direction of the shift and
1550 WORDS_BIG_ENDIAN. */
1551
1552 left_shift = binoptab == ashl_optab;
1553 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1554
1555 outof_target = operand_subword (target, outof_word, 1, mode);
1556 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1557
1558 outof_input = operand_subword_force (op0, outof_word, mode);
1559 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1560
1561 if (expand_doubleword_shift (op1_mode, binoptab,
1562 outof_input, into_input, op1,
1563 outof_target, into_target,
1564 unsignedp, next_methods, shift_mask))
1565 {
1566 insns = get_insns ();
1567 end_sequence ();
1568
1569 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1570 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1571 return target;
1572 }
1573 end_sequence ();
1574 }
1575 }
1576
1577 /* Synthesize double word rotates from single word shifts. */
1578 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1579 && class == MODE_INT
1580 && GET_CODE (op1) == CONST_INT
1581 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1582 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1583 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1584 {
1585 rtx insns;
1586 rtx into_target, outof_target;
1587 rtx into_input, outof_input;
1588 rtx inter;
1589 int shift_count, left_shift, outof_word;
1590
1591 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1592 won't be accurate, so use a new target. Do this also if target is not
1593 a REG, first because having a register instead may open optimization
1594 opportunities, and second because if target and op0 happen to be MEMs
1595 designating the same location, we would risk clobbering it too early
1596 in the code sequence we generate below. */
1597 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1598 target = gen_reg_rtx (mode);
1599
1600 start_sequence ();
1601
1602 shift_count = INTVAL (op1);
1603
1604 /* OUTOF_* is the word we are shifting bits away from, and
1605 INTO_* is the word that we are shifting bits towards, thus
1606 they differ depending on the direction of the shift and
1607 WORDS_BIG_ENDIAN. */
1608
1609 left_shift = (binoptab == rotl_optab);
1610 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1611
1612 outof_target = operand_subword (target, outof_word, 1, mode);
1613 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1614
1615 outof_input = operand_subword_force (op0, outof_word, mode);
1616 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1617
1618 if (shift_count == BITS_PER_WORD)
1619 {
1620 /* This is just a word swap. */
1621 emit_move_insn (outof_target, into_input);
1622 emit_move_insn (into_target, outof_input);
1623 inter = const0_rtx;
1624 }
1625 else
1626 {
1627 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1628 rtx first_shift_count, second_shift_count;
1629 optab reverse_unsigned_shift, unsigned_shift;
1630
1631 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1632 ? lshr_optab : ashl_optab);
1633
1634 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1635 ? ashl_optab : lshr_optab);
1636
1637 if (shift_count > BITS_PER_WORD)
1638 {
1639 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1640 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1641 }
1642 else
1643 {
1644 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1645 second_shift_count = GEN_INT (shift_count);
1646 }
1647
1648 into_temp1 = expand_binop (word_mode, unsigned_shift,
1649 outof_input, first_shift_count,
1650 NULL_RTX, unsignedp, next_methods);
1651 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1652 into_input, second_shift_count,
1653 NULL_RTX, unsignedp, next_methods);
1654
1655 if (into_temp1 != 0 && into_temp2 != 0)
1656 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1657 into_target, unsignedp, next_methods);
1658 else
1659 inter = 0;
1660
1661 if (inter != 0 && inter != into_target)
1662 emit_move_insn (into_target, inter);
1663
1664 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1665 into_input, first_shift_count,
1666 NULL_RTX, unsignedp, next_methods);
1667 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1668 outof_input, second_shift_count,
1669 NULL_RTX, unsignedp, next_methods);
1670
1671 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1672 inter = expand_binop (word_mode, ior_optab,
1673 outof_temp1, outof_temp2,
1674 outof_target, unsignedp, next_methods);
1675
1676 if (inter != 0 && inter != outof_target)
1677 emit_move_insn (outof_target, inter);
1678 }
1679
1680 insns = get_insns ();
1681 end_sequence ();
1682
1683 if (inter != 0)
1684 {
1685 /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1686 block to help the register allocator a bit. But a multi-word
1687 rotate will need all the input bits when setting the output
1688 bits, so there clearly is a conflict between the input and
1689 output registers. So we can't use a no-conflict block here. */
1690 emit_insn (insns);
1691 return target;
1692 }
1693 }
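      /* Illustrative sketch of the decomposition above; it is not used by
	 the compiler.  It assumes a hypothetical target with
	 BITS_PER_WORD == 32 and a rotate count strictly between 0 and 64
	 and different from 32 (the 32 case is the plain word swap handled
	 above).  Each output word is the IOR of two single-word shifts,
	 matching the two expand_binop calls emitted per word.  */
#if 0
#include <stdint.h>

static uint64_t
rotl_doubleword_sketch (uint32_t lo, uint32_t hi, unsigned int count)
{
  unsigned int first_shift_count, second_shift_count;
  uint32_t new_hi, new_lo;

  if (count > 32)
    {
      first_shift_count = count - 32;
      second_shift_count = 64 - count;
      new_hi = (lo << first_shift_count) | (hi >> second_shift_count);
      new_lo = (hi << first_shift_count) | (lo >> second_shift_count);
    }
  else
    {
      first_shift_count = 32 - count;
      second_shift_count = count;
      new_hi = (lo >> first_shift_count) | (hi << second_shift_count);
      new_lo = (hi >> first_shift_count) | (lo << second_shift_count);
    }
  return ((uint64_t) new_hi << 32) | new_lo;
}
#endif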
1694
1695 /* These can be done a word at a time by propagating carries. */
1696 if ((binoptab == add_optab || binoptab == sub_optab)
1697 && class == MODE_INT
1698 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1699 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1700 {
1701 unsigned int i;
1702 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1703 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1704 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1705 rtx xop0, xop1, xtarget;
1706
1707 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG_VALUE
1708 is one of those, use it. Otherwise, use 1 since it is the
1709 one easiest to get. */
1710 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1711 int normalizep = STORE_FLAG_VALUE;
1712 #else
1713 int normalizep = 1;
1714 #endif
1715
1716 /* Prepare the operands. */
1717 xop0 = force_reg (mode, op0);
1718 xop1 = force_reg (mode, op1);
1719
1720 xtarget = gen_reg_rtx (mode);
1721
1722 if (target == 0 || !REG_P (target))
1723 target = xtarget;
1724
1725 /* Indicate for flow that the entire target reg is being set. */
1726 if (REG_P (target))
1727 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1728
1729 /* Do the actual arithmetic. */
1730 for (i = 0; i < nwords; i++)
1731 {
1732 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1733 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1734 rtx op0_piece = operand_subword_force (xop0, index, mode);
1735 rtx op1_piece = operand_subword_force (xop1, index, mode);
1736 rtx x;
1737
1738 /* Main add/subtract of the input operands. */
1739 x = expand_binop (word_mode, binoptab,
1740 op0_piece, op1_piece,
1741 target_piece, unsignedp, next_methods);
1742 if (x == 0)
1743 break;
1744
1745 if (i + 1 < nwords)
1746 {
1747 /* Store carry from main add/subtract. */
1748 carry_out = gen_reg_rtx (word_mode);
1749 carry_out = emit_store_flag_force (carry_out,
1750 (binoptab == add_optab
1751 ? LT : GT),
1752 x, op0_piece,
1753 word_mode, 1, normalizep);
1754 }
1755
1756 if (i > 0)
1757 {
1758 rtx newx;
1759
1760 /* Add/subtract previous carry to main result. */
1761 newx = expand_binop (word_mode,
1762 normalizep == 1 ? binoptab : otheroptab,
1763 x, carry_in,
1764 NULL_RTX, 1, next_methods);
1765
1766 if (i + 1 < nwords)
1767 {
1768 /* Get out carry from adding/subtracting carry in. */
1769 rtx carry_tmp = gen_reg_rtx (word_mode);
1770 carry_tmp = emit_store_flag_force (carry_tmp,
1771 (binoptab == add_optab
1772 ? LT : GT),
1773 newx, x,
1774 word_mode, 1, normalizep);
1775
1776 /* Logical-ior the two possible carries together. */
1777 carry_out = expand_binop (word_mode, ior_optab,
1778 carry_out, carry_tmp,
1779 carry_out, 0, next_methods);
1780 if (carry_out == 0)
1781 break;
1782 }
1783 emit_move_insn (target_piece, newx);
1784 }
1785 else
1786 {
1787 if (x != target_piece)
1788 emit_move_insn (target_piece, x);
1789 }
1790
1791 carry_in = carry_out;
1792 }
1793
1794 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1795 {
1796 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1797 || ! rtx_equal_p (target, xtarget))
1798 {
1799 rtx temp = emit_move_insn (target, xtarget);
1800
1801 set_unique_reg_note (temp,
1802 REG_EQUAL,
1803 gen_rtx_fmt_ee (binoptab->code, mode,
1804 copy_rtx (xop0),
1805 copy_rtx (xop1)));
1806 }
1807 else
1808 target = xtarget;
1809
1810 return target;
1811 }
1812
1813 else
1814 delete_insns_since (last);
1815 }
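      /* Illustrative sketch of the carry-propagation scheme above; it is
	 not used by the compiler.  It assumes a hypothetical target with
	 BITS_PER_WORD == 32 and two-word operands, and recovers the carry
	 with the same unsigned "result < operand" comparison that
	 emit_store_flag_force is asked to produce for add_optab.  */
#if 0
#include <stdint.h>

static void
add_doubleword_sketch (const uint32_t op0[2], const uint32_t op1[2],
		       uint32_t target[2])
{
  uint32_t x, carry_in;

  /* Low word: plain addition; an unsigned overflow shows up as the sum
     being smaller than the operand it is compared against.  */
  x = op0[0] + op1[0];
  carry_in = x < op0[0];
  target[0] = x;

  /* High word: add the operands, then the incoming carry.  With more
     than two words, the carry out of this word would be the IOR of the
     two possible carries, as in the loop above.  */
  x = op0[1] + op1[1];
  target[1] = x + carry_in;
}
#endif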
1816
1817 /* Attempt to synthesize double word multiplies using a sequence of word
1818 mode multiplications. We first attempt to generate a sequence using a
1819 more efficient unsigned widening multiply, and if that fails we then
1820 try using a signed widening multiply. */
1821
1822 if (binoptab == smul_optab
1823 && class == MODE_INT
1824 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1825 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1826 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1827 {
1828 rtx product = NULL_RTX;
1829
1830 if (umul_widen_optab->handlers[(int) mode].insn_code
1831 != CODE_FOR_nothing)
1832 {
1833 product = expand_doubleword_mult (mode, op0, op1, target,
1834 true, methods);
1835 if (!product)
1836 delete_insns_since (last);
1837 }
1838
1839 if (product == NULL_RTX
1840 && smul_widen_optab->handlers[(int) mode].insn_code
1841 != CODE_FOR_nothing)
1842 {
1843 product = expand_doubleword_mult (mode, op0, op1, target,
1844 false, methods);
1845 if (!product)
1846 delete_insns_since (last);
1847 }
1848
1849 if (product != NULL_RTX)
1850 {
1851 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1852 {
1853 temp = emit_move_insn (target ? target : product, product);
1854 set_unique_reg_note (temp,
1855 REG_EQUAL,
1856 gen_rtx_fmt_ee (MULT, mode,
1857 copy_rtx (op0),
1858 copy_rtx (op1)));
1859 }
1860 return product;
1861 }
1862 }
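      /* Illustrative sketch of the identity behind the double word multiply
	 synthesized above; it is not used by the compiler.  It assumes a
	 hypothetical target with BITS_PER_WORD == 32 and shows only the
	 unsigned widening case: the low 64 bits of the product are a
	 widening 32 x 32 multiply of the low words plus the two cross
	 products shifted into the high word; the high x high term lies
	 entirely above the doubleword and is dropped.  */
#if 0
#include <stdint.h>

static uint64_t
mul_doubleword_sketch (uint64_t a, uint64_t b)
{
  uint32_t a0 = (uint32_t) a, a1 = (uint32_t) (a >> 32);
  uint32_t b0 = (uint32_t) b, b1 = (uint32_t) (b >> 32);

  /* Widening multiply of the low words (umul_widen_optab).  */
  uint64_t product = (uint64_t) a0 * b0;

  /* Cross products only contribute to the high word.  */
  product += (uint64_t) (uint32_t) (a0 * b1 + a1 * b0) << 32;
  return product;		/* Equal to a * b modulo 2^64.  */
}
#endif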
1863
1864 /* It can't be open-coded in this mode.
1865 Use a library call if one is available and caller says that's ok. */
1866
1867 if (binoptab->handlers[(int) mode].libfunc
1868 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1869 {
1870 rtx insns;
1871 rtx op1x = op1;
1872 enum machine_mode op1_mode = mode;
1873 rtx value;
1874
1875 start_sequence ();
1876
1877 if (shift_op)
1878 {
1879 op1_mode = word_mode;
1880 /* Specify unsigned here,
1881 since negative shift counts are meaningless. */
1882 op1x = convert_to_mode (word_mode, op1, 1);
1883 }
1884
1885 if (GET_MODE (op0) != VOIDmode
1886 && GET_MODE (op0) != mode)
1887 op0 = convert_to_mode (mode, op0, unsignedp);
1888
1889 /* Pass 1 for NO_QUEUE so we don't lose any increments
1890 if the libcall is cse'd or moved. */
1891 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1892 NULL_RTX, LCT_CONST, mode, 2,
1893 op0, mode, op1x, op1_mode);
1894
1895 insns = get_insns ();
1896 end_sequence ();
1897
1898 target = gen_reg_rtx (mode);
1899 emit_libcall_block (insns, target, value,
1900 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1901
1902 return target;
1903 }
1904
1905 delete_insns_since (last);
1906
1907 /* It can't be done in this mode. Can we do it in a wider mode? */
1908
1909 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1910 || methods == OPTAB_MUST_WIDEN))
1911 {
1912 /* Caller says, don't even try. */
1913 delete_insns_since (entry_last);
1914 return 0;
1915 }
1916
1917 /* Compute the value of METHODS to pass to recursive calls.
1918 Don't allow widening to be tried recursively. */
1919
1920 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1921
1922 /* Look for a wider mode of the same class for which it appears we can do
1923 the operation. */
1924
1925 if (CLASS_HAS_WIDER_MODES_P (class))
1926 {
1927 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1928 wider_mode != VOIDmode;
1929 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1930 {
1931 if ((binoptab->handlers[(int) wider_mode].insn_code
1932 != CODE_FOR_nothing)
1933 || (methods == OPTAB_LIB
1934 && binoptab->handlers[(int) wider_mode].libfunc))
1935 {
1936 rtx xop0 = op0, xop1 = op1;
1937 int no_extend = 0;
1938
1939 /* For certain integer operations, we need not actually extend
1940 the narrow operands, as long as we will truncate
1941 the results to the same narrowness. */
1942
1943 if ((binoptab == ior_optab || binoptab == and_optab
1944 || binoptab == xor_optab
1945 || binoptab == add_optab || binoptab == sub_optab
1946 || binoptab == smul_optab || binoptab == ashl_optab)
1947 && class == MODE_INT)
1948 no_extend = 1;
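	      /* A concrete (hypothetical) example: widening a QImode
		 addition of 0xff and 0x01 to SImode gives 0x00000100 after
		 zero extension and 0x00000000 after sign extension, but the
		 low eight bits, which are all that survive the truncation
		 back to QImode, are 0x00 either way, so the choice of
		 extension does not matter.  */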
1949
1950 xop0 = widen_operand (xop0, wider_mode, mode,
1951 unsignedp, no_extend);
1952
1953 /* The second operand of a shift must always be extended. */
1954 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1955 no_extend && binoptab != ashl_optab);
1956
1957 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1958 unsignedp, methods);
1959 if (temp)
1960 {
1961 if (class != MODE_INT
1962 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1963 GET_MODE_BITSIZE (wider_mode)))
1964 {
1965 if (target == 0)
1966 target = gen_reg_rtx (mode);
1967 convert_move (target, temp, 0);
1968 return target;
1969 }
1970 else
1971 return gen_lowpart (mode, temp);
1972 }
1973 else
1974 delete_insns_since (last);
1975 }
1976 }
1977 }
1978
1979 delete_insns_since (entry_last);
1980 return 0;
1981 }
1982
1983 /* Expand a binary operator which has both signed and unsigned forms.
1984 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1985 signed operations.
1986
1987 If we widen unsigned operands, we may use a signed wider operation instead
1988 of an unsigned wider operation, since the result would be the same. */
1989
1990 rtx
1991 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1992 rtx op0, rtx op1, rtx target, int unsignedp,
1993 enum optab_methods methods)
1994 {
1995 rtx temp;
1996 optab direct_optab = unsignedp ? uoptab : soptab;
1997 struct optab wide_soptab;
1998
1999 /* Do it without widening, if possible. */
2000 temp = expand_binop (mode, direct_optab, op0, op1, target,
2001 unsignedp, OPTAB_DIRECT);
2002 if (temp || methods == OPTAB_DIRECT)
2003 return temp;
2004
2005 /* Try widening to a signed int. Make a fake signed optab that
2006 hides any signed insn for direct use. */
2007 wide_soptab = *soptab;
2008 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2009 wide_soptab.handlers[(int) mode].libfunc = 0;
2010
2011 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2012 unsignedp, OPTAB_WIDEN);
2013
2014 /* For unsigned operands, try widening to an unsigned int. */
2015 if (temp == 0 && unsignedp)
2016 temp = expand_binop (mode, uoptab, op0, op1, target,
2017 unsignedp, OPTAB_WIDEN);
2018 if (temp || methods == OPTAB_WIDEN)
2019 return temp;
2020
2021 /* Use the right width lib call if that exists. */
2022 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2023 if (temp || methods == OPTAB_LIB)
2024 return temp;
2025
2026 /* Must widen and use a lib call, use either signed or unsigned. */
2027 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2028 unsignedp, methods);
2029 if (temp != 0)
2030 return temp;
2031 if (unsignedp)
2032 return expand_binop (mode, uoptab, op0, op1, target,
2033 unsignedp, methods);
2034 return 0;
2035 }
2036
2037 /* Generate code to perform an operation specified by UNOPPTAB
2038 on operand OP0, with two results to TARG0 and TARG1.
2039 We assume that the order of the operands for the instruction
2040 is TARG0, TARG1, OP0.
2041
2042 Either TARG0 or TARG1 may be zero, but what that means is that
2043 the result is not actually wanted. We will generate it into
2044 a dummy pseudo-reg and discard it. They may not both be zero.
2045
2046 Returns 1 if this operation can be performed; 0 if not. */
2047
2048 int
2049 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2050 int unsignedp)
2051 {
2052 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2053 enum mode_class class;
2054 enum machine_mode wider_mode;
2055 rtx entry_last = get_last_insn ();
2056 rtx last;
2057
2058 class = GET_MODE_CLASS (mode);
2059
2060 if (!targ0)
2061 targ0 = gen_reg_rtx (mode);
2062 if (!targ1)
2063 targ1 = gen_reg_rtx (mode);
2064
2065 /* Record where to go back to if we fail. */
2066 last = get_last_insn ();
2067
2068 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2069 {
2070 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2071 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2072 rtx pat;
2073 rtx xop0 = op0;
2074
2075 if (GET_MODE (xop0) != VOIDmode
2076 && GET_MODE (xop0) != mode0)
2077 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2078
2079 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2080 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2081 xop0 = copy_to_mode_reg (mode0, xop0);
2082
2083 /* We could handle this, but we should always be called with a pseudo
2084 for our targets and all insns should take them as outputs. */
2085 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2086 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2087
2088 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2089 if (pat)
2090 {
2091 emit_insn (pat);
2092 return 1;
2093 }
2094 else
2095 delete_insns_since (last);
2096 }
2097
2098 /* It can't be done in this mode. Can we do it in a wider mode? */
2099
2100 if (CLASS_HAS_WIDER_MODES_P (class))
2101 {
2102 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2103 wider_mode != VOIDmode;
2104 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2105 {
2106 if (unoptab->handlers[(int) wider_mode].insn_code
2107 != CODE_FOR_nothing)
2108 {
2109 rtx t0 = gen_reg_rtx (wider_mode);
2110 rtx t1 = gen_reg_rtx (wider_mode);
2111 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2112
2113 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2114 {
2115 convert_move (targ0, t0, unsignedp);
2116 convert_move (targ1, t1, unsignedp);
2117 return 1;
2118 }
2119 else
2120 delete_insns_since (last);
2121 }
2122 }
2123 }
2124
2125 delete_insns_since (entry_last);
2126 return 0;
2127 }
2128
2129 /* Generate code to perform an operation specified by BINOPTAB
2130 on operands OP0 and OP1, with two results to TARG0 and TARG1.
2131 We assume that the order of the operands for the instruction
2132 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2133 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2134
2135 Either TARG0 or TARG1 may be zero, but what that means is that
2136 the result is not actually wanted. We will generate it into
2137 a dummy pseudo-reg and discard it. They may not both be zero.
2138
2139 Returns 1 if this operation can be performed; 0 if not. */
2140
2141 int
2142 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2143 int unsignedp)
2144 {
2145 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2146 enum mode_class class;
2147 enum machine_mode wider_mode;
2148 rtx entry_last = get_last_insn ();
2149 rtx last;
2150
2151 class = GET_MODE_CLASS (mode);
2152
2153 /* If we are inside an appropriately-short loop and we are optimizing,
2154 force expensive constants into a register. */
2155 if (CONSTANT_P (op0) && optimize
2156 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2157 op0 = force_reg (mode, op0);
2158
2159 if (CONSTANT_P (op1) && optimize
2160 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2161 op1 = force_reg (mode, op1);
2162
2163 if (!targ0)
2164 targ0 = gen_reg_rtx (mode);
2165 if (!targ1)
2166 targ1 = gen_reg_rtx (mode);
2167
2168 /* Record where to go back to if we fail. */
2169 last = get_last_insn ();
2170
2171 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2172 {
2173 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2174 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2175 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2176 rtx pat;
2177 rtx xop0 = op0, xop1 = op1;
2178
2179 /* In case the insn wants input operands in modes different from
2180 those of the actual operands, convert the operands. It would
2181 seem that we don't need to convert CONST_INTs, but we do, so
2182 that they're properly zero-extended, sign-extended or truncated
2183 for their mode. */
2184
2185 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2186 xop0 = convert_modes (mode0,
2187 GET_MODE (op0) != VOIDmode
2188 ? GET_MODE (op0)
2189 : mode,
2190 xop0, unsignedp);
2191
2192 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2193 xop1 = convert_modes (mode1,
2194 GET_MODE (op1) != VOIDmode
2195 ? GET_MODE (op1)
2196 : mode,
2197 xop1, unsignedp);
2198
2199 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2200 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2201 xop0 = copy_to_mode_reg (mode0, xop0);
2202
2203 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2204 xop1 = copy_to_mode_reg (mode1, xop1);
2205
2206 /* We could handle this, but we should always be called with a pseudo
2207 for our targets and all insns should take them as outputs. */
2208 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2209 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2210
2211 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2212 if (pat)
2213 {
2214 emit_insn (pat);
2215 return 1;
2216 }
2217 else
2218 delete_insns_since (last);
2219 }
2220
2221 /* It can't be done in this mode. Can we do it in a wider mode? */
2222
2223 if (CLASS_HAS_WIDER_MODES_P (class))
2224 {
2225 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2226 wider_mode != VOIDmode;
2227 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2228 {
2229 if (binoptab->handlers[(int) wider_mode].insn_code
2230 != CODE_FOR_nothing)
2231 {
2232 rtx t0 = gen_reg_rtx (wider_mode);
2233 rtx t1 = gen_reg_rtx (wider_mode);
2234 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2235 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2236
2237 if (expand_twoval_binop (binoptab, cop0, cop1,
2238 t0, t1, unsignedp))
2239 {
2240 convert_move (targ0, t0, unsignedp);
2241 convert_move (targ1, t1, unsignedp);
2242 return 1;
2243 }
2244 else
2245 delete_insns_since (last);
2246 }
2247 }
2248 }
2249
2250 delete_insns_since (entry_last);
2251 return 0;
2252 }
2253
2254 /* Expand the two-valued library call indicated by BINOPTAB, but
2255 preserve only one of the values. If TARG0 is non-NULL, the first
2256 value is placed into TARG0; otherwise the second value is placed
2257 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2258 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2259 This routine assumes that the value returned by the library call is
2260 as if the return value was of an integral mode twice as wide as the
2261 mode of OP0. Returns 1 if the call was successful. */
2262
2263 bool
2264 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2265 rtx targ0, rtx targ1, enum rtx_code code)
2266 {
2267 enum machine_mode mode;
2268 enum machine_mode libval_mode;
2269 rtx libval;
2270 rtx insns;
2271
2272 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2273 gcc_assert (!targ0 != !targ1);
2274
2275 mode = GET_MODE (op0);
2276 if (!binoptab->handlers[(int) mode].libfunc)
2277 return false;
2278
2279 /* The value returned by the library function will have twice as
2280 many bits as the nominal MODE. */
2281 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2282 MODE_INT);
2283 start_sequence ();
2284 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2285 NULL_RTX, LCT_CONST,
2286 libval_mode, 2,
2287 op0, mode,
2288 op1, mode);
2289 /* Get the part of LIBVAL containing the value that we want. */
2290 libval = simplify_gen_subreg (mode, libval, libval_mode,
2291 targ0 ? 0 : GET_MODE_SIZE (mode));
2292 insns = get_insns ();
2293 end_sequence ();
2294 /* Move the result into the desired location. */
2295 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2296 gen_rtx_fmt_ee (code, mode, op0, op1));
2297
2298 return true;
2299 }
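/* Illustrative sketch of the convention assumed above; it is not used by
   the compiler.  It models a hypothetical two-valued divmod routine for a
   32-bit mode: both results come back packed into an integer twice as
   wide, and the caller keeps only the subword it wants.  The packing
   order shown (first value in the low half) is an assumption made for
   the example, matching a little-endian subword layout, not a statement
   about any particular libgcc routine.  */
#if 0
#include <stdint.h>

static uint64_t
divmod_libfunc_sketch (uint32_t a, uint32_t b)
{
  uint32_t quot = a / b, rem = a % b;
  return ((uint64_t) rem << 32) | quot;	/* Both values, double-wide.  */
}

static uint32_t
quotient_only (uint32_t a, uint32_t b)
{
  /* Corresponds to TARG0 != 0: keep the subword at offset 0.  */
  return (uint32_t) divmod_libfunc_sketch (a, b);
}
#endif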
2300
2301
2302 /* Wrapper around expand_unop which takes an rtx code to specify
2303 the operation to perform, not an optab pointer. All other
2304 arguments are the same. */
2305 rtx
2306 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2307 rtx target, int unsignedp)
2308 {
2309 optab unop = code_to_optab[(int) code];
2310 gcc_assert (unop);
2311
2312 return expand_unop (mode, unop, op0, target, unsignedp);
2313 }
2314
2315 /* Try calculating
2316 (clz:narrow x)
2317 as
2318 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2319 static rtx
2320 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2321 {
2322 enum mode_class class = GET_MODE_CLASS (mode);
2323 if (CLASS_HAS_WIDER_MODES_P (class))
2324 {
2325 enum machine_mode wider_mode;
2326 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2327 wider_mode != VOIDmode;
2328 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2329 {
2330 if (clz_optab->handlers[(int) wider_mode].insn_code
2331 != CODE_FOR_nothing)
2332 {
2333 rtx xop0, temp, last;
2334
2335 last = get_last_insn ();
2336
2337 if (target == 0)
2338 target = gen_reg_rtx (mode);
2339 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2340 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2341 if (temp != 0)
2342 temp = expand_binop (wider_mode, sub_optab, temp,
2343 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2344 - GET_MODE_BITSIZE (mode)),
2345 target, true, OPTAB_DIRECT);
2346 if (temp == 0)
2347 delete_insns_since (last);
2348
2349 return temp;
2350 }
2351 }
2352 }
2353 return 0;
2354 }
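/* Illustrative sketch of the adjustment above; it is not used by the
   compiler.  Zero extending the operand prepends leading zero bits, so
   the wider clz overcounts by exactly the difference in bit widths and
   that difference is subtracted back out.  The sketch assumes a
   hypothetical clz32 for the wider mode and a nonzero operand.  */
#if 0
extern int clz32 (unsigned int x);	/* Hypothetical wider-mode clz.  */

static int
clz8_via_clz32 (unsigned char x)
{
  /* (clz:SI (zero_extend:SI x)) - (32 - 8).  */
  return clz32 (x) - (32 - 8);
}
#endif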
2355
2356 /* Try calculating (parity x) as (and (popcount x) 1), where
2357 popcount can also be done in a wider mode. */
2358 static rtx
2359 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2360 {
2361 enum mode_class class = GET_MODE_CLASS (mode);
2362 if (CLASS_HAS_WIDER_MODES_P (class))
2363 {
2364 enum machine_mode wider_mode;
2365 for (wider_mode = mode; wider_mode != VOIDmode;
2366 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2367 {
2368 if (popcount_optab->handlers[(int) wider_mode].insn_code
2369 != CODE_FOR_nothing)
2370 {
2371 rtx xop0, temp, last;
2372
2373 last = get_last_insn ();
2374
2375 if (target == 0)
2376 target = gen_reg_rtx (mode);
2377 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2378 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2379 true);
2380 if (temp != 0)
2381 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2382 target, true, OPTAB_DIRECT);
2383 if (temp == 0)
2384 delete_insns_since (last);
2385
2386 return temp;
2387 }
2388 }
2389 }
2390 return 0;
2391 }
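/* Illustrative sketch of the reduction above; it is not used by the
   compiler.  Zero extending the operand adds only zero bits, so it
   changes neither the population count nor its low bit.  The sketch
   assumes a hypothetical popcount32 for the wider mode.  */
#if 0
extern int popcount32 (unsigned int x);	/* Hypothetical popcount.  */

static int
parity8_sketch (unsigned char x)
{
  /* (and:SI (popcount:SI (zero_extend:SI x)) 1).  */
  return popcount32 (x) & 1;
}
#endif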
2392
2393 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2394 conditions, VAL may already be a SUBREG against which we cannot generate
2395 a further SUBREG. In this case, we expect forcing the value into a
2396 register will work around the situation. */
2397
2398 static rtx
2399 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2400 enum machine_mode imode)
2401 {
2402 rtx ret;
2403 ret = lowpart_subreg (omode, val, imode);
2404 if (ret == NULL)
2405 {
2406 val = force_reg (imode, val);
2407 ret = lowpart_subreg (omode, val, imode);
2408 gcc_assert (ret != NULL);
2409 }
2410 return ret;
2411 }
2412
2413 /* Expand a floating point absolute value or negation operation via a
2414 logical operation on the sign bit. */
2415
2416 static rtx
2417 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2418 rtx op0, rtx target)
2419 {
2420 const struct real_format *fmt;
2421 int bitpos, word, nwords, i;
2422 enum machine_mode imode;
2423 HOST_WIDE_INT hi, lo;
2424 rtx temp, insns;
2425
2426 /* The format has to have a simple sign bit. */
2427 fmt = REAL_MODE_FORMAT (mode);
2428 if (fmt == NULL)
2429 return NULL_RTX;
2430
2431 bitpos = fmt->signbit_rw;
2432 if (bitpos < 0)
2433 return NULL_RTX;
2434
2435 /* Don't create negative zeros if the format doesn't support them. */
2436 if (code == NEG && !fmt->has_signed_zero)
2437 return NULL_RTX;
2438
2439 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2440 {
2441 imode = int_mode_for_mode (mode);
2442 if (imode == BLKmode)
2443 return NULL_RTX;
2444 word = 0;
2445 nwords = 1;
2446 }
2447 else
2448 {
2449 imode = word_mode;
2450
2451 if (FLOAT_WORDS_BIG_ENDIAN)
2452 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2453 else
2454 word = bitpos / BITS_PER_WORD;
2455 bitpos = bitpos % BITS_PER_WORD;
2456 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2457 }
2458
2459 if (bitpos < HOST_BITS_PER_WIDE_INT)
2460 {
2461 hi = 0;
2462 lo = (HOST_WIDE_INT) 1 << bitpos;
2463 }
2464 else
2465 {
2466 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2467 lo = 0;
2468 }
2469 if (code == ABS)
2470 lo = ~lo, hi = ~hi;
2471
2472 if (target == 0 || target == op0)
2473 target = gen_reg_rtx (mode);
2474
2475 if (nwords > 1)
2476 {
2477 start_sequence ();
2478
2479 for (i = 0; i < nwords; ++i)
2480 {
2481 rtx targ_piece = operand_subword (target, i, 1, mode);
2482 rtx op0_piece = operand_subword_force (op0, i, mode);
2483
2484 if (i == word)
2485 {
2486 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2487 op0_piece,
2488 immed_double_const (lo, hi, imode),
2489 targ_piece, 1, OPTAB_LIB_WIDEN);
2490 if (temp != targ_piece)
2491 emit_move_insn (targ_piece, temp);
2492 }
2493 else
2494 emit_move_insn (targ_piece, op0_piece);
2495 }
2496
2497 insns = get_insns ();
2498 end_sequence ();
2499
2500 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2501 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2502 }
2503 else
2504 {
2505 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2506 gen_lowpart (imode, op0),
2507 immed_double_const (lo, hi, imode),
2508 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2509 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2510
2511 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2512 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2513 }
2514
2515 return target;
2516 }
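/* Illustrative sketch of the single-word case above; it is not used by
   the compiler.  It assumes an IEEE single format whose sign bit is bit
   31 of the 32-bit image: NEG flips the sign bit with XOR and ABS clears
   it by ANDing with the complemented mask.  */
#if 0
#include <stdint.h>
#include <string.h>

static float
fneg_bit_sketch (float x)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  bits ^= (uint32_t) 1 << 31;		/* XOR with the sign-bit mask.  */
  memcpy (&x, &bits, sizeof x);
  return x;
}

static float
fabs_bit_sketch (float x)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  bits &= ~((uint32_t) 1 << 31);	/* AND with the complemented mask.  */
  memcpy (&x, &bits, sizeof x);
  return x;
}
#endif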
2517
2518 /* Generate code to perform an operation specified by UNOPTAB
2519 on operand OP0, with result having machine-mode MODE.
2520
2521 UNSIGNEDP is for the case where we have to widen the operands
2522 to perform the operation. It says to use zero-extension.
2523
2524 If TARGET is nonzero, the value
2525 is generated there, if it is convenient to do so.
2526 In all cases an rtx is returned for the locus of the value;
2527 this may or may not be TARGET. */
2528
2529 rtx
2530 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2531 int unsignedp)
2532 {
2533 enum mode_class class;
2534 enum machine_mode wider_mode;
2535 rtx temp;
2536 rtx last = get_last_insn ();
2537 rtx pat;
2538
2539 class = GET_MODE_CLASS (mode);
2540
2541 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2542 {
2543 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2544 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2545 rtx xop0 = op0;
2546
2547 if (target)
2548 temp = target;
2549 else
2550 temp = gen_reg_rtx (mode);
2551
2552 if (GET_MODE (xop0) != VOIDmode
2553 && GET_MODE (xop0) != mode0)
2554 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2555
2556 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2557
2558 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2559 xop0 = copy_to_mode_reg (mode0, xop0);
2560
2561 if (!insn_data[icode].operand[0].predicate (temp, mode))
2562 temp = gen_reg_rtx (mode);
2563
2564 pat = GEN_FCN (icode) (temp, xop0);
2565 if (pat)
2566 {
2567 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2568 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2569 {
2570 delete_insns_since (last);
2571 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2572 }
2573
2574 emit_insn (pat);
2575
2576 return temp;
2577 }
2578 else
2579 delete_insns_since (last);
2580 }
2581
2582 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2583
2584 /* Widening clz needs special treatment. */
2585 if (unoptab == clz_optab)
2586 {
2587 temp = widen_clz (mode, op0, target);
2588 if (temp)
2589 return temp;
2590 else
2591 goto try_libcall;
2592 }
2593
2594 if (CLASS_HAS_WIDER_MODES_P (class))
2595 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2596 wider_mode != VOIDmode;
2597 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2598 {
2599 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2600 {
2601 rtx xop0 = op0;
2602
2603 /* For certain operations, we need not actually extend
2604 the narrow operand, as long as we will truncate the
2605 results to the same narrowness. */
2606
2607 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2608 (unoptab == neg_optab
2609 || unoptab == one_cmpl_optab)
2610 && class == MODE_INT);
2611
2612 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2613 unsignedp);
2614
2615 if (temp)
2616 {
2617 if (class != MODE_INT
2618 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2619 GET_MODE_BITSIZE (wider_mode)))
2620 {
2621 if (target == 0)
2622 target = gen_reg_rtx (mode);
2623 convert_move (target, temp, 0);
2624 return target;
2625 }
2626 else
2627 return gen_lowpart (mode, temp);
2628 }
2629 else
2630 delete_insns_since (last);
2631 }
2632 }
2633
2634 /* These can be done a word at a time. */
2635 if (unoptab == one_cmpl_optab
2636 && class == MODE_INT
2637 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2638 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2639 {
2640 int i;
2641 rtx insns;
2642
2643 if (target == 0 || target == op0)
2644 target = gen_reg_rtx (mode);
2645
2646 start_sequence ();
2647
2648 /* Do the actual arithmetic. */
2649 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2650 {
2651 rtx target_piece = operand_subword (target, i, 1, mode);
2652 rtx x = expand_unop (word_mode, unoptab,
2653 operand_subword_force (op0, i, mode),
2654 target_piece, unsignedp);
2655
2656 if (target_piece != x)
2657 emit_move_insn (target_piece, x);
2658 }
2659
2660 insns = get_insns ();
2661 end_sequence ();
2662
2663 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2664 gen_rtx_fmt_e (unoptab->code, mode,
2665 copy_rtx (op0)));
2666 return target;
2667 }
2668
2669 if (unoptab->code == NEG)
2670 {
2671 /* Try negating floating point values by flipping the sign bit. */
2672 if (SCALAR_FLOAT_MODE_P (mode))
2673 {
2674 temp = expand_absneg_bit (NEG, mode, op0, target);
2675 if (temp)
2676 return temp;
2677 }
2678
2679 /* If there is no negation pattern, and we have no negative zero,
2680 try subtracting from zero. */
2681 if (!HONOR_SIGNED_ZEROS (mode))
2682 {
2683 temp = expand_binop (mode, (unoptab == negv_optab
2684 ? subv_optab : sub_optab),
2685 CONST0_RTX (mode), op0, target,
2686 unsignedp, OPTAB_DIRECT);
2687 if (temp)
2688 return temp;
2689 }
2690 }
2691
2692 /* Try calculating parity (x) as popcount (x) % 2. */
2693 if (unoptab == parity_optab)
2694 {
2695 temp = expand_parity (mode, op0, target);
2696 if (temp)
2697 return temp;
2698 }
2699
2700 try_libcall:
2701 /* Now try a library call in this mode. */
2702 if (unoptab->handlers[(int) mode].libfunc)
2703 {
2704 rtx insns;
2705 rtx value;
2706 enum machine_mode outmode = mode;
2707
2708 /* All of these functions return small values. Thus we choose to
2709 have them return something that isn't a double-word. */
2710 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2711 || unoptab == popcount_optab || unoptab == parity_optab)
2712 outmode
2713 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2714
2715 start_sequence ();
2716
2717 /* Pass 1 for NO_QUEUE so we don't lose any increments
2718 if the libcall is cse'd or moved. */
2719 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2720 NULL_RTX, LCT_CONST, outmode,
2721 1, op0, mode);
2722 insns = get_insns ();
2723 end_sequence ();
2724
2725 target = gen_reg_rtx (outmode);
2726 emit_libcall_block (insns, target, value,
2727 gen_rtx_fmt_e (unoptab->code, outmode, op0));
2728
2729 return target;
2730 }
2731
2732 /* It can't be done in this mode. Can we do it in a wider mode? */
2733
2734 if (CLASS_HAS_WIDER_MODES_P (class))
2735 {
2736 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2737 wider_mode != VOIDmode;
2738 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2739 {
2740 if ((unoptab->handlers[(int) wider_mode].insn_code
2741 != CODE_FOR_nothing)
2742 || unoptab->handlers[(int) wider_mode].libfunc)
2743 {
2744 rtx xop0 = op0;
2745
2746 /* For certain operations, we need not actually extend
2747 the narrow operand, as long as we will truncate the
2748 results to the same narrowness. */
2749
2750 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2751 (unoptab == neg_optab
2752 || unoptab == one_cmpl_optab)
2753 && class == MODE_INT);
2754
2755 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2756 unsignedp);
2757
2758 /* If we are generating clz using wider mode, adjust the
2759 result. */
2760 if (unoptab == clz_optab && temp != 0)
2761 temp = expand_binop (wider_mode, sub_optab, temp,
2762 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2763 - GET_MODE_BITSIZE (mode)),
2764 target, true, OPTAB_DIRECT);
2765
2766 if (temp)
2767 {
2768 if (class != MODE_INT)
2769 {
2770 if (target == 0)
2771 target = gen_reg_rtx (mode);
2772 convert_move (target, temp, 0);
2773 return target;
2774 }
2775 else
2776 return gen_lowpart (mode, temp);
2777 }
2778 else
2779 delete_insns_since (last);
2780 }
2781 }
2782 }
2783
2784 /* One final attempt at implementing negation via subtraction,
2785 this time allowing widening of the operand. */
2786 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2787 {
2788 rtx temp;
2789 temp = expand_binop (mode,
2790 unoptab == negv_optab ? subv_optab : sub_optab,
2791 CONST0_RTX (mode), op0,
2792 target, unsignedp, OPTAB_LIB_WIDEN);
2793 if (temp)
2794 return temp;
2795 }
2796
2797 return 0;
2798 }
2799
2800 /* Emit code to compute the absolute value of OP0, with result to
2801 TARGET if convenient. (TARGET may be 0.) The return value says
2802 where the result actually is to be found.
2803
2804 MODE is the mode of the operand; the mode of the result is
2805 different but can be deduced from MODE. */
2808
2809 rtx
2810 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2811 int result_unsignedp)
2812 {
2813 rtx temp;
2814
2815 if (! flag_trapv)
2816 result_unsignedp = 1;
2817
2818 /* First try to do it with a special abs instruction. */
2819 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2820 op0, target, 0);
2821 if (temp != 0)
2822 return temp;
2823
2824 /* For floating point modes, try clearing the sign bit. */
2825 if (SCALAR_FLOAT_MODE_P (mode))
2826 {
2827 temp = expand_absneg_bit (ABS, mode, op0, target);
2828 if (temp)
2829 return temp;
2830 }
2831
2832 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2833 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2834 && !HONOR_SIGNED_ZEROS (mode))
2835 {
2836 rtx last = get_last_insn ();
2837
2838 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2839 if (temp != 0)
2840 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2841 OPTAB_WIDEN);
2842
2843 if (temp != 0)
2844 return temp;
2845
2846 delete_insns_since (last);
2847 }
2848
2849 /* If this machine has expensive jumps, we can do integer absolute
2850 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2851 where W is the width of MODE. */
2852
2853 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2854 {
2855 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2856 size_int (GET_MODE_BITSIZE (mode) - 1),
2857 NULL_RTX, 0);
2858
2859 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2860 OPTAB_LIB_WIDEN);
2861 if (temp != 0)
2862 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2863 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2864
2865 if (temp != 0)
2866 return temp;
2867 }
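  /* Illustrative sketch of the branch-free sequence above; it is not used
     by the compiler.  For a 32-bit int it relies on the arithmetic right
     shift yielding 0 for nonnegative X and -1 for negative X (an
     assumption about the implementation-defined behaviour of >> on signed
     values); like the RTL sequence, it wraps for the most negative
     value.  */
#if 0
static int
iabs_sketch (int x)
{
  int extended = x >> 31;		/* 0 or -1.  */
  return (x ^ extended) - extended;
}
#endif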
2868
2869 return NULL_RTX;
2870 }
2871
2872 rtx
2873 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2874 int result_unsignedp, int safe)
2875 {
2876 rtx temp, op1;
2877
2878 if (! flag_trapv)
2879 result_unsignedp = 1;
2880
2881 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2882 if (temp != 0)
2883 return temp;
2884
2885 /* If that does not win, use conditional jump and negate. */
2886
2887 /* It is safe to use the target if it is the same as the source,
2888 provided it is also a pseudo register. */
2889 if (op0 == target && REG_P (op0)
2890 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2891 safe = 1;
2892
2893 op1 = gen_label_rtx ();
2894 if (target == 0 || ! safe
2895 || GET_MODE (target) != mode
2896 || (MEM_P (target) && MEM_VOLATILE_P (target))
2897 || (REG_P (target)
2898 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2899 target = gen_reg_rtx (mode);
2900
2901 emit_move_insn (target, op0);
2902 NO_DEFER_POP;
2903
2904 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2905 NULL_RTX, NULL_RTX, op1);
2906
2907 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2908 target, target, 0);
2909 if (op0 != target)
2910 emit_move_insn (target, op0);
2911 emit_label (op1);
2912 OK_DEFER_POP;
2913 return target;
2914 }
2915
2916 /* A subroutine of expand_copysign, perform the copysign operation using the
2917 abs and neg primitives advertised to exist on the target. The assumption
2918 is that we have a split register file, and leaving op0 in fp registers,
2919 and not playing with subregs so much, will help the register allocator. */
2920
2921 static rtx
2922 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2923 int bitpos, bool op0_is_abs)
2924 {
2925 enum machine_mode imode;
2926 HOST_WIDE_INT hi, lo;
2927 int word;
2928 rtx label;
2929
2930 if (target == op1)
2931 target = NULL_RTX;
2932
2933 if (!op0_is_abs)
2934 {
2935 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2936 if (op0 == NULL)
2937 return NULL_RTX;
2938 target = op0;
2939 }
2940 else
2941 {
2942 if (target == NULL_RTX)
2943 target = copy_to_reg (op0);
2944 else
2945 emit_move_insn (target, op0);
2946 }
2947
2948 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2949 {
2950 imode = int_mode_for_mode (mode);
2951 if (imode == BLKmode)
2952 return NULL_RTX;
2953 op1 = gen_lowpart (imode, op1);
2954 }
2955 else
2956 {
2957 imode = word_mode;
2958 if (FLOAT_WORDS_BIG_ENDIAN)
2959 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2960 else
2961 word = bitpos / BITS_PER_WORD;
2962 bitpos = bitpos % BITS_PER_WORD;
2963 op1 = operand_subword_force (op1, word, mode);
2964 }
2965
2966 if (bitpos < HOST_BITS_PER_WIDE_INT)
2967 {
2968 hi = 0;
2969 lo = (HOST_WIDE_INT) 1 << bitpos;
2970 }
2971 else
2972 {
2973 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2974 lo = 0;
2975 }
2976
2977 op1 = expand_binop (imode, and_optab, op1,
2978 immed_double_const (lo, hi, imode),
2979 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2980
2981 label = gen_label_rtx ();
2982 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2983
2984 if (GET_CODE (op0) == CONST_DOUBLE)
2985 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2986 else
2987 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2988 if (op0 != target)
2989 emit_move_insn (target, op0);
2990
2991 emit_label (label);
2992
2993 return target;
2994 }
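/* Illustrative sketch of the strategy used above; it is not used by the
   compiler.  It assumes a 32-bit IEEE single with the sign in bit 31:
   take the absolute value of OP0 with the target's abs primitive, test
   the isolated sign bit of OP1, and negate the result only when that bit
   is set, mirroring the compare-and-jump emitted above.  */
#if 0
#include <math.h>
#include <stdint.h>
#include <string.h>

static float
copysignf_absneg_sketch (float x, float y)
{
  uint32_t ybits;
  float ax = fabsf (x);			/* The abs primitive.  */

  memcpy (&ybits, &y, sizeof ybits);
  if (ybits & ((uint32_t) 1 << 31))	/* Sign bit of OP1 set?  */
    ax = -ax;				/* The neg primitive.  */
  return ax;
}
#endif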
2995
2996
2997 /* A subroutine of expand_copysign, perform the entire copysign operation
2998 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2999 is true if op0 is known to have its sign bit clear. */
3000
3001 static rtx
3002 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3003 int bitpos, bool op0_is_abs)
3004 {
3005 enum machine_mode imode;
3006 HOST_WIDE_INT hi, lo;
3007 int word, nwords, i;
3008 rtx temp, insns;
3009
3010 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3011 {
3012 imode = int_mode_for_mode (mode);
3013 if (imode == BLKmode)
3014 return NULL_RTX;
3015 word = 0;
3016 nwords = 1;
3017 }
3018 else
3019 {
3020 imode = word_mode;
3021
3022 if (FLOAT_WORDS_BIG_ENDIAN)
3023 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3024 else
3025 word = bitpos / BITS_PER_WORD;
3026 bitpos = bitpos % BITS_PER_WORD;
3027 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3028 }
3029
3030 if (bitpos < HOST_BITS_PER_WIDE_INT)
3031 {
3032 hi = 0;
3033 lo = (HOST_WIDE_INT) 1 << bitpos;
3034 }
3035 else
3036 {
3037 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3038 lo = 0;
3039 }
3040
3041 if (target == 0 || target == op0 || target == op1)
3042 target = gen_reg_rtx (mode);
3043
3044 if (nwords > 1)
3045 {
3046 start_sequence ();
3047
3048 for (i = 0; i < nwords; ++i)
3049 {
3050 rtx targ_piece = operand_subword (target, i, 1, mode);
3051 rtx op0_piece = operand_subword_force (op0, i, mode);
3052
3053 if (i == word)
3054 {
3055 if (!op0_is_abs)
3056 op0_piece = expand_binop (imode, and_optab, op0_piece,
3057 immed_double_const (~lo, ~hi, imode),
3058 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3059
3060 op1 = expand_binop (imode, and_optab,
3061 operand_subword_force (op1, i, mode),
3062 immed_double_const (lo, hi, imode),
3063 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3064
3065 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3066 targ_piece, 1, OPTAB_LIB_WIDEN);
3067 if (temp != targ_piece)
3068 emit_move_insn (targ_piece, temp);
3069 }
3070 else
3071 emit_move_insn (targ_piece, op0_piece);
3072 }
3073
3074 insns = get_insns ();
3075 end_sequence ();
3076
3077 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
3078 }
3079 else
3080 {
3081 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3082 immed_double_const (lo, hi, imode),
3083 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3084
3085 op0 = gen_lowpart (imode, op0);
3086 if (!op0_is_abs)
3087 op0 = expand_binop (imode, and_optab, op0,
3088 immed_double_const (~lo, ~hi, imode),
3089 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3090
3091 temp = expand_binop (imode, ior_optab, op0, op1,
3092 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3093 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3094 }
3095
3096 return target;
3097 }
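/* Illustrative sketch of the single-word bitmask form above; it is not
   used by the compiler.  It assumes a 32-bit IEEE single with the sign
   in bit 31: clear the sign bit of OP0, isolate the sign bit of OP1, and
   IOR the two pieces together.  */
#if 0
#include <stdint.h>
#include <string.h>

static float
copysignf_bit_sketch (float x, float y)
{
  const uint32_t mask = (uint32_t) 1 << 31;
  uint32_t xbits, ybits;

  memcpy (&xbits, &x, sizeof xbits);
  memcpy (&ybits, &y, sizeof ybits);
  xbits = (xbits & ~mask) | (ybits & mask);
  memcpy (&x, &xbits, sizeof x);
  return x;
}
#endif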
3098
3099 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3100 scalar floating point mode. Return NULL if we do not know how to
3101 expand the operation inline. */
3102
3103 rtx
3104 expand_copysign (rtx op0, rtx op1, rtx target)
3105 {
3106 enum machine_mode mode = GET_MODE (op0);
3107 const struct real_format *fmt;
3108 bool op0_is_abs;
3109 rtx temp;
3110
3111 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3112 gcc_assert (GET_MODE (op1) == mode);
3113
3114 /* First try to do it with a special instruction. */
3115 temp = expand_binop (mode, copysign_optab, op0, op1,
3116 target, 0, OPTAB_DIRECT);
3117 if (temp)
3118 return temp;
3119
3120 fmt = REAL_MODE_FORMAT (mode);
3121 if (fmt == NULL || !fmt->has_signed_zero)
3122 return NULL_RTX;
3123
3124 op0_is_abs = false;
3125 if (GET_CODE (op0) == CONST_DOUBLE)
3126 {
3127 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3128 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3129 op0_is_abs = true;
3130 }
3131
3132 if (fmt->signbit_ro >= 0
3133 && (GET_CODE (op0) == CONST_DOUBLE
3134 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
3135 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
3136 {
3137 temp = expand_copysign_absneg (mode, op0, op1, target,
3138 fmt->signbit_ro, op0_is_abs);
3139 if (temp)
3140 return temp;
3141 }
3142
3143 if (fmt->signbit_rw < 0)
3144 return NULL_RTX;
3145 return expand_copysign_bit (mode, op0, op1, target,
3146 fmt->signbit_rw, op0_is_abs);
3147 }
3148
3149 /* Generate an instruction whose insn-code is INSN_CODE,
3150 with two operands: an output TARGET and an input OP0.
3151 TARGET *must* be nonzero, and the output is always stored there.
3152 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3153 the value that is stored into TARGET. */
3154
3155 void
3156 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3157 {
3158 rtx temp;
3159 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3160 rtx pat;
3161
3162 temp = target;
3163
3164 /* Now, if insn does not accept our operands, put them into pseudos. */
3165
3166 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3167 op0 = copy_to_mode_reg (mode0, op0);
3168
3169 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3170 temp = gen_reg_rtx (GET_MODE (temp));
3171
3172 pat = GEN_FCN (icode) (temp, op0);
3173
3174 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3175 add_equal_note (pat, temp, code, op0, NULL_RTX);
3176
3177 emit_insn (pat);
3178
3179 if (temp != target)
3180 emit_move_insn (target, temp);
3181 }
3182
3183 struct no_conflict_data
3184 {
3185 rtx target, first, insn;
3186 bool must_stay;
3187 };
3188
3189 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3190 Set P->must_stay if the currently examined clobber / store has to stay
3191 in the list of insns that constitute the actual no_conflict block /
3192 libcall block. */
3193 static void
3194 no_conflict_move_test (rtx dest, rtx set, void *p0)
3195 {
3196 struct no_conflict_data *p= p0;
3197
3198 /* If this insn directly contributes to setting the target, it must stay. */
3199 if (reg_overlap_mentioned_p (p->target, dest))
3200 p->must_stay = true;
3201 /* If we haven't committed to keeping any other insns in the list yet,
3202 there is nothing more to check. */
3203 else if (p->insn == p->first)
3204 return;
3205 /* If this insn sets / clobbers a register that feeds one of the insns
3206 already in the list, this insn has to stay too. */
3207 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3208 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3209 || reg_used_between_p (dest, p->first, p->insn)
3210 /* Likewise if this insn depends on a register set by a previous
3211 insn in the list, or if it sets a result (presumably a hard
3212 register) that is set or clobbered by a previous insn.
3213 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3214 SET_DEST perform the former check on the address, and the latter
3215 check on the MEM. */
3216 || (GET_CODE (set) == SET
3217 && (modified_in_p (SET_SRC (set), p->first)
3218 || modified_in_p (SET_DEST (set), p->first)
3219 || modified_between_p (SET_SRC (set), p->first, p->insn)
3220 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3221 p->must_stay = true;
3222 }
3223
3224 /* Encapsulate the block starting at FIRST and ending with LAST, which is
3225 logically equivalent to EQUIV, so it gets manipulated as a unit if it
3226 is possible to do so. */
3227
3228 static void
3229 maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3230 {
3231 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3232 {
3233 /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3234 encapsulated region would not be in one basic block, i.e. when
3235 there is a control_flow_insn_p insn between FIRST and LAST. */
3236 bool attach_libcall_retval_notes = true;
3237 rtx insn, next = NEXT_INSN (last);
3238
3239 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3240 if (control_flow_insn_p (insn))
3241 {
3242 attach_libcall_retval_notes = false;
3243 break;
3244 }
3245
3246 if (attach_libcall_retval_notes)
3247 {
3248 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3249 REG_NOTES (first));
3250 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3251 REG_NOTES (last));
3252 }
3253 }
3254 }
3255
3256 /* Emit code to perform a series of operations on a multi-word quantity, one
3257 word at a time.
3258
3259 Such a block is preceded by a CLOBBER of the output, consists of multiple
3260 insns, each setting one word of the output, and followed by a SET copying
3261 the output to itself.
3262
3263 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3264 note indicating that it doesn't conflict with the (also multi-word)
3265 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3266 notes.
3267
3268 INSNS is a block of code generated to perform the operation, not including
3269 the CLOBBER and final copy. All insns that compute intermediate values
3270 are first emitted, followed by the block as described above.
3271
3272 TARGET, OP0, and OP1 are the output and inputs of the operations,
3273 respectively. OP1 may be zero for a unary operation.
3274
3275 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3276 on the last insn.
3277
3278 If TARGET is not a register, INSNS is simply emitted with no special
3279 processing. Likewise if anything in INSNS is not an INSN or if
3280 there is a libcall block inside INSNS.
3281
3282 The final insn emitted is returned. */
3283
3284 rtx
3285 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3286 {
3287 rtx prev, next, first, last, insn;
3288
3289 if (!REG_P (target) || reload_in_progress)
3290 return emit_insn (insns);
3291 else
3292 for (insn = insns; insn; insn = NEXT_INSN (insn))
3293 if (!NONJUMP_INSN_P (insn)
3294 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3295 return emit_insn (insns);
3296
3297 /* First emit all insns that do not store into words of the output and remove
3298 these from the list. */
3299 for (insn = insns; insn; insn = next)
3300 {
3301 rtx note;
3302 struct no_conflict_data data;
3303
3304 next = NEXT_INSN (insn);
3305
3306 /* Some ports (cris) create libcall regions of their own. We must
3307 avoid any potential nesting of LIBCALLs. */
3308 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3309 remove_note (insn, note);
3310 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3311 remove_note (insn, note);
3312
3313 data.target = target;
3314 data.first = insns;
3315 data.insn = insn;
3316 data.must_stay = 0;
3317 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3318 if (! data.must_stay)
3319 {
3320 if (PREV_INSN (insn))
3321 NEXT_INSN (PREV_INSN (insn)) = next;
3322 else
3323 insns = next;
3324
3325 if (next)
3326 PREV_INSN (next) = PREV_INSN (insn);
3327
3328 add_insn (insn);
3329 }
3330 }
3331
3332 prev = get_last_insn ();
3333
3334 /* Now write the CLOBBER of the output, followed by the setting of each
3335 of the words, followed by the final copy. */
3336 if (target != op0 && target != op1)
3337 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3338
3339 for (insn = insns; insn; insn = next)
3340 {
3341 next = NEXT_INSN (insn);
3342 add_insn (insn);
3343
3344 if (op1 && REG_P (op1))
3345 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3346 REG_NOTES (insn));
3347
3348 if (op0 && REG_P (op0))
3349 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3350 REG_NOTES (insn));
3351 }
3352
3353 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3354 != CODE_FOR_nothing)
3355 {
3356 last = emit_move_insn (target, target);
3357 if (equiv)
3358 set_unique_reg_note (last, REG_EQUAL, equiv);
3359 }
3360 else
3361 {
3362 last = get_last_insn ();
3363
3364 /* Remove any existing REG_EQUAL note from "last", or else it will
3365 be mistaken for a note referring to the full contents of the
3366 alleged libcall value when found together with the REG_RETVAL
3367 note added below. An existing note can come from an insn
3368 expansion at "last". */
3369 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3370 }
3371
3372 if (prev == 0)
3373 first = get_insns ();
3374 else
3375 first = NEXT_INSN (prev);
3376
3377 maybe_encapsulate_block (first, last, equiv);
3378
3379 return last;
3380 }
3381
3382 /* Emit code to make a call to a constant function or a library call.
3383
3384 INSNS is a list containing all insns emitted in the call.
3385 These insns leave the result in RESULT. Our block is to copy RESULT
3386 to TARGET, which is logically equivalent to EQUIV.
3387
3388 We first emit any insns that set a pseudo on the assumption that these are
3389 loading constants into registers; doing so allows them to be safely cse'ed
3390 between blocks. Then we emit all the other insns in the block, followed by
3391 an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
3392 note with an operand of EQUIV.
3393
3394 Moving assignments to pseudos outside of the block is done to improve
3395 the generated code, but is not required to generate correct code,
3396 hence being unable to move an assignment is not grounds for not making
3397 a libcall block. There are two reasons why it is safe to leave these
3398 insns inside the block: First, we know that these pseudos cannot be
3399 used in generated RTL outside the block since they are created for
3400 temporary purposes within the block. Second, CSE will not record the
3401 values of anything set inside a libcall block, so we know they must
3402 be dead at the end of the block.
3403
3404 Except for the first group of insns (the ones setting pseudos), the
3405 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3406
3407 void
3408 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3409 {
3410 rtx final_dest = target;
3411 rtx prev, next, first, last, insn;
3412
3413 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3414 into a MEM later. Protect the libcall block from this change. */
3415 if (! REG_P (target) || REG_USERVAR_P (target))
3416 target = gen_reg_rtx (GET_MODE (target));
3417
3418 /* If we're using non-call exceptions, a libcall corresponding to an
3419 operation that may trap may also trap. */
3420 if (flag_non_call_exceptions && may_trap_p (equiv))
3421 {
3422 for (insn = insns; insn; insn = NEXT_INSN (insn))
3423 if (CALL_P (insn))
3424 {
3425 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3426
3427 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3428 remove_note (insn, note);
3429 }
3430 }
3431 else
3432 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3433 reg note to indicate that this call cannot throw or execute a nonlocal
3434 goto (unless there is already a REG_EH_REGION note, in which case
3435 we update it). */
3436 for (insn = insns; insn; insn = NEXT_INSN (insn))
3437 if (CALL_P (insn))
3438 {
3439 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3440
3441 if (note != 0)
3442 XEXP (note, 0) = constm1_rtx;
3443 else
3444 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3445 REG_NOTES (insn));
3446 }
3447
3448 /* First emit all insns that set pseudos. Remove them from the list as
3449 we go. Avoid insns that set pseudos which were referenced in previous
3450 insns. These can be generated by move_by_pieces, for example,
3451 to update an address. Similarly, avoid insns that reference things
3452 set in previous insns. */
3453
3454 for (insn = insns; insn; insn = next)
3455 {
3456 rtx set = single_set (insn);
3457 rtx note;
3458
3459 /* Some ports (e.g. cris) create libcall regions on their own.  We must
3460 avoid any potential nesting of LIBCALLs. */
3461 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3462 remove_note (insn, note);
3463 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3464 remove_note (insn, note);
3465
3466 next = NEXT_INSN (insn);
3467
3468 if (set != 0 && REG_P (SET_DEST (set))
3469 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3470 {
3471 struct no_conflict_data data;
3472
3473 data.target = const0_rtx;
3474 data.first = insns;
3475 data.insn = insn;
3476 data.must_stay = 0;
3477 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3478 if (! data.must_stay)
3479 {
3480 if (PREV_INSN (insn))
3481 NEXT_INSN (PREV_INSN (insn)) = next;
3482 else
3483 insns = next;
3484
3485 if (next)
3486 PREV_INSN (next) = PREV_INSN (insn);
3487
3488 add_insn (insn);
3489 }
3490 }
3491
3492 /* Some ports use a loop to copy large arguments onto the stack.
3493 Don't move anything outside such a loop. */
3494 if (LABEL_P (insn))
3495 break;
3496 }
3497
3498 prev = get_last_insn ();
3499
3500 /* Write the remaining insns followed by the final copy. */
3501
3502 for (insn = insns; insn; insn = next)
3503 {
3504 next = NEXT_INSN (insn);
3505
3506 add_insn (insn);
3507 }
3508
3509 last = emit_move_insn (target, result);
3510 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3511 != CODE_FOR_nothing)
3512 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3513 else
3514 {
3515 /* Remove any existing REG_EQUAL note from "last", or else it will
3516 be mistaken for a note referring to the full contents of the
3517 libcall value when found together with the REG_RETVAL note added
3518 below. An existing note can come from an insn expansion at
3519 "last". */
3520 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3521 }
3522
3523 if (final_dest != target)
3524 emit_move_insn (final_dest, target);
3525
3526 if (prev == 0)
3527 first = get_insns ();
3528 else
3529 first = NEXT_INSN (prev);
3530
3531 maybe_encapsulate_block (first, last, equiv);
3532 }
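/* Illustrative sketch (kept out of the build): how a caller typically
   builds a libcall block.  The DImode negation libfunc is only an
   example; a real caller would check that the libfunc is non-null for
   the target before using it.  */
#if 0
static rtx
example_expand_neg_libcall (rtx op0)
{
  rtx target = gen_reg_rtx (DImode);
  rtx libfunc = neg_optab->handlers[(int) DImode].libfunc;
  rtx insns, value;

  /* Emit the call in its own sequence, then wrap the whole sequence up
     with (neg:DI op0) as the REG_EQUAL equivalent, so the optimizers
     may CSE or delete the call as documented above.  */
  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                   DImode, 1, op0, DImode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, target, value,
                      gen_rtx_NEG (DImode, copy_rtx (op0)));
  return target;
}
#endif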
3533
3534 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3535 PURPOSE describes how this comparison will be used. CODE is the rtx
3536 comparison code we will be using.
3537
3538 ??? Actually, CODE is slightly weaker than that. A target is still
3539 required to implement all of the normal bcc operations, but not
3540 required to implement all (or any) of the unordered bcc operations. */
3541
3542 int
3543 can_compare_p (enum rtx_code code, enum machine_mode mode,
3544 enum can_compare_purpose purpose)
3545 {
3546 do
3547 {
3548 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3549 {
3550 if (purpose == ccp_jump)
3551 return bcc_gen_fctn[(int) code] != NULL;
3552 else if (purpose == ccp_store_flag)
3553 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3554 else
3555 /* There's only one cmov entry point, and it's allowed to fail. */
3556 return 1;
3557 }
3558 if (purpose == ccp_jump
3559 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3560 return 1;
3561 if (purpose == ccp_cmov
3562 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3563 return 1;
3564 if (purpose == ccp_store_flag
3565 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3566 return 1;
3567 mode = GET_MODE_WIDER_MODE (mode);
3568 }
3569 while (mode != VOIDmode);
3570
3571 return 0;
3572 }
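/* Illustrative sketch (kept out of the build): a caller asking whether
   a DImode equality test can feed a conditional branch directly, before
   deciding between a single comparison and a word-by-word expansion.  */
#if 0
static int
example_can_branch_on_dimode_eq (void)
{
  return can_compare_p (EQ, DImode, ccp_jump);
}
#endif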
3573
3574 /* This function is called when we are going to emit a compare instruction that
3575 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3576
3577 *PMODE is the mode of the inputs (in case they are const_int).
3578 *PUNSIGNEDP nonzero says that the operands are unsigned;
3579 this matters if they need to be widened.
3580
3581 If they have mode BLKmode, then SIZE specifies the size of both operands.
3582
3583 This function performs all the setup necessary so that the caller only has
3584 to emit a single comparison insn. This setup can involve doing a BLKmode
3585 comparison or emitting a library call to perform the comparison if no insn
3586 is available to handle it.
3587 The values which are passed in through pointers can be modified; the caller
3588 should perform the comparison on the modified values. Constant
3589 comparisons must have already been folded. */
3590
3591 static void
3592 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3593 enum machine_mode *pmode, int *punsignedp,
3594 enum can_compare_purpose purpose)
3595 {
3596 enum machine_mode mode = *pmode;
3597 rtx x = *px, y = *py;
3598 int unsignedp = *punsignedp;
3599
3600 /* If we are inside an appropriately-short loop and we are optimizing,
3601 force expensive constants into a register. */
3602 if (CONSTANT_P (x) && optimize
3603 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3604 x = force_reg (mode, x);
3605
3606 if (CONSTANT_P (y) && optimize
3607 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3608 y = force_reg (mode, y);
3609
3610 #ifdef HAVE_cc0
3611 /* Make sure we have a canonical comparison.  The RTL
3612 documentation states that canonical comparisons are required only
3613 for targets which have cc0. */
3614 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3615 #endif
3616
3617 /* Don't let both operands fail to indicate the mode. */
3618 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3619 x = force_reg (mode, x);
3620
3621 /* Handle all BLKmode compares. */
3622
3623 if (mode == BLKmode)
3624 {
3625 enum machine_mode cmp_mode, result_mode;
3626 enum insn_code cmp_code;
3627 tree length_type;
3628 rtx libfunc;
3629 rtx result;
3630 rtx opalign
3631 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3632
3633 gcc_assert (size);
3634
3635 /* Try to use a memory block compare insn - either cmpstr
3636 or cmpmem will do. */
3637 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3638 cmp_mode != VOIDmode;
3639 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3640 {
3641 cmp_code = cmpmem_optab[cmp_mode];
3642 if (cmp_code == CODE_FOR_nothing)
3643 cmp_code = cmpstr_optab[cmp_mode];
3644 if (cmp_code == CODE_FOR_nothing)
3645 cmp_code = cmpstrn_optab[cmp_mode];
3646 if (cmp_code == CODE_FOR_nothing)
3647 continue;
3648
3649 /* Must make sure the size fits the insn's mode. */
3650 if ((GET_CODE (size) == CONST_INT
3651 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3652 || (GET_MODE_BITSIZE (GET_MODE (size))
3653 > GET_MODE_BITSIZE (cmp_mode)))
3654 continue;
3655
3656 result_mode = insn_data[cmp_code].operand[0].mode;
3657 result = gen_reg_rtx (result_mode);
3658 size = convert_to_mode (cmp_mode, size, 1);
3659 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3660
3661 *px = result;
3662 *py = const0_rtx;
3663 *pmode = result_mode;
3664 return;
3665 }
3666
3667 /* Otherwise call a library function, memcmp. */
3668 libfunc = memcmp_libfunc;
3669 length_type = sizetype;
3670 result_mode = TYPE_MODE (integer_type_node);
3671 cmp_mode = TYPE_MODE (length_type);
3672 size = convert_to_mode (TYPE_MODE (length_type), size,
3673 TYPE_UNSIGNED (length_type));
3674
3675 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3676 result_mode, 3,
3677 XEXP (x, 0), Pmode,
3678 XEXP (y, 0), Pmode,
3679 size, cmp_mode);
3680 *px = result;
3681 *py = const0_rtx;
3682 *pmode = result_mode;
3683 return;
3684 }
3685
3686 /* Don't allow operands to the compare to trap, as that can put the
3687 compare and branch in different basic blocks. */
3688 if (flag_non_call_exceptions)
3689 {
3690 if (may_trap_p (x))
3691 x = force_reg (mode, x);
3692 if (may_trap_p (y))
3693 y = force_reg (mode, y);
3694 }
3695
3696 *px = x;
3697 *py = y;
3698 if (can_compare_p (*pcomparison, mode, purpose))
3699 return;
3700
3701 /* Handle a lib call just for the mode we are using. */
3702
3703 if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
3704 {
3705 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3706 rtx result;
3707
3708 /* If we want unsigned, and this mode has a distinct unsigned
3709 comparison routine, use that. */
3710 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3711 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3712
3713 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3714 word_mode, 2, x, mode, y, mode);
3715
3716 /* There are two kinds of comparison routines. Biased routines
3717 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3718 of gcc expect that the comparison operation is equivalent
3719 to the modified comparison. For signed comparisons compare the
3720 result against 1 in the biased case, and zero in the unbiased
3721 case. For unsigned comparisons always compare against 1 after
3722 biasing the unbiased result by adding 1. This gives us a way to
3723 represent LTU. */
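      /* Concretely (illustrative): a biased routine in the style of
         __cmpdi2 returns 0/1/2 for less/equal/greater, so LT and LTU
         become "result < 1"; an unbiased routine returns -1/0/1, and for
         unsigned comparisons we add 1 so that "result + 1 <u 1" is true
         exactly when the routine returned -1.  */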
3724 *px = result;
3725 *pmode = word_mode;
3726 *py = const1_rtx;
3727
3728 if (!TARGET_LIB_INT_CMP_BIASED)
3729 {
3730 if (*punsignedp)
3731 *px = plus_constant (result, 1);
3732 else
3733 *py = const0_rtx;
3734 }
3735 return;
3736 }
3737
3738 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3739 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3740 }
3741
3742 /* Before emitting an insn with code ICODE, make sure that X, which is going
3743 to be used for operand OPNUM of the insn, is converted from mode MODE to
3744 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3745 that it is accepted by the operand predicate. Return the new value. */
3746
3747 static rtx
3748 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3749 enum machine_mode wider_mode, int unsignedp)
3750 {
3751 if (mode != wider_mode)
3752 x = convert_modes (wider_mode, mode, x, unsignedp);
3753
3754 if (!insn_data[icode].operand[opnum].predicate
3755 (x, insn_data[icode].operand[opnum].mode))
3756 {
3757 if (no_new_pseudos)
3758 return NULL_RTX;
3759 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3760 }
3761
3762 return x;
3763 }
3764
3765 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3766 we can do the comparison.
3767 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3768 be NULL_RTX which indicates that only a comparison is to be generated. */
3769
3770 static void
3771 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3772 enum rtx_code comparison, int unsignedp, rtx label)
3773 {
3774 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3775 enum mode_class class = GET_MODE_CLASS (mode);
3776 enum machine_mode wider_mode = mode;
3777
3778 /* Try combined insns first. */
3779 do
3780 {
3781 enum insn_code icode;
3782 PUT_MODE (test, wider_mode);
3783
3784 if (label)
3785 {
3786 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3787
3788 if (icode != CODE_FOR_nothing
3789 && insn_data[icode].operand[0].predicate (test, wider_mode))
3790 {
3791 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3792 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3793 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3794 return;
3795 }
3796 }
3797
3798 /* Handle some compares against zero. */
3799 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3800 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3801 {
3802 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3803 emit_insn (GEN_FCN (icode) (x));
3804 if (label)
3805 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3806 return;
3807 }
3808
3809 /* Handle compares for which there is a directly suitable insn. */
3810
3811 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3812 if (icode != CODE_FOR_nothing)
3813 {
3814 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3815 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3816 emit_insn (GEN_FCN (icode) (x, y));
3817 if (label)
3818 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3819 return;
3820 }
3821
3822 if (!CLASS_HAS_WIDER_MODES_P (class))
3823 break;
3824
3825 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3826 }
3827 while (wider_mode != VOIDmode);
3828
3829 gcc_unreachable ();
3830 }
3831
3832 /* Generate code to compare X with Y so that the condition codes are
3833 set and to jump to LABEL if the condition is true. If X is a
3834 constant and Y is not a constant, then the comparison is swapped to
3835 ensure that the comparison RTL has the canonical form.
3836
3837 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3838 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3839 the proper branch condition code.
3840
3841 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3842
3843 MODE is the mode of the inputs (in case they are const_int).
3844
3845 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3846 be passed unchanged to emit_cmp_insn, then potentially converted into an
3847 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3848
3849 void
3850 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3851 enum machine_mode mode, int unsignedp, rtx label)
3852 {
3853 rtx op0 = x, op1 = y;
3854
3855 /* Swap operands and condition to ensure canonical RTL. */
3856 if (swap_commutative_operands_p (x, y))
3857 {
3858 /* If we're not emitting a branch, this means some caller
3859 is out of sync. */
3860 gcc_assert (label);
3861
3862 op0 = y, op1 = x;
3863 comparison = swap_condition (comparison);
3864 }
3865
3866 #ifdef HAVE_cc0
3867 /* If OP0 is still a constant, then both X and Y must be constants.
3868 Force X into a register to create canonical RTL. */
3869 if (CONSTANT_P (op0))
3870 op0 = force_reg (mode, op0);
3871 #endif
3872
3873 if (unsignedp)
3874 comparison = unsigned_condition (comparison);
3875
3876 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3877 ccp_jump);
3878 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3879 }
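/* Illustrative sketch (kept out of the build): branch to LABEL when
   pseudo A is less than pseudo B, compared as signed SImode values.
   SIZE is only meaningful for BLKmode operands, hence NULL_RTX here.  */
#if 0
static void
example_branch_if_less (rtx a, rtx b, rtx label)
{
  emit_cmp_and_jump_insns (a, b, LT, NULL_RTX, SImode, 0, label);
}
#endif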
3880
3881 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3882
3883 void
3884 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3885 enum machine_mode mode, int unsignedp)
3886 {
3887 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3888 }
3889
3890 /* Emit a library call comparison between floating point X and Y.
3891 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3892
3893 static void
3894 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3895 enum machine_mode *pmode, int *punsignedp)
3896 {
3897 enum rtx_code comparison = *pcomparison;
3898 enum rtx_code swapped = swap_condition (comparison);
3899 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3900 rtx x = *px;
3901 rtx y = *py;
3902 enum machine_mode orig_mode = GET_MODE (x);
3903 enum machine_mode mode;
3904 rtx value, target, insns, equiv;
3905 rtx libfunc = 0;
3906 bool reversed_p = false;
3907
3908 for (mode = orig_mode;
3909 mode != VOIDmode;
3910 mode = GET_MODE_WIDER_MODE (mode))
3911 {
3912 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3913 break;
3914
3915 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3916 {
3917 rtx tmp;
3918 tmp = x; x = y; y = tmp;
3919 comparison = swapped;
3920 break;
3921 }
3922
3923 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3924 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3925 {
3926 comparison = reversed;
3927 reversed_p = true;
3928 break;
3929 }
3930 }
3931
3932 gcc_assert (mode != VOIDmode);
3933
3934 if (mode != orig_mode)
3935 {
3936 x = convert_to_mode (mode, x, 0);
3937 y = convert_to_mode (mode, y, 0);
3938 }
3939
3940 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3941 the RTL.  This allows the RTL optimizers to delete the libcall if the
3942 condition can be determined at compile-time. */
3943 if (comparison == UNORDERED)
3944 {
3945 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3946 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3947 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3948 temp, const_true_rtx, equiv);
3949 }
3950 else
3951 {
3952 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3953 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3954 {
3955 rtx true_rtx, false_rtx;
3956
3957 switch (comparison)
3958 {
3959 case EQ:
3960 true_rtx = const0_rtx;
3961 false_rtx = const_true_rtx;
3962 break;
3963
3964 case NE:
3965 true_rtx = const_true_rtx;
3966 false_rtx = const0_rtx;
3967 break;
3968
3969 case GT:
3970 true_rtx = const1_rtx;
3971 false_rtx = const0_rtx;
3972 break;
3973
3974 case GE:
3975 true_rtx = const0_rtx;
3976 false_rtx = constm1_rtx;
3977 break;
3978
3979 case LT:
3980 true_rtx = constm1_rtx;
3981 false_rtx = const0_rtx;
3982 break;
3983
3984 case LE:
3985 true_rtx = const0_rtx;
3986 false_rtx = const1_rtx;
3987 break;
3988
3989 default:
3990 gcc_unreachable ();
3991 }
3992 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3993 equiv, true_rtx, false_rtx);
3994 }
3995 }
3996
3997 start_sequence ();
3998 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3999 word_mode, 2, x, mode, y, mode);
4000 insns = get_insns ();
4001 end_sequence ();
4002
4003 target = gen_reg_rtx (word_mode);
4004 emit_libcall_block (insns, target, value, equiv);
4005
4006 if (comparison == UNORDERED
4007 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4008 comparison = reversed_p ? EQ : NE;
4009
4010 *px = target;
4011 *py = const0_rtx;
4012 *pmode = word_mode;
4013 *pcomparison = comparison;
4014 *punsignedp = 0;
4015 }
4016
4017 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4018
4019 void
4020 emit_indirect_jump (rtx loc)
4021 {
4022 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4023 (loc, Pmode))
4024 loc = copy_to_mode_reg (Pmode, loc);
4025
4026 emit_jump_insn (gen_indirect_jump (loc));
4027 emit_barrier ();
4028 }
4029
4030 #ifdef HAVE_conditional_move
4031
4032 /* Emit a conditional move instruction if the machine supports one for that
4033 condition and machine mode.
4034
4035 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4036 the mode to use should they be constants. If it is VOIDmode, they cannot
4037 both be constants.
4038
4039 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4040 should be stored there. MODE is the mode to use should they be constants.
4041 If it is VOIDmode, they cannot both be constants.
4042
4043 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4044 is not supported. */
4045
4046 rtx
4047 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4048 enum machine_mode cmode, rtx op2, rtx op3,
4049 enum machine_mode mode, int unsignedp)
4050 {
4051 rtx tem, subtarget, comparison, insn;
4052 enum insn_code icode;
4053 enum rtx_code reversed;
4054
4055 /* If one operand is constant, make it the second one. Only do this
4056 if the other operand is not constant as well. */
4057
4058 if (swap_commutative_operands_p (op0, op1))
4059 {
4060 tem = op0;
4061 op0 = op1;
4062 op1 = tem;
4063 code = swap_condition (code);
4064 }
4065
4066 /* get_condition will prefer to generate LT and GT even if the old
4067 comparison was against zero, so undo that canonicalization here since
4068 comparisons against zero are cheaper. */
4069 if (code == LT && op1 == const1_rtx)
4070 code = LE, op1 = const0_rtx;
4071 else if (code == GT && op1 == constm1_rtx)
4072 code = GE, op1 = const0_rtx;
4073
4074 if (cmode == VOIDmode)
4075 cmode = GET_MODE (op0);
4076
4077 if (swap_commutative_operands_p (op2, op3)
4078 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4079 != UNKNOWN))
4080 {
4081 tem = op2;
4082 op2 = op3;
4083 op3 = tem;
4084 code = reversed;
4085 }
4086
4087 if (mode == VOIDmode)
4088 mode = GET_MODE (op2);
4089
4090 icode = movcc_gen_code[mode];
4091
4092 if (icode == CODE_FOR_nothing)
4093 return 0;
4094
4095 if (!target)
4096 target = gen_reg_rtx (mode);
4097
4098 subtarget = target;
4099
4100 /* If the insn doesn't accept these operands, put them in pseudos. */
4101
4102 if (!insn_data[icode].operand[0].predicate
4103 (subtarget, insn_data[icode].operand[0].mode))
4104 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4105
4106 if (!insn_data[icode].operand[2].predicate
4107 (op2, insn_data[icode].operand[2].mode))
4108 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4109
4110 if (!insn_data[icode].operand[3].predicate
4111 (op3, insn_data[icode].operand[3].mode))
4112 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4113
4114 /* Everything should now be in the suitable form, so emit the compare insn
4115 and then the conditional move. */
4116
4117 comparison
4118 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4119
4120 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4121 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4122 return NULL and let the caller figure out how best to deal with this
4123 situation. */
4124 if (GET_CODE (comparison) != code)
4125 return NULL_RTX;
4126
4127 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4128
4129 /* If that failed, then give up. */
4130 if (insn == 0)
4131 return 0;
4132
4133 emit_insn (insn);
4134
4135 if (subtarget != target)
4136 convert_move (target, subtarget, 0);
4137
4138 return target;
4139 }
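/* Illustrative sketch (kept out of the build): compute MAX (A, B) into
   TARGET with a conditional move.  The caller must be prepared for a
   NULL_RTX return when the target has no SImode movcc pattern or the
   comparison folds to a constant.  */
#if 0
static rtx
example_emit_smax_via_cmov (rtx target, rtx a, rtx b)
{
  return emit_conditional_move (target, GT, a, b, SImode,
                                a, b, SImode, 0);
}
#endif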
4140
4141 /* Return nonzero if a conditional move of mode MODE is supported.
4142
4143 This function is for combine so it can tell whether an insn that looks
4144 like a conditional move is actually supported by the hardware. If we
4145 guess wrong we lose a bit on optimization, but that's it. */
4146 /* ??? sparc64 supports conditionally moving integer values based on fp
4147 comparisons, and vice versa. How do we handle them? */
4148
4149 int
4150 can_conditionally_move_p (enum machine_mode mode)
4151 {
4152 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4153 return 1;
4154
4155 return 0;
4156 }
4157
4158 #endif /* HAVE_conditional_move */
4159
4160 /* Emit a conditional addition instruction if the machine supports one for that
4161 condition and machine mode.
4162
4163 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4164 the mode to use should they be constants. If it is VOIDmode, they cannot
4165 both be constants.
4166
4167 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4168 should be stored there. MODE is the mode to use should they be constants.
4169 If it is VOIDmode, they cannot both be constants.
4170
4171 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4172 is not supported. */
4173
4174 rtx
4175 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4176 enum machine_mode cmode, rtx op2, rtx op3,
4177 enum machine_mode mode, int unsignedp)
4178 {
4179 rtx tem, subtarget, comparison, insn;
4180 enum insn_code icode;
4181 enum rtx_code reversed;
4182
4183 /* If one operand is constant, make it the second one. Only do this
4184 if the other operand is not constant as well. */
4185
4186 if (swap_commutative_operands_p (op0, op1))
4187 {
4188 tem = op0;
4189 op0 = op1;
4190 op1 = tem;
4191 code = swap_condition (code);
4192 }
4193
4194 /* get_condition will prefer to generate LT and GT even if the old
4195 comparison was against zero, so undo that canonicalization here since
4196 comparisons against zero are cheaper. */
4197 if (code == LT && op1 == const1_rtx)
4198 code = LE, op1 = const0_rtx;
4199 else if (code == GT && op1 == constm1_rtx)
4200 code = GE, op1 = const0_rtx;
4201
4202 if (cmode == VOIDmode)
4203 cmode = GET_MODE (op0);
4204
4205 if (swap_commutative_operands_p (op2, op3)
4206 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4207 != UNKNOWN))
4208 {
4209 tem = op2;
4210 op2 = op3;
4211 op3 = tem;
4212 code = reversed;
4213 }
4214
4215 if (mode == VOIDmode)
4216 mode = GET_MODE (op2);
4217
4218 icode = addcc_optab->handlers[(int) mode].insn_code;
4219
4220 if (icode == CODE_FOR_nothing)
4221 return 0;
4222
4223 if (!target)
4224 target = gen_reg_rtx (mode);
4225
4226 /* If the insn doesn't accept these operands, put them in pseudos. */
4227
4228 if (!insn_data[icode].operand[0].predicate
4229 (target, insn_data[icode].operand[0].mode))
4230 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4231 else
4232 subtarget = target;
4233
4234 if (!insn_data[icode].operand[2].predicate
4235 (op2, insn_data[icode].operand[2].mode))
4236 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4237
4238 if (!insn_data[icode].operand[3].predicate
4239 (op3, insn_data[icode].operand[3].mode))
4240 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4241
4242 /* Everything should now be in the suitable form, so emit the compare insn
4243 and then the conditional move. */
4244
4245 comparison
4246 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4247
4248 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4249 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4250 return NULL and let the caller figure out how best to deal with this
4251 situation. */
4252 if (GET_CODE (comparison) != code)
4253 return NULL_RTX;
4254
4255 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4256
4257 /* If that failed, then give up. */
4258 if (insn == 0)
4259 return 0;
4260
4261 emit_insn (insn);
4262
4263 if (subtarget != target)
4264 convert_move (target, subtarget, 0);
4265
4266 return target;
4267 }
4268
4269 /* These functions attempt to generate an insn body, rather than
4270 emitting the insn, but if the gen function already emits them, we
4271 make no attempt to turn them back into naked patterns. */
4272
4273 /* Generate and return an insn body to add Y to X. */
4274
4275 rtx
4276 gen_add2_insn (rtx x, rtx y)
4277 {
4278 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4279
4280 gcc_assert (insn_data[icode].operand[0].predicate
4281 (x, insn_data[icode].operand[0].mode));
4282 gcc_assert (insn_data[icode].operand[1].predicate
4283 (x, insn_data[icode].operand[1].mode));
4284 gcc_assert (insn_data[icode].operand[2].predicate
4285 (y, insn_data[icode].operand[2].mode));
4286
4287 return GEN_FCN (icode) (x, x, y);
4288 }
4289
4290 /* Generate and return an insn body to add r1 and c,
4291 storing the result in r0. */
4292 rtx
4293 gen_add3_insn (rtx r0, rtx r1, rtx c)
4294 {
4295 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4296
4297 if (icode == CODE_FOR_nothing
4298 || !(insn_data[icode].operand[0].predicate
4299 (r0, insn_data[icode].operand[0].mode))
4300 || !(insn_data[icode].operand[1].predicate
4301 (r1, insn_data[icode].operand[1].mode))
4302 || !(insn_data[icode].operand[2].predicate
4303 (c, insn_data[icode].operand[2].mode)))
4304 return NULL_RTX;
4305
4306 return GEN_FCN (icode) (r0, r1, c);
4307 }
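/* Illustrative sketch (kept out of the build): emit a three-operand add
   when the add pattern accepts the operands as-is, reporting failure so
   the caller can fall back to a more general expansion.  */
#if 0
static int
example_emit_add3_if_possible (rtx dest, rtx src, rtx increment)
{
  rtx pat = gen_add3_insn (dest, src, increment);
  if (pat == NULL_RTX)
    return 0;
  emit_insn (pat);
  return 1;
}
#endif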
4308
4309 int
4310 have_add2_insn (rtx x, rtx y)
4311 {
4312 int icode;
4313
4314 gcc_assert (GET_MODE (x) != VOIDmode);
4315
4316 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4317
4318 if (icode == CODE_FOR_nothing)
4319 return 0;
4320
4321 if (!(insn_data[icode].operand[0].predicate
4322 (x, insn_data[icode].operand[0].mode))
4323 || !(insn_data[icode].operand[1].predicate
4324 (x, insn_data[icode].operand[1].mode))
4325 || !(insn_data[icode].operand[2].predicate
4326 (y, insn_data[icode].operand[2].mode)))
4327 return 0;
4328
4329 return 1;
4330 }
4331
4332 /* Generate and return an insn body to subtract Y from X. */
4333
4334 rtx
4335 gen_sub2_insn (rtx x, rtx y)
4336 {
4337 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4338
4339 gcc_assert (insn_data[icode].operand[0].predicate
4340 (x, insn_data[icode].operand[0].mode));
4341 gcc_assert (insn_data[icode].operand[1].predicate
4342 (x, insn_data[icode].operand[1].mode));
4343 gcc_assert (insn_data[icode].operand[2].predicate
4344 (y, insn_data[icode].operand[2].mode));
4345
4346 return GEN_FCN (icode) (x, x, y);
4347 }
4348
4349 /* Generate and return an insn body to subtract c from r1,
4350 storing the result in r0. */
4351 rtx
4352 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4353 {
4354 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4355
4356 if (icode == CODE_FOR_nothing
4357 || !(insn_data[icode].operand[0].predicate
4358 (r0, insn_data[icode].operand[0].mode))
4359 || !(insn_data[icode].operand[1].predicate
4360 (r1, insn_data[icode].operand[1].mode))
4361 || !(insn_data[icode].operand[2].predicate
4362 (c, insn_data[icode].operand[2].mode)))
4363 return NULL_RTX;
4364
4365 return GEN_FCN (icode) (r0, r1, c);
4366 }
4367
4368 int
4369 have_sub2_insn (rtx x, rtx y)
4370 {
4371 int icode;
4372
4373 gcc_assert (GET_MODE (x) != VOIDmode);
4374
4375 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4376
4377 if (icode == CODE_FOR_nothing)
4378 return 0;
4379
4380 if (!(insn_data[icode].operand[0].predicate
4381 (x, insn_data[icode].operand[0].mode))
4382 || !(insn_data[icode].operand[1].predicate
4383 (x, insn_data[icode].operand[1].mode))
4384 || !(insn_data[icode].operand[2].predicate
4385 (y, insn_data[icode].operand[2].mode)))
4386 return 0;
4387
4388 return 1;
4389 }
4390
4391 /* Generate the body of an instruction to copy Y into X.
4392 It may be a list of insns, if one insn isn't enough. */
4393
4394 rtx
4395 gen_move_insn (rtx x, rtx y)
4396 {
4397 rtx seq;
4398
4399 start_sequence ();
4400 emit_move_insn_1 (x, y);
4401 seq = get_insns ();
4402 end_sequence ();
4403 return seq;
4404 }
4405
4406 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4407 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4408 no such operation exists, CODE_FOR_nothing will be returned. */
4409
4410 enum insn_code
4411 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4412 int unsignedp)
4413 {
4414 convert_optab tab;
4415 #ifdef HAVE_ptr_extend
4416 if (unsignedp < 0)
4417 return CODE_FOR_ptr_extend;
4418 #endif
4419
4420 tab = unsignedp ? zext_optab : sext_optab;
4421 return tab->handlers[to_mode][from_mode].insn_code;
4422 }
4423
4424 /* Generate the body of an insn to extend Y (with mode MFROM)
4425 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4426
4427 rtx
4428 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4429 enum machine_mode mfrom, int unsignedp)
4430 {
4431 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4432 return GEN_FCN (icode) (x, y);
4433 }
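/* Illustrative sketch (kept out of the build): zero-extend an SImode
   value into a DImode register when a zero_extendsidi2 pattern exists;
   a real caller would fall back to convert_move otherwise.  */
#if 0
static int
example_emit_zero_extend_si_to_di (rtx to, rtx from)
{
  if (can_extend_p (DImode, SImode, 1) == CODE_FOR_nothing)
    return 0;
  emit_insn (gen_extend_insn (to, from, DImode, SImode, 1));
  return 1;
}
#endif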
4434
4435 /* can_fix_p and can_float_p say whether the target machine
4436 can directly convert a given fixed point type to
4437 a given floating point type, or vice versa.
4438 The returned value is the CODE_FOR_... value to use,
4439 or CODE_FOR_nothing if these modes cannot be directly converted.
4440
4441 *TRUNCP_PTR is set to 1 if it is necessary to output
4442 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4443
4444 static enum insn_code
4445 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4446 int unsignedp, int *truncp_ptr)
4447 {
4448 convert_optab tab;
4449 enum insn_code icode;
4450
4451 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4452 icode = tab->handlers[fixmode][fltmode].insn_code;
4453 if (icode != CODE_FOR_nothing)
4454 {
4455 *truncp_ptr = 0;
4456 return icode;
4457 }
4458
4459 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4460 for this to work. We need to rework the fix* and ftrunc* patterns
4461 and documentation. */
4462 tab = unsignedp ? ufix_optab : sfix_optab;
4463 icode = tab->handlers[fixmode][fltmode].insn_code;
4464 if (icode != CODE_FOR_nothing
4465 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4466 {
4467 *truncp_ptr = 1;
4468 return icode;
4469 }
4470
4471 *truncp_ptr = 0;
4472 return CODE_FOR_nothing;
4473 }
4474
4475 static enum insn_code
4476 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4477 int unsignedp)
4478 {
4479 convert_optab tab;
4480
4481 tab = unsignedp ? ufloat_optab : sfloat_optab;
4482 return tab->handlers[fltmode][fixmode].insn_code;
4483 }
4484
4485 /* Generate code to convert FROM to floating point
4486 and store in TO. FROM must be fixed point and not VOIDmode.
4487 UNSIGNEDP nonzero means regard FROM as unsigned.
4488 Normally this is done by correcting the final value
4489 if it is negative. */
4490
4491 void
4492 expand_float (rtx to, rtx from, int unsignedp)
4493 {
4494 enum insn_code icode;
4495 rtx target = to;
4496 enum machine_mode fmode, imode;
4497 bool can_do_signed = false;
4498
4499 /* Crash now, because we won't be able to decide which mode to use. */
4500 gcc_assert (GET_MODE (from) != VOIDmode);
4501
4502 /* Look for an insn to do the conversion. Do it in the specified
4503 modes if possible; otherwise convert either input, output or both to
4504 wider mode. If the integer mode is wider than the mode of FROM,
4505 we can do the conversion signed even if the input is unsigned. */
4506
4507 for (fmode = GET_MODE (to); fmode != VOIDmode;
4508 fmode = GET_MODE_WIDER_MODE (fmode))
4509 for (imode = GET_MODE (from); imode != VOIDmode;
4510 imode = GET_MODE_WIDER_MODE (imode))
4511 {
4512 int doing_unsigned = unsignedp;
4513
4514 if (fmode != GET_MODE (to)
4515 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4516 continue;
4517
4518 icode = can_float_p (fmode, imode, unsignedp);
4519 if (icode == CODE_FOR_nothing && unsignedp)
4520 {
4521 enum insn_code scode = can_float_p (fmode, imode, 0);
4522 if (scode != CODE_FOR_nothing)
4523 can_do_signed = true;
4524 if (imode != GET_MODE (from))
4525 icode = scode, doing_unsigned = 0;
4526 }
4527
4528 if (icode != CODE_FOR_nothing)
4529 {
4530 if (imode != GET_MODE (from))
4531 from = convert_to_mode (imode, from, unsignedp);
4532
4533 if (fmode != GET_MODE (to))
4534 target = gen_reg_rtx (fmode);
4535
4536 emit_unop_insn (icode, target, from,
4537 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4538
4539 if (target != to)
4540 convert_move (to, target, 0);
4541 return;
4542 }
4543 }
4544
4545 /* Unsigned integer, and no way to convert directly. For binary
4546 floating point modes, convert as signed, then conditionally adjust
4547 the result. */
4548 if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4549 {
4550 rtx label = gen_label_rtx ();
4551 rtx temp;
4552 REAL_VALUE_TYPE offset;
4553
4554 /* Look for a usable floating mode FMODE wider than the source and at
4555 least as wide as the target. Using FMODE will avoid rounding woes
4556 with unsigned values greater than the signed maximum value. */
4557
4558 for (fmode = GET_MODE (to); fmode != VOIDmode;
4559 fmode = GET_MODE_WIDER_MODE (fmode))
4560 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4561 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4562 break;
4563
4564 if (fmode == VOIDmode)
4565 {
4566 /* There is no such mode. Pretend the target is wide enough. */
4567 fmode = GET_MODE (to);
4568
4569 /* Avoid double-rounding when TO is narrower than FROM. */
4570 if ((significand_size (fmode) + 1)
4571 < GET_MODE_BITSIZE (GET_MODE (from)))
4572 {
4573 rtx temp1;
4574 rtx neglabel = gen_label_rtx ();
4575
4576 /* Don't use TARGET if it isn't a register, is a hard register,
4577 or is the wrong mode. */
4578 if (!REG_P (target)
4579 || REGNO (target) < FIRST_PSEUDO_REGISTER
4580 || GET_MODE (target) != fmode)
4581 target = gen_reg_rtx (fmode);
4582
4583 imode = GET_MODE (from);
4584 do_pending_stack_adjust ();
4585
4586 /* Test whether the sign bit is set. */
4587 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4588 0, neglabel);
4589
4590 /* The sign bit is not set. Convert as signed. */
4591 expand_float (target, from, 0);
4592 emit_jump_insn (gen_jump (label));
4593 emit_barrier ();
4594
4595 /* The sign bit is set.
4596 Convert to a usable (positive signed) value by shifting right
4597 one bit, while remembering if a nonzero bit was shifted
4598 out; i.e., compute (from & 1) | (from >> 1). */
4599
4600 emit_label (neglabel);
4601 temp = expand_binop (imode, and_optab, from, const1_rtx,
4602 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4603 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4604 NULL_RTX, 1);
4605 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4606 OPTAB_LIB_WIDEN);
4607 expand_float (target, temp, 0);
4608
4609 /* Multiply by 2 to undo the shift above. */
4610 temp = expand_binop (fmode, add_optab, target, target,
4611 target, 0, OPTAB_LIB_WIDEN);
4612 if (temp != target)
4613 emit_move_insn (target, temp);
4614
4615 do_pending_stack_adjust ();
4616 emit_label (label);
4617 goto done;
4618 }
4619 }
4620
4621 /* If we are about to do some arithmetic to correct for an
4622 unsigned operand, do it in a pseudo-register. */
4623
4624 if (GET_MODE (to) != fmode
4625 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4626 target = gen_reg_rtx (fmode);
4627
4628 /* Convert as signed integer to floating. */
4629 expand_float (target, from, 0);
4630
4631 /* If FROM is negative (and therefore TO is negative),
4632 correct its value by 2**bitwidth. */
4633
4634 do_pending_stack_adjust ();
4635 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4636 0, label);
4637
4638
4639 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4640 temp = expand_binop (fmode, add_optab, target,
4641 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4642 target, 0, OPTAB_LIB_WIDEN);
4643 if (temp != target)
4644 emit_move_insn (target, temp);
4645
4646 do_pending_stack_adjust ();
4647 emit_label (label);
4648 goto done;
4649 }
4650
4651 /* No hardware instruction available; call a library routine. */
4652 {
4653 rtx libfunc;
4654 rtx insns;
4655 rtx value;
4656 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4657
4658 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4659 from = convert_to_mode (SImode, from, unsignedp);
4660
4661 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4662 gcc_assert (libfunc);
4663
4664 start_sequence ();
4665
4666 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4667 GET_MODE (to), 1, from,
4668 GET_MODE (from));
4669 insns = get_insns ();
4670 end_sequence ();
4671
4672 emit_libcall_block (insns, target, value,
4673 gen_rtx_FLOAT (GET_MODE (to), from));
4674 }
4675
4676 done:
4677
4678 /* Copy result to requested destination
4679 if we have been computing in a temp location. */
4680
4681 if (target != to)
4682 {
4683 if (GET_MODE (target) == GET_MODE (to))
4684 emit_move_insn (to, target);
4685 else
4686 convert_move (to, target, 0);
4687 }
4688 }
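/* Illustrative sketch (kept out of the build): convert an unsigned
   SImode value to DFmode.  expand_float will use a floatunssidf2
   pattern if the target provides one, a signed conversion plus the
   fixup emitted above, or a libcall.  */
#if 0
static rtx
example_float_unsigned_si_to_df (rtx val)
{
  rtx to = gen_reg_rtx (DFmode);
  expand_float (to, val, 1);
  return to;
}
#endif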
4689
4690 /* Generate code to convert FROM to fixed point and store in TO. FROM
4691 must be floating point. */
4692
4693 void
4694 expand_fix (rtx to, rtx from, int unsignedp)
4695 {
4696 enum insn_code icode;
4697 rtx target = to;
4698 enum machine_mode fmode, imode;
4699 int must_trunc = 0;
4700
4701 /* We first try to find a pair of modes, one real and one integer, at
4702 least as wide as FROM and TO, respectively, in which we can open-code
4703 this conversion. If the integer mode is wider than the mode of TO,
4704 we can do the conversion either signed or unsigned. */
4705
4706 for (fmode = GET_MODE (from); fmode != VOIDmode;
4707 fmode = GET_MODE_WIDER_MODE (fmode))
4708 for (imode = GET_MODE (to); imode != VOIDmode;
4709 imode = GET_MODE_WIDER_MODE (imode))
4710 {
4711 int doing_unsigned = unsignedp;
4712
4713 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4714 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4715 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4716
4717 if (icode != CODE_FOR_nothing)
4718 {
4719 if (fmode != GET_MODE (from))
4720 from = convert_to_mode (fmode, from, 0);
4721
4722 if (must_trunc)
4723 {
4724 rtx temp = gen_reg_rtx (GET_MODE (from));
4725 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4726 temp, 0);
4727 }
4728
4729 if (imode != GET_MODE (to))
4730 target = gen_reg_rtx (imode);
4731
4732 emit_unop_insn (icode, target, from,
4733 doing_unsigned ? UNSIGNED_FIX : FIX);
4734 if (target != to)
4735 convert_move (to, target, unsignedp);
4736 return;
4737 }
4738 }
4739
4740 /* For an unsigned conversion, there is one more way to do it.
4741 If we have a signed conversion, we generate code that compares
4742 the real value to the largest representable positive number.  If it
4743 is smaller, the conversion is done normally. Otherwise, subtract
4744 one plus the highest signed number, convert, and add it back.
4745
4746 We only need to check all real modes, since we know we didn't find
4747 anything with a wider integer mode.
4748
4749 This code used to extend FP value into mode wider than the destination.
4750 This is not needed. Consider, for instance conversion from SFmode
4751 into DImode.
4752
4753 The hot path through the code deals with inputs smaller than 2^63
4754 and does just the conversion, so there are no bits to lose.
4755
4756 In the other path we know the value is positive in the range 2^63..2^64-1
4757 inclusive (for any other input, overflow happens and the result is undefined).
4758 So we know that the most significant bit set in the mantissa corresponds to
4759 2^63. The subtraction of 2^63 should not generate any rounding as it
4760 simply clears out that bit. The rest is trivial. */
4761
4762 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4763 for (fmode = GET_MODE (from); fmode != VOIDmode;
4764 fmode = GET_MODE_WIDER_MODE (fmode))
4765 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4766 &must_trunc))
4767 {
4768 int bitsize;
4769 REAL_VALUE_TYPE offset;
4770 rtx limit, lab1, lab2, insn;
4771
4772 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4773 real_2expN (&offset, bitsize - 1);
4774 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4775 lab1 = gen_label_rtx ();
4776 lab2 = gen_label_rtx ();
4777
4778 if (fmode != GET_MODE (from))
4779 from = convert_to_mode (fmode, from, 0);
4780
4781 /* See if we need to do the subtraction. */
4782 do_pending_stack_adjust ();
4783 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4784 0, lab1);
4785
4786 /* If not, do the signed "fix" and branch around fixup code. */
4787 expand_fix (to, from, 0);
4788 emit_jump_insn (gen_jump (lab2));
4789 emit_barrier ();
4790
4791 /* Otherwise, subtract 2**(N-1), convert to signed number,
4792 then add 2**(N-1). Do the addition using XOR since this
4793 will often generate better code. */
4794 emit_label (lab1);
4795 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4796 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4797 expand_fix (to, target, 0);
4798 target = expand_binop (GET_MODE (to), xor_optab, to,
4799 gen_int_mode
4800 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4801 GET_MODE (to)),
4802 to, 1, OPTAB_LIB_WIDEN);
4803
4804 if (target != to)
4805 emit_move_insn (to, target);
4806
4807 emit_label (lab2);
4808
4809 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4810 != CODE_FOR_nothing)
4811 {
4812 /* Make a place for a REG_NOTE and add it. */
4813 insn = emit_move_insn (to, to);
4814 set_unique_reg_note (insn,
4815 REG_EQUAL,
4816 gen_rtx_fmt_e (UNSIGNED_FIX,
4817 GET_MODE (to),
4818 copy_rtx (from)));
4819 }
4820
4821 return;
4822 }
4823
4824 /* We can't do it with an insn, so use a library call. But first ensure
4825 that the mode of TO is at least as wide as SImode, since those are the
4826 only library calls we know about. */
4827
4828 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4829 {
4830 target = gen_reg_rtx (SImode);
4831
4832 expand_fix (target, from, unsignedp);
4833 }
4834 else
4835 {
4836 rtx insns;
4837 rtx value;
4838 rtx libfunc;
4839
4840 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4841 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4842 gcc_assert (libfunc);
4843
4844 start_sequence ();
4845
4846 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4847 GET_MODE (to), 1, from,
4848 GET_MODE (from));
4849 insns = get_insns ();
4850 end_sequence ();
4851
4852 emit_libcall_block (insns, target, value,
4853 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4854 GET_MODE (to), from));
4855 }
4856
4857 if (target != to)
4858 {
4859 if (GET_MODE (to) == GET_MODE (target))
4860 emit_move_insn (to, target);
4861 else
4862 convert_move (to, target, 0);
4863 }
4864 }
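/* Illustrative sketch (kept out of the build): truncate a DFmode value
   to a signed SImode integer.  expand_fix open-codes the conversion when
   the target provides a suitable fix pattern and otherwise falls back to
   a libcall; the comparison-based rewrite above only applies to unsigned
   conversions.  */
#if 0
static rtx
example_fix_df_to_signed_si (rtx val)
{
  rtx to = gen_reg_rtx (SImode);
  expand_fix (to, val, 0);
  return to;
}
#endif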
4865
4866 /* Report whether we have an instruction to perform the operation
4867 specified by CODE on operands of mode MODE. */
4868 int
4869 have_insn_for (enum rtx_code code, enum machine_mode mode)
4870 {
4871 return (code_to_optab[(int) code] != 0
4872 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4873 != CODE_FOR_nothing));
4874 }
4875
4876 /* Create a blank optab. */
4877 static optab
4878 new_optab (void)
4879 {
4880 int i;
4881 optab op = ggc_alloc (sizeof (struct optab));
4882 for (i = 0; i < NUM_MACHINE_MODES; i++)
4883 {
4884 op->handlers[i].insn_code = CODE_FOR_nothing;
4885 op->handlers[i].libfunc = 0;
4886 }
4887
4888 return op;
4889 }
4890
4891 static convert_optab
4892 new_convert_optab (void)
4893 {
4894 int i, j;
4895 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4896 for (i = 0; i < NUM_MACHINE_MODES; i++)
4897 for (j = 0; j < NUM_MACHINE_MODES; j++)
4898 {
4899 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4900 op->handlers[i][j].libfunc = 0;
4901 }
4902 return op;
4903 }
4904
4905 /* Same, but fill in its code as CODE, and write it into the
4906 code_to_optab table. */
4907 static inline optab
4908 init_optab (enum rtx_code code)
4909 {
4910 optab op = new_optab ();
4911 op->code = code;
4912 code_to_optab[(int) code] = op;
4913 return op;
4914 }
4915
4916 /* Same, but fill in its code as CODE, and do _not_ write it into
4917 the code_to_optab table. */
4918 static inline optab
4919 init_optabv (enum rtx_code code)
4920 {
4921 optab op = new_optab ();
4922 op->code = code;
4923 return op;
4924 }
4925
4926 /* Conversion optabs never go in the code_to_optab table. */
4927 static inline convert_optab
4928 init_convert_optab (enum rtx_code code)
4929 {
4930 convert_optab op = new_convert_optab ();
4931 op->code = code;
4932 return op;
4933 }
4934
4935 /* Initialize the libfunc fields of an entire group of entries in some
4936 optab. Each entry is set equal to a string consisting of a leading
4937 pair of underscores followed by a generic operation name followed by
4938 a mode name (downshifted to lowercase) followed by a single character
4939 representing the number of operands for the given operation (which is
4940 usually one of the characters '2', '3', or '4').
4941
4942 OPTABLE is the table in which libfunc fields are to be initialized.
4943 FIRST_MODE is the first machine mode index in the given optab to
4944 initialize.
4945 LAST_MODE is the last machine mode index in the given optab to
4946 initialize.
4947 OPNAME is the generic (string) name of the operation.
4948 SUFFIX is the character which specifies the number of operands for
4949 the given generic operation.
4950 */
4951
4952 static void
4953 init_libfuncs (optab optable, int first_mode, int last_mode,
4954 const char *opname, int suffix)
4955 {
4956 int mode;
4957 unsigned opname_len = strlen (opname);
4958
4959 for (mode = first_mode; (int) mode <= (int) last_mode;
4960 mode = (enum machine_mode) ((int) mode + 1))
4961 {
4962 const char *mname = GET_MODE_NAME (mode);
4963 unsigned mname_len = strlen (mname);
4964 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4965 char *p;
4966 const char *q;
4967
4968 p = libfunc_name;
4969 *p++ = '_';
4970 *p++ = '_';
4971 for (q = opname; *q; )
4972 *p++ = *q++;
4973 for (q = mname; *q; q++)
4974 *p++ = TOLOWER (*q);
4975 *p++ = suffix;
4976 *p = '\0';
4977
4978 optable->handlers[(int) mode].libfunc
4979 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4980 }
4981 }
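/* For example (illustrative), a call such as
     init_libfuncs (add_optab, SFmode, DFmode, "add", '3');
   would register the names "__addsf3" and "__adddf3" for the SFmode and
   DFmode entries of add_optab, matching the usual libgcc naming
   scheme.  */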
4982
4983 /* Initialize the libfunc fields of an entire group of entries in some
4984 optab which correspond to all integer mode operations. The parameters
4985 have the same meaning as similarly named ones for the `init_libfuncs'
4986 routine. (See above). */
4987
4988 static void
4989 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4990 {
4991 int maxsize = 2*BITS_PER_WORD;
4992 if (maxsize < LONG_LONG_TYPE_SIZE)
4993 maxsize = LONG_LONG_TYPE_SIZE;
4994 init_libfuncs (optable, word_mode,
4995 mode_for_size (maxsize, MODE_INT, 0),
4996 opname, suffix);
4997 }
4998
4999 /* Initialize the libfunc fields of an entire group of entries in some
5000 optab which correspond to all real mode operations. The parameters
5001 have the same meaning as similarly named ones for the `init_libfuncs'
5002 routine. (See above). */
5003
5004 static void
5005 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5006 {
5007 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5008 init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5009 opname, suffix);
5010 }
5011
5012 /* Initialize the libfunc fields of an entire group of entries of an
5013 inter-mode-class conversion optab. The string formation rules are
5014 similar to the ones for init_libfuncs, above, but instead of having
5015 a mode name and an operand count these functions have two mode names
5016 and no operand count. */
5017 static void
5018 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
5019 enum mode_class from_class,
5020 enum mode_class to_class)
5021 {
5022 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
5023 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
5024 size_t opname_len = strlen (opname);
5025 size_t max_mname_len = 0;
5026
5027 enum machine_mode fmode, tmode;
5028 const char *fname, *tname;
5029 const char *q;
5030 char *libfunc_name, *suffix;
5031 char *p;
5032
5033 for (fmode = first_from_mode;
5034 fmode != VOIDmode;
5035 fmode = GET_MODE_WIDER_MODE (fmode))
5036 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
5037
5038 for (tmode = first_to_mode;
5039 tmode != VOIDmode;
5040 tmode = GET_MODE_WIDER_MODE (tmode))
5041 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
5042
5043 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5044 libfunc_name[0] = '_';
5045 libfunc_name[1] = '_';
5046 memcpy (&libfunc_name[2], opname, opname_len);
5047 suffix = libfunc_name + opname_len + 2;
5048
5049 for (fmode = first_from_mode; fmode != VOIDmode;
5050 fmode = GET_MODE_WIDER_MODE (fmode))
5051 for (tmode = first_to_mode; tmode != VOIDmode;
5052 tmode = GET_MODE_WIDER_MODE (tmode))
5053 {
5054 fname = GET_MODE_NAME (fmode);
5055 tname = GET_MODE_NAME (tmode);
5056
5057 p = suffix;
5058 for (q = fname; *q; p++, q++)
5059 *p = TOLOWER (*q);
5060 for (q = tname; *q; p++, q++)
5061 *p = TOLOWER (*q);
5062
5063 *p = '\0';
5064
5065 tab->handlers[tmode][fmode].libfunc
5066 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5067 p - libfunc_name));
5068 }
5069 }
5070
5071 /* Initialize the libfunc fields of an entire group of entries of an
5072 intra-mode-class conversion optab. The string formation rules are
5073 similar to the ones for init_libfuncs, above.  WIDENING says whether
5074 the optab goes from narrow to wide modes or vice versa. These functions
5075 have two mode names _and_ an operand count. */
5076 static void
5077 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
5078 enum mode_class class, bool widening)
5079 {
5080 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
5081 size_t opname_len = strlen (opname);
5082 size_t max_mname_len = 0;
5083
5084 enum machine_mode nmode, wmode;
5085 const char *nname, *wname;
5086 const char *q;
5087 char *libfunc_name, *suffix;
5088 char *p;
5089
5090 for (nmode = first_mode; nmode != VOIDmode;
5091 nmode = GET_MODE_WIDER_MODE (nmode))
5092 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
5093
5094 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
5095 libfunc_name[0] = '_';
5096 libfunc_name[1] = '_';
5097 memcpy (&libfunc_name[2], opname, opname_len);
5098 suffix = libfunc_name + opname_len + 2;
5099
5100 for (nmode = first_mode; nmode != VOIDmode;
5101 nmode = GET_MODE_WIDER_MODE (nmode))
5102 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
5103 wmode = GET_MODE_WIDER_MODE (wmode))
5104 {
5105 nname = GET_MODE_NAME (nmode);
5106 wname = GET_MODE_NAME (wmode);
5107
5108 p = suffix;
5109 for (q = widening ? nname : wname; *q; p++, q++)
5110 *p = TOLOWER (*q);
5111 for (q = widening ? wname : nname; *q; p++, q++)
5112 *p = TOLOWER (*q);
5113
5114 *p++ = '2';
5115 *p = '\0';
5116
5117 tab->handlers[widening ? wmode : nmode]
5118 [widening ? nmode : wmode].libfunc
5119 = init_one_libfunc (ggc_alloc_string (libfunc_name,
5120 p - libfunc_name));
5121 }
5122 }
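/* For illustration (again derived directly from the code above): for
   sext_optab with OPNAME "extend" over MODE_FLOAT and WIDENING true,
   the SFmode/DFmode pair produces "__extendsfdf2", recorded under
   handlers[DFmode][SFmode]; for trunc_optab with OPNAME "trunc" and
   WIDENING false, the same pair produces "__truncdfsf2", recorded
   under handlers[SFmode][DFmode].  */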
5123
5124
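/* Return a SYMBOL_REF rtx for the library function NAME.  A dummy
   FUNCTION_DECL is built only so that targetm.encode_section_info
   has something to hang section/encoding flags on; the decl itself
   is then discarded.  */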
5125 rtx
5126 init_one_libfunc (const char *name)
5127 {
5128 rtx symbol;
5129
5130 /* Create a FUNCTION_DECL that can be passed to
5131 targetm.encode_section_info. */
5132   /* ??? We don't have any type information except that this is
5133 a function. Pretend this is "int foo()". */
5134 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5135 build_function_type (integer_type_node, NULL_TREE));
5136 DECL_ARTIFICIAL (decl) = 1;
5137 DECL_EXTERNAL (decl) = 1;
5138 TREE_PUBLIC (decl) = 1;
5139
5140 symbol = XEXP (DECL_RTL (decl), 0);
5141
5142 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5143 are the flags assigned by targetm.encode_section_info. */
5144 SET_SYMBOL_REF_DECL (symbol, 0);
5145
5146 return symbol;
5147 }
5148
5149 /* Call this to reset the function entry for one optab (OPTABLE) in mode
5150 MODE to NAME, which should be either 0 or a string constant. */
5151 void
5152 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5153 {
5154 if (name)
5155 optable->handlers[mode].libfunc = init_one_libfunc (name);
5156 else
5157 optable->handlers[mode].libfunc = 0;
5158 }
5159
5160 /* Call this to reset the function entry for one conversion optab
5161 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5162 either 0 or a string constant. */
5163 void
5164 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5165 enum machine_mode fmode, const char *name)
5166 {
5167 if (name)
5168 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5169 else
5170 optable->handlers[tmode][fmode].libfunc = 0;
5171 }
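/* A rough usage sketch (hypothetical target, hypothetical function
   names): a TARGET_INIT_LIBFUNCS hook, invoked via
   targetm.init_libfuncs () at the end of init_optabs below, can
   override the default names chosen there, e.g.

       static void
       example_init_libfuncs (void)
       {
	 set_optab_libfunc (sdiv_optab, SImode, "__example_divsi3");
	 set_conv_libfunc (sfix_optab, SImode, DFmode, NULL);
       }

   Passing NULL (or 0) removes the libcall entirely, so the expander
   must then find some other way to perform the operation.  */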
5172
5173 /* Call this once to initialize the contents of the optabs
5174 appropriately for the current target machine. */
5175
5176 void
5177 init_optabs (void)
5178 {
5179 unsigned int i;
5180
5181 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5182
5183 for (i = 0; i < NUM_RTX_CODE; i++)
5184 setcc_gen_code[i] = CODE_FOR_nothing;
5185
5186 #ifdef HAVE_conditional_move
5187 for (i = 0; i < NUM_MACHINE_MODES; i++)
5188 movcc_gen_code[i] = CODE_FOR_nothing;
5189 #endif
5190
5191 for (i = 0; i < NUM_MACHINE_MODES; i++)
5192 {
5193 vcond_gen_code[i] = CODE_FOR_nothing;
5194 vcondu_gen_code[i] = CODE_FOR_nothing;
5195 }
5196
5197 add_optab = init_optab (PLUS);
5198 addv_optab = init_optabv (PLUS);
5199 sub_optab = init_optab (MINUS);
5200 subv_optab = init_optabv (MINUS);
5201 smul_optab = init_optab (MULT);
5202 smulv_optab = init_optabv (MULT);
5203 smul_highpart_optab = init_optab (UNKNOWN);
5204 umul_highpart_optab = init_optab (UNKNOWN);
5205 smul_widen_optab = init_optab (UNKNOWN);
5206 umul_widen_optab = init_optab (UNKNOWN);
5207 usmul_widen_optab = init_optab (UNKNOWN);
5208 sdiv_optab = init_optab (DIV);
5209 sdivv_optab = init_optabv (DIV);
5210 sdivmod_optab = init_optab (UNKNOWN);
5211 udiv_optab = init_optab (UDIV);
5212 udivmod_optab = init_optab (UNKNOWN);
5213 smod_optab = init_optab (MOD);
5214 umod_optab = init_optab (UMOD);
5215 fmod_optab = init_optab (UNKNOWN);
5216 drem_optab = init_optab (UNKNOWN);
5217 ftrunc_optab = init_optab (UNKNOWN);
5218 and_optab = init_optab (AND);
5219 ior_optab = init_optab (IOR);
5220 xor_optab = init_optab (XOR);
5221 ashl_optab = init_optab (ASHIFT);
5222 ashr_optab = init_optab (ASHIFTRT);
5223 lshr_optab = init_optab (LSHIFTRT);
5224 rotl_optab = init_optab (ROTATE);
5225 rotr_optab = init_optab (ROTATERT);
5226 smin_optab = init_optab (SMIN);
5227 smax_optab = init_optab (SMAX);
5228 umin_optab = init_optab (UMIN);
5229 umax_optab = init_optab (UMAX);
5230 pow_optab = init_optab (UNKNOWN);
5231 atan2_optab = init_optab (UNKNOWN);
5232
5233 /* These three have codes assigned exclusively for the sake of
5234 have_insn_for. */
5235 mov_optab = init_optab (SET);
5236 movstrict_optab = init_optab (STRICT_LOW_PART);
5237 cmp_optab = init_optab (COMPARE);
5238
5239 ucmp_optab = init_optab (UNKNOWN);
5240 tst_optab = init_optab (UNKNOWN);
5241
5242 eq_optab = init_optab (EQ);
5243 ne_optab = init_optab (NE);
5244 gt_optab = init_optab (GT);
5245 ge_optab = init_optab (GE);
5246 lt_optab = init_optab (LT);
5247 le_optab = init_optab (LE);
5248 unord_optab = init_optab (UNORDERED);
5249
5250 neg_optab = init_optab (NEG);
5251 negv_optab = init_optabv (NEG);
5252 abs_optab = init_optab (ABS);
5253 absv_optab = init_optabv (ABS);
5254 addcc_optab = init_optab (UNKNOWN);
5255 one_cmpl_optab = init_optab (NOT);
5256 ffs_optab = init_optab (FFS);
5257 clz_optab = init_optab (CLZ);
5258 ctz_optab = init_optab (CTZ);
5259 popcount_optab = init_optab (POPCOUNT);
5260 parity_optab = init_optab (PARITY);
5261 sqrt_optab = init_optab (SQRT);
5262 floor_optab = init_optab (UNKNOWN);
5263 lfloor_optab = init_optab (UNKNOWN);
5264 ceil_optab = init_optab (UNKNOWN);
5265 lceil_optab = init_optab (UNKNOWN);
5266 round_optab = init_optab (UNKNOWN);
5267 btrunc_optab = init_optab (UNKNOWN);
5268 nearbyint_optab = init_optab (UNKNOWN);
5269 rint_optab = init_optab (UNKNOWN);
5270 lrint_optab = init_optab (UNKNOWN);
5271 sincos_optab = init_optab (UNKNOWN);
5272 sin_optab = init_optab (UNKNOWN);
5273 asin_optab = init_optab (UNKNOWN);
5274 cos_optab = init_optab (UNKNOWN);
5275 acos_optab = init_optab (UNKNOWN);
5276 exp_optab = init_optab (UNKNOWN);
5277 exp10_optab = init_optab (UNKNOWN);
5278 exp2_optab = init_optab (UNKNOWN);
5279 expm1_optab = init_optab (UNKNOWN);
5280 ldexp_optab = init_optab (UNKNOWN);
5281 logb_optab = init_optab (UNKNOWN);
5282 ilogb_optab = init_optab (UNKNOWN);
5283 log_optab = init_optab (UNKNOWN);
5284 log10_optab = init_optab (UNKNOWN);
5285 log2_optab = init_optab (UNKNOWN);
5286 log1p_optab = init_optab (UNKNOWN);
5287 tan_optab = init_optab (UNKNOWN);
5288 atan_optab = init_optab (UNKNOWN);
5289 copysign_optab = init_optab (UNKNOWN);
5290
5291 strlen_optab = init_optab (UNKNOWN);
5292 cbranch_optab = init_optab (UNKNOWN);
5293 cmov_optab = init_optab (UNKNOWN);
5294 cstore_optab = init_optab (UNKNOWN);
5295 push_optab = init_optab (UNKNOWN);
5296
5297 reduc_smax_optab = init_optab (UNKNOWN);
5298 reduc_umax_optab = init_optab (UNKNOWN);
5299 reduc_smin_optab = init_optab (UNKNOWN);
5300 reduc_umin_optab = init_optab (UNKNOWN);
5301 reduc_splus_optab = init_optab (UNKNOWN);
5302 reduc_uplus_optab = init_optab (UNKNOWN);
5303
5304 ssum_widen_optab = init_optab (UNKNOWN);
5305 usum_widen_optab = init_optab (UNKNOWN);
5306 sdot_prod_optab = init_optab (UNKNOWN);
5307 udot_prod_optab = init_optab (UNKNOWN);
5308
5309 vec_extract_optab = init_optab (UNKNOWN);
5310 vec_set_optab = init_optab (UNKNOWN);
5311 vec_init_optab = init_optab (UNKNOWN);
5312 vec_shl_optab = init_optab (UNKNOWN);
5313 vec_shr_optab = init_optab (UNKNOWN);
5314 vec_realign_load_optab = init_optab (UNKNOWN);
5315 movmisalign_optab = init_optab (UNKNOWN);
5316
5317 powi_optab = init_optab (UNKNOWN);
5318
5319 /* Conversions. */
5320 sext_optab = init_convert_optab (SIGN_EXTEND);
5321 zext_optab = init_convert_optab (ZERO_EXTEND);
5322 trunc_optab = init_convert_optab (TRUNCATE);
5323 sfix_optab = init_convert_optab (FIX);
5324 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5325 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5326 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5327 sfloat_optab = init_convert_optab (FLOAT);
5328 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5329
5330 for (i = 0; i < NUM_MACHINE_MODES; i++)
5331 {
5332 movmem_optab[i] = CODE_FOR_nothing;
5333 cmpstr_optab[i] = CODE_FOR_nothing;
5334 cmpstrn_optab[i] = CODE_FOR_nothing;
5335 cmpmem_optab[i] = CODE_FOR_nothing;
5336 setmem_optab[i] = CODE_FOR_nothing;
5337
5338 sync_add_optab[i] = CODE_FOR_nothing;
5339 sync_sub_optab[i] = CODE_FOR_nothing;
5340 sync_ior_optab[i] = CODE_FOR_nothing;
5341 sync_and_optab[i] = CODE_FOR_nothing;
5342 sync_xor_optab[i] = CODE_FOR_nothing;
5343 sync_nand_optab[i] = CODE_FOR_nothing;
5344 sync_old_add_optab[i] = CODE_FOR_nothing;
5345 sync_old_sub_optab[i] = CODE_FOR_nothing;
5346 sync_old_ior_optab[i] = CODE_FOR_nothing;
5347 sync_old_and_optab[i] = CODE_FOR_nothing;
5348 sync_old_xor_optab[i] = CODE_FOR_nothing;
5349 sync_old_nand_optab[i] = CODE_FOR_nothing;
5350 sync_new_add_optab[i] = CODE_FOR_nothing;
5351 sync_new_sub_optab[i] = CODE_FOR_nothing;
5352 sync_new_ior_optab[i] = CODE_FOR_nothing;
5353 sync_new_and_optab[i] = CODE_FOR_nothing;
5354 sync_new_xor_optab[i] = CODE_FOR_nothing;
5355 sync_new_nand_optab[i] = CODE_FOR_nothing;
5356 sync_compare_and_swap[i] = CODE_FOR_nothing;
5357 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5358 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5359 sync_lock_release[i] = CODE_FOR_nothing;
5360
5361 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5362 }
5363
5364 /* Fill in the optabs with the insns we support. */
5365 init_all_optabs ();
5366
5367 /* Initialize the optabs with the names of the library functions. */
5368 init_integral_libfuncs (add_optab, "add", '3');
5369 init_floating_libfuncs (add_optab, "add", '3');
5370 init_integral_libfuncs (addv_optab, "addv", '3');
5371 init_floating_libfuncs (addv_optab, "add", '3');
5372 init_integral_libfuncs (sub_optab, "sub", '3');
5373 init_floating_libfuncs (sub_optab, "sub", '3');
5374 init_integral_libfuncs (subv_optab, "subv", '3');
5375 init_floating_libfuncs (subv_optab, "sub", '3');
5376 init_integral_libfuncs (smul_optab, "mul", '3');
5377 init_floating_libfuncs (smul_optab, "mul", '3');
5378 init_integral_libfuncs (smulv_optab, "mulv", '3');
5379 init_floating_libfuncs (smulv_optab, "mul", '3');
5380 init_integral_libfuncs (sdiv_optab, "div", '3');
5381 init_floating_libfuncs (sdiv_optab, "div", '3');
5382 init_integral_libfuncs (sdivv_optab, "divv", '3');
5383 init_integral_libfuncs (udiv_optab, "udiv", '3');
5384 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5385 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5386 init_integral_libfuncs (smod_optab, "mod", '3');
5387 init_integral_libfuncs (umod_optab, "umod", '3');
5388 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5389 init_integral_libfuncs (and_optab, "and", '3');
5390 init_integral_libfuncs (ior_optab, "ior", '3');
5391 init_integral_libfuncs (xor_optab, "xor", '3');
5392 init_integral_libfuncs (ashl_optab, "ashl", '3');
5393 init_integral_libfuncs (ashr_optab, "ashr", '3');
5394 init_integral_libfuncs (lshr_optab, "lshr", '3');
5395 init_integral_libfuncs (smin_optab, "min", '3');
5396 init_floating_libfuncs (smin_optab, "min", '3');
5397 init_integral_libfuncs (smax_optab, "max", '3');
5398 init_floating_libfuncs (smax_optab, "max", '3');
5399 init_integral_libfuncs (umin_optab, "umin", '3');
5400 init_integral_libfuncs (umax_optab, "umax", '3');
5401 init_integral_libfuncs (neg_optab, "neg", '2');
5402 init_floating_libfuncs (neg_optab, "neg", '2');
5403 init_integral_libfuncs (negv_optab, "negv", '2');
5404 init_floating_libfuncs (negv_optab, "neg", '2');
5405 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5406 init_integral_libfuncs (ffs_optab, "ffs", '2');
5407 init_integral_libfuncs (clz_optab, "clz", '2');
5408 init_integral_libfuncs (ctz_optab, "ctz", '2');
5409 init_integral_libfuncs (popcount_optab, "popcount", '2');
5410 init_integral_libfuncs (parity_optab, "parity", '2');
5411
5412 /* Comparison libcalls for integers MUST come in pairs,
5413 signed/unsigned. */
5414 init_integral_libfuncs (cmp_optab, "cmp", '2');
5415 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5416 init_floating_libfuncs (cmp_optab, "cmp", '2');
5417
5418 /* EQ etc are floating point only. */
5419 init_floating_libfuncs (eq_optab, "eq", '2');
5420 init_floating_libfuncs (ne_optab, "ne", '2');
5421 init_floating_libfuncs (gt_optab, "gt", '2');
5422 init_floating_libfuncs (ge_optab, "ge", '2');
5423 init_floating_libfuncs (lt_optab, "lt", '2');
5424 init_floating_libfuncs (le_optab, "le", '2');
5425 init_floating_libfuncs (unord_optab, "unord", '2');
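  /* With the standard naming scheme these become libcalls such as
     "__eqsf2", "__nedf2" and "__unordsf2", and the integer
     comparisons above become e.g. "__cmpdi2" / "__ucmpdi2".  */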
5426
5427 init_floating_libfuncs (powi_optab, "powi", '2');
5428
5429 /* Conversions. */
5430 init_interclass_conv_libfuncs (sfloat_optab, "float",
5431 MODE_INT, MODE_FLOAT);
5432 init_interclass_conv_libfuncs (sfloat_optab, "float",
5433 MODE_INT, MODE_DECIMAL_FLOAT);
5434 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5435 MODE_INT, MODE_FLOAT);
5436 init_interclass_conv_libfuncs (ufloat_optab, "floatun",
5437 MODE_INT, MODE_DECIMAL_FLOAT);
5438 init_interclass_conv_libfuncs (sfix_optab, "fix",
5439 MODE_FLOAT, MODE_INT);
5440 init_interclass_conv_libfuncs (sfix_optab, "fix",
5441 MODE_DECIMAL_FLOAT, MODE_INT);
5442 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5443 MODE_FLOAT, MODE_INT);
5444 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5445 MODE_DECIMAL_FLOAT, MODE_INT);
5446 init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
5447 MODE_INT, MODE_DECIMAL_FLOAT);
5448
5449 /* sext_optab is also used for FLOAT_EXTEND. */
5450 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5451 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
5452 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5453 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5454 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5455 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
5456 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
5457 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);
5458
5459 /* Use cabs for double complex abs, since systems generally have cabs.
5460 Don't define any libcall for float complex, so that cabs will be used. */
5461 if (complex_double_type_node)
5462 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5463 = init_one_libfunc ("cabs");
5464
5465 /* The ffs function operates on `int'. */
5466 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5467 = init_one_libfunc ("ffs");
5468
5469 abort_libfunc = init_one_libfunc ("abort");
5470 memcpy_libfunc = init_one_libfunc ("memcpy");
5471 memmove_libfunc = init_one_libfunc ("memmove");
5472 memcmp_libfunc = init_one_libfunc ("memcmp");
5473 memset_libfunc = init_one_libfunc ("memset");
5474 setbits_libfunc = init_one_libfunc ("__setbits");
5475
5476 #ifndef DONT_USE_BUILTIN_SETJMP
5477 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5478 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5479 #else
5480 setjmp_libfunc = init_one_libfunc ("setjmp");
5481 longjmp_libfunc = init_one_libfunc ("longjmp");
5482 #endif
5483 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5484 unwind_sjlj_unregister_libfunc
5485 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5486
5487 /* For function entry/exit instrumentation. */
5488 profile_function_entry_libfunc
5489 = init_one_libfunc ("__cyg_profile_func_enter");
5490 profile_function_exit_libfunc
5491 = init_one_libfunc ("__cyg_profile_func_exit");
5492
5493 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5494
5495 if (HAVE_conditional_trap)
5496 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5497
5498 /* Allow the target to add more libcalls or rename some, etc. */
5499 targetm.init_libfuncs ();
5500 }
5501
5502 #ifdef DEBUG
5503
5504 /* Print information about the current contents of the optabs on
5505 STDERR. */
5506
5507 static void
5508 debug_optab_libfuncs (void)
5509 {
5510 int i;
5511 int j;
5512 int k;
5513
5514 /* Dump the arithmetic optabs. */
5515 for (i = 0; i != (int) OTI_MAX; i++)
5516 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5517 {
5518 optab o;
5519 struct optab_handlers *h;
5520
5521 o = optab_table[i];
5522 h = &o->handlers[j];
5523 if (h->libfunc)
5524 {
5525 	    gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5526 fprintf (stderr, "%s\t%s:\t%s\n",
5527 GET_RTX_NAME (o->code),
5528 GET_MODE_NAME (j),
5529 XSTR (h->libfunc, 0));
5530 }
5531 }
5532
5533 /* Dump the conversion optabs. */
5534 for (i = 0; i < (int) COI_MAX; ++i)
5535 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5536 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5537 {
5538 convert_optab o;
5539 struct optab_handlers *h;
5540
5541 	  o = convert_optab_table[i];
5542 h = &o->handlers[j][k];
5543 if (h->libfunc)
5544 {
5545 	    gcc_assert (GET_CODE (h->libfunc) == SYMBOL_REF);
5546 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5547 GET_RTX_NAME (o->code),
5548 GET_MODE_NAME (j),
5549 GET_MODE_NAME (k),
5550 XSTR (h->libfunc, 0));
5551 }
5552 }
5553 }
5554
5555 #endif /* DEBUG */
5556
5557
5558 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5559 CODE. Return 0 on failure. */
5560
5561 rtx
5562 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5563 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5564 {
5565 enum machine_mode mode = GET_MODE (op1);
5566 enum insn_code icode;
5567 rtx insn;
5568
5569 if (!HAVE_conditional_trap)
5570 return 0;
5571
5572 if (mode == VOIDmode)
5573 return 0;
5574
5575 icode = cmp_optab->handlers[(int) mode].insn_code;
5576 if (icode == CODE_FOR_nothing)
5577 return 0;
5578
5579 start_sequence ();
5580 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5581 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5582 if (!op1 || !op2)
5583 {
5584 end_sequence ();
5585 return 0;
5586 }
5587 emit_insn (GEN_FCN (icode) (op1, op2));
5588
5589 PUT_CODE (trap_rtx, code);
5590 gcc_assert (HAVE_conditional_trap);
5591 insn = gen_conditional_trap (trap_rtx, tcode);
5592 if (insn)
5593 {
5594 emit_insn (insn);
5595 insn = get_insns ();
5596 }
5597 end_sequence ();
5598
5599 return insn;
5600 }
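/* A minimal usage sketch (an assumed caller, e.g. an if-conversion
   style pass; not taken verbatim from one):

       seq = gen_cond_trap (EQ, op0, op1, const0_rtx);
       if (seq)
	 emit_insn (seq);

   The returned value is the whole insn sequence built above, or 0 on
   failure (no conditional trap support, or no comparison available
   for the mode of OP1).  */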
5601
5602 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5603 or unsigned operation code. */
5604
5605 static enum rtx_code
5606 get_rtx_code (enum tree_code tcode, bool unsignedp)
5607 {
5608 enum rtx_code code;
5609 switch (tcode)
5610 {
5611 case EQ_EXPR:
5612 code = EQ;
5613 break;
5614 case NE_EXPR:
5615 code = NE;
5616 break;
5617 case LT_EXPR:
5618 code = unsignedp ? LTU : LT;
5619 break;
5620 case LE_EXPR:
5621 code = unsignedp ? LEU : LE;
5622 break;
5623 case GT_EXPR:
5624 code = unsignedp ? GTU : GT;
5625 break;
5626 case GE_EXPR:
5627 code = unsignedp ? GEU : GE;
5628 break;
5629
5630 case UNORDERED_EXPR:
5631 code = UNORDERED;
5632 break;
5633 case ORDERED_EXPR:
5634 code = ORDERED;
5635 break;
5636 case UNLT_EXPR:
5637 code = UNLT;
5638 break;
5639 case UNLE_EXPR:
5640 code = UNLE;
5641 break;
5642 case UNGT_EXPR:
5643 code = UNGT;
5644 break;
5645 case UNGE_EXPR:
5646 code = UNGE;
5647 break;
5648 case UNEQ_EXPR:
5649 code = UNEQ;
5650 break;
5651 case LTGT_EXPR:
5652 code = LTGT;
5653 break;
5654
5655 default:
5656 gcc_unreachable ();
5657 }
5658 return code;
5659 }
5660
5661 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5662 unsigned operators. Do not generate compare instruction. */
5663
5664 static rtx
5665 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5666 {
5667 enum rtx_code rcode;
5668 tree t_op0, t_op1;
5669 rtx rtx_op0, rtx_op1;
5670
5671   /* This is unlikely.  While generating VEC_COND_EXPR, the auto-vectorizer
5672      ensures that the condition is a relational operation.  */
5673 gcc_assert (COMPARISON_CLASS_P (cond));
5674
5675 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5676 t_op0 = TREE_OPERAND (cond, 0);
5677 t_op1 = TREE_OPERAND (cond, 1);
5678
5679 /* Expand operands. */
5680 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5681 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5682
5683 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5684 && GET_MODE (rtx_op0) != VOIDmode)
5685 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5686
5687 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5688 && GET_MODE (rtx_op1) != VOIDmode)
5689 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5690
5691 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5692 }
5693
5694 /* Return insn code for VEC_COND_EXPR EXPR. */
5695
5696 static inline enum insn_code
5697 get_vcond_icode (tree expr, enum machine_mode mode)
5698 {
5699 enum insn_code icode = CODE_FOR_nothing;
5700
5701 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5702 icode = vcondu_gen_code[mode];
5703 else
5704 icode = vcond_gen_code[mode];
5705 return icode;
5706 }
5707
5708 /* Return TRUE iff appropriate vector insns are available
5709    for the vector cond expr EXPR in mode VMODE.  */
5710
5711 bool
5712 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5713 {
5714 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5715 return false;
5716 return true;
5717 }
5718
5719 /* Generate insns for VEC_COND_EXPR. */
5720
5721 rtx
5722 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5723 {
5724 enum insn_code icode;
5725 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5726 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5727 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5728
5729 icode = get_vcond_icode (vec_cond_expr, mode);
5730 if (icode == CODE_FOR_nothing)
5731 return 0;
5732
5733 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5734 target = gen_reg_rtx (mode);
5735
5736 /* Get comparison rtx. First expand both cond expr operands. */
5737 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5738 unsignedp, icode);
5739 cc_op0 = XEXP (comparison, 0);
5740 cc_op1 = XEXP (comparison, 1);
5741 /* Expand both operands and force them in reg, if required. */
5742 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5743 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5744 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5745 && mode != VOIDmode)
5746 rtx_op1 = force_reg (mode, rtx_op1);
5747
5748 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5749 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5750 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5751 && mode != VOIDmode)
5752 rtx_op2 = force_reg (mode, rtx_op2);
5753
5754 /* Emit instruction! */
5755 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5756 comparison, cc_op0, cc_op1));
5757
5758 return target;
5759 }
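/* The insn emitted above follows the vcond/vcondu pattern's operand
   layout: operand 0 is the destination, operands 1 and 2 are the two
   value operands, operand 3 is the comparison rtx, and operands 4 and
   5 are the values being compared (which is why vector_compare_rtx
   checks the operand[4]/operand[5] predicates).  */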
5760
5761
5762 /* This is an internal subroutine of the other compare_and_swap expanders.
5763 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5764 operation. TARGET is an optional place to store the value result of
5765 the operation. ICODE is the particular instruction to expand. Return
5766 the result of the operation. */
5767
5768 static rtx
5769 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5770 rtx target, enum insn_code icode)
5771 {
5772 enum machine_mode mode = GET_MODE (mem);
5773 rtx insn;
5774
5775 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5776 target = gen_reg_rtx (mode);
5777
5778 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5779 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5780 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5781 old_val = force_reg (mode, old_val);
5782
5783 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5784 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5785 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5786 new_val = force_reg (mode, new_val);
5787
5788 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5789 if (insn == NULL_RTX)
5790 return NULL_RTX;
5791 emit_insn (insn);
5792
5793 return target;
5794 }
5795
5796 /* Expand a compare-and-swap operation and return its value. */
5797
5798 rtx
5799 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5800 {
5801 enum machine_mode mode = GET_MODE (mem);
5802 enum insn_code icode = sync_compare_and_swap[mode];
5803
5804 if (icode == CODE_FOR_nothing)
5805 return NULL_RTX;
5806
5807 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5808 }
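/* A rough sketch of how a __sync_val_compare_and_swap-style builtin
   would be lowered with the routine above (argument expansion
   omitted; the variable names are illustrative):

       prev = expand_val_compare_and_swap (mem, expected, desired,
					   target);
       if (prev == NULL_RTX)
	 ... fall back to a library call ...

   On success PREV holds the prior contents of MEM.  */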
5809
5810 /* Expand a compare-and-swap operation and store true into the result if
5811 the operation was successful and false otherwise. Return the result.
5812 Unlike other routines, TARGET is not optional. */
5813
5814 rtx
5815 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5816 {
5817 enum machine_mode mode = GET_MODE (mem);
5818 enum insn_code icode;
5819 rtx subtarget, label0, label1;
5820
5821 /* If the target supports a compare-and-swap pattern that simultaneously
5822 sets some flag for success, then use it. Otherwise use the regular
5823 compare-and-swap and follow that immediately with a compare insn. */
5824 icode = sync_compare_and_swap_cc[mode];
5825 switch (icode)
5826 {
5827 default:
5828 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5829 NULL_RTX, icode);
5830 if (subtarget != NULL_RTX)
5831 break;
5832
5833 /* FALLTHRU */
5834 case CODE_FOR_nothing:
5835 icode = sync_compare_and_swap[mode];
5836 if (icode == CODE_FOR_nothing)
5837 return NULL_RTX;
5838
5839       /* Ensure that if old_val == mem, we're not comparing
5840 against an old value. */
5841 if (MEM_P (old_val))
5842 old_val = force_reg (mode, old_val);
5843
5844 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5845 NULL_RTX, icode);
5846 if (subtarget == NULL_RTX)
5847 return NULL_RTX;
5848
5849 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5850 }
5851
5852 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5853 setcc instruction from the beginning. We don't work too hard here,
5854 but it's nice to not be stupid about initial code gen either. */
5855 if (STORE_FLAG_VALUE == 1)
5856 {
5857 icode = setcc_gen_code[EQ];
5858 if (icode != CODE_FOR_nothing)
5859 {
5860 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5861 rtx insn;
5862
5863 subtarget = target;
5864 if (!insn_data[icode].operand[0].predicate (target, cmode))
5865 subtarget = gen_reg_rtx (cmode);
5866
5867 insn = GEN_FCN (icode) (subtarget);
5868 if (insn)
5869 {
5870 emit_insn (insn);
5871 if (GET_MODE (target) != GET_MODE (subtarget))
5872 {
5873 convert_move (target, subtarget, 1);
5874 subtarget = target;
5875 }
5876 return subtarget;
5877 }
5878 }
5879 }
5880
5881 /* Without an appropriate setcc instruction, use a set of branches to
5882 get 1 and 0 stored into target. Presumably if the target has a
5883 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5884
5885 label0 = gen_label_rtx ();
5886 label1 = gen_label_rtx ();
5887
5888 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5889 emit_move_insn (target, const0_rtx);
5890 emit_jump_insn (gen_jump (label1));
5891 emit_barrier ();
5892 emit_label (label0);
5893 emit_move_insn (target, const1_rtx);
5894 emit_label (label1);
5895
5896 return target;
5897 }
5898
5899 /* This is a helper function for the other atomic operations. This function
5900 emits a loop that contains SEQ that iterates until a compare-and-swap
5901 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5902 a set of instructions that takes a value from OLD_REG as an input and
5903 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5904 set to the current contents of MEM. After SEQ, a compare-and-swap will
5905 attempt to update MEM with NEW_REG. The function returns true when the
5906 loop was generated successfully. */
5907
5908 static bool
5909 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5910 {
5911 enum machine_mode mode = GET_MODE (mem);
5912 enum insn_code icode;
5913 rtx label, cmp_reg, subtarget;
5914
5915 /* The loop we want to generate looks like
5916
5917 cmp_reg = mem;
5918 label:
5919 old_reg = cmp_reg;
5920 seq;
5921 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5922 if (cmp_reg != old_reg)
5923 goto label;
5924
5925 Note that we only do the plain load from memory once. Subsequent
5926 iterations use the value loaded by the compare-and-swap pattern. */
5927
5928 label = gen_label_rtx ();
5929 cmp_reg = gen_reg_rtx (mode);
5930
5931 emit_move_insn (cmp_reg, mem);
5932 emit_label (label);
5933 emit_move_insn (old_reg, cmp_reg);
5934 if (seq)
5935 emit_insn (seq);
5936
5937 /* If the target supports a compare-and-swap pattern that simultaneously
5938 sets some flag for success, then use it. Otherwise use the regular
5939 compare-and-swap and follow that immediately with a compare insn. */
5940 icode = sync_compare_and_swap_cc[mode];
5941 switch (icode)
5942 {
5943 default:
5944 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5945 cmp_reg, icode);
5946 if (subtarget != NULL_RTX)
5947 {
5948 gcc_assert (subtarget == cmp_reg);
5949 break;
5950 }
5951
5952 /* FALLTHRU */
5953 case CODE_FOR_nothing:
5954 icode = sync_compare_and_swap[mode];
5955 if (icode == CODE_FOR_nothing)
5956 return false;
5957
5958 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5959 cmp_reg, icode);
5960 if (subtarget == NULL_RTX)
5961 return false;
5962 if (subtarget != cmp_reg)
5963 emit_move_insn (cmp_reg, subtarget);
5964
5965 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5966 }
5967
5968 /* ??? Mark this jump predicted not taken? */
5969 emit_jump_insn (bcc_gen_fctn[NE] (label));
5970
5971 return true;
5972 }
5973
5974 /* This function generates the atomic operation MEM CODE= VAL. In this
5975 case, we do not care about any resulting value. Returns NULL if we
5976 cannot generate the operation. */
5977
5978 rtx
5979 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5980 {
5981 enum machine_mode mode = GET_MODE (mem);
5982 enum insn_code icode;
5983 rtx insn;
5984
5985 /* Look to see if the target supports the operation directly. */
5986 switch (code)
5987 {
5988 case PLUS:
5989 icode = sync_add_optab[mode];
5990 break;
5991 case IOR:
5992 icode = sync_ior_optab[mode];
5993 break;
5994 case XOR:
5995 icode = sync_xor_optab[mode];
5996 break;
5997 case AND:
5998 icode = sync_and_optab[mode];
5999 break;
6000 case NOT:
6001 icode = sync_nand_optab[mode];
6002 break;
6003
6004 case MINUS:
6005 icode = sync_sub_optab[mode];
6006 if (icode == CODE_FOR_nothing)
6007 {
6008 icode = sync_add_optab[mode];
6009 if (icode != CODE_FOR_nothing)
6010 {
6011 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6012 code = PLUS;
6013 }
6014 }
6015 break;
6016
6017 default:
6018 gcc_unreachable ();
6019 }
6020
6021 /* Generate the direct operation, if present. */
6022 if (icode != CODE_FOR_nothing)
6023 {
6024 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6025 val = convert_modes (mode, GET_MODE (val), val, 1);
6026 if (!insn_data[icode].operand[1].predicate (val, mode))
6027 val = force_reg (mode, val);
6028
6029 insn = GEN_FCN (icode) (mem, val);
6030 if (insn)
6031 {
6032 emit_insn (insn);
6033 return const0_rtx;
6034 }
6035 }
6036
6037 /* Failing that, generate a compare-and-swap loop in which we perform the
6038 operation with normal arithmetic instructions. */
6039 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6040 {
6041 rtx t0 = gen_reg_rtx (mode), t1;
6042
6043 start_sequence ();
6044
6045 t1 = t0;
6046 if (code == NOT)
6047 {
6048 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6049 code = AND;
6050 }
6051 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6052 true, OPTAB_LIB_WIDEN);
6053
6054 insn = get_insns ();
6055 end_sequence ();
6056
6057 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6058 return const0_rtx;
6059 }
6060
6061 return NULL_RTX;
6062 }
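/* For example, a __sync_fetch_and_add whose result is ignored can be
   expanded roughly as

       if (expand_sync_operation (mem, val, PLUS) == NULL_RTX)
	 ... emit a library call instead ...

   since this routine only guarantees the memory side effect, not a
   useful return value.  */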
6063
6064 /* This function generates the atomic operation MEM CODE= VAL. In this
6065 case, we do care about the resulting value: if AFTER is true then
6066 return the value MEM holds after the operation, if AFTER is false
6067 then return the value MEM holds before the operation. TARGET is an
6068 optional place for the result value to be stored. */
6069
6070 rtx
6071 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
6072 bool after, rtx target)
6073 {
6074 enum machine_mode mode = GET_MODE (mem);
6075 enum insn_code old_code, new_code, icode;
6076 bool compensate;
6077 rtx insn;
6078
6079 /* Look to see if the target supports the operation directly. */
6080 switch (code)
6081 {
6082 case PLUS:
6083 old_code = sync_old_add_optab[mode];
6084 new_code = sync_new_add_optab[mode];
6085 break;
6086 case IOR:
6087 old_code = sync_old_ior_optab[mode];
6088 new_code = sync_new_ior_optab[mode];
6089 break;
6090 case XOR:
6091 old_code = sync_old_xor_optab[mode];
6092 new_code = sync_new_xor_optab[mode];
6093 break;
6094 case AND:
6095 old_code = sync_old_and_optab[mode];
6096 new_code = sync_new_and_optab[mode];
6097 break;
6098 case NOT:
6099 old_code = sync_old_nand_optab[mode];
6100 new_code = sync_new_nand_optab[mode];
6101 break;
6102
6103 case MINUS:
6104 old_code = sync_old_sub_optab[mode];
6105 new_code = sync_new_sub_optab[mode];
6106 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
6107 {
6108 old_code = sync_old_add_optab[mode];
6109 new_code = sync_new_add_optab[mode];
6110 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
6111 {
6112 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6113 code = PLUS;
6114 }
6115 }
6116 break;
6117
6118 default:
6119 gcc_unreachable ();
6120 }
6121
6122   /* If the target supports the proper new/old operation, great.  But
6123 if we only support the opposite old/new operation, check to see if we
6124 can compensate. In the case in which the old value is supported, then
6125 we can always perform the operation again with normal arithmetic. In
6126 the case in which the new value is supported, then we can only handle
6127 this in the case the operation is reversible. */
6128 compensate = false;
6129 if (after)
6130 {
6131 icode = new_code;
6132 if (icode == CODE_FOR_nothing)
6133 {
6134 icode = old_code;
6135 if (icode != CODE_FOR_nothing)
6136 compensate = true;
6137 }
6138 }
6139 else
6140 {
6141 icode = old_code;
6142 if (icode == CODE_FOR_nothing
6143 && (code == PLUS || code == MINUS || code == XOR))
6144 {
6145 icode = new_code;
6146 if (icode != CODE_FOR_nothing)
6147 compensate = true;
6148 }
6149 }
6150
6151 /* If we found something supported, great. */
6152 if (icode != CODE_FOR_nothing)
6153 {
6154 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6155 target = gen_reg_rtx (mode);
6156
6157 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6158 val = convert_modes (mode, GET_MODE (val), val, 1);
6159 if (!insn_data[icode].operand[2].predicate (val, mode))
6160 val = force_reg (mode, val);
6161
6162 insn = GEN_FCN (icode) (target, mem, val);
6163 if (insn)
6164 {
6165 emit_insn (insn);
6166
6167 /* If we need to compensate for using an operation with the
6168 wrong return value, do so now. */
6169 if (compensate)
6170 {
6171 if (!after)
6172 {
6173 if (code == PLUS)
6174 code = MINUS;
6175 else if (code == MINUS)
6176 code = PLUS;
6177 }
6178
6179 if (code == NOT)
6180 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
6181 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
6182 true, OPTAB_LIB_WIDEN);
6183 }
6184
6185 return target;
6186 }
6187 }
6188
6189 /* Failing that, generate a compare-and-swap loop in which we perform the
6190 operation with normal arithmetic instructions. */
6191 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6192 {
6193 rtx t0 = gen_reg_rtx (mode), t1;
6194
6195 if (!target || !register_operand (target, mode))
6196 target = gen_reg_rtx (mode);
6197
6198 start_sequence ();
6199
6200 if (!after)
6201 emit_move_insn (target, t0);
6202 t1 = t0;
6203 if (code == NOT)
6204 {
6205 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6206 code = AND;
6207 }
6208 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6209 true, OPTAB_LIB_WIDEN);
6210 if (after)
6211 emit_move_insn (target, t1);
6212
6213 insn = get_insns ();
6214 end_sequence ();
6215
6216 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6217 return target;
6218 }
6219
6220 return NULL_RTX;
6221 }
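/* In other words, with AFTER false this implements the
   __sync_fetch_and_OP family (the old value of MEM is returned), and
   with AFTER true the __sync_OP_and_fetch family (the new value is
   returned), e.g.

       oldval = expand_sync_fetch_operation (mem, val, PLUS, false, target);
       newval = expand_sync_fetch_operation (mem, val, PLUS, true, target);  */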
6222
6223 /* This function expands a test-and-set operation. Ideally we atomically
6224 store VAL in MEM and return the previous value in MEM. Some targets
6225 may not support this operation and only support VAL with the constant 1;
6226    in this case the return value will be 0/1, but the exact value
6227    stored in MEM is target-defined.  TARGET is an optional place to stick
6228 the return value. */
6229
6230 rtx
6231 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6232 {
6233 enum machine_mode mode = GET_MODE (mem);
6234 enum insn_code icode;
6235 rtx insn;
6236
6237 /* If the target supports the test-and-set directly, great. */
6238 icode = sync_lock_test_and_set[mode];
6239 if (icode != CODE_FOR_nothing)
6240 {
6241 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6242 target = gen_reg_rtx (mode);
6243
6244 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6245 val = convert_modes (mode, GET_MODE (val), val, 1);
6246 if (!insn_data[icode].operand[2].predicate (val, mode))
6247 val = force_reg (mode, val);
6248
6249 insn = GEN_FCN (icode) (target, mem, val);
6250 if (insn)
6251 {
6252 emit_insn (insn);
6253 return target;
6254 }
6255 }
6256
6257 /* Otherwise, use a compare-and-swap loop for the exchange. */
6258 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6259 {
6260 if (!target || !register_operand (target, mode))
6261 target = gen_reg_rtx (mode);
6262 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6263 val = convert_modes (mode, GET_MODE (val), val, 1);
6264 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6265 return target;
6266 }
6267
6268 return NULL_RTX;
6269 }
6270
6271 #include "gt-optabs.h"
6272