1*404b540aSrobert /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2*404b540aSrobert Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3*404b540aSrobert 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4*404b540aSrobert Free Software Foundation, Inc.
5*404b540aSrobert
6*404b540aSrobert This file is part of GCC.
7*404b540aSrobert
8*404b540aSrobert GCC is free software; you can redistribute it and/or modify it under
9*404b540aSrobert the terms of the GNU General Public License as published by the Free
10*404b540aSrobert Software Foundation; either version 2, or (at your option) any later
11*404b540aSrobert version.
12*404b540aSrobert
13*404b540aSrobert GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14*404b540aSrobert WARRANTY; without even the implied warranty of MERCHANTABILITY or
15*404b540aSrobert FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16*404b540aSrobert for more details.
17*404b540aSrobert
18*404b540aSrobert You should have received a copy of the GNU General Public License
19*404b540aSrobert along with GCC; see the file COPYING. If not, write to the Free
20*404b540aSrobert Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21*404b540aSrobert 02110-1301, USA. */
22*404b540aSrobert
23*404b540aSrobert
24*404b540aSrobert #include "config.h"
25*404b540aSrobert #include "system.h"
26*404b540aSrobert #include "coretypes.h"
27*404b540aSrobert #include "tm.h"
28*404b540aSrobert #include "toplev.h"
29*404b540aSrobert
30*404b540aSrobert /* Include insn-config.h before expr.h so that HAVE_conditional_move
31*404b540aSrobert is properly defined. */
32*404b540aSrobert #include "insn-config.h"
33*404b540aSrobert #include "rtl.h"
34*404b540aSrobert #include "tree.h"
35*404b540aSrobert #include "tm_p.h"
36*404b540aSrobert #include "flags.h"
37*404b540aSrobert #include "function.h"
38*404b540aSrobert #include "except.h"
39*404b540aSrobert #include "expr.h"
40*404b540aSrobert #include "optabs.h"
41*404b540aSrobert #include "libfuncs.h"
42*404b540aSrobert #include "recog.h"
43*404b540aSrobert #include "reload.h"
44*404b540aSrobert #include "ggc.h"
45*404b540aSrobert #include "real.h"
46*404b540aSrobert #include "basic-block.h"
47*404b540aSrobert #include "target.h"
48*404b540aSrobert
49*404b540aSrobert /* Each optab contains info on how this target machine
50*404b540aSrobert can perform a particular operation
51*404b540aSrobert for all sizes and kinds of operands.
52*404b540aSrobert
53*404b540aSrobert The operation to be performed is often specified
54*404b540aSrobert by passing one of these optabs as an argument.
55*404b540aSrobert
56*404b540aSrobert See expr.h for documentation of these optabs. */
57*404b540aSrobert
/* The master optab table, indexed by enum optab_index (OTI_MAX entries).
   See the comment above and expr.h for documentation.  */
58*404b540aSrobert optab optab_table[OTI_MAX];
59*404b540aSrobert
/* Library-call rtxes, indexed by enum libfunc_index (LTI_MAX entries);
   presumably set up alongside libfuncs.h — TODO confirm against init code.  */
60*404b540aSrobert rtx libfunc_table[LTI_MAX];
61*404b540aSrobert
62*404b540aSrobert /* Tables of patterns for converting one mode to another. */
63*404b540aSrobert convert_optab convert_optab_table[COI_MAX];
64*404b540aSrobert
65*404b540aSrobert /* Contains the optab used for each rtx code. */
66*404b540aSrobert optab code_to_optab[NUM_RTX_CODE + 1];
67*404b540aSrobert
68*404b540aSrobert /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
69*404b540aSrobert gives the gen_function to make a branch to test that condition. */
70*404b540aSrobert
71*404b540aSrobert rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72*404b540aSrobert
73*404b540aSrobert /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
74*404b540aSrobert gives the insn code to make a store-condition insn
75*404b540aSrobert to test that condition. */
76*404b540aSrobert
77*404b540aSrobert enum insn_code setcc_gen_code[NUM_RTX_CODE];
78*404b540aSrobert
79*404b540aSrobert #ifdef HAVE_conditional_move
80*404b540aSrobert /* Indexed by the machine mode, gives the insn code to make a conditional
81*404b540aSrobert move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
82*404b540aSrobert setcc_gen_code to cut down on the number of named patterns. Consider a day
83*404b540aSrobert when a lot more rtx codes are conditional (eg: for the ARM). */
84*404b540aSrobert
85*404b540aSrobert enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
86*404b540aSrobert #endif
87*404b540aSrobert
88*404b540aSrobert /* Indexed by the machine mode, gives the insn code for vector conditional
89*404b540aSrobert operation. */
90*404b540aSrobert
91*404b540aSrobert enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
92*404b540aSrobert enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93*404b540aSrobert
94*404b540aSrobert /* The insn generating function can not take an rtx_code argument.
95*404b540aSrobert TRAP_RTX is used as an rtx argument. Its code is replaced with
96*404b540aSrobert the code to be used in the trap insn and all other fields are ignored. */
97*404b540aSrobert static GTY(()) rtx trap_rtx;
98*404b540aSrobert
99*404b540aSrobert static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
100*404b540aSrobert static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101*404b540aSrobert int);
102*404b540aSrobert static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
103*404b540aSrobert enum machine_mode *, int *,
104*404b540aSrobert enum can_compare_purpose);
105*404b540aSrobert static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106*404b540aSrobert int *);
107*404b540aSrobert static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
108*404b540aSrobert static optab new_optab (void);
109*404b540aSrobert static convert_optab new_convert_optab (void);
110*404b540aSrobert static inline optab init_optab (enum rtx_code);
111*404b540aSrobert static inline optab init_optabv (enum rtx_code);
112*404b540aSrobert static inline convert_optab init_convert_optab (enum rtx_code);
113*404b540aSrobert static void init_libfuncs (optab, int, int, const char *, int);
114*404b540aSrobert static void init_integral_libfuncs (optab, const char *, int);
115*404b540aSrobert static void init_floating_libfuncs (optab, const char *, int);
116*404b540aSrobert static void init_interclass_conv_libfuncs (convert_optab, const char *,
117*404b540aSrobert enum mode_class, enum mode_class);
118*404b540aSrobert static void init_intraclass_conv_libfuncs (convert_optab, const char *,
119*404b540aSrobert enum mode_class, bool);
120*404b540aSrobert static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
121*404b540aSrobert enum rtx_code, int, rtx);
122*404b540aSrobert static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
123*404b540aSrobert enum machine_mode *, int *);
124*404b540aSrobert static rtx widen_clz (enum machine_mode, rtx, rtx);
125*404b540aSrobert static rtx expand_parity (enum machine_mode, rtx, rtx);
126*404b540aSrobert static enum rtx_code get_rtx_code (enum tree_code, bool);
127*404b540aSrobert static rtx vector_compare_rtx (tree, bool, enum insn_code);
128*404b540aSrobert
129*404b540aSrobert #ifndef HAVE_conditional_trap
130*404b540aSrobert #define HAVE_conditional_trap 0
131*404b540aSrobert #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
132*404b540aSrobert #endif
133*404b540aSrobert
134*404b540aSrobert /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
135*404b540aSrobert the result of operation CODE applied to OP0 (and OP1 if it is a binary
136*404b540aSrobert operation).
137*404b540aSrobert
138*404b540aSrobert If the last insn does not set TARGET, don't do anything, but return 1.
139*404b540aSrobert
140*404b540aSrobert If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
141*404b540aSrobert don't add the REG_EQUAL note but return 0. Our caller can then try
142*404b540aSrobert again, ensuring that TARGET is not one of the operands. */
143*404b540aSrobert
144*404b540aSrobert static int
add_equal_note(rtx insns,rtx target,enum rtx_code code,rtx op0,rtx op1)145*404b540aSrobert add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146*404b540aSrobert {
147*404b540aSrobert rtx last_insn, insn, set;
148*404b540aSrobert rtx note;
149*404b540aSrobert
150*404b540aSrobert gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151*404b540aSrobert
152*404b540aSrobert if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
153*404b540aSrobert && GET_RTX_CLASS (code) != RTX_BIN_ARITH
154*404b540aSrobert && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
155*404b540aSrobert && GET_RTX_CLASS (code) != RTX_COMPARE
156*404b540aSrobert && GET_RTX_CLASS (code) != RTX_UNARY)
157*404b540aSrobert return 1;
158*404b540aSrobert
159*404b540aSrobert if (GET_CODE (target) == ZERO_EXTRACT)
160*404b540aSrobert return 1;
161*404b540aSrobert
162*404b540aSrobert for (last_insn = insns;
163*404b540aSrobert NEXT_INSN (last_insn) != NULL_RTX;
164*404b540aSrobert last_insn = NEXT_INSN (last_insn))
165*404b540aSrobert ;
166*404b540aSrobert
167*404b540aSrobert set = single_set (last_insn);
168*404b540aSrobert if (set == NULL_RTX)
169*404b540aSrobert return 1;
170*404b540aSrobert
171*404b540aSrobert if (! rtx_equal_p (SET_DEST (set), target)
172*404b540aSrobert /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
173*404b540aSrobert && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
174*404b540aSrobert || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
175*404b540aSrobert return 1;
176*404b540aSrobert
177*404b540aSrobert /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
178*404b540aSrobert besides the last insn. */
179*404b540aSrobert if (reg_overlap_mentioned_p (target, op0)
180*404b540aSrobert || (op1 && reg_overlap_mentioned_p (target, op1)))
181*404b540aSrobert {
182*404b540aSrobert insn = PREV_INSN (last_insn);
183*404b540aSrobert while (insn != NULL_RTX)
184*404b540aSrobert {
185*404b540aSrobert if (reg_set_p (target, insn))
186*404b540aSrobert return 0;
187*404b540aSrobert
188*404b540aSrobert insn = PREV_INSN (insn);
189*404b540aSrobert }
190*404b540aSrobert }
191*404b540aSrobert
192*404b540aSrobert if (GET_RTX_CLASS (code) == RTX_UNARY)
193*404b540aSrobert note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194*404b540aSrobert else
195*404b540aSrobert note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196*404b540aSrobert
197*404b540aSrobert set_unique_reg_note (last_insn, REG_EQUAL, note);
198*404b540aSrobert
199*404b540aSrobert return 1;
200*404b540aSrobert }
201*404b540aSrobert
202*404b540aSrobert /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
203*404b540aSrobert says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
204*404b540aSrobert not actually do a sign-extend or zero-extend, but can leave the
205*404b540aSrobert higher-order bits of the result rtx undefined, for example, in the case
206*404b540aSrobert of logical operations, but not right shifts. */
207*404b540aSrobert
208*404b540aSrobert static rtx
widen_operand(rtx op,enum machine_mode mode,enum machine_mode oldmode,int unsignedp,int no_extend)209*404b540aSrobert widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
210*404b540aSrobert int unsignedp, int no_extend)
211*404b540aSrobert {
212*404b540aSrobert rtx result;
213*404b540aSrobert
214*404b540aSrobert /* If we don't have to extend and this is a constant, return it. */
215*404b540aSrobert if (no_extend && GET_MODE (op) == VOIDmode)
216*404b540aSrobert return op;
217*404b540aSrobert
218*404b540aSrobert /* If we must extend do so. If OP is a SUBREG for a promoted object, also
219*404b540aSrobert extend since it will be more efficient to do so unless the signedness of
220*404b540aSrobert a promoted object differs from our extension. */
221*404b540aSrobert if (! no_extend
222*404b540aSrobert || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
223*404b540aSrobert && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
224*404b540aSrobert return convert_modes (mode, oldmode, op, unsignedp);
225*404b540aSrobert
226*404b540aSrobert /* If MODE is no wider than a single word, we return a paradoxical
227*404b540aSrobert SUBREG. */
228*404b540aSrobert if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
229*404b540aSrobert return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230*404b540aSrobert
231*404b540aSrobert /* Otherwise, get an object of MODE, clobber it, and set the low-order
232*404b540aSrobert part to OP. */
233*404b540aSrobert
234*404b540aSrobert result = gen_reg_rtx (mode);
235*404b540aSrobert emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
236*404b540aSrobert emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
237*404b540aSrobert return result;
238*404b540aSrobert }
239*404b540aSrobert
240*404b540aSrobert /* Return the optab used for computing the operation given by
241*404b540aSrobert the tree code, CODE. This function is not always usable (for
242*404b540aSrobert example, it cannot give complete results for multiplication
243*404b540aSrobert or division) but probably ought to be relied on more widely
244*404b540aSrobert throughout the expander. */
245*404b540aSrobert optab
optab_for_tree_code(enum tree_code code,tree type)246*404b540aSrobert optab_for_tree_code (enum tree_code code, tree type)
247*404b540aSrobert {
248*404b540aSrobert bool trapv;
249*404b540aSrobert switch (code)
250*404b540aSrobert {
251*404b540aSrobert case BIT_AND_EXPR:
252*404b540aSrobert return and_optab;
253*404b540aSrobert
254*404b540aSrobert case BIT_IOR_EXPR:
255*404b540aSrobert return ior_optab;
256*404b540aSrobert
257*404b540aSrobert case BIT_NOT_EXPR:
258*404b540aSrobert return one_cmpl_optab;
259*404b540aSrobert
260*404b540aSrobert case BIT_XOR_EXPR:
261*404b540aSrobert return xor_optab;
262*404b540aSrobert
263*404b540aSrobert case TRUNC_MOD_EXPR:
264*404b540aSrobert case CEIL_MOD_EXPR:
265*404b540aSrobert case FLOOR_MOD_EXPR:
266*404b540aSrobert case ROUND_MOD_EXPR:
267*404b540aSrobert return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
268*404b540aSrobert
269*404b540aSrobert case RDIV_EXPR:
270*404b540aSrobert case TRUNC_DIV_EXPR:
271*404b540aSrobert case CEIL_DIV_EXPR:
272*404b540aSrobert case FLOOR_DIV_EXPR:
273*404b540aSrobert case ROUND_DIV_EXPR:
274*404b540aSrobert case EXACT_DIV_EXPR:
275*404b540aSrobert return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
276*404b540aSrobert
277*404b540aSrobert case LSHIFT_EXPR:
278*404b540aSrobert return ashl_optab;
279*404b540aSrobert
280*404b540aSrobert case RSHIFT_EXPR:
281*404b540aSrobert return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
282*404b540aSrobert
283*404b540aSrobert case LROTATE_EXPR:
284*404b540aSrobert return rotl_optab;
285*404b540aSrobert
286*404b540aSrobert case RROTATE_EXPR:
287*404b540aSrobert return rotr_optab;
288*404b540aSrobert
289*404b540aSrobert case MAX_EXPR:
290*404b540aSrobert return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
291*404b540aSrobert
292*404b540aSrobert case MIN_EXPR:
293*404b540aSrobert return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294*404b540aSrobert
295*404b540aSrobert case REALIGN_LOAD_EXPR:
296*404b540aSrobert return vec_realign_load_optab;
297*404b540aSrobert
298*404b540aSrobert case WIDEN_SUM_EXPR:
299*404b540aSrobert return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
300*404b540aSrobert
301*404b540aSrobert case DOT_PROD_EXPR:
302*404b540aSrobert return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
303*404b540aSrobert
304*404b540aSrobert case REDUC_MAX_EXPR:
305*404b540aSrobert return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
306*404b540aSrobert
307*404b540aSrobert case REDUC_MIN_EXPR:
308*404b540aSrobert return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
309*404b540aSrobert
310*404b540aSrobert case REDUC_PLUS_EXPR:
311*404b540aSrobert return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
312*404b540aSrobert
313*404b540aSrobert case VEC_LSHIFT_EXPR:
314*404b540aSrobert return vec_shl_optab;
315*404b540aSrobert
316*404b540aSrobert case VEC_RSHIFT_EXPR:
317*404b540aSrobert return vec_shr_optab;
318*404b540aSrobert
319*404b540aSrobert default:
320*404b540aSrobert break;
321*404b540aSrobert }
322*404b540aSrobert
323*404b540aSrobert trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
324*404b540aSrobert switch (code)
325*404b540aSrobert {
326*404b540aSrobert case PLUS_EXPR:
327*404b540aSrobert return trapv ? addv_optab : add_optab;
328*404b540aSrobert
329*404b540aSrobert case MINUS_EXPR:
330*404b540aSrobert return trapv ? subv_optab : sub_optab;
331*404b540aSrobert
332*404b540aSrobert case MULT_EXPR:
333*404b540aSrobert return trapv ? smulv_optab : smul_optab;
334*404b540aSrobert
335*404b540aSrobert case NEGATE_EXPR:
336*404b540aSrobert return trapv ? negv_optab : neg_optab;
337*404b540aSrobert
338*404b540aSrobert case ABS_EXPR:
339*404b540aSrobert return trapv ? absv_optab : abs_optab;
340*404b540aSrobert
341*404b540aSrobert default:
342*404b540aSrobert return NULL;
343*404b540aSrobert }
344*404b540aSrobert }
345*404b540aSrobert
346*404b540aSrobert
347*404b540aSrobert /* Expand vector widening operations.
348*404b540aSrobert
349*404b540aSrobert There are two different classes of operations handled here:
350*404b540aSrobert 1) Operations whose result is wider than all the arguments to the operation.
351*404b540aSrobert Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
352*404b540aSrobert In this case OP0 and optionally OP1 would be initialized,
353*404b540aSrobert but WIDE_OP wouldn't (not relevant for this case).
354*404b540aSrobert 2) Operations whose result is of the same size as the last argument to the
355*404b540aSrobert operation, but wider than all the other arguments to the operation.
356*404b540aSrobert Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
357*404b540aSrobert In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
358*404b540aSrobert
359*404b540aSrobert E.g, when called to expand the following operations, this is how
360*404b540aSrobert the arguments will be initialized:
361*404b540aSrobert nops OP0 OP1 WIDE_OP
362*404b540aSrobert widening-sum 2 oprnd0 - oprnd1
363*404b540aSrobert widening-dot-product 3 oprnd0 oprnd1 oprnd2
364*404b540aSrobert widening-mult 2 oprnd0 oprnd1 -
365*404b540aSrobert type-promotion (vec-unpack) 1 oprnd0 - - */
366*404b540aSrobert
367*404b540aSrobert rtx
expand_widen_pattern_expr(tree exp,rtx op0,rtx op1,rtx wide_op,rtx target,int unsignedp)368*404b540aSrobert expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
369*404b540aSrobert int unsignedp)
370*404b540aSrobert {
371*404b540aSrobert tree oprnd0, oprnd1, oprnd2;
372*404b540aSrobert enum machine_mode wmode = 0, tmode0, tmode1 = 0;
373*404b540aSrobert optab widen_pattern_optab;
374*404b540aSrobert int icode;
375*404b540aSrobert enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
376*404b540aSrobert rtx temp;
377*404b540aSrobert rtx pat;
378*404b540aSrobert rtx xop0, xop1, wxop;
379*404b540aSrobert int nops = TREE_CODE_LENGTH (TREE_CODE (exp));
380*404b540aSrobert
381*404b540aSrobert oprnd0 = TREE_OPERAND (exp, 0);
382*404b540aSrobert tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
383*404b540aSrobert widen_pattern_optab =
384*404b540aSrobert optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
385*404b540aSrobert icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
386*404b540aSrobert gcc_assert (icode != CODE_FOR_nothing);
387*404b540aSrobert xmode0 = insn_data[icode].operand[1].mode;
388*404b540aSrobert
389*404b540aSrobert if (nops >= 2)
390*404b540aSrobert {
391*404b540aSrobert oprnd1 = TREE_OPERAND (exp, 1);
392*404b540aSrobert tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
393*404b540aSrobert xmode1 = insn_data[icode].operand[2].mode;
394*404b540aSrobert }
395*404b540aSrobert
396*404b540aSrobert /* The last operand is of a wider mode than the rest of the operands. */
397*404b540aSrobert if (nops == 2)
398*404b540aSrobert {
399*404b540aSrobert wmode = tmode1;
400*404b540aSrobert wxmode = xmode1;
401*404b540aSrobert }
402*404b540aSrobert else if (nops == 3)
403*404b540aSrobert {
404*404b540aSrobert gcc_assert (tmode1 == tmode0);
405*404b540aSrobert gcc_assert (op1);
406*404b540aSrobert oprnd2 = TREE_OPERAND (exp, 2);
407*404b540aSrobert wmode = TYPE_MODE (TREE_TYPE (oprnd2));
408*404b540aSrobert wxmode = insn_data[icode].operand[3].mode;
409*404b540aSrobert }
410*404b540aSrobert
411*404b540aSrobert if (!wide_op)
412*404b540aSrobert wmode = wxmode = insn_data[icode].operand[0].mode;
413*404b540aSrobert
414*404b540aSrobert if (!target
415*404b540aSrobert || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
416*404b540aSrobert temp = gen_reg_rtx (wmode);
417*404b540aSrobert else
418*404b540aSrobert temp = target;
419*404b540aSrobert
420*404b540aSrobert xop0 = op0;
421*404b540aSrobert xop1 = op1;
422*404b540aSrobert wxop = wide_op;
423*404b540aSrobert
424*404b540aSrobert /* In case the insn wants input operands in modes different from
425*404b540aSrobert those of the actual operands, convert the operands. It would
426*404b540aSrobert seem that we don't need to convert CONST_INTs, but we do, so
427*404b540aSrobert that they're properly zero-extended, sign-extended or truncated
428*404b540aSrobert for their mode. */
429*404b540aSrobert
430*404b540aSrobert if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
431*404b540aSrobert xop0 = convert_modes (xmode0,
432*404b540aSrobert GET_MODE (op0) != VOIDmode
433*404b540aSrobert ? GET_MODE (op0)
434*404b540aSrobert : tmode0,
435*404b540aSrobert xop0, unsignedp);
436*404b540aSrobert
437*404b540aSrobert if (op1)
438*404b540aSrobert if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
439*404b540aSrobert xop1 = convert_modes (xmode1,
440*404b540aSrobert GET_MODE (op1) != VOIDmode
441*404b540aSrobert ? GET_MODE (op1)
442*404b540aSrobert : tmode1,
443*404b540aSrobert xop1, unsignedp);
444*404b540aSrobert
445*404b540aSrobert if (wide_op)
446*404b540aSrobert if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
447*404b540aSrobert wxop = convert_modes (wxmode,
448*404b540aSrobert GET_MODE (wide_op) != VOIDmode
449*404b540aSrobert ? GET_MODE (wide_op)
450*404b540aSrobert : wmode,
451*404b540aSrobert wxop, unsignedp);
452*404b540aSrobert
453*404b540aSrobert /* Now, if insn's predicates don't allow our operands, put them into
454*404b540aSrobert pseudo regs. */
455*404b540aSrobert
456*404b540aSrobert if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
457*404b540aSrobert && xmode0 != VOIDmode)
458*404b540aSrobert xop0 = copy_to_mode_reg (xmode0, xop0);
459*404b540aSrobert
460*404b540aSrobert if (op1)
461*404b540aSrobert {
462*404b540aSrobert if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
463*404b540aSrobert && xmode1 != VOIDmode)
464*404b540aSrobert xop1 = copy_to_mode_reg (xmode1, xop1);
465*404b540aSrobert
466*404b540aSrobert if (wide_op)
467*404b540aSrobert {
468*404b540aSrobert if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
469*404b540aSrobert && wxmode != VOIDmode)
470*404b540aSrobert wxop = copy_to_mode_reg (wxmode, wxop);
471*404b540aSrobert
472*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
473*404b540aSrobert }
474*404b540aSrobert else
475*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0, xop1);
476*404b540aSrobert }
477*404b540aSrobert else
478*404b540aSrobert {
479*404b540aSrobert if (wide_op)
480*404b540aSrobert {
481*404b540aSrobert if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
482*404b540aSrobert && wxmode != VOIDmode)
483*404b540aSrobert wxop = copy_to_mode_reg (wxmode, wxop);
484*404b540aSrobert
485*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0, wxop);
486*404b540aSrobert }
487*404b540aSrobert else
488*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0);
489*404b540aSrobert }
490*404b540aSrobert
491*404b540aSrobert emit_insn (pat);
492*404b540aSrobert return temp;
493*404b540aSrobert }
494*404b540aSrobert
495*404b540aSrobert /* Generate code to perform an operation specified by TERNARY_OPTAB
496*404b540aSrobert on operands OP0, OP1 and OP2, with result having machine-mode MODE.
497*404b540aSrobert
498*404b540aSrobert UNSIGNEDP is for the case where we have to widen the operands
499*404b540aSrobert to perform the operation. It says to use zero-extension.
500*404b540aSrobert
501*404b540aSrobert If TARGET is nonzero, the value
502*404b540aSrobert is generated there, if it is convenient to do so.
503*404b540aSrobert In all cases an rtx is returned for the locus of the value;
504*404b540aSrobert this may or may not be TARGET. */
505*404b540aSrobert
506*404b540aSrobert rtx
expand_ternary_op(enum machine_mode mode,optab ternary_optab,rtx op0,rtx op1,rtx op2,rtx target,int unsignedp)507*404b540aSrobert expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
508*404b540aSrobert rtx op1, rtx op2, rtx target, int unsignedp)
509*404b540aSrobert {
510*404b540aSrobert int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
511*404b540aSrobert enum machine_mode mode0 = insn_data[icode].operand[1].mode;
512*404b540aSrobert enum machine_mode mode1 = insn_data[icode].operand[2].mode;
513*404b540aSrobert enum machine_mode mode2 = insn_data[icode].operand[3].mode;
514*404b540aSrobert rtx temp;
515*404b540aSrobert rtx pat;
516*404b540aSrobert rtx xop0 = op0, xop1 = op1, xop2 = op2;
517*404b540aSrobert
518*404b540aSrobert gcc_assert (ternary_optab->handlers[(int) mode].insn_code
519*404b540aSrobert != CODE_FOR_nothing);
520*404b540aSrobert
521*404b540aSrobert if (!target || !insn_data[icode].operand[0].predicate (target, mode))
522*404b540aSrobert temp = gen_reg_rtx (mode);
523*404b540aSrobert else
524*404b540aSrobert temp = target;
525*404b540aSrobert
526*404b540aSrobert /* In case the insn wants input operands in modes different from
527*404b540aSrobert those of the actual operands, convert the operands. It would
528*404b540aSrobert seem that we don't need to convert CONST_INTs, but we do, so
529*404b540aSrobert that they're properly zero-extended, sign-extended or truncated
530*404b540aSrobert for their mode. */
531*404b540aSrobert
532*404b540aSrobert if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
533*404b540aSrobert xop0 = convert_modes (mode0,
534*404b540aSrobert GET_MODE (op0) != VOIDmode
535*404b540aSrobert ? GET_MODE (op0)
536*404b540aSrobert : mode,
537*404b540aSrobert xop0, unsignedp);
538*404b540aSrobert
539*404b540aSrobert if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
540*404b540aSrobert xop1 = convert_modes (mode1,
541*404b540aSrobert GET_MODE (op1) != VOIDmode
542*404b540aSrobert ? GET_MODE (op1)
543*404b540aSrobert : mode,
544*404b540aSrobert xop1, unsignedp);
545*404b540aSrobert
546*404b540aSrobert if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
547*404b540aSrobert xop2 = convert_modes (mode2,
548*404b540aSrobert GET_MODE (op2) != VOIDmode
549*404b540aSrobert ? GET_MODE (op2)
550*404b540aSrobert : mode,
551*404b540aSrobert xop2, unsignedp);
552*404b540aSrobert
553*404b540aSrobert /* Now, if insn's predicates don't allow our operands, put them into
554*404b540aSrobert pseudo regs. */
555*404b540aSrobert
556*404b540aSrobert if (!insn_data[icode].operand[1].predicate (xop0, mode0)
557*404b540aSrobert && mode0 != VOIDmode)
558*404b540aSrobert xop0 = copy_to_mode_reg (mode0, xop0);
559*404b540aSrobert
560*404b540aSrobert if (!insn_data[icode].operand[2].predicate (xop1, mode1)
561*404b540aSrobert && mode1 != VOIDmode)
562*404b540aSrobert xop1 = copy_to_mode_reg (mode1, xop1);
563*404b540aSrobert
564*404b540aSrobert if (!insn_data[icode].operand[3].predicate (xop2, mode2)
565*404b540aSrobert && mode2 != VOIDmode)
566*404b540aSrobert xop2 = copy_to_mode_reg (mode2, xop2);
567*404b540aSrobert
568*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
569*404b540aSrobert
570*404b540aSrobert emit_insn (pat);
571*404b540aSrobert return temp;
572*404b540aSrobert }
573*404b540aSrobert
574*404b540aSrobert
575*404b540aSrobert /* Like expand_binop, but return a constant rtx if the result can be
576*404b540aSrobert calculated at compile time. The arguments and return value are
577*404b540aSrobert otherwise the same as for expand_binop. */
578*404b540aSrobert
579*404b540aSrobert static rtx
simplify_expand_binop(enum machine_mode mode,optab binoptab,rtx op0,rtx op1,rtx target,int unsignedp,enum optab_methods methods)580*404b540aSrobert simplify_expand_binop (enum machine_mode mode, optab binoptab,
581*404b540aSrobert rtx op0, rtx op1, rtx target, int unsignedp,
582*404b540aSrobert enum optab_methods methods)
583*404b540aSrobert {
584*404b540aSrobert if (CONSTANT_P (op0) && CONSTANT_P (op1))
585*404b540aSrobert {
586*404b540aSrobert rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
587*404b540aSrobert
588*404b540aSrobert if (x)
589*404b540aSrobert return x;
590*404b540aSrobert }
591*404b540aSrobert
592*404b540aSrobert return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
593*404b540aSrobert }
594*404b540aSrobert
595*404b540aSrobert /* Like simplify_expand_binop, but always put the result in TARGET.
596*404b540aSrobert Return true if the expansion succeeded. */
597*404b540aSrobert
598*404b540aSrobert bool
force_expand_binop(enum machine_mode mode,optab binoptab,rtx op0,rtx op1,rtx target,int unsignedp,enum optab_methods methods)599*404b540aSrobert force_expand_binop (enum machine_mode mode, optab binoptab,
600*404b540aSrobert rtx op0, rtx op1, rtx target, int unsignedp,
601*404b540aSrobert enum optab_methods methods)
602*404b540aSrobert {
603*404b540aSrobert rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
604*404b540aSrobert target, unsignedp, methods);
605*404b540aSrobert if (x == 0)
606*404b540aSrobert return false;
607*404b540aSrobert if (x != target)
608*404b540aSrobert emit_move_insn (target, x);
609*404b540aSrobert return true;
610*404b540aSrobert }
611*404b540aSrobert
612*404b540aSrobert /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
613*404b540aSrobert
614*404b540aSrobert rtx
expand_vec_shift_expr(tree vec_shift_expr,rtx target)615*404b540aSrobert expand_vec_shift_expr (tree vec_shift_expr, rtx target)
616*404b540aSrobert {
617*404b540aSrobert enum insn_code icode;
618*404b540aSrobert rtx rtx_op1, rtx_op2;
619*404b540aSrobert enum machine_mode mode1;
620*404b540aSrobert enum machine_mode mode2;
621*404b540aSrobert enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
622*404b540aSrobert tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
623*404b540aSrobert tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
624*404b540aSrobert optab shift_optab;
625*404b540aSrobert rtx pat;
626*404b540aSrobert
627*404b540aSrobert switch (TREE_CODE (vec_shift_expr))
628*404b540aSrobert {
629*404b540aSrobert case VEC_RSHIFT_EXPR:
630*404b540aSrobert shift_optab = vec_shr_optab;
631*404b540aSrobert break;
632*404b540aSrobert case VEC_LSHIFT_EXPR:
633*404b540aSrobert shift_optab = vec_shl_optab;
634*404b540aSrobert break;
635*404b540aSrobert default:
636*404b540aSrobert gcc_unreachable ();
637*404b540aSrobert }
638*404b540aSrobert
639*404b540aSrobert icode = (int) shift_optab->handlers[(int) mode].insn_code;
640*404b540aSrobert gcc_assert (icode != CODE_FOR_nothing);
641*404b540aSrobert
642*404b540aSrobert mode1 = insn_data[icode].operand[1].mode;
643*404b540aSrobert mode2 = insn_data[icode].operand[2].mode;
644*404b540aSrobert
645*404b540aSrobert rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
646*404b540aSrobert if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
647*404b540aSrobert && mode1 != VOIDmode)
648*404b540aSrobert rtx_op1 = force_reg (mode1, rtx_op1);
649*404b540aSrobert
650*404b540aSrobert rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
651*404b540aSrobert if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
652*404b540aSrobert && mode2 != VOIDmode)
653*404b540aSrobert rtx_op2 = force_reg (mode2, rtx_op2);
654*404b540aSrobert
655*404b540aSrobert if (!target
656*404b540aSrobert || ! (*insn_data[icode].operand[0].predicate) (target, mode))
657*404b540aSrobert target = gen_reg_rtx (mode);
658*404b540aSrobert
659*404b540aSrobert /* Emit instruction */
660*404b540aSrobert pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
661*404b540aSrobert gcc_assert (pat);
662*404b540aSrobert emit_insn (pat);
663*404b540aSrobert
664*404b540aSrobert return target;
665*404b540aSrobert }
666*404b540aSrobert
667*404b540aSrobert /* This subroutine of expand_doubleword_shift handles the cases in which
668*404b540aSrobert the effective shift value is >= BITS_PER_WORD. The arguments and return
669*404b540aSrobert value are the same as for the parent routine, except that SUPERWORD_OP1
670*404b540aSrobert is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
671*404b540aSrobert INTO_TARGET may be null if the caller has decided to calculate it. */
672*404b540aSrobert
673*404b540aSrobert static bool
expand_superword_shift(optab binoptab,rtx outof_input,rtx superword_op1,rtx outof_target,rtx into_target,int unsignedp,enum optab_methods methods)674*404b540aSrobert expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
675*404b540aSrobert rtx outof_target, rtx into_target,
676*404b540aSrobert int unsignedp, enum optab_methods methods)
677*404b540aSrobert {
678*404b540aSrobert if (into_target != 0)
679*404b540aSrobert if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
680*404b540aSrobert into_target, unsignedp, methods))
681*404b540aSrobert return false;
682*404b540aSrobert
683*404b540aSrobert if (outof_target != 0)
684*404b540aSrobert {
685*404b540aSrobert /* For a signed right shift, we must fill OUTOF_TARGET with copies
686*404b540aSrobert of the sign bit, otherwise we must fill it with zeros. */
687*404b540aSrobert if (binoptab != ashr_optab)
688*404b540aSrobert emit_move_insn (outof_target, CONST0_RTX (word_mode));
689*404b540aSrobert else
690*404b540aSrobert if (!force_expand_binop (word_mode, binoptab,
691*404b540aSrobert outof_input, GEN_INT (BITS_PER_WORD - 1),
692*404b540aSrobert outof_target, unsignedp, methods))
693*404b540aSrobert return false;
694*404b540aSrobert }
695*404b540aSrobert return true;
696*404b540aSrobert }
697*404b540aSrobert
/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.

   OUTOF_INPUT and INTO_INPUT are the two word-sized input halves, OP1 is
   the shift count (in OP1_MODE) and OUTOF_TARGET/INTO_TARGET receive the
   result halves.  Returns false if any required word-mode operation could
   not be expanded, in which case partial instructions may already have
   been emitted (the caller is expected to delete back to a saved point).  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  /* The "reverse" shift moves the carried bits in the opposite direction
     to BINOPTAB; the "unsigned" shift is the logical form of BINOPTAB
     (the INTO half never needs sign extension).  */
  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      /* Safe to compute BITS_PER_WORD - OP1 directly: either OP1 is a
	 compile-time constant, or word-mode shifts mask their count so
	 an out-of-range value still behaves predictably.  */
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  /* ~OP1 equals (BITS_PER_WORD - 1) - OP1 under the truncating
	     mask, so XOR with all-ones gives the remainder count.  */
	  tmp = immed_double_const (-1, -1, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  /* Either sub-expansion above may fail and return null.  */
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
774*404b540aSrobert
775*404b540aSrobert
#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.

   Both candidate results are computed unconditionally and the final
   value is selected with emit_conditional_move.  Returns false if any
   sub-expansion or conditional move could not be generated.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.
	 Reusing OUTOF_TARGET avoids one temporary; the aliasing is
	 resolved by doing the INTO-half conditional move first below.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif
837*404b540aSrobert
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      /* Recurse with a null OUTOF_TARGET: only INTO_TARGET needs the
	 two-case synthesis; OUTOF_TARGET is a single in-range shift.  */
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  Since word shifts truncate their
	 count, OP1 itself can be used unchanged as SUPERWORD_OP1.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  Negative means a subword
	 shift; otherwise CMP1 is itself the in-range superword count.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					  cmp_code, cmp1, cmp2,
					  outof_input, into_input,
					  op1, superword_op1,
					  outof_target, into_target,
					  unsignedp, methods, shift_mask))
      return true;
    /* Discard any partially-emitted condmove sequence on failure.  */
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
986*404b540aSrobert
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
			         _______________________
			        [__op0_high_|__op0_low__]
			         _______________________
        *		        [__op1_high_|__op1_low__]
        _______________________________________________
			         _______________________
    (1)				[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		       bool umulp, enum optab_methods methods)
{
  /* Word indices of the low and high halves within a doubleword.  */
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  /* Shift count BITS_PER_WORD - 1, only needed for the signed-multiply
     sign-bit adjustment described above.  */
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      /* Add the sign bit of op0_low into op0_high (lshr gives 0 or 1;
	 the ashr fallback gives 0 or -1, hence the subtraction).  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  /* Partial product (2b): (adjusted) op0_high * op1_low, non-widening.  */
  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      /* Same sign-bit adjustment for op1, feeding partial product (2a).  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  /* Partial product (2a): (adjusted) op1_high * op0_low, non-widening.  */
  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  /* Sum of the high-word contributions of (2a) and (2b).  */
  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* Partial product (1): widening multiply of the low words.  */
  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  /* Fold ADJUST into the high word of the widened product.  */
  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 REG_P (product_high) ? product_high : adjust,
			 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
1148*404b540aSrobert
1149*404b540aSrobert /* Wrapper around expand_binop which takes an rtx code to specify
1150*404b540aSrobert the operation to perform, not an optab pointer. All other
1151*404b540aSrobert arguments are the same. */
1152*404b540aSrobert rtx
expand_simple_binop(enum machine_mode mode,enum rtx_code code,rtx op0,rtx op1,rtx target,int unsignedp,enum optab_methods methods)1153*404b540aSrobert expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1154*404b540aSrobert rtx op1, rtx target, int unsignedp,
1155*404b540aSrobert enum optab_methods methods)
1156*404b540aSrobert {
1157*404b540aSrobert optab binop = code_to_optab[(int) code];
1158*404b540aSrobert gcc_assert (binop);
1159*404b540aSrobert
1160*404b540aSrobert return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1161*404b540aSrobert }
1162*404b540aSrobert
1163*404b540aSrobert /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1164*404b540aSrobert binop. Order them according to commutative_operand_precedence and, if
1165*404b540aSrobert possible, try to put TARGET or a pseudo first. */
1166*404b540aSrobert static bool
swap_commutative_operands_with_target(rtx target,rtx op0,rtx op1)1167*404b540aSrobert swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1168*404b540aSrobert {
1169*404b540aSrobert int op0_prec = commutative_operand_precedence (op0);
1170*404b540aSrobert int op1_prec = commutative_operand_precedence (op1);
1171*404b540aSrobert
1172*404b540aSrobert if (op0_prec < op1_prec)
1173*404b540aSrobert return true;
1174*404b540aSrobert
1175*404b540aSrobert if (op0_prec > op1_prec)
1176*404b540aSrobert return false;
1177*404b540aSrobert
1178*404b540aSrobert /* With equal precedence, both orders are ok, but it is better if the
1179*404b540aSrobert first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1180*404b540aSrobert if (target == 0 || REG_P (target))
1181*404b540aSrobert return (REG_P (op1) && !REG_P (op0)) || target == op1;
1182*404b540aSrobert else
1183*404b540aSrobert return rtx_equal_p (op1, target);
1184*404b540aSrobert }
1185*404b540aSrobert
1186*404b540aSrobert
1187*404b540aSrobert /* Generate code to perform an operation specified by BINOPTAB
1188*404b540aSrobert on operands OP0 and OP1, with result having machine-mode MODE.
1189*404b540aSrobert
1190*404b540aSrobert UNSIGNEDP is for the case where we have to widen the operands
1191*404b540aSrobert to perform the operation. It says to use zero-extension.
1192*404b540aSrobert
1193*404b540aSrobert If TARGET is nonzero, the value
1194*404b540aSrobert is generated there, if it is convenient to do so.
1195*404b540aSrobert In all cases an rtx is returned for the locus of the value;
1196*404b540aSrobert this may or may not be TARGET. */
1197*404b540aSrobert
1198*404b540aSrobert rtx
expand_binop(enum machine_mode mode,optab binoptab,rtx op0,rtx op1,rtx target,int unsignedp,enum optab_methods methods)1199*404b540aSrobert expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1200*404b540aSrobert rtx target, int unsignedp, enum optab_methods methods)
1201*404b540aSrobert {
1202*404b540aSrobert enum optab_methods next_methods
1203*404b540aSrobert = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1204*404b540aSrobert ? OPTAB_WIDEN : methods);
1205*404b540aSrobert enum mode_class class;
1206*404b540aSrobert enum machine_mode wider_mode;
1207*404b540aSrobert rtx temp;
1208*404b540aSrobert int commutative_op = 0;
1209*404b540aSrobert int shift_op = (binoptab->code == ASHIFT
1210*404b540aSrobert || binoptab->code == ASHIFTRT
1211*404b540aSrobert || binoptab->code == LSHIFTRT
1212*404b540aSrobert || binoptab->code == ROTATE
1213*404b540aSrobert || binoptab->code == ROTATERT);
1214*404b540aSrobert rtx entry_last = get_last_insn ();
1215*404b540aSrobert rtx last;
1216*404b540aSrobert bool first_pass_p = true;
1217*404b540aSrobert
1218*404b540aSrobert class = GET_MODE_CLASS (mode);
1219*404b540aSrobert
1220*404b540aSrobert /* If subtracting an integer constant, convert this into an addition of
1221*404b540aSrobert the negated constant. */
1222*404b540aSrobert
1223*404b540aSrobert if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1224*404b540aSrobert {
1225*404b540aSrobert op1 = negate_rtx (mode, op1);
1226*404b540aSrobert binoptab = add_optab;
1227*404b540aSrobert }
1228*404b540aSrobert
1229*404b540aSrobert /* If we are inside an appropriately-short loop and we are optimizing,
1230*404b540aSrobert force expensive constants into a register. */
1231*404b540aSrobert if (CONSTANT_P (op0) && optimize
1232*404b540aSrobert && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1233*404b540aSrobert {
1234*404b540aSrobert if (GET_MODE (op0) != VOIDmode)
1235*404b540aSrobert op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1236*404b540aSrobert op0 = force_reg (mode, op0);
1237*404b540aSrobert }
1238*404b540aSrobert
1239*404b540aSrobert if (CONSTANT_P (op1) && optimize
1240*404b540aSrobert && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1241*404b540aSrobert {
1242*404b540aSrobert if (GET_MODE (op1) != VOIDmode)
1243*404b540aSrobert op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1244*404b540aSrobert op1 = force_reg (mode, op1);
1245*404b540aSrobert }
1246*404b540aSrobert
1247*404b540aSrobert /* Record where to delete back to if we backtrack. */
1248*404b540aSrobert last = get_last_insn ();
1249*404b540aSrobert
1250*404b540aSrobert /* If operation is commutative,
1251*404b540aSrobert try to make the first operand a register.
1252*404b540aSrobert Even better, try to make it the same as the target.
1253*404b540aSrobert Also try to make the last operand a constant. */
1254*404b540aSrobert if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1255*404b540aSrobert || binoptab == smul_widen_optab
1256*404b540aSrobert || binoptab == umul_widen_optab
1257*404b540aSrobert || binoptab == smul_highpart_optab
1258*404b540aSrobert || binoptab == umul_highpart_optab)
1259*404b540aSrobert {
1260*404b540aSrobert commutative_op = 1;
1261*404b540aSrobert
1262*404b540aSrobert if (swap_commutative_operands_with_target (target, op0, op1))
1263*404b540aSrobert {
1264*404b540aSrobert temp = op1;
1265*404b540aSrobert op1 = op0;
1266*404b540aSrobert op0 = temp;
1267*404b540aSrobert }
1268*404b540aSrobert }
1269*404b540aSrobert
1270*404b540aSrobert retry:
1271*404b540aSrobert
1272*404b540aSrobert /* If we can do it with a three-operand insn, do so. */
1273*404b540aSrobert
1274*404b540aSrobert if (methods != OPTAB_MUST_WIDEN
1275*404b540aSrobert && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1276*404b540aSrobert {
1277*404b540aSrobert int icode = (int) binoptab->handlers[(int) mode].insn_code;
1278*404b540aSrobert enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1279*404b540aSrobert enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1280*404b540aSrobert rtx pat;
1281*404b540aSrobert rtx xop0 = op0, xop1 = op1;
1282*404b540aSrobert
1283*404b540aSrobert if (target)
1284*404b540aSrobert temp = target;
1285*404b540aSrobert else
1286*404b540aSrobert temp = gen_reg_rtx (mode);
1287*404b540aSrobert
1288*404b540aSrobert /* If it is a commutative operator and the modes would match
1289*404b540aSrobert if we would swap the operands, we can save the conversions. */
1290*404b540aSrobert if (commutative_op)
1291*404b540aSrobert {
1292*404b540aSrobert if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1293*404b540aSrobert && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1294*404b540aSrobert {
1295*404b540aSrobert rtx tmp;
1296*404b540aSrobert
1297*404b540aSrobert tmp = op0; op0 = op1; op1 = tmp;
1298*404b540aSrobert tmp = xop0; xop0 = xop1; xop1 = tmp;
1299*404b540aSrobert }
1300*404b540aSrobert }
1301*404b540aSrobert
1302*404b540aSrobert /* In case the insn wants input operands in modes different from
1303*404b540aSrobert those of the actual operands, convert the operands. It would
1304*404b540aSrobert seem that we don't need to convert CONST_INTs, but we do, so
1305*404b540aSrobert that they're properly zero-extended, sign-extended or truncated
1306*404b540aSrobert for their mode. */
1307*404b540aSrobert
1308*404b540aSrobert if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1309*404b540aSrobert xop0 = convert_modes (mode0,
1310*404b540aSrobert GET_MODE (op0) != VOIDmode
1311*404b540aSrobert ? GET_MODE (op0)
1312*404b540aSrobert : mode,
1313*404b540aSrobert xop0, unsignedp);
1314*404b540aSrobert
1315*404b540aSrobert if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1316*404b540aSrobert xop1 = convert_modes (mode1,
1317*404b540aSrobert GET_MODE (op1) != VOIDmode
1318*404b540aSrobert ? GET_MODE (op1)
1319*404b540aSrobert : mode,
1320*404b540aSrobert xop1, unsignedp);
1321*404b540aSrobert
1322*404b540aSrobert /* Now, if insn's predicates don't allow our operands, put them into
1323*404b540aSrobert pseudo regs. */
1324*404b540aSrobert
1325*404b540aSrobert if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1326*404b540aSrobert && mode0 != VOIDmode)
1327*404b540aSrobert xop0 = copy_to_mode_reg (mode0, xop0);
1328*404b540aSrobert
1329*404b540aSrobert if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1330*404b540aSrobert && mode1 != VOIDmode)
1331*404b540aSrobert xop1 = copy_to_mode_reg (mode1, xop1);
1332*404b540aSrobert
1333*404b540aSrobert if (!insn_data[icode].operand[0].predicate (temp, mode))
1334*404b540aSrobert temp = gen_reg_rtx (mode);
1335*404b540aSrobert
1336*404b540aSrobert pat = GEN_FCN (icode) (temp, xop0, xop1);
1337*404b540aSrobert if (pat)
1338*404b540aSrobert {
1339*404b540aSrobert /* If PAT is composed of more than one insn, try to add an appropriate
1340*404b540aSrobert REG_EQUAL note to it. If we can't because TEMP conflicts with an
1341*404b540aSrobert operand, call ourselves again, this time without a target. */
1342*404b540aSrobert if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1343*404b540aSrobert && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1344*404b540aSrobert {
1345*404b540aSrobert delete_insns_since (last);
1346*404b540aSrobert return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1347*404b540aSrobert unsignedp, methods);
1348*404b540aSrobert }
1349*404b540aSrobert
1350*404b540aSrobert emit_insn (pat);
1351*404b540aSrobert return temp;
1352*404b540aSrobert }
1353*404b540aSrobert else
1354*404b540aSrobert delete_insns_since (last);
1355*404b540aSrobert }
1356*404b540aSrobert
1357*404b540aSrobert /* If we were trying to rotate by a constant value, and that didn't
1358*404b540aSrobert work, try rotating the other direction before falling back to
1359*404b540aSrobert shifts and bitwise-or. */
1360*404b540aSrobert if (first_pass_p
1361*404b540aSrobert && (binoptab == rotl_optab || binoptab == rotr_optab)
1362*404b540aSrobert && class == MODE_INT
1363*404b540aSrobert && GET_CODE (op1) == CONST_INT
1364*404b540aSrobert && INTVAL (op1) > 0
1365*404b540aSrobert && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1366*404b540aSrobert {
1367*404b540aSrobert first_pass_p = false;
1368*404b540aSrobert op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1369*404b540aSrobert binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1370*404b540aSrobert goto retry;
1371*404b540aSrobert }
1372*404b540aSrobert
1373*404b540aSrobert /* If this is a multiply, see if we can do a widening operation that
1374*404b540aSrobert takes operands of this mode and makes a wider mode. */
1375*404b540aSrobert
1376*404b540aSrobert if (binoptab == smul_optab
1377*404b540aSrobert && GET_MODE_WIDER_MODE (mode) != VOIDmode
1378*404b540aSrobert && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1379*404b540aSrobert ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1380*404b540aSrobert != CODE_FOR_nothing))
1381*404b540aSrobert {
1382*404b540aSrobert temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1383*404b540aSrobert unsignedp ? umul_widen_optab : smul_widen_optab,
1384*404b540aSrobert op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1385*404b540aSrobert
1386*404b540aSrobert if (temp != 0)
1387*404b540aSrobert {
1388*404b540aSrobert if (GET_MODE_CLASS (mode) == MODE_INT
1389*404b540aSrobert && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1390*404b540aSrobert GET_MODE_BITSIZE (GET_MODE (temp))))
1391*404b540aSrobert return gen_lowpart (mode, temp);
1392*404b540aSrobert else
1393*404b540aSrobert return convert_to_mode (mode, temp, unsignedp);
1394*404b540aSrobert }
1395*404b540aSrobert }
1396*404b540aSrobert
1397*404b540aSrobert /* Look for a wider mode of the same class for which we think we
1398*404b540aSrobert can open-code the operation. Check for a widening multiply at the
1399*404b540aSrobert wider mode as well. */
1400*404b540aSrobert
1401*404b540aSrobert if (CLASS_HAS_WIDER_MODES_P (class)
1402*404b540aSrobert && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1403*404b540aSrobert for (wider_mode = GET_MODE_WIDER_MODE (mode);
1404*404b540aSrobert wider_mode != VOIDmode;
1405*404b540aSrobert wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1406*404b540aSrobert {
1407*404b540aSrobert if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1408*404b540aSrobert || (binoptab == smul_optab
1409*404b540aSrobert && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1410*404b540aSrobert && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1411*404b540aSrobert ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1412*404b540aSrobert != CODE_FOR_nothing)))
1413*404b540aSrobert {
1414*404b540aSrobert rtx xop0 = op0, xop1 = op1;
1415*404b540aSrobert int no_extend = 0;
1416*404b540aSrobert
1417*404b540aSrobert /* For certain integer operations, we need not actually extend
1418*404b540aSrobert the narrow operands, as long as we will truncate
1419*404b540aSrobert the results to the same narrowness. */
1420*404b540aSrobert
1421*404b540aSrobert if ((binoptab == ior_optab || binoptab == and_optab
1422*404b540aSrobert || binoptab == xor_optab
1423*404b540aSrobert || binoptab == add_optab || binoptab == sub_optab
1424*404b540aSrobert || binoptab == smul_optab || binoptab == ashl_optab)
1425*404b540aSrobert && class == MODE_INT)
1426*404b540aSrobert no_extend = 1;
1427*404b540aSrobert
1428*404b540aSrobert xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1429*404b540aSrobert
1430*404b540aSrobert /* The second operand of a shift must always be extended. */
1431*404b540aSrobert xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1432*404b540aSrobert no_extend && binoptab != ashl_optab);
1433*404b540aSrobert
1434*404b540aSrobert temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1435*404b540aSrobert unsignedp, OPTAB_DIRECT);
1436*404b540aSrobert if (temp)
1437*404b540aSrobert {
1438*404b540aSrobert if (class != MODE_INT
1439*404b540aSrobert || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1440*404b540aSrobert GET_MODE_BITSIZE (wider_mode)))
1441*404b540aSrobert {
1442*404b540aSrobert if (target == 0)
1443*404b540aSrobert target = gen_reg_rtx (mode);
1444*404b540aSrobert convert_move (target, temp, 0);
1445*404b540aSrobert return target;
1446*404b540aSrobert }
1447*404b540aSrobert else
1448*404b540aSrobert return gen_lowpart (mode, temp);
1449*404b540aSrobert }
1450*404b540aSrobert else
1451*404b540aSrobert delete_insns_since (last);
1452*404b540aSrobert }
1453*404b540aSrobert }
1454*404b540aSrobert
1455*404b540aSrobert /* These can be done a word at a time. */
1456*404b540aSrobert if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1457*404b540aSrobert && class == MODE_INT
1458*404b540aSrobert && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1459*404b540aSrobert && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1460*404b540aSrobert {
1461*404b540aSrobert int i;
1462*404b540aSrobert rtx insns;
1463*404b540aSrobert rtx equiv_value;
1464*404b540aSrobert
1465*404b540aSrobert /* If TARGET is the same as one of the operands, the REG_EQUAL note
1466*404b540aSrobert won't be accurate, so use a new target. */
1467*404b540aSrobert if (target == 0 || target == op0 || target == op1)
1468*404b540aSrobert target = gen_reg_rtx (mode);
1469*404b540aSrobert
1470*404b540aSrobert start_sequence ();
1471*404b540aSrobert
1472*404b540aSrobert /* Do the actual arithmetic. */
1473*404b540aSrobert for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1474*404b540aSrobert {
1475*404b540aSrobert rtx target_piece = operand_subword (target, i, 1, mode);
1476*404b540aSrobert rtx x = expand_binop (word_mode, binoptab,
1477*404b540aSrobert operand_subword_force (op0, i, mode),
1478*404b540aSrobert operand_subword_force (op1, i, mode),
1479*404b540aSrobert target_piece, unsignedp, next_methods);
1480*404b540aSrobert
1481*404b540aSrobert if (x == 0)
1482*404b540aSrobert break;
1483*404b540aSrobert
1484*404b540aSrobert if (target_piece != x)
1485*404b540aSrobert emit_move_insn (target_piece, x);
1486*404b540aSrobert }
1487*404b540aSrobert
1488*404b540aSrobert insns = get_insns ();
1489*404b540aSrobert end_sequence ();
1490*404b540aSrobert
1491*404b540aSrobert if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1492*404b540aSrobert {
1493*404b540aSrobert if (binoptab->code != UNKNOWN)
1494*404b540aSrobert equiv_value
1495*404b540aSrobert = gen_rtx_fmt_ee (binoptab->code, mode,
1496*404b540aSrobert copy_rtx (op0), copy_rtx (op1));
1497*404b540aSrobert else
1498*404b540aSrobert equiv_value = 0;
1499*404b540aSrobert
1500*404b540aSrobert emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1501*404b540aSrobert return target;
1502*404b540aSrobert }
1503*404b540aSrobert }
1504*404b540aSrobert
1505*404b540aSrobert /* Synthesize double word shifts from single word shifts. */
1506*404b540aSrobert if ((binoptab == lshr_optab || binoptab == ashl_optab
1507*404b540aSrobert || binoptab == ashr_optab)
1508*404b540aSrobert && class == MODE_INT
1509*404b540aSrobert && (GET_CODE (op1) == CONST_INT || !optimize_size)
1510*404b540aSrobert && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1511*404b540aSrobert && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1512*404b540aSrobert && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1513*404b540aSrobert && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1514*404b540aSrobert {
1515*404b540aSrobert unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1516*404b540aSrobert enum machine_mode op1_mode;
1517*404b540aSrobert
1518*404b540aSrobert double_shift_mask = targetm.shift_truncation_mask (mode);
1519*404b540aSrobert shift_mask = targetm.shift_truncation_mask (word_mode);
1520*404b540aSrobert op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1521*404b540aSrobert
1522*404b540aSrobert /* Apply the truncation to constant shifts. */
1523*404b540aSrobert if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1524*404b540aSrobert op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1525*404b540aSrobert
1526*404b540aSrobert if (op1 == CONST0_RTX (op1_mode))
1527*404b540aSrobert return op0;
1528*404b540aSrobert
1529*404b540aSrobert /* Make sure that this is a combination that expand_doubleword_shift
1530*404b540aSrobert can handle. See the comments there for details. */
1531*404b540aSrobert if (double_shift_mask == 0
1532*404b540aSrobert || (shift_mask == BITS_PER_WORD - 1
1533*404b540aSrobert && double_shift_mask == BITS_PER_WORD * 2 - 1))
1534*404b540aSrobert {
1535*404b540aSrobert rtx insns, equiv_value;
1536*404b540aSrobert rtx into_target, outof_target;
1537*404b540aSrobert rtx into_input, outof_input;
1538*404b540aSrobert int left_shift, outof_word;
1539*404b540aSrobert
1540*404b540aSrobert /* If TARGET is the same as one of the operands, the REG_EQUAL note
1541*404b540aSrobert won't be accurate, so use a new target. */
1542*404b540aSrobert if (target == 0 || target == op0 || target == op1)
1543*404b540aSrobert target = gen_reg_rtx (mode);
1544*404b540aSrobert
1545*404b540aSrobert start_sequence ();
1546*404b540aSrobert
1547*404b540aSrobert /* OUTOF_* is the word we are shifting bits away from, and
1548*404b540aSrobert INTO_* is the word that we are shifting bits towards, thus
1549*404b540aSrobert they differ depending on the direction of the shift and
1550*404b540aSrobert WORDS_BIG_ENDIAN. */
1551*404b540aSrobert
1552*404b540aSrobert left_shift = binoptab == ashl_optab;
1553*404b540aSrobert outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1554*404b540aSrobert
1555*404b540aSrobert outof_target = operand_subword (target, outof_word, 1, mode);
1556*404b540aSrobert into_target = operand_subword (target, 1 - outof_word, 1, mode);
1557*404b540aSrobert
1558*404b540aSrobert outof_input = operand_subword_force (op0, outof_word, mode);
1559*404b540aSrobert into_input = operand_subword_force (op0, 1 - outof_word, mode);
1560*404b540aSrobert
1561*404b540aSrobert if (expand_doubleword_shift (op1_mode, binoptab,
1562*404b540aSrobert outof_input, into_input, op1,
1563*404b540aSrobert outof_target, into_target,
1564*404b540aSrobert unsignedp, next_methods, shift_mask))
1565*404b540aSrobert {
1566*404b540aSrobert insns = get_insns ();
1567*404b540aSrobert end_sequence ();
1568*404b540aSrobert
1569*404b540aSrobert equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1570*404b540aSrobert emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1571*404b540aSrobert return target;
1572*404b540aSrobert }
1573*404b540aSrobert end_sequence ();
1574*404b540aSrobert }
1575*404b540aSrobert }
1576*404b540aSrobert
1577*404b540aSrobert /* Synthesize double word rotates from single word shifts. */
1578*404b540aSrobert if ((binoptab == rotl_optab || binoptab == rotr_optab)
1579*404b540aSrobert && class == MODE_INT
1580*404b540aSrobert && GET_CODE (op1) == CONST_INT
1581*404b540aSrobert && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1582*404b540aSrobert && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1583*404b540aSrobert && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1584*404b540aSrobert {
1585*404b540aSrobert rtx insns;
1586*404b540aSrobert rtx into_target, outof_target;
1587*404b540aSrobert rtx into_input, outof_input;
1588*404b540aSrobert rtx inter;
1589*404b540aSrobert int shift_count, left_shift, outof_word;
1590*404b540aSrobert
1591*404b540aSrobert /* If TARGET is the same as one of the operands, the REG_EQUAL note
1592*404b540aSrobert won't be accurate, so use a new target. Do this also if target is not
1593*404b540aSrobert a REG, first because having a register instead may open optimization
1594*404b540aSrobert opportunities, and second because if target and op0 happen to be MEMs
1595*404b540aSrobert designating the same location, we would risk clobbering it too early
1596*404b540aSrobert in the code sequence we generate below. */
1597*404b540aSrobert if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1598*404b540aSrobert target = gen_reg_rtx (mode);
1599*404b540aSrobert
1600*404b540aSrobert start_sequence ();
1601*404b540aSrobert
1602*404b540aSrobert shift_count = INTVAL (op1);
1603*404b540aSrobert
1604*404b540aSrobert /* OUTOF_* is the word we are shifting bits away from, and
1605*404b540aSrobert INTO_* is the word that we are shifting bits towards, thus
1606*404b540aSrobert they differ depending on the direction of the shift and
1607*404b540aSrobert WORDS_BIG_ENDIAN. */
1608*404b540aSrobert
1609*404b540aSrobert left_shift = (binoptab == rotl_optab);
1610*404b540aSrobert outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1611*404b540aSrobert
1612*404b540aSrobert outof_target = operand_subword (target, outof_word, 1, mode);
1613*404b540aSrobert into_target = operand_subword (target, 1 - outof_word, 1, mode);
1614*404b540aSrobert
1615*404b540aSrobert outof_input = operand_subword_force (op0, outof_word, mode);
1616*404b540aSrobert into_input = operand_subword_force (op0, 1 - outof_word, mode);
1617*404b540aSrobert
1618*404b540aSrobert if (shift_count == BITS_PER_WORD)
1619*404b540aSrobert {
1620*404b540aSrobert /* This is just a word swap. */
1621*404b540aSrobert emit_move_insn (outof_target, into_input);
1622*404b540aSrobert emit_move_insn (into_target, outof_input);
1623*404b540aSrobert inter = const0_rtx;
1624*404b540aSrobert }
1625*404b540aSrobert else
1626*404b540aSrobert {
1627*404b540aSrobert rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1628*404b540aSrobert rtx first_shift_count, second_shift_count;
1629*404b540aSrobert optab reverse_unsigned_shift, unsigned_shift;
1630*404b540aSrobert
1631*404b540aSrobert reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1632*404b540aSrobert ? lshr_optab : ashl_optab);
1633*404b540aSrobert
1634*404b540aSrobert unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1635*404b540aSrobert ? ashl_optab : lshr_optab);
1636*404b540aSrobert
1637*404b540aSrobert if (shift_count > BITS_PER_WORD)
1638*404b540aSrobert {
1639*404b540aSrobert first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1640*404b540aSrobert second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1641*404b540aSrobert }
1642*404b540aSrobert else
1643*404b540aSrobert {
1644*404b540aSrobert first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1645*404b540aSrobert second_shift_count = GEN_INT (shift_count);
1646*404b540aSrobert }
1647*404b540aSrobert
1648*404b540aSrobert into_temp1 = expand_binop (word_mode, unsigned_shift,
1649*404b540aSrobert outof_input, first_shift_count,
1650*404b540aSrobert NULL_RTX, unsignedp, next_methods);
1651*404b540aSrobert into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1652*404b540aSrobert into_input, second_shift_count,
1653*404b540aSrobert NULL_RTX, unsignedp, next_methods);
1654*404b540aSrobert
1655*404b540aSrobert if (into_temp1 != 0 && into_temp2 != 0)
1656*404b540aSrobert inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1657*404b540aSrobert into_target, unsignedp, next_methods);
1658*404b540aSrobert else
1659*404b540aSrobert inter = 0;
1660*404b540aSrobert
1661*404b540aSrobert if (inter != 0 && inter != into_target)
1662*404b540aSrobert emit_move_insn (into_target, inter);
1663*404b540aSrobert
1664*404b540aSrobert outof_temp1 = expand_binop (word_mode, unsigned_shift,
1665*404b540aSrobert into_input, first_shift_count,
1666*404b540aSrobert NULL_RTX, unsignedp, next_methods);
1667*404b540aSrobert outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1668*404b540aSrobert outof_input, second_shift_count,
1669*404b540aSrobert NULL_RTX, unsignedp, next_methods);
1670*404b540aSrobert
1671*404b540aSrobert if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1672*404b540aSrobert inter = expand_binop (word_mode, ior_optab,
1673*404b540aSrobert outof_temp1, outof_temp2,
1674*404b540aSrobert outof_target, unsignedp, next_methods);
1675*404b540aSrobert
1676*404b540aSrobert if (inter != 0 && inter != outof_target)
1677*404b540aSrobert emit_move_insn (outof_target, inter);
1678*404b540aSrobert }
1679*404b540aSrobert
1680*404b540aSrobert insns = get_insns ();
1681*404b540aSrobert end_sequence ();
1682*404b540aSrobert
1683*404b540aSrobert if (inter != 0)
1684*404b540aSrobert {
1685*404b540aSrobert /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
1686*404b540aSrobert block to help the register allocator a bit. But a multi-word
1687*404b540aSrobert rotate will need all the input bits when setting the output
1688*404b540aSrobert bits, so there clearly is a conflict between the input and
1689*404b540aSrobert output registers. So we can't use a no-conflict block here. */
1690*404b540aSrobert emit_insn (insns);
1691*404b540aSrobert return target;
1692*404b540aSrobert }
1693*404b540aSrobert }
1694*404b540aSrobert
1695*404b540aSrobert /* These can be done a word at a time by propagating carries. */
1696*404b540aSrobert if ((binoptab == add_optab || binoptab == sub_optab)
1697*404b540aSrobert && class == MODE_INT
1698*404b540aSrobert && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1699*404b540aSrobert && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1700*404b540aSrobert {
1701*404b540aSrobert unsigned int i;
1702*404b540aSrobert optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1703*404b540aSrobert const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1704*404b540aSrobert rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1705*404b540aSrobert rtx xop0, xop1, xtarget;
1706*404b540aSrobert
1707*404b540aSrobert /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1708*404b540aSrobert value is one of those, use it. Otherwise, use 1 since it is the
1709*404b540aSrobert one easiest to get. */
1710*404b540aSrobert #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1711*404b540aSrobert int normalizep = STORE_FLAG_VALUE;
1712*404b540aSrobert #else
1713*404b540aSrobert int normalizep = 1;
1714*404b540aSrobert #endif
1715*404b540aSrobert
1716*404b540aSrobert /* Prepare the operands. */
1717*404b540aSrobert xop0 = force_reg (mode, op0);
1718*404b540aSrobert xop1 = force_reg (mode, op1);
1719*404b540aSrobert
1720*404b540aSrobert xtarget = gen_reg_rtx (mode);
1721*404b540aSrobert
1722*404b540aSrobert if (target == 0 || !REG_P (target))
1723*404b540aSrobert target = xtarget;
1724*404b540aSrobert
1725*404b540aSrobert /* Indicate for flow that the entire target reg is being set. */
1726*404b540aSrobert if (REG_P (target))
1727*404b540aSrobert emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1728*404b540aSrobert
1729*404b540aSrobert /* Do the actual arithmetic. */
1730*404b540aSrobert for (i = 0; i < nwords; i++)
1731*404b540aSrobert {
1732*404b540aSrobert int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1733*404b540aSrobert rtx target_piece = operand_subword (xtarget, index, 1, mode);
1734*404b540aSrobert rtx op0_piece = operand_subword_force (xop0, index, mode);
1735*404b540aSrobert rtx op1_piece = operand_subword_force (xop1, index, mode);
1736*404b540aSrobert rtx x;
1737*404b540aSrobert
1738*404b540aSrobert /* Main add/subtract of the input operands. */
1739*404b540aSrobert x = expand_binop (word_mode, binoptab,
1740*404b540aSrobert op0_piece, op1_piece,
1741*404b540aSrobert target_piece, unsignedp, next_methods);
1742*404b540aSrobert if (x == 0)
1743*404b540aSrobert break;
1744*404b540aSrobert
1745*404b540aSrobert if (i + 1 < nwords)
1746*404b540aSrobert {
1747*404b540aSrobert /* Store carry from main add/subtract. */
1748*404b540aSrobert carry_out = gen_reg_rtx (word_mode);
1749*404b540aSrobert carry_out = emit_store_flag_force (carry_out,
1750*404b540aSrobert (binoptab == add_optab
1751*404b540aSrobert ? LT : GT),
1752*404b540aSrobert x, op0_piece,
1753*404b540aSrobert word_mode, 1, normalizep);
1754*404b540aSrobert }
1755*404b540aSrobert
1756*404b540aSrobert if (i > 0)
1757*404b540aSrobert {
1758*404b540aSrobert rtx newx;
1759*404b540aSrobert
1760*404b540aSrobert /* Add/subtract previous carry to main result. */
1761*404b540aSrobert newx = expand_binop (word_mode,
1762*404b540aSrobert normalizep == 1 ? binoptab : otheroptab,
1763*404b540aSrobert x, carry_in,
1764*404b540aSrobert NULL_RTX, 1, next_methods);
1765*404b540aSrobert
1766*404b540aSrobert if (i + 1 < nwords)
1767*404b540aSrobert {
1768*404b540aSrobert /* Get out carry from adding/subtracting carry in. */
1769*404b540aSrobert rtx carry_tmp = gen_reg_rtx (word_mode);
1770*404b540aSrobert carry_tmp = emit_store_flag_force (carry_tmp,
1771*404b540aSrobert (binoptab == add_optab
1772*404b540aSrobert ? LT : GT),
1773*404b540aSrobert newx, x,
1774*404b540aSrobert word_mode, 1, normalizep);
1775*404b540aSrobert
1776*404b540aSrobert /* Logical-ior the two poss. carry together. */
1777*404b540aSrobert carry_out = expand_binop (word_mode, ior_optab,
1778*404b540aSrobert carry_out, carry_tmp,
1779*404b540aSrobert carry_out, 0, next_methods);
1780*404b540aSrobert if (carry_out == 0)
1781*404b540aSrobert break;
1782*404b540aSrobert }
1783*404b540aSrobert emit_move_insn (target_piece, newx);
1784*404b540aSrobert }
1785*404b540aSrobert else
1786*404b540aSrobert {
1787*404b540aSrobert if (x != target_piece)
1788*404b540aSrobert emit_move_insn (target_piece, x);
1789*404b540aSrobert }
1790*404b540aSrobert
1791*404b540aSrobert carry_in = carry_out;
1792*404b540aSrobert }
1793*404b540aSrobert
1794*404b540aSrobert if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1795*404b540aSrobert {
1796*404b540aSrobert if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1797*404b540aSrobert || ! rtx_equal_p (target, xtarget))
1798*404b540aSrobert {
1799*404b540aSrobert rtx temp = emit_move_insn (target, xtarget);
1800*404b540aSrobert
1801*404b540aSrobert set_unique_reg_note (temp,
1802*404b540aSrobert REG_EQUAL,
1803*404b540aSrobert gen_rtx_fmt_ee (binoptab->code, mode,
1804*404b540aSrobert copy_rtx (xop0),
1805*404b540aSrobert copy_rtx (xop1)));
1806*404b540aSrobert }
1807*404b540aSrobert else
1808*404b540aSrobert target = xtarget;
1809*404b540aSrobert
1810*404b540aSrobert return target;
1811*404b540aSrobert }
1812*404b540aSrobert
1813*404b540aSrobert else
1814*404b540aSrobert delete_insns_since (last);
1815*404b540aSrobert }
1816*404b540aSrobert
1817*404b540aSrobert /* Attempt to synthesize double word multiplies using a sequence of word
1818*404b540aSrobert mode multiplications. We first attempt to generate a sequence using a
1819*404b540aSrobert more efficient unsigned widening multiply, and if that fails we then
1820*404b540aSrobert try using a signed widening multiply. */
1821*404b540aSrobert
1822*404b540aSrobert if (binoptab == smul_optab
1823*404b540aSrobert && class == MODE_INT
1824*404b540aSrobert && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1825*404b540aSrobert && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1826*404b540aSrobert && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1827*404b540aSrobert {
1828*404b540aSrobert rtx product = NULL_RTX;
1829*404b540aSrobert
1830*404b540aSrobert if (umul_widen_optab->handlers[(int) mode].insn_code
1831*404b540aSrobert != CODE_FOR_nothing)
1832*404b540aSrobert {
1833*404b540aSrobert product = expand_doubleword_mult (mode, op0, op1, target,
1834*404b540aSrobert true, methods);
1835*404b540aSrobert if (!product)
1836*404b540aSrobert delete_insns_since (last);
1837*404b540aSrobert }
1838*404b540aSrobert
1839*404b540aSrobert if (product == NULL_RTX
1840*404b540aSrobert && smul_widen_optab->handlers[(int) mode].insn_code
1841*404b540aSrobert != CODE_FOR_nothing)
1842*404b540aSrobert {
1843*404b540aSrobert product = expand_doubleword_mult (mode, op0, op1, target,
1844*404b540aSrobert false, methods);
1845*404b540aSrobert if (!product)
1846*404b540aSrobert delete_insns_since (last);
1847*404b540aSrobert }
1848*404b540aSrobert
1849*404b540aSrobert if (product != NULL_RTX)
1850*404b540aSrobert {
1851*404b540aSrobert if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1852*404b540aSrobert {
1853*404b540aSrobert temp = emit_move_insn (target ? target : product, product);
1854*404b540aSrobert set_unique_reg_note (temp,
1855*404b540aSrobert REG_EQUAL,
1856*404b540aSrobert gen_rtx_fmt_ee (MULT, mode,
1857*404b540aSrobert copy_rtx (op0),
1858*404b540aSrobert copy_rtx (op1)));
1859*404b540aSrobert }
1860*404b540aSrobert return product;
1861*404b540aSrobert }
1862*404b540aSrobert }
1863*404b540aSrobert
1864*404b540aSrobert /* It can't be open-coded in this mode.
1865*404b540aSrobert Use a library call if one is available and caller says that's ok. */
1866*404b540aSrobert
1867*404b540aSrobert if (binoptab->handlers[(int) mode].libfunc
1868*404b540aSrobert && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1869*404b540aSrobert {
1870*404b540aSrobert rtx insns;
1871*404b540aSrobert rtx op1x = op1;
1872*404b540aSrobert enum machine_mode op1_mode = mode;
1873*404b540aSrobert rtx value;
1874*404b540aSrobert
1875*404b540aSrobert start_sequence ();
1876*404b540aSrobert
1877*404b540aSrobert if (shift_op)
1878*404b540aSrobert {
1879*404b540aSrobert op1_mode = word_mode;
1880*404b540aSrobert /* Specify unsigned here,
1881*404b540aSrobert since negative shift counts are meaningless. */
1882*404b540aSrobert op1x = convert_to_mode (word_mode, op1, 1);
1883*404b540aSrobert }
1884*404b540aSrobert
1885*404b540aSrobert if (GET_MODE (op0) != VOIDmode
1886*404b540aSrobert && GET_MODE (op0) != mode)
1887*404b540aSrobert op0 = convert_to_mode (mode, op0, unsignedp);
1888*404b540aSrobert
1889*404b540aSrobert /* Pass 1 for NO_QUEUE so we don't lose any increments
1890*404b540aSrobert if the libcall is cse'd or moved. */
1891*404b540aSrobert value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1892*404b540aSrobert NULL_RTX, LCT_CONST, mode, 2,
1893*404b540aSrobert op0, mode, op1x, op1_mode);
1894*404b540aSrobert
1895*404b540aSrobert insns = get_insns ();
1896*404b540aSrobert end_sequence ();
1897*404b540aSrobert
1898*404b540aSrobert target = gen_reg_rtx (mode);
1899*404b540aSrobert emit_libcall_block (insns, target, value,
1900*404b540aSrobert gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1901*404b540aSrobert
1902*404b540aSrobert return target;
1903*404b540aSrobert }
1904*404b540aSrobert
1905*404b540aSrobert delete_insns_since (last);
1906*404b540aSrobert
1907*404b540aSrobert /* It can't be done in this mode. Can we do it in a wider mode? */
1908*404b540aSrobert
1909*404b540aSrobert if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1910*404b540aSrobert || methods == OPTAB_MUST_WIDEN))
1911*404b540aSrobert {
1912*404b540aSrobert /* Caller says, don't even try. */
1913*404b540aSrobert delete_insns_since (entry_last);
1914*404b540aSrobert return 0;
1915*404b540aSrobert }
1916*404b540aSrobert
1917*404b540aSrobert /* Compute the value of METHODS to pass to recursive calls.
1918*404b540aSrobert Don't allow widening to be tried recursively. */
1919*404b540aSrobert
1920*404b540aSrobert methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1921*404b540aSrobert
1922*404b540aSrobert /* Look for a wider mode of the same class for which it appears we can do
1923*404b540aSrobert the operation. */
1924*404b540aSrobert
1925*404b540aSrobert if (CLASS_HAS_WIDER_MODES_P (class))
1926*404b540aSrobert {
1927*404b540aSrobert for (wider_mode = GET_MODE_WIDER_MODE (mode);
1928*404b540aSrobert wider_mode != VOIDmode;
1929*404b540aSrobert wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1930*404b540aSrobert {
1931*404b540aSrobert if ((binoptab->handlers[(int) wider_mode].insn_code
1932*404b540aSrobert != CODE_FOR_nothing)
1933*404b540aSrobert || (methods == OPTAB_LIB
1934*404b540aSrobert && binoptab->handlers[(int) wider_mode].libfunc))
1935*404b540aSrobert {
1936*404b540aSrobert rtx xop0 = op0, xop1 = op1;
1937*404b540aSrobert int no_extend = 0;
1938*404b540aSrobert
1939*404b540aSrobert /* For certain integer operations, we need not actually extend
1940*404b540aSrobert the narrow operands, as long as we will truncate
1941*404b540aSrobert the results to the same narrowness. */
1942*404b540aSrobert
1943*404b540aSrobert if ((binoptab == ior_optab || binoptab == and_optab
1944*404b540aSrobert || binoptab == xor_optab
1945*404b540aSrobert || binoptab == add_optab || binoptab == sub_optab
1946*404b540aSrobert || binoptab == smul_optab || binoptab == ashl_optab)
1947*404b540aSrobert && class == MODE_INT)
1948*404b540aSrobert no_extend = 1;
1949*404b540aSrobert
1950*404b540aSrobert xop0 = widen_operand (xop0, wider_mode, mode,
1951*404b540aSrobert unsignedp, no_extend);
1952*404b540aSrobert
1953*404b540aSrobert /* The second operand of a shift must always be extended. */
1954*404b540aSrobert xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1955*404b540aSrobert no_extend && binoptab != ashl_optab);
1956*404b540aSrobert
1957*404b540aSrobert temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1958*404b540aSrobert unsignedp, methods);
1959*404b540aSrobert if (temp)
1960*404b540aSrobert {
1961*404b540aSrobert if (class != MODE_INT
1962*404b540aSrobert || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1963*404b540aSrobert GET_MODE_BITSIZE (wider_mode)))
1964*404b540aSrobert {
1965*404b540aSrobert if (target == 0)
1966*404b540aSrobert target = gen_reg_rtx (mode);
1967*404b540aSrobert convert_move (target, temp, 0);
1968*404b540aSrobert return target;
1969*404b540aSrobert }
1970*404b540aSrobert else
1971*404b540aSrobert return gen_lowpart (mode, temp);
1972*404b540aSrobert }
1973*404b540aSrobert else
1974*404b540aSrobert delete_insns_since (last);
1975*404b540aSrobert }
1976*404b540aSrobert }
1977*404b540aSrobert }
1978*404b540aSrobert
1979*404b540aSrobert delete_insns_since (entry_last);
1980*404b540aSrobert return 0;
1981*404b540aSrobert }
1982*404b540aSrobert
1983*404b540aSrobert /* Expand a binary operator which has both signed and unsigned forms.
1984*404b540aSrobert UOPTAB is the optab for unsigned operations, and SOPTAB is for
1985*404b540aSrobert signed operations.
1986*404b540aSrobert
1987*404b540aSrobert If we widen unsigned operands, we may use a signed wider operation instead
1988*404b540aSrobert of an unsigned wider operation, since the result would be the same. */
1989*404b540aSrobert
1990*404b540aSrobert rtx
sign_expand_binop(enum machine_mode mode,optab uoptab,optab soptab,rtx op0,rtx op1,rtx target,int unsignedp,enum optab_methods methods)1991*404b540aSrobert sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1992*404b540aSrobert rtx op0, rtx op1, rtx target, int unsignedp,
1993*404b540aSrobert enum optab_methods methods)
1994*404b540aSrobert {
1995*404b540aSrobert rtx temp;
1996*404b540aSrobert optab direct_optab = unsignedp ? uoptab : soptab;
1997*404b540aSrobert struct optab wide_soptab;
1998*404b540aSrobert
1999*404b540aSrobert /* Do it without widening, if possible. */
2000*404b540aSrobert temp = expand_binop (mode, direct_optab, op0, op1, target,
2001*404b540aSrobert unsignedp, OPTAB_DIRECT);
2002*404b540aSrobert if (temp || methods == OPTAB_DIRECT)
2003*404b540aSrobert return temp;
2004*404b540aSrobert
2005*404b540aSrobert /* Try widening to a signed int. Make a fake signed optab that
2006*404b540aSrobert hides any signed insn for direct use. */
2007*404b540aSrobert wide_soptab = *soptab;
2008*404b540aSrobert wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2009*404b540aSrobert wide_soptab.handlers[(int) mode].libfunc = 0;
2010*404b540aSrobert
2011*404b540aSrobert temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2012*404b540aSrobert unsignedp, OPTAB_WIDEN);
2013*404b540aSrobert
2014*404b540aSrobert /* For unsigned operands, try widening to an unsigned int. */
2015*404b540aSrobert if (temp == 0 && unsignedp)
2016*404b540aSrobert temp = expand_binop (mode, uoptab, op0, op1, target,
2017*404b540aSrobert unsignedp, OPTAB_WIDEN);
2018*404b540aSrobert if (temp || methods == OPTAB_WIDEN)
2019*404b540aSrobert return temp;
2020*404b540aSrobert
2021*404b540aSrobert /* Use the right width lib call if that exists. */
2022*404b540aSrobert temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2023*404b540aSrobert if (temp || methods == OPTAB_LIB)
2024*404b540aSrobert return temp;
2025*404b540aSrobert
2026*404b540aSrobert /* Must widen and use a lib call, use either signed or unsigned. */
2027*404b540aSrobert temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2028*404b540aSrobert unsignedp, methods);
2029*404b540aSrobert if (temp != 0)
2030*404b540aSrobert return temp;
2031*404b540aSrobert if (unsignedp)
2032*404b540aSrobert return expand_binop (mode, uoptab, op0, op1, target,
2033*404b540aSrobert unsignedp, methods);
2034*404b540aSrobert return 0;
2035*404b540aSrobert }
2036*404b540aSrobert
2037*404b540aSrobert /* Generate code to perform an operation specified by UNOPPTAB
2038*404b540aSrobert on operand OP0, with two results to TARG0 and TARG1.
2039*404b540aSrobert We assume that the order of the operands for the instruction
2040*404b540aSrobert is TARG0, TARG1, OP0.
2041*404b540aSrobert
2042*404b540aSrobert Either TARG0 or TARG1 may be zero, but what that means is that
2043*404b540aSrobert the result is not actually wanted. We will generate it into
2044*404b540aSrobert a dummy pseudo-reg and discard it. They may not both be zero.
2045*404b540aSrobert
2046*404b540aSrobert Returns 1 if this operation can be performed; 0 if not. */
2047*404b540aSrobert
int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
		    int unsignedp)
{
  /* The operation's mode comes from whichever target was supplied.  */
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* An unwanted result still needs somewhere to go; give it a dummy
     pseudo that will simply be discarded.  */
  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      /* Operands 0 and 1 of the pattern are the two outputs; operand 2
	 is the input.  */
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	/* The generator rejected the operands; discard any insns emitted
	   while preparing them.  */
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (unoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Compute both results in the wider mode, then narrow them
		 into the requested targets.  */
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* Nothing worked; remove everything emitted since entry.  */
  delete_insns_since (entry_last);
  return 0;
}
2128*404b540aSrobert
2129*404b540aSrobert /* Generate code to perform an operation specified by BINOPTAB
2130*404b540aSrobert on operands OP0 and OP1, with two results to TARG1 and TARG2.
2131*404b540aSrobert We assume that the order of the operands for the instruction
2132*404b540aSrobert is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2133*404b540aSrobert [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2134*404b540aSrobert
2135*404b540aSrobert Either TARG0 or TARG1 may be zero, but what that means is that
2136*404b540aSrobert the result is not actually wanted. We will generate it into
2137*404b540aSrobert a dummy pseudo-reg and discard it. They may not both be zero.
2138*404b540aSrobert
2139*404b540aSrobert Returns 1 if this operation can be performed; 0 if not. */
2140*404b540aSrobert
int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
		     int unsignedp)
{
  /* The operation's mode comes from whichever target was supplied.  */
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  /* An unwanted result still needs somewhere to go; give it a dummy
     pseudo that will simply be discarded.  */
  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      /* Operand 0 is the first result, operands 1 and 2 are the inputs,
	 operand 3 is the second result.  */
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
	 those of the actual operands, convert the operands.  It would
	 seem that we don't need to convert CONST_INTs, but we do, so
	 that they're properly zero-extended, sign-extended or truncated
	 for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
	xop0 = convert_modes (mode0,
			      GET_MODE (op0) != VOIDmode
			      ? GET_MODE (op0)
			      : mode,
			      xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
	xop1 = convert_modes (mode1,
			      GET_MODE (op1) != VOIDmode
			      ? GET_MODE (op1)
			      : mode,
			      xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
	xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
	 for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
	{
	  emit_insn (pat);
	  return 1;
	}
      else
	/* The generator rejected the operands; discard any insns emitted
	   while preparing them.  */
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (binoptab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Compute both results in the wider mode, then narrow them
		 into the requested targets.  */
	      rtx t0 = gen_reg_rtx (wider_mode);
	      rtx t1 = gen_reg_rtx (wider_mode);
	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

	      if (expand_twoval_binop (binoptab, cop0, cop1,
				       t0, t1, unsignedp))
		{
		  convert_move (targ0, t0, unsignedp);
		  convert_move (targ1, t1, unsignedp);
		  return 1;
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* Nothing worked; remove everything emitted since entry.  */
  delete_insns_since (entry_last);
  return 0;
}
2253*404b540aSrobert
2254*404b540aSrobert /* Expand the two-valued library call indicated by BINOPTAB, but
2255*404b540aSrobert preserve only one of the values. If TARG0 is non-NULL, the first
2256*404b540aSrobert value is placed into TARG0; otherwise the second value is placed
2257*404b540aSrobert into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2258*404b540aSrobert value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2259*404b540aSrobert This routine assumes that the value returned by the library call is
2260*404b540aSrobert as if the return value was of an integral mode twice as wide as the
2261*404b540aSrobert mode of OP0. Returns 1 if the call was successful. */
2262*404b540aSrobert
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
				    NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want: the first
     value lives at byte offset 0, the second in the following
     MODE-sized chunk.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location, recording the
     equivalent expression (CODE OP0 OP1).  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
2300*404b540aSrobert
2301*404b540aSrobert
2302*404b540aSrobert /* Wrapper around expand_unop which takes an rtx code to specify
2303*404b540aSrobert the operation to perform, not an optab pointer. All other
2304*404b540aSrobert arguments are the same. */
2305*404b540aSrobert rtx
expand_simple_unop(enum machine_mode mode,enum rtx_code code,rtx op0,rtx target,int unsignedp)2306*404b540aSrobert expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2307*404b540aSrobert rtx target, int unsignedp)
2308*404b540aSrobert {
2309*404b540aSrobert optab unop = code_to_optab[(int) code];
2310*404b540aSrobert gcc_assert (unop);
2311*404b540aSrobert
2312*404b540aSrobert return expand_unop (mode, unop, op0, target, unsignedp);
2313*404b540aSrobert }
2314*404b540aSrobert
2315*404b540aSrobert /* Try calculating
2316*404b540aSrobert (clz:narrow x)
2317*404b540aSrobert as
2318*404b540aSrobert (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (clz_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      /* Record where to go back to if the expansion fails.  */
	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      /* Zero-extend: the wide clz then counts the extra
		 guaranteed-zero high bits on top of the narrow
		 result.  */
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
	      if (temp != 0)
		/* Subtract off the count of those extra high bits.  */
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
2355*404b540aSrobert
2356*404b540aSrobert /* Try calculating (parity x) as (and (popcount x) 1), where
2357*404b540aSrobert popcount can also be done in a wider mode. */
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      /* Unlike widen_clz, this loop starts at MODE itself: a popcount
	 insn in the same mode is directly usable.  */
      for (wider_mode = mode; wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (popcount_optab->handlers[(int) wider_mode].insn_code
	      != CODE_FOR_nothing)
	    {
	      rtx xop0, temp, last;

	      /* Record where to go back to if the expansion fails.  */
	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      /* Zero-extend: the added high bits are zero, so they do
		 not change the popcount's low bit (the parity).  */
	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
				  true);
	      if (temp != 0)
		/* Parity is the low bit of the population count.  */
		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
				     target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      return temp;
	    }
	}
    }
  return 0;
}
2392*404b540aSrobert
2393*404b540aSrobert /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2394*404b540aSrobert conditions, VAL may already be a SUBREG against which we cannot generate
2395*404b540aSrobert a further SUBREG. In this case, we expect forcing the value into a
2396*404b540aSrobert register will work around the situation. */
2397*404b540aSrobert
2398*404b540aSrobert static rtx
lowpart_subreg_maybe_copy(enum machine_mode omode,rtx val,enum machine_mode imode)2399*404b540aSrobert lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2400*404b540aSrobert enum machine_mode imode)
2401*404b540aSrobert {
2402*404b540aSrobert rtx ret;
2403*404b540aSrobert ret = lowpart_subreg (omode, val, imode);
2404*404b540aSrobert if (ret == NULL)
2405*404b540aSrobert {
2406*404b540aSrobert val = force_reg (imode, val);
2407*404b540aSrobert ret = lowpart_subreg (omode, val, imode);
2408*404b540aSrobert gcc_assert (ret != NULL);
2409*404b540aSrobert }
2410*404b540aSrobert return ret;
2411*404b540aSrobert }
2412*404b540aSrobert
2413*404b540aSrobert /* Expand a floating point absolute value or negation operation via a
2414*404b540aSrobert logical operation on the sign bit. */
2415*404b540aSrobert
static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      /* The value fits in one word; operate on it as a single integer.  */
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      /* Multi-word value: operate word by word.  WORD is the index of
	 the word containing the sign bit, BITPOS its position within
	 that word.  */
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* Build a mask with only the sign bit set...  */
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  /* ...and for ABS, invert it: NEG is value XOR mask, ABS is
     value AND ~mask.  */
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      /* Apply the mask only to the word that holds the sign
		 bit.  */
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_double_const (lo, hi, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    /* All other words are copied through unchanged.  */
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      /* Attach the high-level (CODE OP0) equivalence to the emitted
	 block.  */
      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      /* Single-word case: one logical op on the integer view of the
	 value, then reinterpret the result in MODE.  */
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_double_const (lo, hi, imode),
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
			   gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}
2517*404b540aSrobert
2518*404b540aSrobert /* Generate code to perform an operation specified by UNOPTAB
2519*404b540aSrobert on operand OP0, with result having machine-mode MODE.
2520*404b540aSrobert
2521*404b540aSrobert UNSIGNEDP is for the case where we have to widen the operands
2522*404b540aSrobert to perform the operation. It says to use zero-extension.
2523*404b540aSrobert
2524*404b540aSrobert If TARGET is nonzero, the value
2525*404b540aSrobert is generated there, if it is convenient to do so.
2526*404b540aSrobert In all cases an rtx is returned for the locus of the value;
2527*404b540aSrobert this may or may not be TARGET. */
2528*404b540aSrobert
rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  rtx last = get_last_insn ();	/* Checkpoint for discarding failed attempts.  */
  rtx pat;

  class = GET_MODE_CLASS (mode);

  /* Strategy 1: the target provides an insn pattern for this optab
     directly in MODE; use it.  */
  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
	temp = target;
      else
	temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
	  && GET_MODE (xop0) != mode0)
	xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
	xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
	temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
	{
	  /* If the expander produced a multi-insn sequence and we cannot
	     attach a REG_EQUAL note to it, throw the insns away and retry
	     without a preferred target.  */
	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	      && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return temp;
	}
      else
	delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
	return temp;
      else
	goto try_libcall;
    }

  /* Strategy 2: widen the operand, do the operation in a wider mode with
     an insn pattern, then truncate the result back to MODE.  */
  if (CLASS_HAS_WIDER_MODES_P (class))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && class == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (class != MODE_INT
		    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
					       GET_MODE_BITSIZE (wider_mode)))
		  {
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      /* Emit the per-word insns as a no-conflict block, with a REG_EQUAL
	 equivalent describing the whole operation.  */
      emit_no_conflict_block (insns, target, op0, NULL_RTX,
			      gen_rtx_fmt_e (unoptab->code, mode,
					     copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  temp = expand_absneg_bit (NEG, mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == popcount_optab || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
				       NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unoptab->code, outmode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  /* Last resort before giving up: like strategy 2 above, but now a
     wider-mode libcall also counts as a way to do the operation.  */
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if ((unoptab->handlers[(int) wider_mode].insn_code
	       != CODE_FOR_nothing)
	      || unoptab->handlers[(int) wider_mode].libfunc)
	    {
	      rtx xop0 = op0;

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */

	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab)
				    && class == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  */
	      if (unoptab == clz_optab && temp != 0)
		temp = expand_binop (wider_mode, sub_optab, temp,
				     GEN_INT (GET_MODE_BITSIZE (wider_mode)
					      - GET_MODE_BITSIZE (mode)),
				     target, true, OPTAB_DIRECT);

	      if (temp)
		{
		  if (class != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
			   unoptab == negv_optab ? subv_optab : sub_optab,
			   CONST0_RTX (mode), op0,
			   target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
	return temp;
    }

  return 0;
}
2799*404b540aSrobert
2800*404b540aSrobert /* Emit code to compute the absolute value of OP0, with result to
2801*404b540aSrobert TARGET if convenient. (TARGET may be 0.) The return value says
2802*404b540aSrobert where the result actually is to be found.
2803*404b540aSrobert
   MODE is the mode of the operand, and also of the result.  */
2808*404b540aSrobert
rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  /* Without -ftrapv there is no reason to use the overflow-trapping
     variants below.  */
  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
		      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      /* Discard any partially emitted negation before trying the
	 next strategy.  */
      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   size_int (GET_MODE_BITSIZE (mode) - 1),
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
			     temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  /* No jump-free expansion worked; the caller may fall back to a
     compare-and-jump sequence (see expand_abs).  */
  return NULL_RTX;
}
2871*404b540aSrobert
/* Like expand_abs_nojump, but if no jump-free expansion is available,
   fall back to copying OP0 into TARGET and branching around a negation
   when the value is already nonnegative.  SAFE nonzero means it is
   acceptable to compute the result directly in TARGET even though it
   may overlap an input (see the checks below).  */

rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp, op1;

  /* Without -ftrapv there is no reason to use the overflow-trapping
     negation variant below.  */
  if (! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  /* Note: despite its name, OP1 holds the label we branch to when the
     value is already nonnegative.  */
  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  /* Skip the negation when TARGET >= 0.  */
  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
		     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
2915*404b540aSrobert
2916*404b540aSrobert /* A subroutine of expand_copysign, perform the copysign operation using the
2917*404b540aSrobert abs and neg primitives advertised to exist on the target. The assumption
2918*404b540aSrobert is that we have a split register file, and leaving op0 in fp registers,
2919*404b540aSrobert and not playing with subregs so much, will help the register allocator. */
2920*404b540aSrobert
static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
			int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word;
  rtx label;

  /* OP1 is still needed for its sign bit below, so never build the
     result on top of it.  */
  if (target == op1)
    target = NULL_RTX;

  /* Get |OP0| into TARGET: either via an abs insn, or via a plain copy
     if the caller says OP0's sign bit is already clear.  */
  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
	target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  /* Find an integer view of OP1 in which to test the sign bit: the
     whole value if it fits in a word, otherwise the word containing
     bit BITPOS.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      op1 = gen_lowpart (imode, op1);
    }
  else
    {
      imode = word_mode;
      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      op1 = operand_subword_force (op1, word, mode);
    }

  /* Build the single-bit mask selecting the sign bit.  */
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  op1 = expand_binop (imode, and_optab, op1,
		      immed_double_const (lo, hi, imode),
		      NULL_RTX, 1, OPTAB_LIB_WIDEN);

  /* If OP1's sign bit is clear, |OP0| is already the answer; otherwise
     negate the value in TARGET.  */
  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
2995*404b540aSrobert
2996*404b540aSrobert
2997*404b540aSrobert /* A subroutine of expand_copysign, perform the entire copysign operation
2998*404b540aSrobert with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2999*404b540aSrobert is true if op0 is known to have its sign bit clear. */
3000*404b540aSrobert
static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  /* Decide whether the value fits in a single integer register
     (NWORDS == 1) or must be handled word by word; WORD is the index
     of the word containing the sign bit.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* Build the single-bit mask selecting the sign bit within its word.  */
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      /* Sign-bit word: compute (OP0 & ~mask) | (OP1 & mask),
		 skipping the AND on OP0 when its sign bit is known
		 to be clear already.  */
	      if (!op0_is_abs)
		op0_piece = expand_binop (imode, and_optab, op0_piece,
					  immed_double_const (~lo, ~hi, imode),
					  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_double_const (lo, hi, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    /* All other words pass through from OP0 unchanged.  */
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      /* Single-word case: same (OP0 & ~mask) | (OP1 & mask) computation
	 on the whole value at once.  */
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
			  immed_double_const (lo, hi, imode),
			  NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_double_const (~lo, ~hi, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
3098*404b540aSrobert
3099*404b540aSrobert /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3100*404b540aSrobert scalar floating point mode. Return NULL if we do not know how to
3101*404b540aSrobert expand the operation inline. */
3102*404b540aSrobert
rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  /* The fallbacks below manipulate the sign bit directly, which only
     makes sense for formats that have a signed zero.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  /* If OP0 is a constant, fold its absolute value now.  */
  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  /* Prefer the abs/neg strategy when the sign bit can be read and the
     target has both abs and neg insns (or OP0 is a constant).  */
  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
	      && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  /* Otherwise fall back to integer bit masking, which requires a
     writable sign bit position.  */
  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
3148*404b540aSrobert
3149*404b540aSrobert /* Generate an instruction whose insn-code is INSN_CODE,
3150*404b540aSrobert with two operands: an output TARGET and an input OP0.
3151*404b540aSrobert TARGET *must* be nonzero, and the output is always stored there.
3152*404b540aSrobert CODE is an rtx code such that (CODE OP0) is an rtx that describes
3153*404b540aSrobert the value that is stored into TARGET. */
3154*404b540aSrobert
void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  /* If the expander produced a multi-insn sequence, attach a REG_EQUAL
     note describing the overall result, unless CODE is UNKNOWN.  */
  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  /* If a fresh pseudo had to be substituted for the output, copy the
     result back into the caller's TARGET.  */
  if (temp != target)
    emit_move_insn (target, temp);
}
3182*404b540aSrobert
struct no_conflict_data
{
  /* TARGET is the output of the block under examination; FIRST is the
     block's first insn, and INSN the insn currently being tested via
     note_stores.  */
  rtx target, first, insn;
  /* Set when the examined store/clobber must remain in the block.  */
  bool must_stay;
};
3188*404b540aSrobert
3189*404b540aSrobert /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3190*404b540aSrobert Set P->must_stay if the currently examined clobber / store has to stay
3191*404b540aSrobert in the list of insns that constitute the actual no_conflict block /
3192*404b540aSrobert libcall block. */
3193*404b540aSrobert static void
no_conflict_move_test(rtx dest,rtx set,void * p0)3194*404b540aSrobert no_conflict_move_test (rtx dest, rtx set, void *p0)
3195*404b540aSrobert {
3196*404b540aSrobert struct no_conflict_data *p= p0;
3197*404b540aSrobert
3198*404b540aSrobert /* If this inns directly contributes to setting the target, it must stay. */
3199*404b540aSrobert if (reg_overlap_mentioned_p (p->target, dest))
3200*404b540aSrobert p->must_stay = true;
3201*404b540aSrobert /* If we haven't committed to keeping any other insns in the list yet,
3202*404b540aSrobert there is nothing more to check. */
3203*404b540aSrobert else if (p->insn == p->first)
3204*404b540aSrobert return;
3205*404b540aSrobert /* If this insn sets / clobbers a register that feeds one of the insns
3206*404b540aSrobert already in the list, this insn has to stay too. */
3207*404b540aSrobert else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3208*404b540aSrobert || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3209*404b540aSrobert || reg_used_between_p (dest, p->first, p->insn)
3210*404b540aSrobert /* Likewise if this insn depends on a register set by a previous
3211*404b540aSrobert insn in the list, or if it sets a result (presumably a hard
3212*404b540aSrobert register) that is set or clobbered by a previous insn.
3213*404b540aSrobert N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3214*404b540aSrobert SET_DEST perform the former check on the address, and the latter
3215*404b540aSrobert check on the MEM. */
3216*404b540aSrobert || (GET_CODE (set) == SET
3217*404b540aSrobert && (modified_in_p (SET_SRC (set), p->first)
3218*404b540aSrobert || modified_in_p (SET_DEST (set), p->first)
3219*404b540aSrobert || modified_between_p (SET_SRC (set), p->first, p->insn)
3220*404b540aSrobert || modified_between_p (SET_DEST (set), p->first, p->insn))))
3221*404b540aSrobert p->must_stay = true;
3222*404b540aSrobert }
3223*404b540aSrobert
3224*404b540aSrobert /* Encapsulate the block starting at FIRST and ending with LAST, which is
3225*404b540aSrobert logically equivalent to EQUIV, so it gets manipulated as a unit if it
3226*404b540aSrobert is possible to do so. */
3227*404b540aSrobert
3228*404b540aSrobert static void
maybe_encapsulate_block(rtx first,rtx last,rtx equiv)3229*404b540aSrobert maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
3230*404b540aSrobert {
3231*404b540aSrobert if (!flag_non_call_exceptions || !may_trap_p (equiv))
3232*404b540aSrobert {
3233*404b540aSrobert /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
3234*404b540aSrobert encapsulated region would not be in one basic block, i.e. when
3235*404b540aSrobert there is a control_flow_insn_p insn between FIRST and LAST. */
3236*404b540aSrobert bool attach_libcall_retval_notes = true;
3237*404b540aSrobert rtx insn, next = NEXT_INSN (last);
3238*404b540aSrobert
3239*404b540aSrobert for (insn = first; insn != next; insn = NEXT_INSN (insn))
3240*404b540aSrobert if (control_flow_insn_p (insn))
3241*404b540aSrobert {
3242*404b540aSrobert attach_libcall_retval_notes = false;
3243*404b540aSrobert break;
3244*404b540aSrobert }
3245*404b540aSrobert
3246*404b540aSrobert if (attach_libcall_retval_notes)
3247*404b540aSrobert {
3248*404b540aSrobert REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3249*404b540aSrobert REG_NOTES (first));
3250*404b540aSrobert REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3251*404b540aSrobert REG_NOTES (last));
3252*404b540aSrobert }
3253*404b540aSrobert }
3254*404b540aSrobert }
3255*404b540aSrobert
/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.  All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  /* The no-conflict machinery only applies to a register output, and
     cannot be used while reload is rewriting insns.  Also punt if INSNS
     contains anything but ordinary insns, or already carries a libcall
     marker (libcall regions must not nest).  */
  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
	  || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and
     remove these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions on their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      /* no_conflict_move_test sets data.must_stay when INSN stores into
	 TARGET or otherwise cannot safely be hoisted out of the block.  */
      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
	{
	  /* Unlink INSN from the INSNS chain...  */
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  /* ...and emit it now, ahead of the encapsulated block.  */
	  add_insn (insn);
	}
    }

  /* Remember where the block proper will begin.  */
  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      /* Mark each word-setting insn as not conflicting with the
	 (multi-word) register inputs.  */
      if (op1 && REG_P (op1))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && REG_P (op0))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      /* The closing no-op copy of TARGET to itself; the REG_EQUAL note,
	 if any, belongs on this insn.  */
      last = emit_move_insn (target, target);
      if (equiv)
	set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  /* FIRST is the first insn of the block: the one just after the
     intermediate insns emitted above, or the stream head if there
     were none.  */
  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Wrap the block in REG_LIBCALL/REG_RETVAL notes when it is safe.  */
  maybe_encapsulate_block (first, last, equiv);

  return last;
}
3381*404b540aSrobert
/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.

   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      /* Drop any REG_EH_REGION note that claims the call cannot throw.  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	    if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	      remove_note (insn, note);
	  }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0)
	    XEXP (note, 0) = constm1_rtx;
	  else
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
						  REG_NOTES (insn));
	}

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions on their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  /* no_conflict_move_test decides whether INSN may safely be
	     hoisted out of the libcall block; TARGET is const0_rtx here
	     since there is no output register to protect.  */
	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      /* Unlink INSN from the chain and emit it now, ahead of
		 the libcall block.  */
	      if (PREV_INSN (insn))
		NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  /* The copy of RESULT into TARGET closes the block; attach the REG_EQUAL
     note describing the whole computation when a move pattern exists.  */
  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  /* If TARGET was replaced by a fresh pseudo above, copy into the
     caller's real destination.  */
  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  /* Surround the block with REG_LIBCALL/REG_RETVAL notes when safe.  */
  maybe_encapsulate_block (first, last, equiv);
}
3533*404b540aSrobert
3534*404b540aSrobert /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3535*404b540aSrobert PURPOSE describes how this comparison will be used. CODE is the rtx
3536*404b540aSrobert comparison code we will be using.
3537*404b540aSrobert
3538*404b540aSrobert ??? Actually, CODE is slightly weaker than that. A target is still
3539*404b540aSrobert required to implement all of the normal bcc operations, but not
3540*404b540aSrobert required to implement all (or any) of the unordered bcc operations. */
3541*404b540aSrobert
3542*404b540aSrobert int
can_compare_p(enum rtx_code code,enum machine_mode mode,enum can_compare_purpose purpose)3543*404b540aSrobert can_compare_p (enum rtx_code code, enum machine_mode mode,
3544*404b540aSrobert enum can_compare_purpose purpose)
3545*404b540aSrobert {
3546*404b540aSrobert do
3547*404b540aSrobert {
3548*404b540aSrobert if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3549*404b540aSrobert {
3550*404b540aSrobert if (purpose == ccp_jump)
3551*404b540aSrobert return bcc_gen_fctn[(int) code] != NULL;
3552*404b540aSrobert else if (purpose == ccp_store_flag)
3553*404b540aSrobert return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3554*404b540aSrobert else
3555*404b540aSrobert /* There's only one cmov entry point, and it's allowed to fail. */
3556*404b540aSrobert return 1;
3557*404b540aSrobert }
3558*404b540aSrobert if (purpose == ccp_jump
3559*404b540aSrobert && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3560*404b540aSrobert return 1;
3561*404b540aSrobert if (purpose == ccp_cmov
3562*404b540aSrobert && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3563*404b540aSrobert return 1;
3564*404b540aSrobert if (purpose == ccp_store_flag
3565*404b540aSrobert && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3566*404b540aSrobert return 1;
3567*404b540aSrobert mode = GET_MODE_WIDER_MODE (mode);
3568*404b540aSrobert }
3569*404b540aSrobert while (mode != VOIDmode);
3570*404b540aSrobert
3571*404b540aSrobert return 0;
3572*404b540aSrobert }
3573*404b540aSrobert
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure if we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      /* Alignment available to the block-compare pattern: the smaller of
	 the two operands' alignments, in bytes.  */
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstrn_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  /* The block compare produced a memcmp-style value; rewrite
	     the caller's comparison as RESULT <op> 0.  */
	  *px = result;
	  *py = const0_rtx;
	  *pmode = result_mode;
	  return;
	}

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  /* If the target can compare directly in this mode (or a wider one),
     the operands are ready as-is.  */
  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
	libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					word_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.  */
      *px = result;
      *pmode = word_mode;
      *py = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED)
	{
	  if (*punsignedp)
	    *px = plus_constant (result, 1);
	  else
	    *py = const0_rtx;
	}
      return;
    }

  /* Only floating-point modes should remain at this point; hand them to
     the float-library comparison code.  */
  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}
3741*404b540aSrobert
3742*404b540aSrobert /* Before emitting an insn with code ICODE, make sure that X, which is going
3743*404b540aSrobert to be used for operand OPNUM of the insn, is converted from mode MODE to
3744*404b540aSrobert WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3745*404b540aSrobert that it is accepted by the operand predicate. Return the new value. */
3746*404b540aSrobert
3747*404b540aSrobert static rtx
prepare_operand(int icode,rtx x,int opnum,enum machine_mode mode,enum machine_mode wider_mode,int unsignedp)3748*404b540aSrobert prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3749*404b540aSrobert enum machine_mode wider_mode, int unsignedp)
3750*404b540aSrobert {
3751*404b540aSrobert if (mode != wider_mode)
3752*404b540aSrobert x = convert_modes (wider_mode, mode, x, unsignedp);
3753*404b540aSrobert
3754*404b540aSrobert if (!insn_data[icode].operand[opnum].predicate
3755*404b540aSrobert (x, insn_data[icode].operand[opnum].mode))
3756*404b540aSrobert {
3757*404b540aSrobert if (no_new_pseudos)
3758*404b540aSrobert return NULL_RTX;
3759*404b540aSrobert x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3760*404b540aSrobert }
3761*404b540aSrobert
3762*404b540aSrobert return x;
3763*404b540aSrobert }
3764*404b540aSrobert
/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
	{
	  /* A cbranch pattern performs compare and jump in one insn.  */
	  icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

	  if (icode != CODE_FOR_nothing
	      && insn_data[icode].operand[0].predicate (test, wider_mode))
	    {
	      /* NOTE(review): prepare_operand can return NULL_RTX when
		 no_new_pseudos is set; that case appears unhandled here
		 -- presumably callers never hit it.  Verify.  */
	      x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	      y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	      emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	      return;
	    }
	}

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      /* No pattern in this mode; retry in the next wider mode if the
	 mode class has one.  */
      if (!CLASS_HAS_WIDER_MODES_P (class))
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  /* The caller guaranteed (via can_compare_p) that some pattern exists,
     so falling out of the loop is a bug.  */
  gcc_unreachable ();
}
3831*404b540aSrobert
3832*404b540aSrobert /* Generate code to compare X with Y so that the condition codes are
3833*404b540aSrobert set and to jump to LABEL if the condition is true. If X is a
3834*404b540aSrobert constant and Y is not a constant, then the comparison is swapped to
3835*404b540aSrobert ensure that the comparison RTL has the canonical form.
3836*404b540aSrobert
3837*404b540aSrobert UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3838*404b540aSrobert need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3839*404b540aSrobert the proper branch condition code.
3840*404b540aSrobert
3841*404b540aSrobert If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3842*404b540aSrobert
3843*404b540aSrobert MODE is the mode of the inputs (in case they are const_int).
3844*404b540aSrobert
3845*404b540aSrobert COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3846*404b540aSrobert be passed unchanged to emit_cmp_insn, then potentially converted into an
3847*404b540aSrobert unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3848*404b540aSrobert
3849*404b540aSrobert void
emit_cmp_and_jump_insns(rtx x,rtx y,enum rtx_code comparison,rtx size,enum machine_mode mode,int unsignedp,rtx label)3850*404b540aSrobert emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3851*404b540aSrobert enum machine_mode mode, int unsignedp, rtx label)
3852*404b540aSrobert {
3853*404b540aSrobert rtx op0 = x, op1 = y;
3854*404b540aSrobert
3855*404b540aSrobert /* Swap operands and condition to ensure canonical RTL. */
3856*404b540aSrobert if (swap_commutative_operands_p (x, y))
3857*404b540aSrobert {
3858*404b540aSrobert /* If we're not emitting a branch, this means some caller
3859*404b540aSrobert is out of sync. */
3860*404b540aSrobert gcc_assert (label);
3861*404b540aSrobert
3862*404b540aSrobert op0 = y, op1 = x;
3863*404b540aSrobert comparison = swap_condition (comparison);
3864*404b540aSrobert }
3865*404b540aSrobert
3866*404b540aSrobert #ifdef HAVE_cc0
3867*404b540aSrobert /* If OP0 is still a constant, then both X and Y must be constants.
3868*404b540aSrobert Force X into a register to create canonical RTL. */
3869*404b540aSrobert if (CONSTANT_P (op0))
3870*404b540aSrobert op0 = force_reg (mode, op0);
3871*404b540aSrobert #endif
3872*404b540aSrobert
3873*404b540aSrobert if (unsignedp)
3874*404b540aSrobert comparison = unsigned_condition (comparison);
3875*404b540aSrobert
3876*404b540aSrobert prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3877*404b540aSrobert ccp_jump);
3878*404b540aSrobert emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3879*404b540aSrobert }
3880*404b540aSrobert
3881*404b540aSrobert /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3882*404b540aSrobert
3883*404b540aSrobert void
emit_cmp_insn(rtx x,rtx y,enum rtx_code comparison,rtx size,enum machine_mode mode,int unsignedp)3884*404b540aSrobert emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3885*404b540aSrobert enum machine_mode mode, int unsignedp)
3886*404b540aSrobert {
3887*404b540aSrobert emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3888*404b540aSrobert }
3889*404b540aSrobert
3890*404b540aSrobert /* Emit a library call comparison between floating point X and Y.
3891*404b540aSrobert COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3892*404b540aSrobert
static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
		       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  /* Search for a mode with a usable comparison libfunc, starting at the
     operands' own mode and widening.  At each mode, try COMPARISON
     itself first, then its operand-swapped form, then its reversed
     form.  */
  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
	break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
	{
	  rtx tmp;
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;
	  break;
	}

      /* The reversed form is only usable when the libfunc returns a
	 boolean, since the result must be inverted afterward (see the
	 REVERSED_P handling at the end).  */
      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
	  && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  /* Some libfunc must exist for a float comparison.  */
  gcc_assert (mode != VOIDmode);

  /* Widen both operands if the libfunc found is for a wider mode.  */
  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      /* UNORDERED (x, y) holds iff x != x or y != y, i.e. either
	 operand is a NaN.  */
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	{
	  /* The libfunc yields a three-way value rather than a boolean;
	     record the value it produces when the condition is true and
	     when it is false, per comparison code.  */
	  rtx true_rtx, false_rtx;

	  switch (comparison)
	    {
	    case EQ:
	      true_rtx = const0_rtx;
	      false_rtx = const_true_rtx;
	      break;

	    case NE:
	      true_rtx = const_true_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case GT:
	      true_rtx = const1_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case GE:
	      true_rtx = const0_rtx;
	      false_rtx = constm1_rtx;
	      break;

	    case LT:
	      true_rtx = constm1_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case LE:
	      true_rtx = const0_rtx;
	      false_rtx = const1_rtx;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
					equiv, true_rtx, false_rtx);
	}
    }

  /* Emit the libcall in its own sequence so it can be packaged, along
     with the equivalence note, by emit_libcall_block below.  */
  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  /* For a boolean-returning libfunc (or UNORDERED), the condition to
     test on the result is NE 0 -- or EQ 0 if we used the reversed
     comparison above.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  /* Hand back an integer comparison of the libcall result against
     zero, replacing the original float comparison.  */
  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}
4016*404b540aSrobert
4017*404b540aSrobert /* Generate code to indirectly jump to a location given in the rtx LOC. */
4018*404b540aSrobert
4019*404b540aSrobert void
emit_indirect_jump(rtx loc)4020*404b540aSrobert emit_indirect_jump (rtx loc)
4021*404b540aSrobert {
4022*404b540aSrobert if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4023*404b540aSrobert (loc, Pmode))
4024*404b540aSrobert loc = copy_to_mode_reg (Pmode, loc);
4025*404b540aSrobert
4026*404b540aSrobert emit_jump_insn (gen_indirect_jump (loc));
4027*404b540aSrobert emit_barrier ();
4028*404b540aSrobert }
4029*404b540aSrobert
4030*404b540aSrobert #ifdef HAVE_conditional_move
4031*404b540aSrobert
4032*404b540aSrobert /* Emit a conditional move instruction if the machine supports one for that
4033*404b540aSrobert condition and machine mode.
4034*404b540aSrobert
4035*404b540aSrobert OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4036*404b540aSrobert the mode to use should they be constants. If it is VOIDmode, they cannot
4037*404b540aSrobert both be constants.
4038*404b540aSrobert
4039*404b540aSrobert OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4040*404b540aSrobert should be stored there. MODE is the mode to use should they be constants.
4041*404b540aSrobert If it is VOIDmode, they cannot both be constants.
4042*404b540aSrobert
4043*404b540aSrobert The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4044*404b540aSrobert is not supported. */
4045*404b540aSrobert
4046*404b540aSrobert rtx
emit_conditional_move(rtx target,enum rtx_code code,rtx op0,rtx op1,enum machine_mode cmode,rtx op2,rtx op3,enum machine_mode mode,int unsignedp)4047*404b540aSrobert emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4048*404b540aSrobert enum machine_mode cmode, rtx op2, rtx op3,
4049*404b540aSrobert enum machine_mode mode, int unsignedp)
4050*404b540aSrobert {
4051*404b540aSrobert rtx tem, subtarget, comparison, insn;
4052*404b540aSrobert enum insn_code icode;
4053*404b540aSrobert enum rtx_code reversed;
4054*404b540aSrobert
4055*404b540aSrobert /* If one operand is constant, make it the second one. Only do this
4056*404b540aSrobert if the other operand is not constant as well. */
4057*404b540aSrobert
4058*404b540aSrobert if (swap_commutative_operands_p (op0, op1))
4059*404b540aSrobert {
4060*404b540aSrobert tem = op0;
4061*404b540aSrobert op0 = op1;
4062*404b540aSrobert op1 = tem;
4063*404b540aSrobert code = swap_condition (code);
4064*404b540aSrobert }
4065*404b540aSrobert
4066*404b540aSrobert /* get_condition will prefer to generate LT and GT even if the old
4067*404b540aSrobert comparison was against zero, so undo that canonicalization here since
4068*404b540aSrobert comparisons against zero are cheaper. */
4069*404b540aSrobert if (code == LT && op1 == const1_rtx)
4070*404b540aSrobert code = LE, op1 = const0_rtx;
4071*404b540aSrobert else if (code == GT && op1 == constm1_rtx)
4072*404b540aSrobert code = GE, op1 = const0_rtx;
4073*404b540aSrobert
4074*404b540aSrobert if (cmode == VOIDmode)
4075*404b540aSrobert cmode = GET_MODE (op0);
4076*404b540aSrobert
4077*404b540aSrobert if (swap_commutative_operands_p (op2, op3)
4078*404b540aSrobert && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4079*404b540aSrobert != UNKNOWN))
4080*404b540aSrobert {
4081*404b540aSrobert tem = op2;
4082*404b540aSrobert op2 = op3;
4083*404b540aSrobert op3 = tem;
4084*404b540aSrobert code = reversed;
4085*404b540aSrobert }
4086*404b540aSrobert
4087*404b540aSrobert if (mode == VOIDmode)
4088*404b540aSrobert mode = GET_MODE (op2);
4089*404b540aSrobert
4090*404b540aSrobert icode = movcc_gen_code[mode];
4091*404b540aSrobert
4092*404b540aSrobert if (icode == CODE_FOR_nothing)
4093*404b540aSrobert return 0;
4094*404b540aSrobert
4095*404b540aSrobert if (!target)
4096*404b540aSrobert target = gen_reg_rtx (mode);
4097*404b540aSrobert
4098*404b540aSrobert subtarget = target;
4099*404b540aSrobert
4100*404b540aSrobert /* If the insn doesn't accept these operands, put them in pseudos. */
4101*404b540aSrobert
4102*404b540aSrobert if (!insn_data[icode].operand[0].predicate
4103*404b540aSrobert (subtarget, insn_data[icode].operand[0].mode))
4104*404b540aSrobert subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4105*404b540aSrobert
4106*404b540aSrobert if (!insn_data[icode].operand[2].predicate
4107*404b540aSrobert (op2, insn_data[icode].operand[2].mode))
4108*404b540aSrobert op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4109*404b540aSrobert
4110*404b540aSrobert if (!insn_data[icode].operand[3].predicate
4111*404b540aSrobert (op3, insn_data[icode].operand[3].mode))
4112*404b540aSrobert op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4113*404b540aSrobert
4114*404b540aSrobert /* Everything should now be in the suitable form, so emit the compare insn
4115*404b540aSrobert and then the conditional move. */
4116*404b540aSrobert
4117*404b540aSrobert comparison
4118*404b540aSrobert = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4119*404b540aSrobert
4120*404b540aSrobert /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4121*404b540aSrobert /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4122*404b540aSrobert return NULL and let the caller figure out how best to deal with this
4123*404b540aSrobert situation. */
4124*404b540aSrobert if (GET_CODE (comparison) != code)
4125*404b540aSrobert return NULL_RTX;
4126*404b540aSrobert
4127*404b540aSrobert insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4128*404b540aSrobert
4129*404b540aSrobert /* If that failed, then give up. */
4130*404b540aSrobert if (insn == 0)
4131*404b540aSrobert return 0;
4132*404b540aSrobert
4133*404b540aSrobert emit_insn (insn);
4134*404b540aSrobert
4135*404b540aSrobert if (subtarget != target)
4136*404b540aSrobert convert_move (target, subtarget, 0);
4137*404b540aSrobert
4138*404b540aSrobert return target;
4139*404b540aSrobert }
4140*404b540aSrobert
4141*404b540aSrobert /* Return nonzero if a conditional move of mode MODE is supported.
4142*404b540aSrobert
4143*404b540aSrobert This function is for combine so it can tell whether an insn that looks
4144*404b540aSrobert like a conditional move is actually supported by the hardware. If we
4145*404b540aSrobert guess wrong we lose a bit on optimization, but that's it. */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */
4148*404b540aSrobert
4149*404b540aSrobert int
can_conditionally_move_p(enum machine_mode mode)4150*404b540aSrobert can_conditionally_move_p (enum machine_mode mode)
4151*404b540aSrobert {
4152*404b540aSrobert if (movcc_gen_code[mode] != CODE_FOR_nothing)
4153*404b540aSrobert return 1;
4154*404b540aSrobert
4155*404b540aSrobert return 0;
4156*404b540aSrobert }
4157*404b540aSrobert
4158*404b540aSrobert #endif /* HAVE_conditional_move */
4159*404b540aSrobert
4160*404b540aSrobert /* Emit a conditional addition instruction if the machine supports one for that
4161*404b540aSrobert condition and machine mode.
4162*404b540aSrobert
4163*404b540aSrobert OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4164*404b540aSrobert the mode to use should they be constants. If it is VOIDmode, they cannot
4165*404b540aSrobert both be constants.
4166*404b540aSrobert
4167*404b540aSrobert OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4168*404b540aSrobert should be stored there. MODE is the mode to use should they be constants.
4169*404b540aSrobert If it is VOIDmode, they cannot both be constants.
4170*404b540aSrobert
4171*404b540aSrobert The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4172*404b540aSrobert is not supported. */
4173*404b540aSrobert
rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      enum machine_mode cmode, rtx op2, rtx op3,
		      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  CODE is swapped to
     keep the comparison's meaning.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* Put a lone constant value operand last too, reversing the condition
     to compensate -- but only when the condition is reversible.  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
	  != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  /* Look up the conditional-add pattern for MODE.  */
  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  /* Copy the result to TARGET if it was computed in a different pseudo.  */
  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}
4268*404b540aSrobert
4269*404b540aSrobert /* These functions attempt to generate an insn body, rather than
4270*404b540aSrobert emitting the insn, but if the gen function already emits them, we
4271*404b540aSrobert make no attempt to turn them back into naked patterns. */
4272*404b540aSrobert
4273*404b540aSrobert /* Generate and return an insn body to add Y to X. */
4274*404b540aSrobert
4275*404b540aSrobert rtx
gen_add2_insn(rtx x,rtx y)4276*404b540aSrobert gen_add2_insn (rtx x, rtx y)
4277*404b540aSrobert {
4278*404b540aSrobert int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4279*404b540aSrobert
4280*404b540aSrobert gcc_assert (insn_data[icode].operand[0].predicate
4281*404b540aSrobert (x, insn_data[icode].operand[0].mode));
4282*404b540aSrobert gcc_assert (insn_data[icode].operand[1].predicate
4283*404b540aSrobert (x, insn_data[icode].operand[1].mode));
4284*404b540aSrobert gcc_assert (insn_data[icode].operand[2].predicate
4285*404b540aSrobert (y, insn_data[icode].operand[2].mode));
4286*404b540aSrobert
4287*404b540aSrobert return GEN_FCN (icode) (x, x, y);
4288*404b540aSrobert }
4289*404b540aSrobert
4290*404b540aSrobert /* Generate and return an insn body to add r1 and c,
4291*404b540aSrobert storing the result in r0. */
4292*404b540aSrobert rtx
gen_add3_insn(rtx r0,rtx r1,rtx c)4293*404b540aSrobert gen_add3_insn (rtx r0, rtx r1, rtx c)
4294*404b540aSrobert {
4295*404b540aSrobert int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4296*404b540aSrobert
4297*404b540aSrobert if (icode == CODE_FOR_nothing
4298*404b540aSrobert || !(insn_data[icode].operand[0].predicate
4299*404b540aSrobert (r0, insn_data[icode].operand[0].mode))
4300*404b540aSrobert || !(insn_data[icode].operand[1].predicate
4301*404b540aSrobert (r1, insn_data[icode].operand[1].mode))
4302*404b540aSrobert || !(insn_data[icode].operand[2].predicate
4303*404b540aSrobert (c, insn_data[icode].operand[2].mode)))
4304*404b540aSrobert return NULL_RTX;
4305*404b540aSrobert
4306*404b540aSrobert return GEN_FCN (icode) (r0, r1, c);
4307*404b540aSrobert }
4308*404b540aSrobert
4309*404b540aSrobert int
have_add2_insn(rtx x,rtx y)4310*404b540aSrobert have_add2_insn (rtx x, rtx y)
4311*404b540aSrobert {
4312*404b540aSrobert int icode;
4313*404b540aSrobert
4314*404b540aSrobert gcc_assert (GET_MODE (x) != VOIDmode);
4315*404b540aSrobert
4316*404b540aSrobert icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4317*404b540aSrobert
4318*404b540aSrobert if (icode == CODE_FOR_nothing)
4319*404b540aSrobert return 0;
4320*404b540aSrobert
4321*404b540aSrobert if (!(insn_data[icode].operand[0].predicate
4322*404b540aSrobert (x, insn_data[icode].operand[0].mode))
4323*404b540aSrobert || !(insn_data[icode].operand[1].predicate
4324*404b540aSrobert (x, insn_data[icode].operand[1].mode))
4325*404b540aSrobert || !(insn_data[icode].operand[2].predicate
4326*404b540aSrobert (y, insn_data[icode].operand[2].mode)))
4327*404b540aSrobert return 0;
4328*404b540aSrobert
4329*404b540aSrobert return 1;
4330*404b540aSrobert }
4331*404b540aSrobert
4332*404b540aSrobert /* Generate and return an insn body to subtract Y from X. */
4333*404b540aSrobert
4334*404b540aSrobert rtx
gen_sub2_insn(rtx x,rtx y)4335*404b540aSrobert gen_sub2_insn (rtx x, rtx y)
4336*404b540aSrobert {
4337*404b540aSrobert int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4338*404b540aSrobert
4339*404b540aSrobert gcc_assert (insn_data[icode].operand[0].predicate
4340*404b540aSrobert (x, insn_data[icode].operand[0].mode));
4341*404b540aSrobert gcc_assert (insn_data[icode].operand[1].predicate
4342*404b540aSrobert (x, insn_data[icode].operand[1].mode));
4343*404b540aSrobert gcc_assert (insn_data[icode].operand[2].predicate
4344*404b540aSrobert (y, insn_data[icode].operand[2].mode));
4345*404b540aSrobert
4346*404b540aSrobert return GEN_FCN (icode) (x, x, y);
4347*404b540aSrobert }
4348*404b540aSrobert
4349*404b540aSrobert /* Generate and return an insn body to subtract r1 and c,
4350*404b540aSrobert storing the result in r0. */
4351*404b540aSrobert rtx
gen_sub3_insn(rtx r0,rtx r1,rtx c)4352*404b540aSrobert gen_sub3_insn (rtx r0, rtx r1, rtx c)
4353*404b540aSrobert {
4354*404b540aSrobert int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4355*404b540aSrobert
4356*404b540aSrobert if (icode == CODE_FOR_nothing
4357*404b540aSrobert || !(insn_data[icode].operand[0].predicate
4358*404b540aSrobert (r0, insn_data[icode].operand[0].mode))
4359*404b540aSrobert || !(insn_data[icode].operand[1].predicate
4360*404b540aSrobert (r1, insn_data[icode].operand[1].mode))
4361*404b540aSrobert || !(insn_data[icode].operand[2].predicate
4362*404b540aSrobert (c, insn_data[icode].operand[2].mode)))
4363*404b540aSrobert return NULL_RTX;
4364*404b540aSrobert
4365*404b540aSrobert return GEN_FCN (icode) (r0, r1, c);
4366*404b540aSrobert }
4367*404b540aSrobert
4368*404b540aSrobert int
have_sub2_insn(rtx x,rtx y)4369*404b540aSrobert have_sub2_insn (rtx x, rtx y)
4370*404b540aSrobert {
4371*404b540aSrobert int icode;
4372*404b540aSrobert
4373*404b540aSrobert gcc_assert (GET_MODE (x) != VOIDmode);
4374*404b540aSrobert
4375*404b540aSrobert icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4376*404b540aSrobert
4377*404b540aSrobert if (icode == CODE_FOR_nothing)
4378*404b540aSrobert return 0;
4379*404b540aSrobert
4380*404b540aSrobert if (!(insn_data[icode].operand[0].predicate
4381*404b540aSrobert (x, insn_data[icode].operand[0].mode))
4382*404b540aSrobert || !(insn_data[icode].operand[1].predicate
4383*404b540aSrobert (x, insn_data[icode].operand[1].mode))
4384*404b540aSrobert || !(insn_data[icode].operand[2].predicate
4385*404b540aSrobert (y, insn_data[icode].operand[2].mode)))
4386*404b540aSrobert return 0;
4387*404b540aSrobert
4388*404b540aSrobert return 1;
4389*404b540aSrobert }
4390*404b540aSrobert
4391*404b540aSrobert /* Generate the body of an instruction to copy Y into X.
4392*404b540aSrobert It may be a list of insns, if one insn isn't enough. */
4393*404b540aSrobert
4394*404b540aSrobert rtx
gen_move_insn(rtx x,rtx y)4395*404b540aSrobert gen_move_insn (rtx x, rtx y)
4396*404b540aSrobert {
4397*404b540aSrobert rtx seq;
4398*404b540aSrobert
4399*404b540aSrobert start_sequence ();
4400*404b540aSrobert emit_move_insn_1 (x, y);
4401*404b540aSrobert seq = get_insns ();
4402*404b540aSrobert end_sequence ();
4403*404b540aSrobert return seq;
4404*404b540aSrobert }
4405*404b540aSrobert
4406*404b540aSrobert /* Return the insn code used to extend FROM_MODE to TO_MODE.
4407*404b540aSrobert UNSIGNEDP specifies zero-extension instead of sign-extension. If
4408*404b540aSrobert no such operation exists, CODE_FOR_nothing will be returned. */
4409*404b540aSrobert
4410*404b540aSrobert enum insn_code
can_extend_p(enum machine_mode to_mode,enum machine_mode from_mode,int unsignedp)4411*404b540aSrobert can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4412*404b540aSrobert int unsignedp)
4413*404b540aSrobert {
4414*404b540aSrobert convert_optab tab;
4415*404b540aSrobert #ifdef HAVE_ptr_extend
4416*404b540aSrobert if (unsignedp < 0)
4417*404b540aSrobert return CODE_FOR_ptr_extend;
4418*404b540aSrobert #endif
4419*404b540aSrobert
4420*404b540aSrobert tab = unsignedp ? zext_optab : sext_optab;
4421*404b540aSrobert return tab->handlers[to_mode][from_mode].insn_code;
4422*404b540aSrobert }
4423*404b540aSrobert
4424*404b540aSrobert /* Generate the body of an insn to extend Y (with mode MFROM)
4425*404b540aSrobert into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4426*404b540aSrobert
4427*404b540aSrobert rtx
gen_extend_insn(rtx x,rtx y,enum machine_mode mto,enum machine_mode mfrom,int unsignedp)4428*404b540aSrobert gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4429*404b540aSrobert enum machine_mode mfrom, int unsignedp)
4430*404b540aSrobert {
4431*404b540aSrobert enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4432*404b540aSrobert return GEN_FCN (icode) (x, y);
4433*404b540aSrobert }
4434*404b540aSrobert
4435*404b540aSrobert /* can_fix_p and can_float_p say whether the target machine
4436*404b540aSrobert can directly convert a given fixed point type to
4437*404b540aSrobert a given floating point type, or vice versa.
4438*404b540aSrobert The returned value is the CODE_FOR_... value to use,
4439*404b540aSrobert or CODE_FOR_nothing if these modes cannot be directly converted.
4440*404b540aSrobert
4441*404b540aSrobert *TRUNCP_PTR is set to 1 if it is necessary to output
4442*404b540aSrobert an explicit FTRUNC insn before the fix insn; otherwise 0. */
4443*404b540aSrobert
4444*404b540aSrobert static enum insn_code
can_fix_p(enum machine_mode fixmode,enum machine_mode fltmode,int unsignedp,int * truncp_ptr)4445*404b540aSrobert can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4446*404b540aSrobert int unsignedp, int *truncp_ptr)
4447*404b540aSrobert {
4448*404b540aSrobert convert_optab tab;
4449*404b540aSrobert enum insn_code icode;
4450*404b540aSrobert
4451*404b540aSrobert tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4452*404b540aSrobert icode = tab->handlers[fixmode][fltmode].insn_code;
4453*404b540aSrobert if (icode != CODE_FOR_nothing)
4454*404b540aSrobert {
4455*404b540aSrobert *truncp_ptr = 0;
4456*404b540aSrobert return icode;
4457*404b540aSrobert }
4458*404b540aSrobert
4459*404b540aSrobert /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4460*404b540aSrobert for this to work. We need to rework the fix* and ftrunc* patterns
4461*404b540aSrobert and documentation. */
4462*404b540aSrobert tab = unsignedp ? ufix_optab : sfix_optab;
4463*404b540aSrobert icode = tab->handlers[fixmode][fltmode].insn_code;
4464*404b540aSrobert if (icode != CODE_FOR_nothing
4465*404b540aSrobert && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4466*404b540aSrobert {
4467*404b540aSrobert *truncp_ptr = 1;
4468*404b540aSrobert return icode;
4469*404b540aSrobert }
4470*404b540aSrobert
4471*404b540aSrobert *truncp_ptr = 0;
4472*404b540aSrobert return CODE_FOR_nothing;
4473*404b540aSrobert }
4474*404b540aSrobert
4475*404b540aSrobert static enum insn_code
can_float_p(enum machine_mode fltmode,enum machine_mode fixmode,int unsignedp)4476*404b540aSrobert can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4477*404b540aSrobert int unsignedp)
4478*404b540aSrobert {
4479*404b540aSrobert convert_optab tab;
4480*404b540aSrobert
4481*404b540aSrobert tab = unsignedp ? ufloat_optab : sfloat_optab;
4482*404b540aSrobert return tab->handlers[fltmode][fixmode].insn_code;
4483*404b540aSrobert }
4484*404b540aSrobert
4485*404b540aSrobert /* Generate code to convert FROM to floating point
4486*404b540aSrobert and store in TO. FROM must be fixed point and not VOIDmode.
4487*404b540aSrobert UNSIGNEDP nonzero means regard FROM as unsigned.
4488*404b540aSrobert Normally this is done by correcting the final value
4489*404b540aSrobert if it is negative. */
4490*404b540aSrobert
4491*404b540aSrobert void
expand_float(rtx to,rtx from,int unsignedp)4492*404b540aSrobert expand_float (rtx to, rtx from, int unsignedp)
4493*404b540aSrobert {
4494*404b540aSrobert enum insn_code icode;
4495*404b540aSrobert rtx target = to;
4496*404b540aSrobert enum machine_mode fmode, imode;
4497*404b540aSrobert bool can_do_signed = false;
4498*404b540aSrobert
4499*404b540aSrobert /* Crash now, because we won't be able to decide which mode to use. */
4500*404b540aSrobert gcc_assert (GET_MODE (from) != VOIDmode);
4501*404b540aSrobert
4502*404b540aSrobert /* Look for an insn to do the conversion. Do it in the specified
4503*404b540aSrobert modes if possible; otherwise convert either input, output or both to
4504*404b540aSrobert wider mode. If the integer mode is wider than the mode of FROM,
4505*404b540aSrobert we can do the conversion signed even if the input is unsigned. */
4506*404b540aSrobert
4507*404b540aSrobert for (fmode = GET_MODE (to); fmode != VOIDmode;
4508*404b540aSrobert fmode = GET_MODE_WIDER_MODE (fmode))
4509*404b540aSrobert for (imode = GET_MODE (from); imode != VOIDmode;
4510*404b540aSrobert imode = GET_MODE_WIDER_MODE (imode))
4511*404b540aSrobert {
4512*404b540aSrobert int doing_unsigned = unsignedp;
4513*404b540aSrobert
4514*404b540aSrobert if (fmode != GET_MODE (to)
4515*404b540aSrobert && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4516*404b540aSrobert continue;
4517*404b540aSrobert
4518*404b540aSrobert icode = can_float_p (fmode, imode, unsignedp);
4519*404b540aSrobert if (icode == CODE_FOR_nothing && unsignedp)
4520*404b540aSrobert {
4521*404b540aSrobert enum insn_code scode = can_float_p (fmode, imode, 0);
4522*404b540aSrobert if (scode != CODE_FOR_nothing)
4523*404b540aSrobert can_do_signed = true;
4524*404b540aSrobert if (imode != GET_MODE (from))
4525*404b540aSrobert icode = scode, doing_unsigned = 0;
4526*404b540aSrobert }
4527*404b540aSrobert
4528*404b540aSrobert if (icode != CODE_FOR_nothing)
4529*404b540aSrobert {
4530*404b540aSrobert if (imode != GET_MODE (from))
4531*404b540aSrobert from = convert_to_mode (imode, from, unsignedp);
4532*404b540aSrobert
4533*404b540aSrobert if (fmode != GET_MODE (to))
4534*404b540aSrobert target = gen_reg_rtx (fmode);
4535*404b540aSrobert
4536*404b540aSrobert emit_unop_insn (icode, target, from,
4537*404b540aSrobert doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4538*404b540aSrobert
4539*404b540aSrobert if (target != to)
4540*404b540aSrobert convert_move (to, target, 0);
4541*404b540aSrobert return;
4542*404b540aSrobert }
4543*404b540aSrobert }
4544*404b540aSrobert
4545*404b540aSrobert /* Unsigned integer, and no way to convert directly. For binary
4546*404b540aSrobert floating point modes, convert as signed, then conditionally adjust
4547*404b540aSrobert the result. */
4548*404b540aSrobert if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
4549*404b540aSrobert {
4550*404b540aSrobert rtx label = gen_label_rtx ();
4551*404b540aSrobert rtx temp;
4552*404b540aSrobert REAL_VALUE_TYPE offset;
4553*404b540aSrobert
4554*404b540aSrobert /* Look for a usable floating mode FMODE wider than the source and at
4555*404b540aSrobert least as wide as the target. Using FMODE will avoid rounding woes
4556*404b540aSrobert with unsigned values greater than the signed maximum value. */
4557*404b540aSrobert
4558*404b540aSrobert for (fmode = GET_MODE (to); fmode != VOIDmode;
4559*404b540aSrobert fmode = GET_MODE_WIDER_MODE (fmode))
4560*404b540aSrobert if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4561*404b540aSrobert && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4562*404b540aSrobert break;
4563*404b540aSrobert
4564*404b540aSrobert if (fmode == VOIDmode)
4565*404b540aSrobert {
4566*404b540aSrobert /* There is no such mode. Pretend the target is wide enough. */
4567*404b540aSrobert fmode = GET_MODE (to);
4568*404b540aSrobert
4569*404b540aSrobert /* Avoid double-rounding when TO is narrower than FROM. */
4570*404b540aSrobert if ((significand_size (fmode) + 1)
4571*404b540aSrobert < GET_MODE_BITSIZE (GET_MODE (from)))
4572*404b540aSrobert {
4573*404b540aSrobert rtx temp1;
4574*404b540aSrobert rtx neglabel = gen_label_rtx ();
4575*404b540aSrobert
4576*404b540aSrobert /* Don't use TARGET if it isn't a register, is a hard register,
4577*404b540aSrobert or is the wrong mode. */
4578*404b540aSrobert if (!REG_P (target)
4579*404b540aSrobert || REGNO (target) < FIRST_PSEUDO_REGISTER
4580*404b540aSrobert || GET_MODE (target) != fmode)
4581*404b540aSrobert target = gen_reg_rtx (fmode);
4582*404b540aSrobert
4583*404b540aSrobert imode = GET_MODE (from);
4584*404b540aSrobert do_pending_stack_adjust ();
4585*404b540aSrobert
4586*404b540aSrobert /* Test whether the sign bit is set. */
4587*404b540aSrobert emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4588*404b540aSrobert 0, neglabel);
4589*404b540aSrobert
4590*404b540aSrobert /* The sign bit is not set. Convert as signed. */
4591*404b540aSrobert expand_float (target, from, 0);
4592*404b540aSrobert emit_jump_insn (gen_jump (label));
4593*404b540aSrobert emit_barrier ();
4594*404b540aSrobert
4595*404b540aSrobert /* The sign bit is set.
4596*404b540aSrobert Convert to a usable (positive signed) value by shifting right
4597*404b540aSrobert one bit, while remembering if a nonzero bit was shifted
4598*404b540aSrobert out; i.e., compute (from & 1) | (from >> 1). */
4599*404b540aSrobert
4600*404b540aSrobert emit_label (neglabel);
4601*404b540aSrobert temp = expand_binop (imode, and_optab, from, const1_rtx,
4602*404b540aSrobert NULL_RTX, 1, OPTAB_LIB_WIDEN);
4603*404b540aSrobert temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4604*404b540aSrobert NULL_RTX, 1);
4605*404b540aSrobert temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4606*404b540aSrobert OPTAB_LIB_WIDEN);
4607*404b540aSrobert expand_float (target, temp, 0);
4608*404b540aSrobert
4609*404b540aSrobert /* Multiply by 2 to undo the shift above. */
4610*404b540aSrobert temp = expand_binop (fmode, add_optab, target, target,
4611*404b540aSrobert target, 0, OPTAB_LIB_WIDEN);
4612*404b540aSrobert if (temp != target)
4613*404b540aSrobert emit_move_insn (target, temp);
4614*404b540aSrobert
4615*404b540aSrobert do_pending_stack_adjust ();
4616*404b540aSrobert emit_label (label);
4617*404b540aSrobert goto done;
4618*404b540aSrobert }
4619*404b540aSrobert }
4620*404b540aSrobert
4621*404b540aSrobert /* If we are about to do some arithmetic to correct for an
4622*404b540aSrobert unsigned operand, do it in a pseudo-register. */
4623*404b540aSrobert
4624*404b540aSrobert if (GET_MODE (to) != fmode
4625*404b540aSrobert || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4626*404b540aSrobert target = gen_reg_rtx (fmode);
4627*404b540aSrobert
4628*404b540aSrobert /* Convert as signed integer to floating. */
4629*404b540aSrobert expand_float (target, from, 0);
4630*404b540aSrobert
4631*404b540aSrobert /* If FROM is negative (and therefore TO is negative),
4632*404b540aSrobert correct its value by 2**bitwidth. */
4633*404b540aSrobert
4634*404b540aSrobert do_pending_stack_adjust ();
4635*404b540aSrobert emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4636*404b540aSrobert 0, label);
4637*404b540aSrobert
4638*404b540aSrobert
4639*404b540aSrobert real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4640*404b540aSrobert temp = expand_binop (fmode, add_optab, target,
4641*404b540aSrobert CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4642*404b540aSrobert target, 0, OPTAB_LIB_WIDEN);
4643*404b540aSrobert if (temp != target)
4644*404b540aSrobert emit_move_insn (target, temp);
4645*404b540aSrobert
4646*404b540aSrobert do_pending_stack_adjust ();
4647*404b540aSrobert emit_label (label);
4648*404b540aSrobert goto done;
4649*404b540aSrobert }
4650*404b540aSrobert
4651*404b540aSrobert /* No hardware instruction available; call a library routine. */
4652*404b540aSrobert {
4653*404b540aSrobert rtx libfunc;
4654*404b540aSrobert rtx insns;
4655*404b540aSrobert rtx value;
4656*404b540aSrobert convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4657*404b540aSrobert
4658*404b540aSrobert if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4659*404b540aSrobert from = convert_to_mode (SImode, from, unsignedp);
4660*404b540aSrobert
4661*404b540aSrobert libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4662*404b540aSrobert gcc_assert (libfunc);
4663*404b540aSrobert
4664*404b540aSrobert start_sequence ();
4665*404b540aSrobert
4666*404b540aSrobert value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4667*404b540aSrobert GET_MODE (to), 1, from,
4668*404b540aSrobert GET_MODE (from));
4669*404b540aSrobert insns = get_insns ();
4670*404b540aSrobert end_sequence ();
4671*404b540aSrobert
4672*404b540aSrobert emit_libcall_block (insns, target, value,
4673*404b540aSrobert gen_rtx_FLOAT (GET_MODE (to), from));
4674*404b540aSrobert }
4675*404b540aSrobert
4676*404b540aSrobert done:
4677*404b540aSrobert
4678*404b540aSrobert /* Copy result to requested destination
4679*404b540aSrobert if we have been computing in a temp location. */
4680*404b540aSrobert
4681*404b540aSrobert if (target != to)
4682*404b540aSrobert {
4683*404b540aSrobert if (GET_MODE (target) == GET_MODE (to))
4684*404b540aSrobert emit_move_insn (to, target);
4685*404b540aSrobert else
4686*404b540aSrobert convert_move (to, target, 0);
4687*404b540aSrobert }
4688*404b540aSrobert }
4689*404b540aSrobert
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  UNSIGNEDP nonzero means the result is to be
   treated as unsigned.  May emit a compare-and-branch sequence or fall
   back to a library call.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;	/* Set by can_fix_p if an explicit ftrunc is needed.  */

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	/* A strictly wider integer mode has headroom for the sign bit, so a
	   signed fix is acceptable even for an unsigned conversion.  */
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    /* Widen the FP source if we matched a wider FP mode.  */
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    /* Truncate toward zero first when the insn alone won't.  */
	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    /* Fix into a temp when the matched integer mode is wider
	       than TO; narrowed to TO by convert_move below.  */
	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FIX : FIX);
	    if (target != to)
	      convert_move (to, target, unsignedp);
	    return;
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is not needed.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
					 &must_trunc))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit, lab1, lab2, insn;

	  /* LIMIT is 2^(N-1): the smallest input that needs the fixup.  */
	  bitsize = GET_MODE_BITSIZE (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  /* XOR with 2^(N-1) sets the (known clear) top bit, which is
	     equivalent to adding 2^(N-1) back.  */
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
	      != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_unique_reg_note (insn,
				   REG_EQUAL,
				   gen_rtx_fmt_e (UNSIGNED_FIX,
						  GET_MODE (to),
						  copy_rtx (from)));
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      /* Fix into an SImode temp recursively; narrowed to TO at the end.  */
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      /* Wrap the call so the optimizers see a (UNSIGNED_)FIX equivalent.  */
      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  /* Copy result to requested destination if we computed in a temp.  */
  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
4865*404b540aSrobert
4866*404b540aSrobert /* Report whether we have an instruction to perform the operation
4867*404b540aSrobert specified by CODE on operands of mode MODE. */
4868*404b540aSrobert int
have_insn_for(enum rtx_code code,enum machine_mode mode)4869*404b540aSrobert have_insn_for (enum rtx_code code, enum machine_mode mode)
4870*404b540aSrobert {
4871*404b540aSrobert return (code_to_optab[(int) code] != 0
4872*404b540aSrobert && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4873*404b540aSrobert != CODE_FOR_nothing));
4874*404b540aSrobert }
4875*404b540aSrobert
4876*404b540aSrobert /* Create a blank optab. */
4877*404b540aSrobert static optab
new_optab(void)4878*404b540aSrobert new_optab (void)
4879*404b540aSrobert {
4880*404b540aSrobert int i;
4881*404b540aSrobert optab op = ggc_alloc (sizeof (struct optab));
4882*404b540aSrobert for (i = 0; i < NUM_MACHINE_MODES; i++)
4883*404b540aSrobert {
4884*404b540aSrobert op->handlers[i].insn_code = CODE_FOR_nothing;
4885*404b540aSrobert op->handlers[i].libfunc = 0;
4886*404b540aSrobert }
4887*404b540aSrobert
4888*404b540aSrobert return op;
4889*404b540aSrobert }
4890*404b540aSrobert
4891*404b540aSrobert static convert_optab
new_convert_optab(void)4892*404b540aSrobert new_convert_optab (void)
4893*404b540aSrobert {
4894*404b540aSrobert int i, j;
4895*404b540aSrobert convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4896*404b540aSrobert for (i = 0; i < NUM_MACHINE_MODES; i++)
4897*404b540aSrobert for (j = 0; j < NUM_MACHINE_MODES; j++)
4898*404b540aSrobert {
4899*404b540aSrobert op->handlers[i][j].insn_code = CODE_FOR_nothing;
4900*404b540aSrobert op->handlers[i][j].libfunc = 0;
4901*404b540aSrobert }
4902*404b540aSrobert return op;
4903*404b540aSrobert }
4904*404b540aSrobert
4905*404b540aSrobert /* Same, but fill in its code as CODE, and write it into the
4906*404b540aSrobert code_to_optab table. */
4907*404b540aSrobert static inline optab
init_optab(enum rtx_code code)4908*404b540aSrobert init_optab (enum rtx_code code)
4909*404b540aSrobert {
4910*404b540aSrobert optab op = new_optab ();
4911*404b540aSrobert op->code = code;
4912*404b540aSrobert code_to_optab[(int) code] = op;
4913*404b540aSrobert return op;
4914*404b540aSrobert }
4915*404b540aSrobert
4916*404b540aSrobert /* Same, but fill in its code as CODE, and do _not_ write it into
4917*404b540aSrobert the code_to_optab table. */
4918*404b540aSrobert static inline optab
init_optabv(enum rtx_code code)4919*404b540aSrobert init_optabv (enum rtx_code code)
4920*404b540aSrobert {
4921*404b540aSrobert optab op = new_optab ();
4922*404b540aSrobert op->code = code;
4923*404b540aSrobert return op;
4924*404b540aSrobert }
4925*404b540aSrobert
4926*404b540aSrobert /* Conversion optabs never go in the code_to_optab table. */
4927*404b540aSrobert static inline convert_optab
init_convert_optab(enum rtx_code code)4928*404b540aSrobert init_convert_optab (enum rtx_code code)
4929*404b540aSrobert {
4930*404b540aSrobert convert_optab op = new_convert_optab ();
4931*404b540aSrobert op->code = code;
4932*404b540aSrobert return op;
4933*404b540aSrobert }
4934*404b540aSrobert
4935*404b540aSrobert /* Initialize the libfunc fields of an entire group of entries in some
4936*404b540aSrobert optab. Each entry is set equal to a string consisting of a leading
4937*404b540aSrobert pair of underscores followed by a generic operation name followed by
4938*404b540aSrobert a mode name (downshifted to lowercase) followed by a single character
4939*404b540aSrobert representing the number of operands for the given operation (which is
4940*404b540aSrobert usually one of the characters '2', '3', or '4').
4941*404b540aSrobert
4942*404b540aSrobert OPTABLE is the table in which libfunc fields are to be initialized.
4943*404b540aSrobert FIRST_MODE is the first machine mode index in the given optab to
4944*404b540aSrobert initialize.
4945*404b540aSrobert LAST_MODE is the last machine mode index in the given optab to
4946*404b540aSrobert initialize.
4947*404b540aSrobert OPNAME is the generic (string) name of the operation.
4948*404b540aSrobert SUFFIX is the character which specifies the number of operands for
4949*404b540aSrobert the given generic operation.
4950*404b540aSrobert */
4951*404b540aSrobert
4952*404b540aSrobert static void
init_libfuncs(optab optable,int first_mode,int last_mode,const char * opname,int suffix)4953*404b540aSrobert init_libfuncs (optab optable, int first_mode, int last_mode,
4954*404b540aSrobert const char *opname, int suffix)
4955*404b540aSrobert {
4956*404b540aSrobert int mode;
4957*404b540aSrobert unsigned opname_len = strlen (opname);
4958*404b540aSrobert
4959*404b540aSrobert for (mode = first_mode; (int) mode <= (int) last_mode;
4960*404b540aSrobert mode = (enum machine_mode) ((int) mode + 1))
4961*404b540aSrobert {
4962*404b540aSrobert const char *mname = GET_MODE_NAME (mode);
4963*404b540aSrobert unsigned mname_len = strlen (mname);
4964*404b540aSrobert char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4965*404b540aSrobert char *p;
4966*404b540aSrobert const char *q;
4967*404b540aSrobert
4968*404b540aSrobert p = libfunc_name;
4969*404b540aSrobert *p++ = '_';
4970*404b540aSrobert *p++ = '_';
4971*404b540aSrobert for (q = opname; *q; )
4972*404b540aSrobert *p++ = *q++;
4973*404b540aSrobert for (q = mname; *q; q++)
4974*404b540aSrobert *p++ = TOLOWER (*q);
4975*404b540aSrobert *p++ = suffix;
4976*404b540aSrobert *p = '\0';
4977*404b540aSrobert
4978*404b540aSrobert optable->handlers[(int) mode].libfunc
4979*404b540aSrobert = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4980*404b540aSrobert }
4981*404b540aSrobert }
4982*404b540aSrobert
4983*404b540aSrobert /* Initialize the libfunc fields of an entire group of entries in some
4984*404b540aSrobert optab which correspond to all integer mode operations. The parameters
4985*404b540aSrobert have the same meaning as similarly named ones for the `init_libfuncs'
4986*404b540aSrobert routine. (See above). */
4987*404b540aSrobert
4988*404b540aSrobert static void
init_integral_libfuncs(optab optable,const char * opname,int suffix)4989*404b540aSrobert init_integral_libfuncs (optab optable, const char *opname, int suffix)
4990*404b540aSrobert {
4991*404b540aSrobert int maxsize = 2*BITS_PER_WORD;
4992*404b540aSrobert if (maxsize < LONG_LONG_TYPE_SIZE)
4993*404b540aSrobert maxsize = LONG_LONG_TYPE_SIZE;
4994*404b540aSrobert init_libfuncs (optable, word_mode,
4995*404b540aSrobert mode_for_size (maxsize, MODE_INT, 0),
4996*404b540aSrobert opname, suffix);
4997*404b540aSrobert }
4998*404b540aSrobert
4999*404b540aSrobert /* Initialize the libfunc fields of an entire group of entries in some
5000*404b540aSrobert optab which correspond to all real mode operations. The parameters
5001*404b540aSrobert have the same meaning as similarly named ones for the `init_libfuncs'
5002*404b540aSrobert routine. (See above). */
5003*404b540aSrobert
5004*404b540aSrobert static void
init_floating_libfuncs(optab optable,const char * opname,int suffix)5005*404b540aSrobert init_floating_libfuncs (optab optable, const char *opname, int suffix)
5006*404b540aSrobert {
5007*404b540aSrobert init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
5008*404b540aSrobert init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
5009*404b540aSrobert opname, suffix);
5010*404b540aSrobert }
5011*404b540aSrobert
/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */
static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class from_class,
			       enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  /* Find the longest mode name in either class so that a single stack
     buffer can hold any "__<opname><from><to>" combination below.  */
  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  /* The "__" and OPNAME prefix is written once; SUFFIX marks the spot
     where the per-pair mode names are rewritten on each iteration.  */
  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
	 tmode = GET_MODE_WIDER_MODE (tmode))
      {
	fname = GET_MODE_NAME (fmode);
	tname = GET_MODE_NAME (tmode);

	/* Append the lowercased source mode name, then the destination's.  */
	p = suffix;
	for (q = fname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = tname; *q; p++, q++)
	  *p = TOLOWER (*q);

	*p = '\0';

	/* Note the table is indexed [to][from].  */
	tab->handlers[tmode][fmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						p - libfunc_name));
      }
}
5070*404b540aSrobert
/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfunc, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */
static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
			       enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  /* Find the longest mode name in the class so one stack buffer can
     hold any "__<opname><m1><m2>2" combination below.  */
  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  /* The "__" and OPNAME prefix is written once; SUFFIX marks the spot
     where the per-pair mode names are rewritten on each iteration.  */
  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  /* Iterate over every (narrow, strictly-wider) pair in the class.  */
  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
	 wmode = GET_MODE_WIDER_MODE (wmode))
      {
	nname = GET_MODE_NAME (nmode);
	wname = GET_MODE_NAME (wmode);

	/* Source mode name first, destination second: for a widening
	   optab that is narrow-then-wide, otherwise the reverse.  */
	p = suffix;
	for (q = widening ? nname : wname; *q; p++, q++)
	  *p = TOLOWER (*q);
	for (q = widening ? wname : nname; *q; p++, q++)
	  *p = TOLOWER (*q);

	*p++ = '2';
	*p = '\0';

	/* The table is indexed [to][from], so swap the indices to
	   match the direction chosen above.  */
	tab->handlers[widening ? wmode : nmode]
		     [widening ? nmode : wmode].libfunc
	  = init_one_libfunc (ggc_alloc_string (libfunc_name,
						p - libfunc_name));
      }
}
5123*404b540aSrobert
5124*404b540aSrobert
5125*404b540aSrobert rtx
init_one_libfunc(const char * name)5126*404b540aSrobert init_one_libfunc (const char *name)
5127*404b540aSrobert {
5128*404b540aSrobert rtx symbol;
5129*404b540aSrobert
5130*404b540aSrobert /* Create a FUNCTION_DECL that can be passed to
5131*404b540aSrobert targetm.encode_section_info. */
5132*404b540aSrobert /* ??? We don't have any type information except for this is
5133*404b540aSrobert a function. Pretend this is "int foo()". */
5134*404b540aSrobert tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5135*404b540aSrobert build_function_type (integer_type_node, NULL_TREE));
5136*404b540aSrobert DECL_ARTIFICIAL (decl) = 1;
5137*404b540aSrobert DECL_EXTERNAL (decl) = 1;
5138*404b540aSrobert TREE_PUBLIC (decl) = 1;
5139*404b540aSrobert
5140*404b540aSrobert symbol = XEXP (DECL_RTL (decl), 0);
5141*404b540aSrobert
5142*404b540aSrobert /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5143*404b540aSrobert are the flags assigned by targetm.encode_section_info. */
5144*404b540aSrobert SET_SYMBOL_REF_DECL (symbol, 0);
5145*404b540aSrobert
5146*404b540aSrobert return symbol;
5147*404b540aSrobert }
5148*404b540aSrobert
5149*404b540aSrobert /* Call this to reset the function entry for one optab (OPTABLE) in mode
5150*404b540aSrobert MODE to NAME, which should be either 0 or a string constant. */
5151*404b540aSrobert void
set_optab_libfunc(optab optable,enum machine_mode mode,const char * name)5152*404b540aSrobert set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
5153*404b540aSrobert {
5154*404b540aSrobert if (name)
5155*404b540aSrobert optable->handlers[mode].libfunc = init_one_libfunc (name);
5156*404b540aSrobert else
5157*404b540aSrobert optable->handlers[mode].libfunc = 0;
5158*404b540aSrobert }
5159*404b540aSrobert
5160*404b540aSrobert /* Call this to reset the function entry for one conversion optab
5161*404b540aSrobert (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
5162*404b540aSrobert either 0 or a string constant. */
5163*404b540aSrobert void
set_conv_libfunc(convert_optab optable,enum machine_mode tmode,enum machine_mode fmode,const char * name)5164*404b540aSrobert set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
5165*404b540aSrobert enum machine_mode fmode, const char *name)
5166*404b540aSrobert {
5167*404b540aSrobert if (name)
5168*404b540aSrobert optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
5169*404b540aSrobert else
5170*404b540aSrobert optable->handlers[tmode][fmode].libfunc = 0;
5171*404b540aSrobert }
5172*404b540aSrobert
/* Call this once to initialize the contents of the optabs
   appropriately for the current target machine.

   This (1) clears every code table to CODE_FOR_nothing, (2) records
   the RTL code associated with each optab (UNKNOWN when there is no
   single corresponding RTL code), (3) lets the generated
   init_all_optabs fill in the insns the backend actually provides,
   and (4) installs the default libgcc libcall names, which the
   target hook may then override.  */

void
init_optabs (void)
{
  unsigned int i;

  /* Start by initializing all tables to contain CODE_FOR_nothing.  */

  for (i = 0; i < NUM_RTX_CODE; i++)
    setcc_gen_code[i] = CODE_FOR_nothing;

#ifdef HAVE_conditional_move
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    movcc_gen_code[i] = CODE_FOR_nothing;
#endif

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      vcond_gen_code[i] = CODE_FOR_nothing;
      vcondu_gen_code[i] = CODE_FOR_nothing;
    }

  /* Arithmetic and logical optabs.  The argument to init_optab is the
     RTL code the optab implements, or UNKNOWN when none applies.  */
  add_optab = init_optab (PLUS);
  addv_optab = init_optabv (PLUS);
  sub_optab = init_optab (MINUS);
  subv_optab = init_optabv (MINUS);
  smul_optab = init_optab (MULT);
  smulv_optab = init_optabv (MULT);
  smul_highpart_optab = init_optab (UNKNOWN);
  umul_highpart_optab = init_optab (UNKNOWN);
  smul_widen_optab = init_optab (UNKNOWN);
  umul_widen_optab = init_optab (UNKNOWN);
  usmul_widen_optab = init_optab (UNKNOWN);
  sdiv_optab = init_optab (DIV);
  sdivv_optab = init_optabv (DIV);
  sdivmod_optab = init_optab (UNKNOWN);
  udiv_optab = init_optab (UDIV);
  udivmod_optab = init_optab (UNKNOWN);
  smod_optab = init_optab (MOD);
  umod_optab = init_optab (UMOD);
  fmod_optab = init_optab (UNKNOWN);
  drem_optab = init_optab (UNKNOWN);
  ftrunc_optab = init_optab (UNKNOWN);
  and_optab = init_optab (AND);
  ior_optab = init_optab (IOR);
  xor_optab = init_optab (XOR);
  ashl_optab = init_optab (ASHIFT);
  ashr_optab = init_optab (ASHIFTRT);
  lshr_optab = init_optab (LSHIFTRT);
  rotl_optab = init_optab (ROTATE);
  rotr_optab = init_optab (ROTATERT);
  smin_optab = init_optab (SMIN);
  smax_optab = init_optab (SMAX);
  umin_optab = init_optab (UMIN);
  umax_optab = init_optab (UMAX);
  pow_optab = init_optab (UNKNOWN);
  atan2_optab = init_optab (UNKNOWN);

  /* These three have codes assigned exclusively for the sake of
     have_insn_for.  */
  mov_optab = init_optab (SET);
  movstrict_optab = init_optab (STRICT_LOW_PART);
  cmp_optab = init_optab (COMPARE);

  ucmp_optab = init_optab (UNKNOWN);
  tst_optab = init_optab (UNKNOWN);

  /* Comparison optabs (used mainly for floating-point libcalls).  */
  eq_optab = init_optab (EQ);
  ne_optab = init_optab (NE);
  gt_optab = init_optab (GT);
  ge_optab = init_optab (GE);
  lt_optab = init_optab (LT);
  le_optab = init_optab (LE);
  unord_optab = init_optab (UNORDERED);

  /* Unary operations and math builtins.  */
  neg_optab = init_optab (NEG);
  negv_optab = init_optabv (NEG);
  abs_optab = init_optab (ABS);
  absv_optab = init_optabv (ABS);
  addcc_optab = init_optab (UNKNOWN);
  one_cmpl_optab = init_optab (NOT);
  ffs_optab = init_optab (FFS);
  clz_optab = init_optab (CLZ);
  ctz_optab = init_optab (CTZ);
  popcount_optab = init_optab (POPCOUNT);
  parity_optab = init_optab (PARITY);
  sqrt_optab = init_optab (SQRT);
  floor_optab = init_optab (UNKNOWN);
  lfloor_optab = init_optab (UNKNOWN);
  ceil_optab = init_optab (UNKNOWN);
  lceil_optab = init_optab (UNKNOWN);
  round_optab = init_optab (UNKNOWN);
  btrunc_optab = init_optab (UNKNOWN);
  nearbyint_optab = init_optab (UNKNOWN);
  rint_optab = init_optab (UNKNOWN);
  lrint_optab = init_optab (UNKNOWN);
  sincos_optab = init_optab (UNKNOWN);
  sin_optab = init_optab (UNKNOWN);
  asin_optab = init_optab (UNKNOWN);
  cos_optab = init_optab (UNKNOWN);
  acos_optab = init_optab (UNKNOWN);
  exp_optab = init_optab (UNKNOWN);
  exp10_optab = init_optab (UNKNOWN);
  exp2_optab = init_optab (UNKNOWN);
  expm1_optab = init_optab (UNKNOWN);
  ldexp_optab = init_optab (UNKNOWN);
  logb_optab = init_optab (UNKNOWN);
  ilogb_optab = init_optab (UNKNOWN);
  log_optab = init_optab (UNKNOWN);
  log10_optab = init_optab (UNKNOWN);
  log2_optab = init_optab (UNKNOWN);
  log1p_optab = init_optab (UNKNOWN);
  tan_optab = init_optab (UNKNOWN);
  atan_optab = init_optab (UNKNOWN);
  copysign_optab = init_optab (UNKNOWN);

  strlen_optab = init_optab (UNKNOWN);
  cbranch_optab = init_optab (UNKNOWN);
  cmov_optab = init_optab (UNKNOWN);
  cstore_optab = init_optab (UNKNOWN);
  push_optab = init_optab (UNKNOWN);

  /* Vector reduction optabs.  */
  reduc_smax_optab = init_optab (UNKNOWN);
  reduc_umax_optab = init_optab (UNKNOWN);
  reduc_smin_optab = init_optab (UNKNOWN);
  reduc_umin_optab = init_optab (UNKNOWN);
  reduc_splus_optab = init_optab (UNKNOWN);
  reduc_uplus_optab = init_optab (UNKNOWN);

  ssum_widen_optab = init_optab (UNKNOWN);
  usum_widen_optab = init_optab (UNKNOWN);
  sdot_prod_optab = init_optab (UNKNOWN);
  udot_prod_optab = init_optab (UNKNOWN);

  /* Vector element and shift optabs.  */
  vec_extract_optab = init_optab (UNKNOWN);
  vec_set_optab = init_optab (UNKNOWN);
  vec_init_optab = init_optab (UNKNOWN);
  vec_shl_optab = init_optab (UNKNOWN);
  vec_shr_optab = init_optab (UNKNOWN);
  vec_realign_load_optab = init_optab (UNKNOWN);
  movmisalign_optab = init_optab (UNKNOWN);

  powi_optab = init_optab (UNKNOWN);

  /* Conversions.  */
  sext_optab = init_convert_optab (SIGN_EXTEND);
  zext_optab = init_convert_optab (ZERO_EXTEND);
  trunc_optab = init_convert_optab (TRUNCATE);
  sfix_optab = init_convert_optab (FIX);
  ufix_optab = init_convert_optab (UNSIGNED_FIX);
  sfixtrunc_optab = init_convert_optab (UNKNOWN);
  ufixtrunc_optab = init_convert_optab (UNKNOWN);
  sfloat_optab = init_convert_optab (FLOAT);
  ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);

  /* Clear the per-mode insn tables (block move, string compare,
     synchronization primitives, reloads).  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      movmem_optab[i] = CODE_FOR_nothing;
      cmpstr_optab[i] = CODE_FOR_nothing;
      cmpstrn_optab[i] = CODE_FOR_nothing;
      cmpmem_optab[i] = CODE_FOR_nothing;
      setmem_optab[i] = CODE_FOR_nothing;

      sync_add_optab[i] = CODE_FOR_nothing;
      sync_sub_optab[i] = CODE_FOR_nothing;
      sync_ior_optab[i] = CODE_FOR_nothing;
      sync_and_optab[i] = CODE_FOR_nothing;
      sync_xor_optab[i] = CODE_FOR_nothing;
      sync_nand_optab[i] = CODE_FOR_nothing;
      sync_old_add_optab[i] = CODE_FOR_nothing;
      sync_old_sub_optab[i] = CODE_FOR_nothing;
      sync_old_ior_optab[i] = CODE_FOR_nothing;
      sync_old_and_optab[i] = CODE_FOR_nothing;
      sync_old_xor_optab[i] = CODE_FOR_nothing;
      sync_old_nand_optab[i] = CODE_FOR_nothing;
      sync_new_add_optab[i] = CODE_FOR_nothing;
      sync_new_sub_optab[i] = CODE_FOR_nothing;
      sync_new_ior_optab[i] = CODE_FOR_nothing;
      sync_new_and_optab[i] = CODE_FOR_nothing;
      sync_new_xor_optab[i] = CODE_FOR_nothing;
      sync_new_nand_optab[i] = CODE_FOR_nothing;
      sync_compare_and_swap[i] = CODE_FOR_nothing;
      sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
      sync_lock_test_and_set[i] = CODE_FOR_nothing;
      sync_lock_release[i] = CODE_FOR_nothing;

      reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
    }

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs ();

  /* Initialize the optabs with the names of the library functions.
     The trailing character is the libgcc suffix convention: '2' for
     one-operand, '3' for two-operand, '4' for divmod-style calls.  */
  init_integral_libfuncs (add_optab, "add", '3');
  init_floating_libfuncs (add_optab, "add", '3');
  init_integral_libfuncs (addv_optab, "addv", '3');
  init_floating_libfuncs (addv_optab, "add", '3');
  init_integral_libfuncs (sub_optab, "sub", '3');
  init_floating_libfuncs (sub_optab, "sub", '3');
  init_integral_libfuncs (subv_optab, "subv", '3');
  init_floating_libfuncs (subv_optab, "sub", '3');
  init_integral_libfuncs (smul_optab, "mul", '3');
  init_floating_libfuncs (smul_optab, "mul", '3');
  init_integral_libfuncs (smulv_optab, "mulv", '3');
  init_floating_libfuncs (smulv_optab, "mul", '3');
  init_integral_libfuncs (sdiv_optab, "div", '3');
  init_floating_libfuncs (sdiv_optab, "div", '3');
  init_integral_libfuncs (sdivv_optab, "divv", '3');
  init_integral_libfuncs (udiv_optab, "udiv", '3');
  init_integral_libfuncs (sdivmod_optab, "divmod", '4');
  init_integral_libfuncs (udivmod_optab, "udivmod", '4');
  init_integral_libfuncs (smod_optab, "mod", '3');
  init_integral_libfuncs (umod_optab, "umod", '3');
  init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
  init_integral_libfuncs (and_optab, "and", '3');
  init_integral_libfuncs (ior_optab, "ior", '3');
  init_integral_libfuncs (xor_optab, "xor", '3');
  init_integral_libfuncs (ashl_optab, "ashl", '3');
  init_integral_libfuncs (ashr_optab, "ashr", '3');
  init_integral_libfuncs (lshr_optab, "lshr", '3');
  init_integral_libfuncs (smin_optab, "min", '3');
  init_floating_libfuncs (smin_optab, "min", '3');
  init_integral_libfuncs (smax_optab, "max", '3');
  init_floating_libfuncs (smax_optab, "max", '3');
  init_integral_libfuncs (umin_optab, "umin", '3');
  init_integral_libfuncs (umax_optab, "umax", '3');
  init_integral_libfuncs (neg_optab, "neg", '2');
  init_floating_libfuncs (neg_optab, "neg", '2');
  init_integral_libfuncs (negv_optab, "negv", '2');
  init_floating_libfuncs (negv_optab, "neg", '2');
  init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
  init_integral_libfuncs (ffs_optab, "ffs", '2');
  init_integral_libfuncs (clz_optab, "clz", '2');
  init_integral_libfuncs (ctz_optab, "ctz", '2');
  init_integral_libfuncs (popcount_optab, "popcount", '2');
  init_integral_libfuncs (parity_optab, "parity", '2');

  /* Comparison libcalls for integers MUST come in pairs,
     signed/unsigned.  */
  init_integral_libfuncs (cmp_optab, "cmp", '2');
  init_integral_libfuncs (ucmp_optab, "ucmp", '2');
  init_floating_libfuncs (cmp_optab, "cmp", '2');

  /* EQ etc are floating point only.  */
  init_floating_libfuncs (eq_optab, "eq", '2');
  init_floating_libfuncs (ne_optab, "ne", '2');
  init_floating_libfuncs (gt_optab, "gt", '2');
  init_floating_libfuncs (ge_optab, "ge", '2');
  init_floating_libfuncs (lt_optab, "lt", '2');
  init_floating_libfuncs (le_optab, "le", '2');
  init_floating_libfuncs (unord_optab, "unord", '2');

  init_floating_libfuncs (powi_optab, "powi", '2');

  /* Conversions.  */
  init_interclass_conv_libfuncs (sfloat_optab, "float",
				 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (sfloat_optab, "float",
				 MODE_INT, MODE_DECIMAL_FLOAT);
  init_interclass_conv_libfuncs (ufloat_optab, "floatun",
				 MODE_INT, MODE_FLOAT);
  init_interclass_conv_libfuncs (ufloat_optab, "floatun",
				 MODE_INT, MODE_DECIMAL_FLOAT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
				 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (sfix_optab, "fix",
				 MODE_DECIMAL_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
				 MODE_FLOAT, MODE_INT);
  init_interclass_conv_libfuncs (ufix_optab, "fixuns",
				 MODE_DECIMAL_FLOAT, MODE_INT);
  /* NOTE(review): this overrides the "floatun" names installed above
     with "floatuns" for the int -> decimal-float direction only;
     presumably matching the libbid/libdecnumber naming -- confirm
     against libgcc before changing.  */
  init_interclass_conv_libfuncs (ufloat_optab, "floatuns",
				 MODE_INT, MODE_DECIMAL_FLOAT);

  /* sext_optab is also used for FLOAT_EXTEND.  */
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
  init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true);
  init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT);
  init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
  init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false);
  init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT);
  init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT);

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
      = init_one_libfunc ("cabs");

  /* The ffs function operates on `int'.  */
  ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
    = init_one_libfunc ("ffs");

  /* Misc library routines referenced directly by the expander.  */
  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  /* Shared EQ rtx reused by gen_cond_trap; its code is rewritten per
     call with PUT_CODE.  */
  if (HAVE_conditional_trap)
    trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
5501*404b540aSrobert
5502*404b540aSrobert #ifdef DEBUG
5503*404b540aSrobert
5504*404b540aSrobert /* Print information about the current contents of the optabs on
5505*404b540aSrobert STDERR. */
5506*404b540aSrobert
5507*404b540aSrobert static void
debug_optab_libfuncs(void)5508*404b540aSrobert debug_optab_libfuncs (void)
5509*404b540aSrobert {
5510*404b540aSrobert int i;
5511*404b540aSrobert int j;
5512*404b540aSrobert int k;
5513*404b540aSrobert
5514*404b540aSrobert /* Dump the arithmetic optabs. */
5515*404b540aSrobert for (i = 0; i != (int) OTI_MAX; i++)
5516*404b540aSrobert for (j = 0; j < NUM_MACHINE_MODES; ++j)
5517*404b540aSrobert {
5518*404b540aSrobert optab o;
5519*404b540aSrobert struct optab_handlers *h;
5520*404b540aSrobert
5521*404b540aSrobert o = optab_table[i];
5522*404b540aSrobert h = &o->handlers[j];
5523*404b540aSrobert if (h->libfunc)
5524*404b540aSrobert {
5525*404b540aSrobert gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5526*404b540aSrobert fprintf (stderr, "%s\t%s:\t%s\n",
5527*404b540aSrobert GET_RTX_NAME (o->code),
5528*404b540aSrobert GET_MODE_NAME (j),
5529*404b540aSrobert XSTR (h->libfunc, 0));
5530*404b540aSrobert }
5531*404b540aSrobert }
5532*404b540aSrobert
5533*404b540aSrobert /* Dump the conversion optabs. */
5534*404b540aSrobert for (i = 0; i < (int) COI_MAX; ++i)
5535*404b540aSrobert for (j = 0; j < NUM_MACHINE_MODES; ++j)
5536*404b540aSrobert for (k = 0; k < NUM_MACHINE_MODES; ++k)
5537*404b540aSrobert {
5538*404b540aSrobert convert_optab o;
5539*404b540aSrobert struct optab_handlers *h;
5540*404b540aSrobert
5541*404b540aSrobert o = &convert_optab_table[i];
5542*404b540aSrobert h = &o->handlers[j][k];
5543*404b540aSrobert if (h->libfunc)
5544*404b540aSrobert {
5545*404b540aSrobert gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5546*404b540aSrobert fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5547*404b540aSrobert GET_RTX_NAME (o->code),
5548*404b540aSrobert GET_MODE_NAME (j),
5549*404b540aSrobert GET_MODE_NAME (k),
5550*404b540aSrobert XSTR (h->libfunc, 0));
5551*404b540aSrobert }
5552*404b540aSrobert }
5553*404b540aSrobert }
5554*404b540aSrobert
5555*404b540aSrobert #endif /* DEBUG */
5556*404b540aSrobert
5557*404b540aSrobert
5558*404b540aSrobert /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5559*404b540aSrobert CODE. Return 0 on failure. */
5560*404b540aSrobert
5561*404b540aSrobert rtx
gen_cond_trap(enum rtx_code code ATTRIBUTE_UNUSED,rtx op1,rtx op2 ATTRIBUTE_UNUSED,rtx tcode ATTRIBUTE_UNUSED)5562*404b540aSrobert gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5563*404b540aSrobert rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5564*404b540aSrobert {
5565*404b540aSrobert enum machine_mode mode = GET_MODE (op1);
5566*404b540aSrobert enum insn_code icode;
5567*404b540aSrobert rtx insn;
5568*404b540aSrobert
5569*404b540aSrobert if (!HAVE_conditional_trap)
5570*404b540aSrobert return 0;
5571*404b540aSrobert
5572*404b540aSrobert if (mode == VOIDmode)
5573*404b540aSrobert return 0;
5574*404b540aSrobert
5575*404b540aSrobert icode = cmp_optab->handlers[(int) mode].insn_code;
5576*404b540aSrobert if (icode == CODE_FOR_nothing)
5577*404b540aSrobert return 0;
5578*404b540aSrobert
5579*404b540aSrobert start_sequence ();
5580*404b540aSrobert op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5581*404b540aSrobert op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5582*404b540aSrobert if (!op1 || !op2)
5583*404b540aSrobert {
5584*404b540aSrobert end_sequence ();
5585*404b540aSrobert return 0;
5586*404b540aSrobert }
5587*404b540aSrobert emit_insn (GEN_FCN (icode) (op1, op2));
5588*404b540aSrobert
5589*404b540aSrobert PUT_CODE (trap_rtx, code);
5590*404b540aSrobert gcc_assert (HAVE_conditional_trap);
5591*404b540aSrobert insn = gen_conditional_trap (trap_rtx, tcode);
5592*404b540aSrobert if (insn)
5593*404b540aSrobert {
5594*404b540aSrobert emit_insn (insn);
5595*404b540aSrobert insn = get_insns ();
5596*404b540aSrobert }
5597*404b540aSrobert end_sequence ();
5598*404b540aSrobert
5599*404b540aSrobert return insn;
5600*404b540aSrobert }
5601*404b540aSrobert
5602*404b540aSrobert /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5603*404b540aSrobert or unsigned operation code. */
5604*404b540aSrobert
5605*404b540aSrobert static enum rtx_code
get_rtx_code(enum tree_code tcode,bool unsignedp)5606*404b540aSrobert get_rtx_code (enum tree_code tcode, bool unsignedp)
5607*404b540aSrobert {
5608*404b540aSrobert enum rtx_code code;
5609*404b540aSrobert switch (tcode)
5610*404b540aSrobert {
5611*404b540aSrobert case EQ_EXPR:
5612*404b540aSrobert code = EQ;
5613*404b540aSrobert break;
5614*404b540aSrobert case NE_EXPR:
5615*404b540aSrobert code = NE;
5616*404b540aSrobert break;
5617*404b540aSrobert case LT_EXPR:
5618*404b540aSrobert code = unsignedp ? LTU : LT;
5619*404b540aSrobert break;
5620*404b540aSrobert case LE_EXPR:
5621*404b540aSrobert code = unsignedp ? LEU : LE;
5622*404b540aSrobert break;
5623*404b540aSrobert case GT_EXPR:
5624*404b540aSrobert code = unsignedp ? GTU : GT;
5625*404b540aSrobert break;
5626*404b540aSrobert case GE_EXPR:
5627*404b540aSrobert code = unsignedp ? GEU : GE;
5628*404b540aSrobert break;
5629*404b540aSrobert
5630*404b540aSrobert case UNORDERED_EXPR:
5631*404b540aSrobert code = UNORDERED;
5632*404b540aSrobert break;
5633*404b540aSrobert case ORDERED_EXPR:
5634*404b540aSrobert code = ORDERED;
5635*404b540aSrobert break;
5636*404b540aSrobert case UNLT_EXPR:
5637*404b540aSrobert code = UNLT;
5638*404b540aSrobert break;
5639*404b540aSrobert case UNLE_EXPR:
5640*404b540aSrobert code = UNLE;
5641*404b540aSrobert break;
5642*404b540aSrobert case UNGT_EXPR:
5643*404b540aSrobert code = UNGT;
5644*404b540aSrobert break;
5645*404b540aSrobert case UNGE_EXPR:
5646*404b540aSrobert code = UNGE;
5647*404b540aSrobert break;
5648*404b540aSrobert case UNEQ_EXPR:
5649*404b540aSrobert code = UNEQ;
5650*404b540aSrobert break;
5651*404b540aSrobert case LTGT_EXPR:
5652*404b540aSrobert code = LTGT;
5653*404b540aSrobert break;
5654*404b540aSrobert
5655*404b540aSrobert default:
5656*404b540aSrobert gcc_unreachable ();
5657*404b540aSrobert }
5658*404b540aSrobert return code;
5659*404b540aSrobert }
5660*404b540aSrobert
5661*404b540aSrobert /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5662*404b540aSrobert unsigned operators. Do not generate compare instruction. */
5663*404b540aSrobert
5664*404b540aSrobert static rtx
vector_compare_rtx(tree cond,bool unsignedp,enum insn_code icode)5665*404b540aSrobert vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5666*404b540aSrobert {
5667*404b540aSrobert enum rtx_code rcode;
5668*404b540aSrobert tree t_op0, t_op1;
5669*404b540aSrobert rtx rtx_op0, rtx_op1;
5670*404b540aSrobert
5671*404b540aSrobert /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5672*404b540aSrobert ensures that condition is a relational operation. */
5673*404b540aSrobert gcc_assert (COMPARISON_CLASS_P (cond));
5674*404b540aSrobert
5675*404b540aSrobert rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5676*404b540aSrobert t_op0 = TREE_OPERAND (cond, 0);
5677*404b540aSrobert t_op1 = TREE_OPERAND (cond, 1);
5678*404b540aSrobert
5679*404b540aSrobert /* Expand operands. */
5680*404b540aSrobert rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5681*404b540aSrobert rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5682*404b540aSrobert
5683*404b540aSrobert if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5684*404b540aSrobert && GET_MODE (rtx_op0) != VOIDmode)
5685*404b540aSrobert rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5686*404b540aSrobert
5687*404b540aSrobert if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5688*404b540aSrobert && GET_MODE (rtx_op1) != VOIDmode)
5689*404b540aSrobert rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5690*404b540aSrobert
5691*404b540aSrobert return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5692*404b540aSrobert }
5693*404b540aSrobert
5694*404b540aSrobert /* Return insn code for VEC_COND_EXPR EXPR. */
5695*404b540aSrobert
5696*404b540aSrobert static inline enum insn_code
get_vcond_icode(tree expr,enum machine_mode mode)5697*404b540aSrobert get_vcond_icode (tree expr, enum machine_mode mode)
5698*404b540aSrobert {
5699*404b540aSrobert enum insn_code icode = CODE_FOR_nothing;
5700*404b540aSrobert
5701*404b540aSrobert if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5702*404b540aSrobert icode = vcondu_gen_code[mode];
5703*404b540aSrobert else
5704*404b540aSrobert icode = vcond_gen_code[mode];
5705*404b540aSrobert return icode;
5706*404b540aSrobert }
5707*404b540aSrobert
5708*404b540aSrobert /* Return TRUE iff, appropriate vector insns are available
5709*404b540aSrobert for vector cond expr expr in VMODE mode. */
5710*404b540aSrobert
5711*404b540aSrobert bool
expand_vec_cond_expr_p(tree expr,enum machine_mode vmode)5712*404b540aSrobert expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5713*404b540aSrobert {
5714*404b540aSrobert if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5715*404b540aSrobert return false;
5716*404b540aSrobert return true;
5717*404b540aSrobert }
5718*404b540aSrobert
/* Generate insns for VEC_COND_EXPR VEC_COND_EXPR, storing the result
   in TARGET if it is suitable (otherwise a fresh register is used).
   Returns the result rtx, or 0 when the target provides no vcond
   pattern for the expression's mode.  */

rtx
expand_vec_cond_expr (tree vec_cond_expr, rtx target)
{
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
  bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));

  /* Pick vcond or vcondu for MODE; bail out if neither exists.  */
  icode = get_vcond_icode (vec_cond_expr, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Get comparison rtx.  First expand both cond expr operands.  */
  comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
				   unsignedp, icode);
  /* The pattern wants the two comparison inputs as separate operands
     as well as embedded in the comparison rtx.  */
  cc_op0 = XEXP (comparison, 0);
  cc_op1 = XEXP (comparison, 1);
  /* Expand both operands and force them in reg, if required.  */
  rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
			 NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
      && mode != VOIDmode)
    rtx_op1 = force_reg (mode, rtx_op1);

  rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
			 NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
      && mode != VOIDmode)
    rtx_op2 = force_reg (mode, rtx_op2);

  /* Emit instruction!  */
  emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
			      comparison, cc_op0, cc_op1));

  return target;
}
5760*404b540aSrobert
5761*404b540aSrobert
/* This is an internal subroutine of the other compare_and_swap expanders.
   MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
   operation.  TARGET is an optional place to store the value result of
   the operation.  ICODE is the particular instruction to expand.  Return
   the result of the operation, or NULL_RTX when the generator refuses
   the operands.  */

static rtx
expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
			       rtx target, enum insn_code icode)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx insn;

  /* Make sure the output lands somewhere the pattern accepts.  */
  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    target = gen_reg_rtx (mode);

  /* Widen/narrow OLD_VAL to the memory mode (VOIDmode means a
     constant, which convert_modes cannot inspect), then legitimize it
     for pattern operand 2.  */
  if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
    old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
  if (!insn_data[icode].operand[2].predicate (old_val, mode))
    old_val = force_reg (mode, old_val);

  /* Likewise NEW_VAL for pattern operand 3.  */
  if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
    new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
  if (!insn_data[icode].operand[3].predicate (new_val, mode))
    new_val = force_reg (mode, new_val);

  /* The generator may still return NULL if its C condition fails.  */
  insn = GEN_FCN (icode) (target, mem, old_val, new_val);
  if (insn == NULL_RTX)
    return NULL_RTX;
  emit_insn (insn);

  return target;
}
5795*404b540aSrobert
5796*404b540aSrobert /* Expand a compare-and-swap operation and return its value. */
5797*404b540aSrobert
5798*404b540aSrobert rtx
expand_val_compare_and_swap(rtx mem,rtx old_val,rtx new_val,rtx target)5799*404b540aSrobert expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5800*404b540aSrobert {
5801*404b540aSrobert enum machine_mode mode = GET_MODE (mem);
5802*404b540aSrobert enum insn_code icode = sync_compare_and_swap[mode];
5803*404b540aSrobert
5804*404b540aSrobert if (icode == CODE_FOR_nothing)
5805*404b540aSrobert return NULL_RTX;
5806*404b540aSrobert
5807*404b540aSrobert return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5808*404b540aSrobert }
5809*404b540aSrobert
/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      /* The flag-setting variant exists; if it expands successfully the
	 condition codes are already set and we are done here.  */
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
	break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
	 against an old value.  */
      if (MEM_P (old_val))
	old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
	return NULL_RTX;

      /* The swap succeeded iff the value observed in MEM equals
	 OLD_VAL; emit the comparison that sets the flags.  */
      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode cmode = insn_data[icode].operand[0].mode;
	  rtx insn;

	  /* The setcc pattern may require a different mode than TARGET;
	     in that case materialize into a temporary and convert.  */
	  subtarget = target;
	  if (!insn_data[icode].operand[0].predicate (target, cmode))
	    subtarget = gen_reg_rtx (cmode);

	  insn = GEN_FCN (icode) (subtarget);
	  if (insn)
	    {
	      emit_insn (insn);
	      if (GET_MODE (target) != GET_MODE (subtarget))
		{
		  convert_move (target, subtarget, 1);
		  subtarget = target;
		}
	      return subtarget;
	    }
	}
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}
5898*404b540aSrobert
/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.  */

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
        old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      /* Flag-setting variant: request that the observed value be placed
	 directly into CMP_REG so the next iteration reuses it without
	 reloading MEM.  */
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget != NULL_RTX)
	{
	  gcc_assert (subtarget == cmp_reg);
	  break;
	}

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget == NULL_RTX)
	return false;
      /* The expander may have chosen a different result register;
	 copy it back so the loop-carried value stays in CMP_REG.  */
      if (subtarget != cmp_reg)
	emit_move_insn (cmp_reg, subtarget);

      /* The swap succeeded iff the observed value equals OLD_REG.  */
      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}
5973*404b540aSrobert
5974*404b540aSrobert /* This function generates the atomic operation MEM CODE= VAL. In this
5975*404b540aSrobert case, we do not care about any resulting value. Returns NULL if we
5976*404b540aSrobert cannot generate the operation. */
5977*404b540aSrobert
5978*404b540aSrobert rtx
expand_sync_operation(rtx mem,rtx val,enum rtx_code code)5979*404b540aSrobert expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5980*404b540aSrobert {
5981*404b540aSrobert enum machine_mode mode = GET_MODE (mem);
5982*404b540aSrobert enum insn_code icode;
5983*404b540aSrobert rtx insn;
5984*404b540aSrobert
5985*404b540aSrobert /* Look to see if the target supports the operation directly. */
5986*404b540aSrobert switch (code)
5987*404b540aSrobert {
5988*404b540aSrobert case PLUS:
5989*404b540aSrobert icode = sync_add_optab[mode];
5990*404b540aSrobert break;
5991*404b540aSrobert case IOR:
5992*404b540aSrobert icode = sync_ior_optab[mode];
5993*404b540aSrobert break;
5994*404b540aSrobert case XOR:
5995*404b540aSrobert icode = sync_xor_optab[mode];
5996*404b540aSrobert break;
5997*404b540aSrobert case AND:
5998*404b540aSrobert icode = sync_and_optab[mode];
5999*404b540aSrobert break;
6000*404b540aSrobert case NOT:
6001*404b540aSrobert icode = sync_nand_optab[mode];
6002*404b540aSrobert break;
6003*404b540aSrobert
6004*404b540aSrobert case MINUS:
6005*404b540aSrobert icode = sync_sub_optab[mode];
6006*404b540aSrobert if (icode == CODE_FOR_nothing)
6007*404b540aSrobert {
6008*404b540aSrobert icode = sync_add_optab[mode];
6009*404b540aSrobert if (icode != CODE_FOR_nothing)
6010*404b540aSrobert {
6011*404b540aSrobert val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
6012*404b540aSrobert code = PLUS;
6013*404b540aSrobert }
6014*404b540aSrobert }
6015*404b540aSrobert break;
6016*404b540aSrobert
6017*404b540aSrobert default:
6018*404b540aSrobert gcc_unreachable ();
6019*404b540aSrobert }
6020*404b540aSrobert
6021*404b540aSrobert /* Generate the direct operation, if present. */
6022*404b540aSrobert if (icode != CODE_FOR_nothing)
6023*404b540aSrobert {
6024*404b540aSrobert if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6025*404b540aSrobert val = convert_modes (mode, GET_MODE (val), val, 1);
6026*404b540aSrobert if (!insn_data[icode].operand[1].predicate (val, mode))
6027*404b540aSrobert val = force_reg (mode, val);
6028*404b540aSrobert
6029*404b540aSrobert insn = GEN_FCN (icode) (mem, val);
6030*404b540aSrobert if (insn)
6031*404b540aSrobert {
6032*404b540aSrobert emit_insn (insn);
6033*404b540aSrobert return const0_rtx;
6034*404b540aSrobert }
6035*404b540aSrobert }
6036*404b540aSrobert
6037*404b540aSrobert /* Failing that, generate a compare-and-swap loop in which we perform the
6038*404b540aSrobert operation with normal arithmetic instructions. */
6039*404b540aSrobert if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6040*404b540aSrobert {
6041*404b540aSrobert rtx t0 = gen_reg_rtx (mode), t1;
6042*404b540aSrobert
6043*404b540aSrobert start_sequence ();
6044*404b540aSrobert
6045*404b540aSrobert t1 = t0;
6046*404b540aSrobert if (code == NOT)
6047*404b540aSrobert {
6048*404b540aSrobert t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
6049*404b540aSrobert code = AND;
6050*404b540aSrobert }
6051*404b540aSrobert t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
6052*404b540aSrobert true, OPTAB_LIB_WIDEN);
6053*404b540aSrobert
6054*404b540aSrobert insn = get_insns ();
6055*404b540aSrobert end_sequence ();
6056*404b540aSrobert
6057*404b540aSrobert if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6058*404b540aSrobert return const0_rtx;
6059*404b540aSrobert }
6060*404b540aSrobert
6061*404b540aSrobert return NULL_RTX;
6062*404b540aSrobert }
6063*404b540aSrobert
/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.  Each
     operation may exist in a "return the old value" and/or a "return
     the new value" flavor.  */
  switch (code)
    {
    case PLUS:
      old_code = sync_old_add_optab[mode];
      new_code = sync_new_add_optab[mode];
      break;
    case IOR:
      old_code = sync_old_ior_optab[mode];
      new_code = sync_new_ior_optab[mode];
      break;
    case XOR:
      old_code = sync_old_xor_optab[mode];
      new_code = sync_new_xor_optab[mode];
      break;
    case AND:
      old_code = sync_old_and_optab[mode];
      new_code = sync_new_and_optab[mode];
      break;
    case NOT:
      old_code = sync_old_nand_optab[mode];
      new_code = sync_new_nand_optab[mode];
      break;

    case MINUS:
      old_code = sync_old_sub_optab[mode];
      new_code = sync_new_sub_optab[mode];
      /* Neither subtract flavor exists: negate VAL and retry as an
	 addition if either add flavor is available.  */
      if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
	{
	  old_code = sync_old_add_optab[mode];
	  new_code = sync_new_add_optab[mode];
	  if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* If the target does support the proper new/old operation, great.  But
     if we only support the opposite old/new operation, check to see if we
     can compensate.  In the case in which the old value is supported, then
     we can always perform the operation again with normal arithmetic.  In
     the case in which the new value is supported, then we can only handle
     this in the case the operation is reversible.  */
  compensate = false;
  if (after)
    {
      icode = new_code;
      if (icode == CODE_FOR_nothing)
	{
	  /* Fall back to the old-value flavor and recompute.  */
	  icode = old_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }
  else
    {
      icode = old_code;
      /* Only reversible operations can recover the old value from the
	 new one; AND and NOT (nand) are excluded.  */
      if (icode == CODE_FOR_nothing
	  && (code == PLUS || code == MINUS || code == XOR))
	{
	  icode = new_code;
	  if (icode != CODE_FOR_nothing)
	    compensate = true;
	}
    }

  /* If we found something supported, great.  */
  if (icode != CODE_FOR_nothing)
    {
      if (!target || !insn_data[icode].operand[0].predicate (target, mode))
	target = gen_reg_rtx (mode);

      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[2].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (target, mem, val);
      if (insn)
	{
	  emit_insn (insn);

	  /* If we need to compensate for using an operation with the
	     wrong return value, do so now.  */
	  if (compensate)
	    {
	      if (!after)
		{
		  /* We have the new value but want the old one: invert
		     the operation (XOR is its own inverse).  */
		  if (code == PLUS)
		    code = MINUS;
		  else if (code == MINUS)
		    code = PLUS;
		}

	      /* Redo (or undo) the operation with ordinary arithmetic
		 on the value the pattern returned.  */
	      if (code == NOT)
		target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
	      target = expand_simple_binop (mode, code, target, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	    }

	  return target;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      if (!target || !register_operand (target, mode))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Inside the loop, T0 holds the old memory contents and T1 the
	 freshly computed new value.  */
      if (!after)
	emit_move_insn (target, t0);
      t1 = t0;
      if (code == NOT)
	{
	  /* Express the nand update as AND of the complemented old
	     value.  */
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);
      if (after)
	emit_move_insn (target, t1);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return target;
    }

  return NULL_RTX;
}
6222*404b540aSrobert
6223*404b540aSrobert /* This function expands a test-and-set operation. Ideally we atomically
6224*404b540aSrobert store VAL in MEM and return the previous value in MEM. Some targets
6225*404b540aSrobert may not support this operation and only support VAL with the constant 1;
6226*404b540aSrobert in this case while the return value will be 0/1, but the exact value
6227*404b540aSrobert stored in MEM is target defined. TARGET is an option place to stick
6228*404b540aSrobert the return value. */
6229*404b540aSrobert
6230*404b540aSrobert rtx
expand_sync_lock_test_and_set(rtx mem,rtx val,rtx target)6231*404b540aSrobert expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6232*404b540aSrobert {
6233*404b540aSrobert enum machine_mode mode = GET_MODE (mem);
6234*404b540aSrobert enum insn_code icode;
6235*404b540aSrobert rtx insn;
6236*404b540aSrobert
6237*404b540aSrobert /* If the target supports the test-and-set directly, great. */
6238*404b540aSrobert icode = sync_lock_test_and_set[mode];
6239*404b540aSrobert if (icode != CODE_FOR_nothing)
6240*404b540aSrobert {
6241*404b540aSrobert if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6242*404b540aSrobert target = gen_reg_rtx (mode);
6243*404b540aSrobert
6244*404b540aSrobert if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6245*404b540aSrobert val = convert_modes (mode, GET_MODE (val), val, 1);
6246*404b540aSrobert if (!insn_data[icode].operand[2].predicate (val, mode))
6247*404b540aSrobert val = force_reg (mode, val);
6248*404b540aSrobert
6249*404b540aSrobert insn = GEN_FCN (icode) (target, mem, val);
6250*404b540aSrobert if (insn)
6251*404b540aSrobert {
6252*404b540aSrobert emit_insn (insn);
6253*404b540aSrobert return target;
6254*404b540aSrobert }
6255*404b540aSrobert }
6256*404b540aSrobert
6257*404b540aSrobert /* Otherwise, use a compare-and-swap loop for the exchange. */
6258*404b540aSrobert if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6259*404b540aSrobert {
6260*404b540aSrobert if (!target || !register_operand (target, mode))
6261*404b540aSrobert target = gen_reg_rtx (mode);
6262*404b540aSrobert if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6263*404b540aSrobert val = convert_modes (mode, GET_MODE (val), val, 1);
6264*404b540aSrobert if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6265*404b540aSrobert return target;
6266*404b540aSrobert }
6267*404b540aSrobert
6268*404b540aSrobert return NULL_RTX;
6269*404b540aSrobert }
6270*404b540aSrobert
6271*404b540aSrobert #include "gt-optabs.h"
6272