/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "predict.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "rtx-vector-builder.h"

/* Include insn-config.h before expr.h so that HAVE_conditional_move
   is properly defined.  */
#include "stor-layout.h"
#include "except.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"

static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
				   machine_mode *);
static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);

/* Debug facility for use in GDB.  */
void debug_optab_libfuncs (void);

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If the last insn or a previous insn sets TARGET and TARGET is one of OP0
   or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
   try again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUAL note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
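
/* For example, if the sequence emitted for "r = a + b" ends with an insn
   that sets a pseudo R distinct from A and B, the caller can attach a
   (REG_EQUAL (plus:SI (reg A) (reg B))) note to that last insn, letting
   CSE treat R as equivalent to the whole expression.  A sketch of the
   calling convention (mirroring expand_binop_directly below):

       if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
	   && ! add_equal_note (pat, ops[0].value, optab_to_code (binoptab),
				ops[1].value, ops[2].value))
	 ...delete the insns and retry without a target...  */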

/* Given two input operands, OP0 and OP1, determine what the correct from_mode
   for a widening operation would be.  In most cases this would be OP0, but if
   that's a constant it'll be VOIDmode, which isn't useful.  */

static machine_mode
widened_mode (machine_mode to_mode, rtx op0, rtx op1)
{
  machine_mode m0 = GET_MODE (op0);
  machine_mode m1 = GET_MODE (op1);
  machine_mode result;

  if (m0 == VOIDmode && m1 == VOIDmode)
    return to_mode;
  else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1))
    result = m1;
  else
    result = m0;

  if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode))
    return to_mode;

  return result;
}
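
/* For example, when widening an addition in which OP0 is a SImode
   register and OP1 is a CONST_INT (and therefore VOIDmode), the SImode
   operand determines the from_mode; if both operands are VOIDmode
   constants, TO_MODE itself is returned.  */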

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;
  scalar_int_mode int_mode;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || !is_a <scalar_int_mode> (mode, &int_mode)
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
    return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (int_mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
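
/* For example, an IOR on two QImode values that is carried out in
   SImode can pass NO_EXTEND == 1: the result is a paradoxical SImode
   subreg whose upper 24 bits are undefined, which is harmless because
   only the low 8 bits of the result are used.  A right shift must pass
   NO_EXTEND == 0, since its high input bits flow into the low part of
   the result.  */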

/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
   E.g., when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  struct expand_operand ops[4];
  enum insn_code icode = optab_handler (ternary_optab, mode);

  gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);

  create_output_operand (&ops[0], target, mode);
  create_convert_operand_from (&ops[1], op0, mode, unsignedp);
  create_convert_operand_from (&ops[2], op1, mode, unsignedp);
  create_convert_operand_from (&ops[3], op2, mode, unsignedp);
  expand_insn (icode, 4, ops);
  return ops[0].value;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

rtx
simplify_expand_binop (machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (optab_to_code (binoptab),
					 mode, op0, op1);
      if (x)
	return x;
    }

  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}
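
/* Usage sketch: the doubleword shift helpers below lean on these two
   wrappers so that shift-count arithmetic folds away when the count is
   constant, e.g.

       tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode),
				   op1_mode);
       tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				    0, true, methods);

   yields a CONST_INT directly for constant OP1 and emits a subtraction
   otherwise, while force_expand_binop additionally guarantees the
   result ends up in the requested target.  */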

/* Create a new vector value in VMODE with all elements set to OP.  The
   mode of OP must be the element mode of VMODE.  If OP is a constant,
   then the return value will be a constant.  */

rtx
expand_vector_broadcast (machine_mode vmode, rtx op)
{
  int n;
  rtvec vec;

  gcc_checking_assert (VECTOR_MODE_P (vmode));

  if (valid_for_const_vector_p (vmode, op))
    return gen_const_vec_duplicate (vmode, op);

  insn_code icode = optab_handler (vec_duplicate_optab, vmode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      create_output_operand (&ops[0], NULL_RTX, vmode);
      create_input_operand (&ops[1], op, GET_MODE (op));
      expand_insn (icode, 2, ops);
      return ops[0].value;
    }

  if (!GET_MODE_NUNITS (vmode).is_constant (&n))
    return NULL;

  /* ??? If the target doesn't have a vec_init, then we have no easy way
     of performing this operation.  Most of this sort of generic support
     is hidden away in the vector lowering support in gimple.  */
  icode = convert_optab_handler (vec_init_optab, vmode,
				 GET_MODE_INNER (vmode));
  if (icode == CODE_FOR_nothing)
    return NULL;

  vec = rtvec_alloc (n);
  for (int i = 0; i < n; ++i)
    RTVEC_ELT (vec, i) = op;
  rtx ret = gen_reg_rtx (vmode);
  emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));

  return ret;
}
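
/* For example, broadcasting X into V16QImode tries, in order: a
   CONST_VECTOR duplicate when X is a valid constant, the target's
   vec_duplicate pattern, and finally a vec_init built from N copies
   of X:

       rtx vec = expand_vector_broadcast (V16QImode, x);
       if (vec == NULL_RTX)
	 ...caller must fall back to element-wise code...

   (a sketch; which strategy applies depends on the target's optabs).  */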

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.  */

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab, outof_input,
				 gen_int_shift_amount (word_mode,
						       BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}
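
/* For example, with 32-bit words, a 64-bit logical right shift whose
   count is known to be >= 32 reduces to

       into_target  = outof_input >> superword_op1;   (count - 32)
       outof_target = 0;

   while an arithmetic right shift instead fills OUTOF_TARGET with
   copies of the sign bit via "outof_input >> 31".  A sketch assuming
   BITS_PER_WORD == 32; SUPERWORD_OP1 is the already-adjusted count.  */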

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (scalar_int_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
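
/* Worked example, assuming BITS_PER_WORD == 32: a 64-bit left shift by
   a variable OP1 in [1, 31].  Here OUTOF_* is the low half and INTO_*
   the high half, and the code above computes

       carries      = outof_input >> (32 - op1);     bits crossing words
       into_target  = (into_input << op1) | carries;
       outof_target = outof_input << op1;

   When word shift counts are truncated to 5 bits, (32 - op1) cannot be
   used directly, so the carries are instead computed as
   (outof_input >> 1) >> (op1 ^ 31), which is equal for OP1 in [0, 31].  */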


/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).  Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

  /* Try using conditional moves to generate straight-line code.  */
  if (HAVE_conditional_move)
    {
      rtx_insn *start = get_last_insn ();
      if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					    cmp_code, cmp1, cmp2,
					    outof_input, into_input,
					    op1, superword_op1,
					    outof_target, into_target,
					    unsignedp, methods, shift_mask))
	return true;
      delete_insns_since (start);
    }

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label,
			   profile_probability::uninitialized ());
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (targetm.gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
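
/* In summary, for a non-constant shift count the routine above emits
   one of three shapes, tried in this order:

   1. Two word-mode shifts plus a fixup of the OUTOF word, when word
      shifts by [BITS_PER_WORD, BITS_PER_WORD * 2) already fill in
      zeros or sign bits (shift_mask >= BITS_PER_WORD).
   2. Both the subword and superword sequences, with conditional moves
      selecting between their results.
   3. A compare-and-branch around the two sequences.

   This is a sketch of the strategy order only; the exact insns depend
   on the target's shift and cmove patterns.  */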

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			         _______________________
			        [__op0_high_|__op0_low__]
			         _______________________
        *			[__op1_high_|__op1_low__]
        _______________________________________________
			         _______________________
    (1)				[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
		       bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  rtx wordm1 = (umulp ? NULL_RTX
		: gen_int_shift_amount (word_mode, BITS_PER_WORD - 1));
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* *_widen_optab needs to determine operand mode, make sure at least
     one operand has non-VOID mode.  */
  if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode)
    op0_low = force_reg (word_mode, op0_low);

  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
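
/* Tiny numeric check of the signedness fixup, using BITS_PER_WORD == 8
   for readability: with op0_low == 0xff (255 unsigned, -1 signed) and
   op1_low == 0x02, a signed widening multiply gives -2 == 0xfffe,
   whereas the desired unsigned product is 0x01fe.  The difference is
   exactly op1_low << 8 == 0x0200, which the code above restores by
   adding (op0_low >> 7) == 1 to op0_high before forming the
   op0_high * op1_low cross product.  */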

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
		     rtx op1, rtx target, int unsignedp,
		     enum optab_methods methods)
{
  optab binop = code_to_optab (code);
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}

/* Return true if BINOPTAB implements a shift operation.  */

static bool
shift_optab_p (optab binoptab)
{
  switch (optab_to_code (binoptab))
    {
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      return true;

    default:
      return false;
    }
}

/* Return true if BINOPTAB implements a commutative binary operation.  */

static bool
commutative_optab_p (optab binoptab)
{
  return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
	  || binoptab == smul_widen_optab
	  || binoptab == umul_widen_optab
	  || binoptab == smul_highpart_optab
	  || binoptab == umul_highpart_optab);
}

/* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
   optimizing, and if the operand is a constant that costs more than
   1 instruction, force the constant into a register and return that
   register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */

static rtx
avoid_expensive_constant (machine_mode mode, optab binoptab,
			  int opn, rtx x, bool unsignedp)
{
  bool speed = optimize_insn_for_speed_p ();

  if (mode != VOIDmode
      && optimize
      && CONSTANT_P (x)
      && (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
	  > set_src_cost (x, mode, speed)))
    {
      if (CONST_INT_P (x))
	{
	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
	  if (intval != INTVAL (x))
	    x = GEN_INT (intval);
	}
      else
	x = convert_modes (mode, VOIDmode, x, unsignedp);
      x = force_reg (mode, x);
    }
  return x;
}
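
/* For example, a 64-bit AND with a constant such as 0x00ff00ff00ff00ff
   may need a multi-insn constant load on some targets; there rtx_cost
   of the CONST_INT as an AND operand exceeds set_src_cost, so the
   constant is forced into a register, while cheap immediates like 1
   are left in place.  (Illustrative; the actual decision is driven by
   target-specific costs.)  */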

/* Helper function for expand_binop: handle the case where there
   is an insn ICODE that directly implements the indicated operation.
   Returns null if this is not possible.  */
static rtx
expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
		       rtx op0, rtx op1,
		       rtx target, int unsignedp, enum optab_methods methods,
		       rtx_insn *last)
{
  machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
  machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
  machine_mode mode0, mode1, tmp_mode;
  struct expand_operand ops[3];
  bool commutative_p;
  rtx_insn *pat;
  rtx xop0 = op0, xop1 = op1;
  bool canonicalize_op1 = false;

  /* If it is a commutative operator and the modes would match
     if we would swap the operands, we can save the conversions.  */
  commutative_p = commutative_optab_p (binoptab);
  if (commutative_p
      && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
      && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0)
    std::swap (xop0, xop1);

  /* If we are optimizing, force expensive constants into a register.  */
  xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
  if (!shift_optab_p (binoptab))
    xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
  else
    /* Shifts and rotates often use a different mode for op1 from op0;
       for VOIDmode constants we don't know the mode, so force it
       to be canonicalized using convert_modes.  */
    canonicalize_op1 = true;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
  if (xmode0 != VOIDmode && xmode0 != mode0)
    {
      xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
      mode0 = xmode0;
    }

  mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
	   ? GET_MODE (xop1) : mode);
  if (xmode1 != VOIDmode && xmode1 != mode1)
    {
      xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
      mode1 = xmode1;
    }

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (commutative_p
      && swap_commutative_operands_with_target (target, xop0, xop1))
    std::swap (xop0, xop1);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (binoptab == vec_pack_trunc_optab
      || binoptab == vec_pack_usat_optab
      || binoptab == vec_pack_ssat_optab
      || binoptab == vec_pack_ufix_trunc_optab
      || binoptab == vec_pack_sfix_trunc_optab)
    {
      /* The mode of the result is different from the mode of the
1074 	 arguments.  */
1075       tmp_mode = insn_data[(int) icode].operand[0].mode;
1076       if (VECTOR_MODE_P (mode)
1077 	  && maybe_ne (GET_MODE_NUNITS (tmp_mode), 2 * GET_MODE_NUNITS (mode)))
1078 	{
1079 	  delete_insns_since (last);
1080 	  return NULL_RTX;
1081 	}
1082     }
1083   else
1084     tmp_mode = mode;
1085 
1086   create_output_operand (&ops[0], target, tmp_mode);
1087   create_input_operand (&ops[1], xop0, mode0);
1088   create_input_operand (&ops[2], xop1, mode1);
1089   pat = maybe_gen_insn (icode, 3, ops);
1090   if (pat)
1091     {
1092       /* If PAT is composed of more than one insn, try to add an appropriate
1093 	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
1094 	 operand, call expand_binop again, this time without a target.  */
1095       if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1096 	  && ! add_equal_note (pat, ops[0].value,
1097 			       optab_to_code (binoptab),
1098 			       ops[1].value, ops[2].value))
1099 	{
1100 	  delete_insns_since (last);
1101 	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1102 			       unsignedp, methods);
1103 	}
1104 
1105       emit_insn (pat);
1106       return ops[0].value;
1107     }
1108   delete_insns_since (last);
1109   return NULL_RTX;
1110 }
1111 
1112 /* Generate code to perform an operation specified by BINOPTAB
1113    on operands OP0 and OP1, with result having machine-mode MODE.
1114 
1115    UNSIGNEDP is for the case where we have to widen the operands
1116    to perform the operation.  It says to use zero-extension.
1117 
1118    If TARGET is nonzero, the value
1119    is generated there, if it is convenient to do so.
1120    In all cases an rtx is returned for the locus of the value;
1121    this may or may not be TARGET.  */
1122 
1123 rtx
1124 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1125 	      rtx target, int unsignedp, enum optab_methods methods)
1126 {
1127   enum optab_methods next_methods
1128     = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1129        ? OPTAB_WIDEN : methods);
1130   enum mode_class mclass;
1131   enum insn_code icode;
1132   machine_mode wider_mode;
1133   scalar_int_mode int_mode;
1134   rtx libfunc;
1135   rtx temp;
1136   rtx_insn *entry_last = get_last_insn ();
1137   rtx_insn *last;
1138 
1139   mclass = GET_MODE_CLASS (mode);
1140 
1141   /* If subtracting an integer constant, convert this into an addition of
1142      the negated constant.  */
1143 
1144   if (binoptab == sub_optab && CONST_INT_P (op1))
1145     {
1146       op1 = negate_rtx (mode, op1);
1147       binoptab = add_optab;
1148     }
1149   /* For shifts, constant invalid op1 might be expanded from different
1150      mode than MODE.  As those are invalid, force them to a register
1151      to avoid further problems during expansion.  */
1152   else if (CONST_INT_P (op1)
1153 	   && shift_optab_p (binoptab)
1154 	   && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode)))
1155     {
1156       op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode));
1157       op1 = force_reg (GET_MODE_INNER (mode), op1);
1158     }
1159 
1160   /* Record where to delete back to if we backtrack.  */
1161   last = get_last_insn ();
1162 
1163   /* If we can do it with a three-operand insn, do so.  */
1164 
1165   if (methods != OPTAB_MUST_WIDEN)
1166     {
1167       if (convert_optab_p (binoptab))
1168 	{
1169 	  machine_mode from_mode = widened_mode (mode, op0, op1);
1170 	  icode = find_widening_optab_handler (binoptab, mode, from_mode);
1171 	}
1172       else
1173 	icode = optab_handler (binoptab, mode);
1174       if (icode != CODE_FOR_nothing)
1175 	{
1176 	  temp = expand_binop_directly (icode, mode, binoptab, op0, op1,
1177 					target, unsignedp, methods, last);
1178 	  if (temp)
1179 	    return temp;
1180 	}
1181     }
1182 
1183   /* If we were trying to rotate, and that didn't work, try rotating
1184      the other direction before falling back to shifts and bitwise-or.  */
1185   if (((binoptab == rotl_optab
1186 	&& (icode = optab_handler (rotr_optab, mode)) != CODE_FOR_nothing)
1187        || (binoptab == rotr_optab
1188 	   && (icode = optab_handler (rotl_optab, mode)) != CODE_FOR_nothing))
1189       && is_int_mode (mode, &int_mode))
1190     {
1191       optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1192       rtx newop1;
1193       unsigned int bits = GET_MODE_PRECISION (int_mode);
1194 
1195       if (CONST_INT_P (op1))
1196 	newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1));
1197       else if (targetm.shift_truncation_mask (int_mode) == bits - 1)
1198         newop1 = negate_rtx (GET_MODE (op1), op1);
1199       else
1200         newop1 = expand_binop (GET_MODE (op1), sub_optab,
1201 			       gen_int_mode (bits, GET_MODE (op1)), op1,
1202 			       NULL_RTX, unsignedp, OPTAB_DIRECT);
1203 
1204       temp = expand_binop_directly (icode, int_mode, otheroptab, op0, newop1,
1205 				    target, unsignedp, methods, last);
1206       if (temp)
1207 	return temp;
1208     }
1209 
1210   /* If this is a multiply, see if we can do a widening operation that
1211      takes operands of this mode and makes a wider mode.  */
1212 
1213   if (binoptab == smul_optab
1214       && GET_MODE_2XWIDER_MODE (mode).exists (&wider_mode)
1215       && (convert_optab_handler ((unsignedp
1216 				  ? umul_widen_optab
1217 				  : smul_widen_optab),
1218 				 wider_mode, mode) != CODE_FOR_nothing))
1219     {
1220       /* *_widen_optab needs to determine operand mode, make sure at least
1221 	 one operand has non-VOID mode.  */
1222       if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
1223 	op0 = force_reg (mode, op0);
1224       temp = expand_binop (wider_mode,
1225 			   unsignedp ? umul_widen_optab : smul_widen_optab,
1226 			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1227 
1228       if (temp != 0)
1229 	{
1230 	  if (GET_MODE_CLASS (mode) == MODE_INT
1231 	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1232 	    return gen_lowpart (mode, temp);
1233 	  else
1234 	    return convert_to_mode (mode, temp, unsignedp);
1235 	}
1236     }
1237 
1238   /* If this is a vector shift by a scalar, see if we can do a vector
1239      shift by a vector.  If so, broadcast the scalar into a vector.  */
1240   if (mclass == MODE_VECTOR_INT)
1241     {
1242       optab otheroptab = unknown_optab;
1243 
1244       if (binoptab == ashl_optab)
1245 	otheroptab = vashl_optab;
1246       else if (binoptab == ashr_optab)
1247 	otheroptab = vashr_optab;
1248       else if (binoptab == lshr_optab)
1249 	otheroptab = vlshr_optab;
1250       else if (binoptab == rotl_optab)
1251 	otheroptab = vrotl_optab;
1252       else if (binoptab == rotr_optab)
1253 	otheroptab = vrotr_optab;
1254 
1255       if (otheroptab
1256 	  && (icode = optab_handler (otheroptab, mode)) != CODE_FOR_nothing)
1257 	{
1258 	  /* The scalar may have been extended to be too wide.  Truncate
1259 	     it back to the proper size to fit in the broadcast vector.  */
1260 	  scalar_mode inner_mode = GET_MODE_INNER (mode);
1261 	  if (!CONST_INT_P (op1)
1262 	      && (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (op1)))
1263 		  > GET_MODE_BITSIZE (inner_mode)))
1264 	    op1 = force_reg (inner_mode,
1265 			     simplify_gen_unary (TRUNCATE, inner_mode, op1,
1266 						 GET_MODE (op1)));
1267 	  rtx vop1 = expand_vector_broadcast (mode, op1);
1268 	  if (vop1)
1269 	    {
1270 	      temp = expand_binop_directly (icode, mode, otheroptab, op0, vop1,
1271 					    target, unsignedp, methods, last);
1272 	      if (temp)
1273 		return temp;
1274 	    }
1275 	}
1276     }
1277 
1278   /* Look for a wider mode of the same class for which we think we
1279      can open-code the operation.  Check for a widening multiply at the
1280      wider mode as well.  */
1281 
1282   if (CLASS_HAS_WIDER_MODES_P (mclass)
1283       && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1284     FOR_EACH_WIDER_MODE (wider_mode, mode)
1285       {
1286 	machine_mode next_mode;
1287 	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1288 	    || (binoptab == smul_optab
1289 		&& GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
1290 		&& (find_widening_optab_handler ((unsignedp
1291 						  ? umul_widen_optab
1292 						  : smul_widen_optab),
1293 						 next_mode, mode)
1294 		    != CODE_FOR_nothing)))
1295 	  {
1296 	    rtx xop0 = op0, xop1 = op1;
1297 	    int no_extend = 0;
1298 
1299 	    /* For certain integer operations, we need not actually extend
1300 	       the narrow operands, as long as we will truncate
1301 	       the results to the same narrowness.  */
1302 
1303 	    if ((binoptab == ior_optab || binoptab == and_optab
1304 		 || binoptab == xor_optab
1305 		 || binoptab == add_optab || binoptab == sub_optab
1306 		 || binoptab == smul_optab || binoptab == ashl_optab)
1307 		&& mclass == MODE_INT)
1308 	      {
1309 		no_extend = 1;
1310 		xop0 = avoid_expensive_constant (mode, binoptab, 0,
1311 						 xop0, unsignedp);
1312 		if (binoptab != ashl_optab)
1313 		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
1314 						   xop1, unsignedp);
1315 	      }
1316 
1317 	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1318 
1319 	    /* The second operand of a shift must always be extended.  */
1320 	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1321 				  no_extend && binoptab != ashl_optab);
1322 
1323 	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1324 				 unsignedp, OPTAB_DIRECT);
1325 	    if (temp)
1326 	      {
1327 		if (mclass != MODE_INT
1328                     || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1329 		  {
1330 		    if (target == 0)
1331 		      target = gen_reg_rtx (mode);
1332 		    convert_move (target, temp, 0);
1333 		    return target;
1334 		  }
1335 		else
1336 		  return gen_lowpart (mode, temp);
1337 	      }
1338 	    else
1339 	      delete_insns_since (last);
1340 	  }
1341       }
1342 
1343   /* If operation is commutative,
1344      try to make the first operand a register.
1345      Even better, try to make it the same as the target.
1346      Also try to make the last operand a constant.  */
1347   if (commutative_optab_p (binoptab)
1348       && swap_commutative_operands_with_target (target, op0, op1))
1349     std::swap (op0, op1);
1350 
1351   /* These can be done a word at a time.  */
1352   if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1353       && is_int_mode (mode, &int_mode)
1354       && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
1355       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1356     {
1357       int i;
1358       rtx_insn *insns;
1359 
1360       /* If TARGET is the same as one of the operands, the REG_EQUAL note
1361 	 won't be accurate, so use a new target.  */
1362       if (target == 0
1363 	  || target == op0
1364 	  || target == op1
1365 	  || !valid_multiword_target_p (target))
1366 	target = gen_reg_rtx (int_mode);
1367 
1368       start_sequence ();
1369 
1370       /* Do the actual arithmetic.  */
1371       for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
1372 	{
1373 	  rtx target_piece = operand_subword (target, i, 1, int_mode);
1374 	  rtx x = expand_binop (word_mode, binoptab,
1375 				operand_subword_force (op0, i, int_mode),
1376 				operand_subword_force (op1, i, int_mode),
1377 				target_piece, unsignedp, next_methods);
1378 
1379 	  if (x == 0)
1380 	    break;
1381 
1382 	  if (target_piece != x)
1383 	    emit_move_insn (target_piece, x);
1384 	}
1385 
1386       insns = get_insns ();
1387       end_sequence ();
1388 
1389       if (i == GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD)
1390 	{
1391 	  emit_insn (insns);
1392 	  return target;
1393 	}
1394     }
1395 
1396   /* Synthesize double word shifts from single word shifts.  */
1397   if ((binoptab == lshr_optab || binoptab == ashl_optab
1398        || binoptab == ashr_optab)
1399       && is_int_mode (mode, &int_mode)
1400       && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1401       && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1402       && GET_MODE_PRECISION (int_mode) == GET_MODE_BITSIZE (int_mode)
1403       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1404       && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1405       && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1406     {
1407       unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1408       scalar_int_mode op1_mode;
1409 
1410       double_shift_mask = targetm.shift_truncation_mask (int_mode);
1411       shift_mask = targetm.shift_truncation_mask (word_mode);
1412       op1_mode = (GET_MODE (op1) != VOIDmode
1413 		  ? as_a <scalar_int_mode> (GET_MODE (op1))
1414 		  : word_mode);
1415 
1416       /* Apply the truncation to constant shifts.  */
1417       if (double_shift_mask > 0 && CONST_INT_P (op1))
1418 	op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode);
1419 
1420       if (op1 == CONST0_RTX (op1_mode))
1421 	return op0;
1422 
1423       /* Make sure that this is a combination that expand_doubleword_shift
1424 	 can handle.  See the comments there for details.  */
1425       if (double_shift_mask == 0
1426 	  || (shift_mask == BITS_PER_WORD - 1
1427 	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
1428 	{
1429 	  rtx_insn *insns;
1430 	  rtx into_target, outof_target;
1431 	  rtx into_input, outof_input;
1432 	  int left_shift, outof_word;
1433 
1434 	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
1435 	     won't be accurate, so use a new target.  */
1436 	  if (target == 0
1437 	      || target == op0
1438 	      || target == op1
1439 	      || !valid_multiword_target_p (target))
1440 	    target = gen_reg_rtx (int_mode);
1441 
1442 	  start_sequence ();
1443 
1444 	  /* OUTOF_* is the word we are shifting bits away from, and
1445 	     INTO_* is the word that we are shifting bits towards, thus
1446 	     they differ depending on the direction of the shift and
1447 	     WORDS_BIG_ENDIAN.  */
1448 
1449 	  left_shift = binoptab == ashl_optab;
1450 	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1451 
1452 	  outof_target = operand_subword (target, outof_word, 1, int_mode);
1453 	  into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1454 
1455 	  outof_input = operand_subword_force (op0, outof_word, int_mode);
1456 	  into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1457 
1458 	  if (expand_doubleword_shift (op1_mode, binoptab,
1459 				       outof_input, into_input, op1,
1460 				       outof_target, into_target,
1461 				       unsignedp, next_methods, shift_mask))
1462 	    {
1463 	      insns = get_insns ();
1464 	      end_sequence ();
1465 
1466 	      emit_insn (insns);
1467 	      return target;
1468 	    }
1469 	  end_sequence ();
1470 	}
1471     }
1472 
1473   /* Synthesize double word rotates from single word shifts.  */
1474   if ((binoptab == rotl_optab || binoptab == rotr_optab)
1475       && is_int_mode (mode, &int_mode)
1476       && CONST_INT_P (op1)
1477       && GET_MODE_PRECISION (int_mode) == 2 * BITS_PER_WORD
1478       && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1479       && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1480     {
1481       rtx_insn *insns;
1482       rtx into_target, outof_target;
1483       rtx into_input, outof_input;
1484       rtx inter;
1485       int shift_count, left_shift, outof_word;
1486 
1487       /* If TARGET is the same as one of the operands, the REG_EQUAL note
1488 	 won't be accurate, so use a new target. Do this also if target is not
1489 	 a REG, first because having a register instead may open optimization
1490 	 opportunities, and second because if target and op0 happen to be MEMs
1491 	 designating the same location, we would risk clobbering it too early
1492 	 in the code sequence we generate below.  */
1493       if (target == 0
1494 	  || target == op0
1495 	  || target == op1
1496 	  || !REG_P (target)
1497 	  || !valid_multiword_target_p (target))
1498 	target = gen_reg_rtx (int_mode);
1499 
1500       start_sequence ();
1501 
1502       shift_count = INTVAL (op1);
1503 
1504       /* OUTOF_* is the word we are shifting bits away from, and
1505 	 INTO_* is the word that we are shifting bits towards, thus
1506 	 they differ depending on the direction of the shift and
1507 	 WORDS_BIG_ENDIAN.  */
1508 
1509       left_shift = (binoptab == rotl_optab);
1510       outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1511 
1512       outof_target = operand_subword (target, outof_word, 1, int_mode);
1513       into_target = operand_subword (target, 1 - outof_word, 1, int_mode);
1514 
1515       outof_input = operand_subword_force (op0, outof_word, int_mode);
1516       into_input = operand_subword_force (op0, 1 - outof_word, int_mode);
1517 
1518       if (shift_count == BITS_PER_WORD)
1519 	{
1520 	  /* This is just a word swap.  */
1521 	  emit_move_insn (outof_target, into_input);
1522 	  emit_move_insn (into_target, outof_input);
1523 	  inter = const0_rtx;
1524 	}
1525       else
1526 	{
1527 	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1528 	  HOST_WIDE_INT first_shift_count, second_shift_count;
1529 	  optab reverse_unsigned_shift, unsigned_shift;
1530 
1531 	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1532 				    ? lshr_optab : ashl_optab);
1533 
1534 	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1535 			    ? ashl_optab : lshr_optab);
1536 
1537 	  if (shift_count > BITS_PER_WORD)
1538 	    {
1539 	      first_shift_count = shift_count - BITS_PER_WORD;
1540 	      second_shift_count = 2 * BITS_PER_WORD - shift_count;
1541 	    }
1542 	  else
1543 	    {
1544 	      first_shift_count = BITS_PER_WORD - shift_count;
1545 	      second_shift_count = shift_count;
1546 	    }
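	  /* Worked example (illustrative only): for a 64-bit rotate left
	     by 10 on a 32-bit-word target, shift_count < BITS_PER_WORD, so
	     first_shift_count = 22 and second_shift_count = 10, and each
	     result word IORs a right shift by 22 of one input word with a
	     left shift by 10 of the other.  For a rotate by 40 the word
	     roles flip and the counts become 8 and 24.  */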
1547 	  rtx first_shift_count_rtx
1548 	    = gen_int_shift_amount (word_mode, first_shift_count);
1549 	  rtx second_shift_count_rtx
1550 	    = gen_int_shift_amount (word_mode, second_shift_count);
1551 
1552 	  into_temp1 = expand_binop (word_mode, unsigned_shift,
1553 				     outof_input, first_shift_count_rtx,
1554 				     NULL_RTX, unsignedp, next_methods);
1555 	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1556 				     into_input, second_shift_count_rtx,
1557 				     NULL_RTX, unsignedp, next_methods);
1558 
1559 	  if (into_temp1 != 0 && into_temp2 != 0)
1560 	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1561 				  into_target, unsignedp, next_methods);
1562 	  else
1563 	    inter = 0;
1564 
1565 	  if (inter != 0 && inter != into_target)
1566 	    emit_move_insn (into_target, inter);
1567 
1568 	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
1569 				      into_input, first_shift_count_rtx,
1570 				      NULL_RTX, unsignedp, next_methods);
1571 	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1572 				      outof_input, second_shift_count_rtx,
1573 				      NULL_RTX, unsignedp, next_methods);
1574 
1575 	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1576 	    inter = expand_binop (word_mode, ior_optab,
1577 				  outof_temp1, outof_temp2,
1578 				  outof_target, unsignedp, next_methods);
1579 
1580 	  if (inter != 0 && inter != outof_target)
1581 	    emit_move_insn (outof_target, inter);
1582 	}
1583 
1584       insns = get_insns ();
1585       end_sequence ();
1586 
1587       if (inter != 0)
1588 	{
1589 	  emit_insn (insns);
1590 	  return target;
1591 	}
1592     }
1593 
1594   /* These can be done a word at a time by propagating carries.  */
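  /* Illustrative sketch (not part of the original code): a two-word add
     with pieces a0/a1 and b0/b1 (least significant first) amounts to

	r0 = a0 + b0;
	c  = (r0 < a0);      unsigned wraparound, the LT store-flag below
	r1 = a1 + b1 + c;

     with a second carry test after C is added in, since that addition can
     itself wrap; the two carries are IORed together.  Subtraction works
     the same way with borrows, detected with GT instead of LT.  */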
1595   if ((binoptab == add_optab || binoptab == sub_optab)
1596       && is_int_mode (mode, &int_mode)
1597       && GET_MODE_SIZE (int_mode) >= 2 * UNITS_PER_WORD
1598       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1599     {
1600       unsigned int i;
1601       optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1602       const unsigned int nwords = GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD;
1603       rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1604       rtx xop0, xop1, xtarget;
1605 
1606       /* We can handle either a 1 or -1 value for the carry.  If
1607 	 STORE_FLAG_VALUE is one of those, use it.  Otherwise, use 1 since
1608 	 it is the one easiest to get.  */
1609 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1610       int normalizep = STORE_FLAG_VALUE;
1611 #else
1612       int normalizep = 1;
1613 #endif
1614 
1615       /* Prepare the operands.  */
1616       xop0 = force_reg (int_mode, op0);
1617       xop1 = force_reg (int_mode, op1);
1618 
1619       xtarget = gen_reg_rtx (int_mode);
1620 
1621       if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1622 	target = xtarget;
1623 
1624       /* Indicate for flow that the entire target reg is being set.  */
1625       if (REG_P (target))
1626 	emit_clobber (xtarget);
1627 
1628       /* Do the actual arithmetic.  */
1629       for (i = 0; i < nwords; i++)
1630 	{
1631 	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1632 	  rtx target_piece = operand_subword (xtarget, index, 1, int_mode);
1633 	  rtx op0_piece = operand_subword_force (xop0, index, int_mode);
1634 	  rtx op1_piece = operand_subword_force (xop1, index, int_mode);
1635 	  rtx x;
1636 
1637 	  /* Main add/subtract of the input operands.  */
1638 	  x = expand_binop (word_mode, binoptab,
1639 			    op0_piece, op1_piece,
1640 			    target_piece, unsignedp, next_methods);
1641 	  if (x == 0)
1642 	    break;
1643 
1644 	  if (i + 1 < nwords)
1645 	    {
1646 	      /* Store carry from main add/subtract.  */
1647 	      carry_out = gen_reg_rtx (word_mode);
1648 	      carry_out = emit_store_flag_force (carry_out,
1649 						 (binoptab == add_optab
1650 						  ? LT : GT),
1651 						 x, op0_piece,
1652 						 word_mode, 1, normalizep);
1653 	    }
1654 
1655 	  if (i > 0)
1656 	    {
1657 	      rtx newx;
1658 
1659 	      /* Add/subtract previous carry to main result.  */
1660 	      newx = expand_binop (word_mode,
1661 				   normalizep == 1 ? binoptab : otheroptab,
1662 				   x, carry_in,
1663 				   NULL_RTX, 1, next_methods);
1664 
1665 	      if (i + 1 < nwords)
1666 		{
1667 		  /* Get out carry from adding/subtracting carry in.  */
1668 		  rtx carry_tmp = gen_reg_rtx (word_mode);
1669 		  carry_tmp = emit_store_flag_force (carry_tmp,
1670 						     (binoptab == add_optab
1671 						      ? LT : GT),
1672 						     newx, x,
1673 						     word_mode, 1, normalizep);
1674 
1675 		  /* Logical-ior the two possible carries together.  */
1676 		  carry_out = expand_binop (word_mode, ior_optab,
1677 					    carry_out, carry_tmp,
1678 					    carry_out, 0, next_methods);
1679 		  if (carry_out == 0)
1680 		    break;
1681 		}
1682 	      emit_move_insn (target_piece, newx);
1683 	    }
1684 	  else
1685 	    {
1686 	      if (x != target_piece)
1687 		emit_move_insn (target_piece, x);
1688 	    }
1689 
1690 	  carry_in = carry_out;
1691 	}
1692 
1693       if (i == GET_MODE_BITSIZE (int_mode) / (unsigned) BITS_PER_WORD)
1694 	{
1695 	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing
1696 	      || ! rtx_equal_p (target, xtarget))
1697 	    {
1698 	      rtx_insn *temp = emit_move_insn (target, xtarget);
1699 
1700 	      set_dst_reg_note (temp, REG_EQUAL,
1701 				gen_rtx_fmt_ee (optab_to_code (binoptab),
1702 						int_mode, copy_rtx (xop0),
1703 						copy_rtx (xop1)),
1704 				target);
1705 	    }
1706 	  else
1707 	    target = xtarget;
1708 
1709 	  return target;
1710 	}
1711 
1712       else
1713 	delete_insns_since (last);
1714     }
1715 
1716   /* Attempt to synthesize double word multiplies using a sequence of word
1717      mode multiplications.  We first attempt to generate a sequence using a
1718      more efficient unsigned widening multiply, and if that fails we then
1719      try using a signed widening multiply.  */
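  /* Illustrative sketch (not part of the original code): writing each
     operand as hi*W + lo with W = 2**BITS_PER_WORD, the low two words of
     the product are

	widening (lo0 * lo1) + W * (hi0 * lo1 + lo0 * hi1)

     where the cross products are needed only modulo W and the hi0 * hi1
     term falls entirely outside the result, so it is dropped.  */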
1720 
1721   if (binoptab == smul_optab
1722       && is_int_mode (mode, &int_mode)
1723       && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
1724       && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
1725       && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
1726     {
1727       rtx product = NULL_RTX;
1728       if (convert_optab_handler (umul_widen_optab, int_mode, word_mode)
1729 	  != CODE_FOR_nothing)
1730 	{
1731 	  product = expand_doubleword_mult (int_mode, op0, op1, target,
1732 					    true, methods);
1733 	  if (!product)
1734 	    delete_insns_since (last);
1735 	}
1736 
1737       if (product == NULL_RTX
1738 	  && (convert_optab_handler (smul_widen_optab, int_mode, word_mode)
1739 	      != CODE_FOR_nothing))
1740 	{
1741 	  product = expand_doubleword_mult (int_mode, op0, op1, target,
1742 					    false, methods);
1743 	  if (!product)
1744 	    delete_insns_since (last);
1745 	}
1746 
1747       if (product != NULL_RTX)
1748 	{
1749 	  if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
1750 	    {
1751 	      rtx_insn *move = emit_move_insn (target ? target : product,
1752 					       product);
1753 	      set_dst_reg_note (move,
1754 				REG_EQUAL,
1755 				gen_rtx_fmt_ee (MULT, int_mode,
1756 						copy_rtx (op0),
1757 						copy_rtx (op1)),
1758 				target ? target : product);
1759 	    }
1760 	  return product;
1761 	}
1762     }
1763 
1764   /* It can't be open-coded in this mode.
1765      Use a library call if one is available and caller says that's ok.  */
1766 
1767   libfunc = optab_libfunc (binoptab, mode);
1768   if (libfunc
1769       && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1770     {
1771       rtx_insn *insns;
1772       rtx op1x = op1;
1773       machine_mode op1_mode = mode;
1774       rtx value;
1775 
1776       start_sequence ();
1777 
1778       if (shift_optab_p (binoptab))
1779 	{
1780 	  op1_mode = targetm.libgcc_shift_count_mode ();
1781 	  /* Specify unsigned here,
1782 	     since negative shift counts are meaningless.  */
1783 	  op1x = convert_to_mode (op1_mode, op1, 1);
1784 	}
1785 
1786       if (GET_MODE (op0) != VOIDmode
1787 	  && GET_MODE (op0) != mode)
1788 	op0 = convert_to_mode (mode, op0, unsignedp);
1789 
1790       /* Pass 1 for NO_QUEUE so we don't lose any increments
1791 	 if the libcall is cse'd or moved.  */
1792       value = emit_library_call_value (libfunc,
1793 				       NULL_RTX, LCT_CONST, mode,
1794 				       op0, mode, op1x, op1_mode);
1795 
1796       insns = get_insns ();
1797       end_sequence ();
1798 
1799       bool trapv = trapv_binoptab_p (binoptab);
1800       target = gen_reg_rtx (mode);
1801       emit_libcall_block_1 (insns, target, value,
1802 			    trapv ? NULL_RTX
1803 			    : gen_rtx_fmt_ee (optab_to_code (binoptab),
1804 					      mode, op0, op1), trapv);
1805 
1806       return target;
1807     }
1808 
1809   delete_insns_since (last);
1810 
1811   /* It can't be done in this mode.  Can we do it in a wider mode?  */
1812 
1813   if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1814 	 || methods == OPTAB_MUST_WIDEN))
1815     {
1816       /* Caller says don't even try.  */
1817       delete_insns_since (entry_last);
1818       return 0;
1819     }
1820 
1821   /* Compute the value of METHODS to pass to recursive calls.
1822      Don't allow widening to be tried recursively.  */
1823 
1824   methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1825 
1826   /* Look for a wider mode of the same class for which it appears we can do
1827      the operation.  */
1828 
1829   if (CLASS_HAS_WIDER_MODES_P (mclass))
1830     {
1831       /* This code doesn't make sense for conversion optabs, since we
1832 	 wouldn't then want to extend the operands to be the same size
1833 	 as the result.  */
1834       gcc_assert (!convert_optab_p (binoptab));
1835       FOR_EACH_WIDER_MODE (wider_mode, mode)
1836 	{
1837 	  if (optab_handler (binoptab, wider_mode)
1838 	      || (methods == OPTAB_LIB
1839 		  && optab_libfunc (binoptab, wider_mode)))
1840 	    {
1841 	      rtx xop0 = op0, xop1 = op1;
1842 	      int no_extend = 0;
1843 
1844 	      /* For certain integer operations, we need not actually extend
1845 		 the narrow operands, as long as we will truncate
1846 		 the results to the same narrowness.  */
1847 
1848 	      if ((binoptab == ior_optab || binoptab == and_optab
1849 		   || binoptab == xor_optab
1850 		   || binoptab == add_optab || binoptab == sub_optab
1851 		   || binoptab == smul_optab || binoptab == ashl_optab)
1852 		  && mclass == MODE_INT)
1853 		no_extend = 1;
1854 
1855 	      xop0 = widen_operand (xop0, wider_mode, mode,
1856 				    unsignedp, no_extend);
1857 
1858 	      /* The second operand of a shift must always be extended.  */
1859 	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1860 				    no_extend && binoptab != ashl_optab);
1861 
1862 	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1863 				   unsignedp, methods);
1864 	      if (temp)
1865 		{
1866 		  if (mclass != MODE_INT
1867 		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1868 		    {
1869 		      if (target == 0)
1870 			target = gen_reg_rtx (mode);
1871 		      convert_move (target, temp, 0);
1872 		      return target;
1873 		    }
1874 		  else
1875 		    return gen_lowpart (mode, temp);
1876 		}
1877 	      else
1878 		delete_insns_since (last);
1879 	    }
1880 	}
1881     }
1882 
1883   delete_insns_since (entry_last);
1884   return 0;
1885 }
1886 
1887 /* Expand a binary operator which has both signed and unsigned forms.
1888    UOPTAB is the optab for unsigned operations, and SOPTAB is for
1889    signed operations.
1890 
1891    If we widen unsigned operands, we may use a signed wider operation instead
1892    of an unsigned wider operation, since the result would be the same.  */
1893 
1894 rtx
1895 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
1896 		   rtx op0, rtx op1, rtx target, int unsignedp,
1897 		   enum optab_methods methods)
1898 {
1899   rtx temp;
1900   optab direct_optab = unsignedp ? uoptab : soptab;
1901   bool save_enable;
1902 
1903   /* Do it without widening, if possible.  */
1904   temp = expand_binop (mode, direct_optab, op0, op1, target,
1905 		       unsignedp, OPTAB_DIRECT);
1906   if (temp || methods == OPTAB_DIRECT)
1907     return temp;
1908 
1909   /* Try widening to a signed int.  Disable any direct use of any
1910      signed insn in the current mode.  */
1911   save_enable = swap_optab_enable (soptab, mode, false);
1912 
1913   temp = expand_binop (mode, soptab, op0, op1, target,
1914 		       unsignedp, OPTAB_WIDEN);
1915 
1916   /* For unsigned operands, try widening to an unsigned int.  */
1917   if (!temp && unsignedp)
1918     temp = expand_binop (mode, uoptab, op0, op1, target,
1919 			 unsignedp, OPTAB_WIDEN);
1920   if (temp || methods == OPTAB_WIDEN)
1921     goto egress;
1922 
1923   /* Use the right width libcall if that exists.  */
1924   temp = expand_binop (mode, direct_optab, op0, op1, target,
1925 		       unsignedp, OPTAB_LIB);
1926   if (temp || methods == OPTAB_LIB)
1927     goto egress;
1928 
1929   /* Must widen and use a libcall; use either signed or unsigned.  */
1930   temp = expand_binop (mode, soptab, op0, op1, target,
1931 		       unsignedp, methods);
1932   if (!temp && unsignedp)
1933     temp = expand_binop (mode, uoptab, op0, op1, target,
1934 			 unsignedp, methods);
1935 
1936  egress:
1937   /* Undo the fiddling above.  */
1938   if (save_enable)
1939     swap_optab_enable (soptab, mode, true);
1940   return temp;
1941 }
1942 
1943 /* Generate code to perform an operation specified by UNOPPTAB
1944    on operand OP0, with two results to TARG0 and TARG1.
1945    We assume that the order of the operands for the instruction
1946    is TARG0, TARG1, OP0.
1947 
1948    Either TARG0 or TARG1 may be zero, but what that means is that
1949    the result is not actually wanted.  We will generate it into
1950    a dummy pseudo-reg and discard it.  They may not both be zero.
1951 
1952    Returns 1 if this operation can be performed; 0 if not.  */
1953 
1954 int
1955 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1956 		    int unsignedp)
1957 {
1958   machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1959   enum mode_class mclass;
1960   machine_mode wider_mode;
1961   rtx_insn *entry_last = get_last_insn ();
1962   rtx_insn *last;
1963 
1964   mclass = GET_MODE_CLASS (mode);
1965 
1966   if (!targ0)
1967     targ0 = gen_reg_rtx (mode);
1968   if (!targ1)
1969     targ1 = gen_reg_rtx (mode);
1970 
1971   /* Record where to go back to if we fail.  */
1972   last = get_last_insn ();
1973 
1974   if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
1975     {
1976       struct expand_operand ops[3];
1977       enum insn_code icode = optab_handler (unoptab, mode);
1978 
1979       create_fixed_operand (&ops[0], targ0);
1980       create_fixed_operand (&ops[1], targ1);
1981       create_convert_operand_from (&ops[2], op0, mode, unsignedp);
1982       if (maybe_expand_insn (icode, 3, ops))
1983 	return 1;
1984     }
1985 
1986   /* It can't be done in this mode.  Can we do it in a wider mode?  */
1987 
1988   if (CLASS_HAS_WIDER_MODES_P (mclass))
1989     {
1990       FOR_EACH_WIDER_MODE (wider_mode, mode)
1991 	{
1992 	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
1993 	    {
1994 	      rtx t0 = gen_reg_rtx (wider_mode);
1995 	      rtx t1 = gen_reg_rtx (wider_mode);
1996 	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1997 
1998 	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1999 		{
2000 		  convert_move (targ0, t0, unsignedp);
2001 		  convert_move (targ1, t1, unsignedp);
2002 		  return 1;
2003 		}
2004 	      else
2005 		delete_insns_since (last);
2006 	    }
2007 	}
2008     }
2009 
2010   delete_insns_since (entry_last);
2011   return 0;
2012 }
2013 
2014 /* Generate code to perform an operation specified by BINOPTAB
2015    on operands OP0 and OP1, with two results to TARG0 and TARG1.
2016    We assume that the order of the operands for the instruction
2017    is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2018    [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2019 
2020    Either TARG0 or TARG1 may be zero, but what that means is that
2021    the result is not actually wanted.  We will generate it into
2022    a dummy pseudo-reg and discard it.  They may not both be zero.
2023 
2024    Returns 1 if this operation can be performed; 0 if not.  */
2025 
2026 int
2027 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2028 		     int unsignedp)
2029 {
2030   machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2031   enum mode_class mclass;
2032   machine_mode wider_mode;
2033   rtx_insn *entry_last = get_last_insn ();
2034   rtx_insn *last;
2035 
2036   mclass = GET_MODE_CLASS (mode);
2037 
2038   if (!targ0)
2039     targ0 = gen_reg_rtx (mode);
2040   if (!targ1)
2041     targ1 = gen_reg_rtx (mode);
2042 
2043   /* Record where to go back to if we fail.  */
2044   last = get_last_insn ();
2045 
2046   if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2047     {
2048       struct expand_operand ops[4];
2049       enum insn_code icode = optab_handler (binoptab, mode);
2050       machine_mode mode0 = insn_data[icode].operand[1].mode;
2051       machine_mode mode1 = insn_data[icode].operand[2].mode;
2052       rtx xop0 = op0, xop1 = op1;
2053 
2054       /* If we are optimizing, force expensive constants into a register.  */
2055       xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2056       xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2057 
2058       create_fixed_operand (&ops[0], targ0);
2059       create_convert_operand_from (&ops[1], xop0, mode, unsignedp);
2060       create_convert_operand_from (&ops[2], xop1, mode, unsignedp);
2061       create_fixed_operand (&ops[3], targ1);
2062       if (maybe_expand_insn (icode, 4, ops))
2063 	return 1;
2064       delete_insns_since (last);
2065     }
2066 
2067   /* It can't be done in this mode.  Can we do it in a wider mode?  */
2068 
2069   if (CLASS_HAS_WIDER_MODES_P (mclass))
2070     {
2071       FOR_EACH_WIDER_MODE (wider_mode, mode)
2072 	{
2073 	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2074 	    {
2075 	      rtx t0 = gen_reg_rtx (wider_mode);
2076 	      rtx t1 = gen_reg_rtx (wider_mode);
2077 	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2078 	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2079 
2080 	      if (expand_twoval_binop (binoptab, cop0, cop1,
2081 				       t0, t1, unsignedp))
2082 		{
2083 		  convert_move (targ0, t0, unsignedp);
2084 		  convert_move (targ1, t1, unsignedp);
2085 		  return 1;
2086 		}
2087 	      else
2088 		delete_insns_since (last);
2089 	    }
2090 	}
2091     }
2092 
2093   delete_insns_since (entry_last);
2094   return 0;
2095 }
2096 
2097 /* Expand the two-valued library call indicated by BINOPTAB, but
2098    preserve only one of the values.  If TARG0 is non-NULL, the first
2099    value is placed into TARG0; otherwise the second value is placed
2100    into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
2101    value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2102    This routine assumes that the value returned by the library call is
2103    as if the return value were of an integral mode twice as wide as the
2104    mode of OP0.  Returns true if the call was successful.  */
2105 
2106 bool
2107 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2108 			     rtx targ0, rtx targ1, enum rtx_code code)
2109 {
2110   machine_mode mode;
2111   machine_mode libval_mode;
2112   rtx libval;
2113   rtx_insn *insns;
2114   rtx libfunc;
2115 
2116   /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
2117   gcc_assert (!targ0 != !targ1);
2118 
2119   mode = GET_MODE (op0);
2120   libfunc = optab_libfunc (binoptab, mode);
2121   if (!libfunc)
2122     return false;
2123 
2124   /* The value returned by the library function will have twice as
2125      many bits as the nominal MODE.  */
2126   libval_mode = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
2127   start_sequence ();
2128   libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2129 				    libval_mode,
2130 				    op0, mode,
2131 				    op1, mode);
2132   /* Get the part of VAL containing the value that we want.  */
2133   libval = simplify_gen_subreg (mode, libval, libval_mode,
2134 				targ0 ? 0 : GET_MODE_SIZE (mode));
2135   insns = get_insns ();
2136   end_sequence ();
2137   /* Move the result into the desired location.  */
2138   emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2139 		      gen_rtx_fmt_ee (code, mode, op0, op1));
2140 
2141   return true;
2142 }
2143 
2144 
2145 /* Wrapper around expand_unop which takes an rtx code to specify
2146    the operation to perform, not an optab pointer.  All other
2147    arguments are the same.  */
2148 rtx
2149 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2150 		    rtx target, int unsignedp)
2151 {
2152   optab unop = code_to_optab (code);
2153   gcc_assert (unop);
2154 
2155   return expand_unop (mode, unop, op0, target, unsignedp);
2156 }
2157 
2158 /* Try calculating
2159 	(clz:narrow x)
2160    as
2161 	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2162 
2163    A similar operation can be used for clrsb.  UNOPTAB says which operation
2164    we are trying to expand.  */
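/* Worked example (illustrative only): clz on a 16-bit value using only a
   32-bit clz instruction becomes clz32 ((uint32_t) x) - (32 - 16), since
   zero extension contributes exactly 16 extra leading zeros.  */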
2165 static rtx
2166 widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab)
2167 {
2168   opt_scalar_int_mode wider_mode_iter;
2169   FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2170     {
2171       scalar_int_mode wider_mode = wider_mode_iter.require ();
2172       if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2173 	{
2174 	  rtx xop0, temp;
2175 	  rtx_insn *last;
2176 
2177 	  last = get_last_insn ();
2178 
2179 	  if (target == 0)
2180 	    target = gen_reg_rtx (mode);
2181 	  xop0 = widen_operand (op0, wider_mode, mode,
2182 				unoptab != clrsb_optab, false);
2183 	  temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2184 			      unoptab != clrsb_optab);
2185 	  if (temp != 0)
2186 	    temp = expand_binop
2187 	      (wider_mode, sub_optab, temp,
2188 	       gen_int_mode (GET_MODE_PRECISION (wider_mode)
2189 			     - GET_MODE_PRECISION (mode),
2190 			     wider_mode),
2191 	       target, true, OPTAB_DIRECT);
2192 	  if (temp == 0)
2193 	    delete_insns_since (last);
2194 
2195 	  return temp;
2196 	}
2197     }
2198   return 0;
2199 }
2200 
2201 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2202    quantities, choosing which based on whether the high word is nonzero.  */
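/* Illustrative sketch (not part of the original code): in C-like terms the
   sequence computes

	hi != 0 ? clz_word (hi) : BITS_PER_WORD + clz_word (lo)

   using a compare-and-jump rather than a conditional move.  */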
2203 static rtx
2204 expand_doubleword_clz (scalar_int_mode mode, rtx op0, rtx target)
2205 {
2206   rtx xop0 = force_reg (mode, op0);
2207   rtx subhi = gen_highpart (word_mode, xop0);
2208   rtx sublo = gen_lowpart (word_mode, xop0);
2209   rtx_code_label *hi0_label = gen_label_rtx ();
2210   rtx_code_label *after_label = gen_label_rtx ();
2211   rtx_insn *seq;
2212   rtx temp, result;
2213 
2214   /* If we were not given a target, use a word_mode register, not a
2215      'mode' register.  The result will fit, and nobody is expecting
2216      anything bigger (the return type of __builtin_clz* is int).  */
2217   if (!target)
2218     target = gen_reg_rtx (word_mode);
2219 
2220   /* In any case, write to a word_mode scratch in both branches of the
2221      conditional, so we can ensure there is a single move insn setting
2222      'target' to tag a REG_EQUAL note on.  */
2223   result = gen_reg_rtx (word_mode);
2224 
2225   start_sequence ();
2226 
2227   /* If the high word is not equal to zero,
2228      then clz of the full value is clz of the high word.  */
2229   emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2230 			   word_mode, true, hi0_label);
2231 
2232   temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2233   if (!temp)
2234     goto fail;
2235 
2236   if (temp != result)
2237     convert_move (result, temp, true);
2238 
2239   emit_jump_insn (targetm.gen_jump (after_label));
2240   emit_barrier ();
2241 
2242   /* Else clz of the full value is clz of the low word plus the number
2243      of bits in the high word.  */
2244   emit_label (hi0_label);
2245 
2246   temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2247   if (!temp)
2248     goto fail;
2249   temp = expand_binop (word_mode, add_optab, temp,
2250 		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
2251 		       result, true, OPTAB_DIRECT);
2252   if (!temp)
2253     goto fail;
2254   if (temp != result)
2255     convert_move (result, temp, true);
2256 
2257   emit_label (after_label);
2258   convert_move (target, result, true);
2259 
2260   seq = get_insns ();
2261   end_sequence ();
2262 
2263   add_equal_note (seq, target, CLZ, xop0, 0);
2264   emit_insn (seq);
2265   return target;
2266 
2267  fail:
2268   end_sequence ();
2269   return 0;
2270 }
2271 
2272 /* Try calculating popcount of a double-word quantity as two popcounts of
2273    word-sized quantities and summing up the results.  */
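/* Illustrative note (not part of the original code): in C-like terms this
   is popcount_word (lo) + popcount_word (hi), which is correct because a
   population count distributes over disjoint bit ranges.  */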
2274 static rtx
2275 expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target)
2276 {
2277   rtx t0, t1, t;
2278   rtx_insn *seq;
2279 
2280   start_sequence ();
2281 
2282   t0 = expand_unop_direct (word_mode, popcount_optab,
2283 			   operand_subword_force (op0, 0, mode), NULL_RTX,
2284 			   true);
2285   t1 = expand_unop_direct (word_mode, popcount_optab,
2286 			   operand_subword_force (op0, 1, mode), NULL_RTX,
2287 			   true);
2288   if (!t0 || !t1)
2289     {
2290       end_sequence ();
2291       return NULL_RTX;
2292     }
2293 
2294   /* If we were not given a target, use a word_mode register, not a
2295      'mode' register.  The result will fit, and nobody is expecting
2296      anything bigger (the return type of __builtin_popcount* is int).  */
2297   if (!target)
2298     target = gen_reg_rtx (word_mode);
2299 
2300   t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
2301 
2302   seq = get_insns ();
2303   end_sequence ();
2304 
2305   add_equal_note (seq, t, POPCOUNT, op0, 0);
2306   emit_insn (seq);
2307   return t;
2308 }
2309 
2310 /* Try calculating
2311 	(parity:wide x)
2312    as
2313 	(parity:narrow (low (x) ^ high (x))) */
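/* Worked example (illustrative only): for a double-word value with high
   word 1 and low word 3 there are three set bits, and parity (1 ^ 3)
   = parity (2) = 1 agrees; XOR preserves parity because bits set in both
   words cancel in pairs.  */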
2314 static rtx
2315 expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target)
2316 {
2317   rtx t = expand_binop (word_mode, xor_optab,
2318 			operand_subword_force (op0, 0, mode),
2319 			operand_subword_force (op0, 1, mode),
2320 			NULL_RTX, 0, OPTAB_DIRECT);
2321   return expand_unop (word_mode, parity_optab, t, target, true);
2322 }
2323 
2324 /* Try calculating
2325 	(bswap:narrow x)
2326    as
2327 	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
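/* Worked example (illustrative only): a 16-bit bswap using only a 32-bit
   bswap becomes bswap32 (x) >> 16; the two bytes of interest land in the
   top half of the wide result and the logical shift brings them back
   down.  */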
2328 static rtx
2329 widen_bswap (scalar_int_mode mode, rtx op0, rtx target)
2330 {
2331   rtx x;
2332   rtx_insn *last;
2333   opt_scalar_int_mode wider_mode_iter;
2334 
2335   FOR_EACH_WIDER_MODE (wider_mode_iter, mode)
2336     if (optab_handler (bswap_optab, wider_mode_iter.require ())
2337 	!= CODE_FOR_nothing)
2338       break;
2339 
2340   if (!wider_mode_iter.exists ())
2341     return NULL_RTX;
2342 
2343   scalar_int_mode wider_mode = wider_mode_iter.require ();
2344   last = get_last_insn ();
2345 
2346   x = widen_operand (op0, wider_mode, mode, true, true);
2347   x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2348 
2349   gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2350 	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2351   if (x != 0)
2352     x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2353 		      GET_MODE_BITSIZE (wider_mode)
2354 		      - GET_MODE_BITSIZE (mode),
2355 		      NULL_RTX, true);
2356 
2357   if (x != 0)
2358     {
2359       if (target == 0)
2360 	target = gen_reg_rtx (mode);
2361       emit_move_insn (target, gen_lowpart (mode, x));
2362     }
2363   else
2364     delete_insns_since (last);
2365 
2366   return target;
2367 }
2368 
2369 /* Try calculating bswap as two bswaps of two word-sized operands.  */
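/* Illustrative note (not part of the original code): byte-reversing a
   two-word value both swaps the words and byte-reverses each word, so
   below T0 = bswap (word 1) is stored into word 0 of the target and
   T1 = bswap (word 0) into word 1.  */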
2370 
2371 static rtx
2372 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2373 {
2374   rtx t0, t1;
2375 
2376   t1 = expand_unop (word_mode, bswap_optab,
2377 		    operand_subword_force (op, 0, mode), NULL_RTX, true);
2378   t0 = expand_unop (word_mode, bswap_optab,
2379 		    operand_subword_force (op, 1, mode), NULL_RTX, true);
2380 
2381   if (target == 0 || !valid_multiword_target_p (target))
2382     target = gen_reg_rtx (mode);
2383   if (REG_P (target))
2384     emit_clobber (target);
2385   emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2386   emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2387 
2388   return target;
2389 }
2390 
2391 /* Try calculating (parity x) as (and (popcount x) 1), where
2392    popcount can also be done in a wider mode.  */
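/* Worked example (illustrative only): popcount (0b1011) = 3 and 3 & 1 = 1,
   the parity of a value with three set bits; the popcount may be computed
   in a wider mode and the result truncated back afterwards.  */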
2393 static rtx
2394 expand_parity (scalar_int_mode mode, rtx op0, rtx target)
2395 {
2396   enum mode_class mclass = GET_MODE_CLASS (mode);
2397   opt_scalar_int_mode wider_mode_iter;
2398   FOR_EACH_MODE_FROM (wider_mode_iter, mode)
2399     {
2400       scalar_int_mode wider_mode = wider_mode_iter.require ();
2401       if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2402 	{
2403 	  rtx xop0, temp;
2404 	  rtx_insn *last;
2405 
2406 	  last = get_last_insn ();
2407 
2408 	  if (target == 0 || GET_MODE (target) != wider_mode)
2409 	    target = gen_reg_rtx (wider_mode);
2410 
2411 	  xop0 = widen_operand (op0, wider_mode, mode, true, false);
2412 	  temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2413 			      true);
2414 	  if (temp != 0)
2415 	    temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2416 				 target, true, OPTAB_DIRECT);
2417 
2418 	  if (temp)
2419 	    {
2420 	      if (mclass != MODE_INT
2421 		  || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2422 		return convert_to_mode (mode, temp, 0);
2423 	      else
2424 		return gen_lowpart (mode, temp);
2425 	    }
2426 	  else
2427 	    delete_insns_since (last);
2428 	}
2429     }
2430   return 0;
2431 }
2432 
2433 /* Try calculating ctz(x) as K - clz(x & -x),
2434    where K is GET_MODE_PRECISION(mode) - 1.
2435 
2436    Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2437    don't have to worry about what the hardware does in that case.  (If
2438    the clz instruction produces the usual value at 0, which is K, the
2439    result of this code sequence will be -1; expand_ffs, below, relies
2440    on this.  It might be nice to have it be K instead, for consistency
2441    with the (very few) processors that provide a ctz with a defined
2442    value, but that would take one more instruction, and it would be
2443    less convenient for expand_ffs anyway.)  */
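/* Worked example (illustrative only): for x = 0b10100 in a 32-bit mode,
   x & -x isolates the lowest set bit, 0b100, so clz (x & -x) = 29 and
   ctz (x) = 31 - 29 = 2.  */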
2444 
2445 static rtx
2446 expand_ctz (scalar_int_mode mode, rtx op0, rtx target)
2447 {
2448   rtx_insn *seq;
2449   rtx temp;
2450 
2451   if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2452     return 0;
2453 
2454   start_sequence ();
2455 
2456   temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2457   if (temp)
2458     temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2459 			 true, OPTAB_DIRECT);
2460   if (temp)
2461     temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2462   if (temp)
2463     temp = expand_binop (mode, sub_optab,
2464 			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2465 			 temp, target,
2466 			 true, OPTAB_DIRECT);
2467   if (temp == 0)
2468     {
2469       end_sequence ();
2470       return 0;
2471     }
2472 
2473   seq = get_insns ();
2474   end_sequence ();
2475 
2476   add_equal_note (seq, temp, CTZ, op0, 0);
2477   emit_insn (seq);
2478   return temp;
2479 }
2480 
2481 
2482 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2483    else with the sequence used by expand_ctz.
2484 
2485    The ffs builtin promises to return zero for a zero value and ctz/clz
2486    may have an undefined value in that case.  If they do not give us a
2487    convenient value, we have to generate a test and branch.  */
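/* Illustrative note (not part of the original code): ffs (x) is
   ctz (x) + 1 for nonzero x and 0 for x == 0, so the code below only needs
   the increment, plus a test and branch that forces the intermediate
   result to -1 at zero whenever the instruction's value there is not
   already -1.  */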
2488 static rtx
2489 expand_ffs (scalar_int_mode mode, rtx op0, rtx target)
2490 {
2491   HOST_WIDE_INT val = 0;
2492   bool defined_at_zero = false;
2493   rtx temp;
2494   rtx_insn *seq;
2495 
2496   if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2497     {
2498       start_sequence ();
2499 
2500       temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2501       if (!temp)
2502 	goto fail;
2503 
2504       defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2505     }
2506   else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2507     {
2508       start_sequence ();
2509       temp = expand_ctz (mode, op0, 0);
2510       if (!temp)
2511 	goto fail;
2512 
2513       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2514 	{
2515 	  defined_at_zero = true;
2516 	  val = (GET_MODE_PRECISION (mode) - 1) - val;
2517 	}
2518     }
2519   else
2520     return 0;
2521 
2522   if (defined_at_zero && val == -1)
2523     /* No correction needed at zero.  */;
2524   else
2525     {
2526       /* We don't try to do anything clever with the situation found
2527 	 on some processors (e.g. Alpha) where ctz(0:mode) ==
2528 	 bitsize(mode).  If someone can think of a way to send N to -1
2529 	 and leave alone all values in the range 0..N-1 (where N is a
2530 	 power of two), cheaper than this test-and-branch, please add it.
2531 
2532 	 The test-and-branch is done after the operation itself, in case
2533 	 the operation sets condition codes that can be recycled for this.
2534 	 (This is true on i386, for instance.)  */
2535 
2536       rtx_code_label *nonzero_label = gen_label_rtx ();
2537       emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2538 			       mode, true, nonzero_label);
2539 
2540       convert_move (temp, GEN_INT (-1), false);
2541       emit_label (nonzero_label);
2542     }
2543 
2544   /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
2545      to produce a value in the range 0..bitsize.  */
2546   temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2547 		       target, false, OPTAB_DIRECT);
2548   if (!temp)
2549     goto fail;
2550 
2551   seq = get_insns ();
2552   end_sequence ();
2553 
2554   add_equal_note (seq, temp, FFS, op0, 0);
2555   emit_insn (seq);
2556   return temp;
2557 
2558  fail:
2559   end_sequence ();
2560   return 0;
2561 }
2562 
2563 /* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
2564    conditions, VAL may already be a SUBREG against which we cannot generate
2565    a further SUBREG.  In this case, we expect forcing the value into a
2566    register will work around the situation.  */
2567 
2568 static rtx
2569 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2570 			   machine_mode imode)
2571 {
2572   rtx ret;
2573   ret = lowpart_subreg (omode, val, imode);
2574   if (ret == NULL)
2575     {
2576       val = force_reg (imode, val);
2577       ret = lowpart_subreg (omode, val, imode);
2578       gcc_assert (ret != NULL);
2579     }
2580   return ret;
2581 }
2582 
2583 /* Expand a floating point absolute value or negation operation via a
2584    logical operation on the sign bit.  */
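/* Worked example (illustrative only): for IEEE single precision the sign
   occupies bit 31, so on an integer view of the value ABS is
   x & 0x7fffffff and NEG is x ^ 0x80000000; the mask below is derived
   from the format's signbit_rw so other layouts work as well.  */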
2585 
2586 static rtx
2587 expand_absneg_bit (enum rtx_code code, scalar_float_mode mode,
2588 		   rtx op0, rtx target)
2589 {
2590   const struct real_format *fmt;
2591   int bitpos, word, nwords, i;
2592   scalar_int_mode imode;
2593   rtx temp;
2594   rtx_insn *insns;
2595 
2596   /* The format has to have a simple sign bit.  */
2597   fmt = REAL_MODE_FORMAT (mode);
2598   if (fmt == NULL)
2599     return NULL_RTX;
2600 
2601   bitpos = fmt->signbit_rw;
2602   if (bitpos < 0)
2603     return NULL_RTX;
2604 
2605   /* Don't create negative zeros if the format doesn't support them.  */
2606   if (code == NEG && !fmt->has_signed_zero)
2607     return NULL_RTX;
2608 
2609   if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2610     {
2611       if (!int_mode_for_mode (mode).exists (&imode))
2612 	return NULL_RTX;
2613       word = 0;
2614       nwords = 1;
2615     }
2616   else
2617     {
2618       imode = word_mode;
2619 
2620       if (FLOAT_WORDS_BIG_ENDIAN)
2621 	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2622       else
2623 	word = bitpos / BITS_PER_WORD;
2624       bitpos = bitpos % BITS_PER_WORD;
2625       nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2626     }
2627 
2628   wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
2629   if (code == ABS)
2630     mask = ~mask;
2631 
2632   if (target == 0
2633       || target == op0
2634       || (nwords > 1 && !valid_multiword_target_p (target)))
2635     target = gen_reg_rtx (mode);
2636 
2637   if (nwords > 1)
2638     {
2639       start_sequence ();
2640 
2641       for (i = 0; i < nwords; ++i)
2642 	{
2643 	  rtx targ_piece = operand_subword (target, i, 1, mode);
2644 	  rtx op0_piece = operand_subword_force (op0, i, mode);
2645 
2646 	  if (i == word)
2647 	    {
2648 	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2649 				   op0_piece,
2650 				   immed_wide_int_const (mask, imode),
2651 				   targ_piece, 1, OPTAB_LIB_WIDEN);
2652 	      if (temp != targ_piece)
2653 		emit_move_insn (targ_piece, temp);
2654 	    }
2655 	  else
2656 	    emit_move_insn (targ_piece, op0_piece);
2657 	}
2658 
2659       insns = get_insns ();
2660       end_sequence ();
2661 
2662       emit_insn (insns);
2663     }
2664   else
2665     {
2666       temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2667 			   gen_lowpart (imode, op0),
2668 			   immed_wide_int_const (mask, imode),
2669 		           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2670       target = lowpart_subreg_maybe_copy (mode, temp, imode);
2671 
2672       set_dst_reg_note (get_last_insn (), REG_EQUAL,
2673 			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
2674 			target);
2675     }
2676 
2677   return target;
2678 }
2679 
2680 /* As expand_unop, but will fail rather than attempt the operation in a
2681    different mode or with a libcall.  */
2682 static rtx
2683 expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
2684 		    int unsignedp)
2685 {
2686   if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2687     {
2688       struct expand_operand ops[2];
2689       enum insn_code icode = optab_handler (unoptab, mode);
2690       rtx_insn *last = get_last_insn ();
2691       rtx_insn *pat;
2692 
2693       create_output_operand (&ops[0], target, mode);
2694       create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2695       pat = maybe_gen_insn (icode, 2, ops);
2696       if (pat)
2697 	{
2698 	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2699 	      && ! add_equal_note (pat, ops[0].value,
2700 				   optab_to_code (unoptab),
2701 				   ops[1].value, NULL_RTX))
2702 	    {
2703 	      delete_insns_since (last);
2704 	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2705 	    }
2706 
2707 	  emit_insn (pat);
2708 
2709 	  return ops[0].value;
2710 	}
2711     }
2712   return 0;
2713 }
2714 
2715 /* Generate code to perform an operation specified by UNOPTAB
2716    on operand OP0, with result having machine-mode MODE.
2717 
2718    UNSIGNEDP is for the case where we have to widen the operands
2719    to perform the operation.  It says to use zero-extension.
2720 
2721    If TARGET is nonzero, the value
2722    is generated there, if it is convenient to do so.
2723    In all cases an rtx is returned for the locus of the value;
2724    this may or may not be TARGET.  */
2725 
2726 rtx
2727 expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
2728 	     int unsignedp)
2729 {
2730   enum mode_class mclass = GET_MODE_CLASS (mode);
2731   machine_mode wider_mode;
2732   scalar_int_mode int_mode;
2733   scalar_float_mode float_mode;
2734   rtx temp;
2735   rtx libfunc;
2736 
2737   temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
2738   if (temp)
2739     return temp;
2740 
2741   /* It can't be done in this mode.  Can we open-code it in a wider mode?  */
2742 
2743   /* Widening (or narrowing) clz needs special treatment.  */
2744   if (unoptab == clz_optab)
2745     {
2746       if (is_a <scalar_int_mode> (mode, &int_mode))
2747 	{
2748 	  temp = widen_leading (int_mode, op0, target, unoptab);
2749 	  if (temp)
2750 	    return temp;
2751 
2752 	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2753 	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2754 	    {
2755 	      temp = expand_doubleword_clz (int_mode, op0, target);
2756 	      if (temp)
2757 		return temp;
2758 	    }
2759 	}
2760 
2761       goto try_libcall;
2762     }
2763 
2764   if (unoptab == clrsb_optab)
2765     {
2766       if (is_a <scalar_int_mode> (mode, &int_mode))
2767 	{
2768 	  temp = widen_leading (int_mode, op0, target, unoptab);
2769 	  if (temp)
2770 	    return temp;
2771 	}
2772       goto try_libcall;
2773     }
2774 
2775   if (unoptab == popcount_optab
2776       && is_a <scalar_int_mode> (mode, &int_mode)
2777       && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2778       && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2779       && optimize_insn_for_speed_p ())
2780     {
2781       temp = expand_doubleword_popcount (int_mode, op0, target);
2782       if (temp)
2783 	return temp;
2784     }
2785 
2786   if (unoptab == parity_optab
2787       && is_a <scalar_int_mode> (mode, &int_mode)
2788       && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2789       && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
2790 	  || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
2791       && optimize_insn_for_speed_p ())
2792     {
2793       temp = expand_doubleword_parity (int_mode, op0, target);
2794       if (temp)
2795 	return temp;
2796     }
2797 
2798   /* Widening (or narrowing) bswap needs special treatment.  */
2799   if (unoptab == bswap_optab)
2800     {
2801       /* HImode is special because in this mode BSWAP is equivalent to ROTATE
2802 	 or ROTATERT.  First try these directly; if this fails, then try the
2803 	 obvious pair of shifts with allowed widening, as this will probably
2804 	 be always more efficient than the other fallback methods.  */
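      /* Worked example (illustrative only): for the 16-bit value 0xAABB,
	 a rotate by 8 in either direction yields 0xBBAA, which is exactly
	 the byte swap; the shift fallback computes (x << 8) | (x >> 8)
	 instead.  */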
2805       if (mode == HImode)
2806 	{
2807 	  rtx_insn *last;
2808 	  rtx temp1, temp2;
2809 
2810 	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
2811 	    {
2812 	      temp = expand_binop (mode, rotl_optab, op0,
2813 				   gen_int_shift_amount (mode, 8),
2814 				   target, unsignedp, OPTAB_DIRECT);
2815 	      if (temp)
2816 		return temp;
2817 	     }
2818 
2819 	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
2820 	    {
2821 	      temp = expand_binop (mode, rotr_optab, op0,
2822 				   gen_int_shift_amount (mode, 8),
2823 				   target, unsignedp, OPTAB_DIRECT);
2824 	      if (temp)
2825 		return temp;
2826 	    }
2827 
2828 	  last = get_last_insn ();
2829 
2830 	  temp1 = expand_binop (mode, ashl_optab, op0,
2831 				gen_int_shift_amount (mode, 8), NULL_RTX,
2832 			        unsignedp, OPTAB_WIDEN);
2833 	  temp2 = expand_binop (mode, lshr_optab, op0,
2834 				gen_int_shift_amount (mode, 8), NULL_RTX,
2835 			        unsignedp, OPTAB_WIDEN);
2836 	  if (temp1 && temp2)
2837 	    {
2838 	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
2839 				   unsignedp, OPTAB_WIDEN);
2840 	      if (temp)
2841 		return temp;
2842 	    }
2843 
2844 	  delete_insns_since (last);
2845 	}
2846 
2847       if (is_a <scalar_int_mode> (mode, &int_mode))
2848 	{
2849 	  temp = widen_bswap (int_mode, op0, target);
2850 	  if (temp)
2851 	    return temp;
2852 
2853 	  if (GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
2854 	      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2855 	    {
2856 	      temp = expand_doubleword_bswap (mode, op0, target);
2857 	      if (temp)
2858 		return temp;
2859 	    }
2860 	}
2861 
2862       goto try_libcall;
2863     }
2864 
2865   if (CLASS_HAS_WIDER_MODES_P (mclass))
2866     FOR_EACH_WIDER_MODE (wider_mode, mode)
2867       {
2868 	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2869 	  {
2870 	    rtx xop0 = op0;
2871 	    rtx_insn *last = get_last_insn ();
2872 
2873 	    /* For certain operations, we need not actually extend
2874 	       the narrow operand, as long as we will truncate the
2875 	       results to the same narrowness.  */
2876 
2877 	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2878 				  (unoptab == neg_optab
2879 				   || unoptab == one_cmpl_optab)
2880 				  && mclass == MODE_INT);
2881 
2882 	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2883 				unsignedp);
2884 
2885 	    if (temp)
2886 	      {
2887 		if (mclass != MODE_INT
2888 		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2889 		  {
2890 		    if (target == 0)
2891 		      target = gen_reg_rtx (mode);
2892 		    convert_move (target, temp, 0);
2893 		    return target;
2894 		  }
2895 		else
2896 		  return gen_lowpart (mode, temp);
2897 	      }
2898 	    else
2899 	      delete_insns_since (last);
2900 	  }
2901       }
2902 
2903   /* These can be done a word at a time.  */
2904   if (unoptab == one_cmpl_optab
2905       && is_int_mode (mode, &int_mode)
2906       && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD
2907       && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
2908     {
2909       int i;
2910       rtx_insn *insns;
2911 
2912       if (target == 0 || target == op0 || !valid_multiword_target_p (target))
2913 	target = gen_reg_rtx (int_mode);
2914 
2915       start_sequence ();
2916 
2917       /* Do the actual arithmetic.  */
2918       for (i = 0; i < GET_MODE_BITSIZE (int_mode) / BITS_PER_WORD; i++)
2919 	{
2920 	  rtx target_piece = operand_subword (target, i, 1, int_mode);
2921 	  rtx x = expand_unop (word_mode, unoptab,
2922 			       operand_subword_force (op0, i, int_mode),
2923 			       target_piece, unsignedp);
2924 
2925 	  if (target_piece != x)
2926 	    emit_move_insn (target_piece, x);
2927 	}
2928 
2929       insns = get_insns ();
2930       end_sequence ();
2931 
2932       emit_insn (insns);
2933       return target;
2934     }
2935 
2936   if (optab_to_code (unoptab) == NEG)
2937     {
2938       /* Try negating floating point values by flipping the sign bit.  */
2939       if (is_a <scalar_float_mode> (mode, &float_mode))
2940 	{
2941 	  temp = expand_absneg_bit (NEG, float_mode, op0, target);
2942 	  if (temp)
2943 	    return temp;
2944 	}
2945 
2946       /* If there is no negation pattern, and we have no negative zero,
2947 	 try subtracting from zero.  */
2948       if (!HONOR_SIGNED_ZEROS (mode))
2949 	{
2950 	  temp = expand_binop (mode, (unoptab == negv_optab
2951 				      ? subv_optab : sub_optab),
2952 			       CONST0_RTX (mode), op0, target,
2953 			       unsignedp, OPTAB_DIRECT);
2954 	  if (temp)
2955 	    return temp;
2956 	}
2957     }
2958 
2959   /* Try calculating parity (x) as popcount (x) % 2.  */
2960   if (unoptab == parity_optab && is_a <scalar_int_mode> (mode, &int_mode))
2961     {
2962       temp = expand_parity (int_mode, op0, target);
2963       if (temp)
2964 	return temp;
2965     }
2966 
2967   /* Try implementing ffs (x) in terms of clz (x).  */
2968   if (unoptab == ffs_optab && is_a <scalar_int_mode> (mode, &int_mode))
2969     {
2970       temp = expand_ffs (int_mode, op0, target);
2971       if (temp)
2972 	return temp;
2973     }
2974 
2975   /* Try implementing ctz (x) in terms of clz (x).  */
2976   if (unoptab == ctz_optab && is_a <scalar_int_mode> (mode, &int_mode))
2977     {
2978       temp = expand_ctz (int_mode, op0, target);
2979       if (temp)
2980 	return temp;
2981     }
2982 
2983  try_libcall:
2984   /* Now try a library call in this mode.  */
2985   libfunc = optab_libfunc (unoptab, mode);
2986   if (libfunc)
2987     {
2988       rtx_insn *insns;
2989       rtx value;
2990       rtx eq_value;
2991       machine_mode outmode = mode;
2992 
2993       /* All of these functions return small values.  Thus we choose to
2994 	 have them return something that isn't a double-word.  */
2995       if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2996 	  || unoptab == clrsb_optab || unoptab == popcount_optab
2997 	  || unoptab == parity_optab)
2998 	outmode
2999 	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
3000 					  optab_libfunc (unoptab, mode)));
3001 
3002       start_sequence ();
3003 
3004       /* Pass 1 for NO_QUEUE so we don't lose any increments
3005 	 if the libcall is cse'd or moved.  */
3006       value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3007 				       op0, mode);
3008       insns = get_insns ();
3009       end_sequence ();
3010 
3011       target = gen_reg_rtx (outmode);
3012       bool trapv = trapv_unoptab_p (unoptab);
3013       if (trapv)
3014 	eq_value = NULL_RTX;
3015       else
3016 	{
3017 	  eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
3018 	  if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode))
3019 	    eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3020 	  else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode))
3021 	    eq_value = simplify_gen_unary (ZERO_EXTEND,
3022 					   outmode, eq_value, mode);
3023 	}
3024       emit_libcall_block_1 (insns, target, value, eq_value, trapv);
3025 
3026       return target;
3027     }
3028 
3029   /* It can't be done in this mode.  Can we do it in a wider mode?  */
3030 
3031   if (CLASS_HAS_WIDER_MODES_P (mclass))
3032     {
3033       FOR_EACH_WIDER_MODE (wider_mode, mode)
3034 	{
3035 	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
3036 	      || optab_libfunc (unoptab, wider_mode))
3037 	    {
3038 	      rtx xop0 = op0;
3039 	      rtx_insn *last = get_last_insn ();
3040 
3041 	      /* For certain operations, we need not actually extend
3042 		 the narrow operand, as long as we will truncate the
3043 		 results to the same narrowness.  */
3044 	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3045 				    (unoptab == neg_optab
3046 				     || unoptab == one_cmpl_optab
3047 				     || unoptab == bswap_optab)
3048 				    && mclass == MODE_INT);
3049 
3050 	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3051 				  unsignedp);
3052 
3053 	      /* If we are generating clz using wider mode, adjust the
3054 		 result.  Similarly for clrsb.  */
3055 	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
3056 		  && temp != 0)
3057 		{
3058 		  scalar_int_mode wider_int_mode
3059 		    = as_a <scalar_int_mode> (wider_mode);
3060 		  int_mode = as_a <scalar_int_mode> (mode);
3061 		  temp = expand_binop
3062 		    (wider_mode, sub_optab, temp,
3063 		     gen_int_mode (GET_MODE_PRECISION (wider_int_mode)
3064 				   - GET_MODE_PRECISION (int_mode),
3065 				   wider_int_mode),
3066 		     target, true, OPTAB_DIRECT);
3067 		}
3068 
3069 	      /* Likewise for bswap.  */
3070 	      if (unoptab == bswap_optab && temp != 0)
3071 		{
3072 		  scalar_int_mode wider_int_mode
3073 		    = as_a <scalar_int_mode> (wider_mode);
3074 		  int_mode = as_a <scalar_int_mode> (mode);
3075 		  gcc_assert (GET_MODE_PRECISION (wider_int_mode)
3076 			      == GET_MODE_BITSIZE (wider_int_mode)
3077 			      && GET_MODE_PRECISION (int_mode)
3078 				 == GET_MODE_BITSIZE (int_mode));
3079 
3080 		  temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp,
3081 				       GET_MODE_BITSIZE (wider_int_mode)
3082 				       - GET_MODE_BITSIZE (int_mode),
3083 				       NULL_RTX, true);
3084 		}
3085 
3086 	      if (temp)
3087 		{
3088 		  if (mclass != MODE_INT)
3089 		    {
3090 		      if (target == 0)
3091 			target = gen_reg_rtx (mode);
3092 		      convert_move (target, temp, 0);
3093 		      return target;
3094 		    }
3095 		  else
3096 		    return gen_lowpart (mode, temp);
3097 		}
3098 	      else
3099 		delete_insns_since (last);
3100 	    }
3101 	}
3102     }
3103 
3104   /* One final attempt at implementing negation via subtraction,
3105      this time allowing widening of the operand.  */
3106   if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
3107     {
3108       rtx temp;
3109       temp = expand_binop (mode,
3110                            unoptab == negv_optab ? subv_optab : sub_optab,
3111                            CONST0_RTX (mode), op0,
3112                            target, unsignedp, OPTAB_LIB_WIDEN);
3113       if (temp)
3114         return temp;
3115     }
3116 
3117   return 0;
3118 }
3119 
3120 /* Emit code to compute the absolute value of OP0, with result to
3121    TARGET if convenient.  (TARGET may be 0.)  The return value says
3122    where the result actually is to be found.
3123 
3124    MODE is the mode of the operand; the mode of the result is
3125    different but can be deduced from MODE.
3126    different but can be deduced from MODE.  */
3129 rtx
3130 expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
3131 		   int result_unsignedp)
3132 {
3133   rtx temp;
3134 
3135   if (GET_MODE_CLASS (mode) != MODE_INT
3136       || ! flag_trapv)
3137     result_unsignedp = 1;
3138 
3139   /* First try to do it with a special abs instruction.  */
3140   temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3141                       op0, target, 0);
3142   if (temp != 0)
3143     return temp;
3144 
3145   /* For floating point modes, try clearing the sign bit.  */
3146   scalar_float_mode float_mode;
3147   if (is_a <scalar_float_mode> (mode, &float_mode))
3148     {
3149       temp = expand_absneg_bit (ABS, float_mode, op0, target);
3150       if (temp)
3151 	return temp;
3152     }
3153 
3154   /* If we have a MAX insn, we can do this as MAX (x, -x).  */
3155   if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
3156       && !HONOR_SIGNED_ZEROS (mode))
3157     {
3158       rtx_insn *last = get_last_insn ();
3159 
3160       temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3161 			  op0, NULL_RTX, 0);
3162       if (temp != 0)
3163 	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3164 			     OPTAB_WIDEN);
3165 
3166       if (temp != 0)
3167 	return temp;
3168 
3169       delete_insns_since (last);
3170     }
3171 
3172   /* If this machine has expensive jumps, we can do integer absolute
3173      value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3174      where W is the width of MODE.  */
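  /* Worked example for 32-bit X (an illustration only): the arithmetic
     shift x >> 31 is 0 when x >= 0 and -1 when x < 0, so (x >> 31) ^ x
     is x or ~x respectively, and subtracting the shift value again
     leaves x or ~x + 1 == -x, i.e. abs (x), without a branch.  */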
3175 
3176   scalar_int_mode int_mode;
3177   if (is_int_mode (mode, &int_mode)
3178       && BRANCH_COST (optimize_insn_for_speed_p (),
3179 	      	      false) >= 2)
3180     {
3181       rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3182 				   GET_MODE_PRECISION (int_mode) - 1,
3183 				   NULL_RTX, 0);
3184 
3185       temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3186 			   OPTAB_LIB_WIDEN);
3187       if (temp != 0)
3188 	temp = expand_binop (int_mode,
3189 			     result_unsignedp ? sub_optab : subv_optab,
3190                              temp, extended, target, 0, OPTAB_LIB_WIDEN);
3191 
3192       if (temp != 0)
3193 	return temp;
3194     }
3195 
3196   return NULL_RTX;
3197 }
3198 
3199 rtx
3200 expand_abs (machine_mode mode, rtx op0, rtx target,
3201 	    int result_unsignedp, int safe)
3202 {
3203   rtx temp;
3204   rtx_code_label *op1;
3205 
3206   if (GET_MODE_CLASS (mode) != MODE_INT
3207       || ! flag_trapv)
3208     result_unsignedp = 1;
3209 
3210   temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3211   if (temp != 0)
3212     return temp;
3213 
3214   /* If that does not win, use conditional jump and negate.  */
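  /* I.e., roughly: copy OP0 into TARGET, branch over the negation when
     TARGET >= 0, and otherwise negate TARGET in place.  */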
3215 
3216   /* It is safe to use the target if it is the same
3217      as the source and is also a pseudo register.  */
3218   if (op0 == target && REG_P (op0)
3219       && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3220     safe = 1;
3221 
3222   op1 = gen_label_rtx ();
3223   if (target == 0 || ! safe
3224       || GET_MODE (target) != mode
3225       || (MEM_P (target) && MEM_VOLATILE_P (target))
3226       || (REG_P (target)
3227 	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
3228     target = gen_reg_rtx (mode);
3229 
3230   emit_move_insn (target, op0);
3231   NO_DEFER_POP;
3232 
3233   do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3234 			   NULL_RTX, NULL, op1,
3235 			   profile_probability::uninitialized ());
3236 
3237   op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3238                      target, target, 0);
3239   if (op0 != target)
3240     emit_move_insn (target, op0);
3241   emit_label (op1);
3242   OK_DEFER_POP;
3243   return target;
3244 }
3245 
3246 /* Emit code to compute the one's complement absolute value of OP0
3247    (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3248    (TARGET may be NULL_RTX.)  The return value says where the result
3249    actually is to be found.
3250 
3251    MODE is the mode of the operand; the mode of the result is
3252    different but can be deduced from MODE.  */
3253 
3254 rtx
3255 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3256 {
3257   rtx temp;
3258 
3259   /* Not applicable for floating point modes.  */
3260   if (FLOAT_MODE_P (mode))
3261     return NULL_RTX;
3262 
3263   /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
3264   if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3265     {
3266       rtx_insn *last = get_last_insn ();
3267 
3268       temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3269       if (temp != 0)
3270 	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3271 			     OPTAB_WIDEN);
3272 
3273       if (temp != 0)
3274 	return temp;
3275 
3276       delete_insns_since (last);
3277     }
3278 
3279   /* If this machine has expensive jumps, we can do one's complement
3280      absolute value of X as (((signed) x >> (W-1)) ^ x).  */
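  /* As with plain abs above, a 32-bit illustration: (x >> 31) ^ x
     leaves x unchanged when x >= 0 and yields ~x when x < 0, which is
     exactly the one's complement absolute value.  */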
3281 
3282   scalar_int_mode int_mode;
3283   if (is_int_mode (mode, &int_mode)
3284       && BRANCH_COST (optimize_insn_for_speed_p (),
3285 	             false) >= 2)
3286     {
3287       rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0,
3288 				   GET_MODE_PRECISION (int_mode) - 1,
3289 				   NULL_RTX, 0);
3290 
3291       temp = expand_binop (int_mode, xor_optab, extended, op0, target, 0,
3292 			   OPTAB_LIB_WIDEN);
3293 
3294       if (temp != 0)
3295 	return temp;
3296     }
3297 
3298   return NULL_RTX;
3299 }
3300 
3301 /* A subroutine of expand_copysign, perform the copysign operation using the
3302    abs and neg primitives advertised to exist on the target.  The assumption
3303    is that we have a split register file, and leaving op0 in fp registers,
3304    and not playing with subregs so much, will help the register allocator.  */
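/* A sketch of the sequence produced below: sign = signbit (op1);
   target = op0_is_abs ? op0 : abs (op0); if (sign != 0)
   target = -target.  The sign is obtained either from a signbit insn
   or by masking the sign bit in an integer view of OP1.  */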
3305 
3306 static rtx
3307 expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3308 		        int bitpos, bool op0_is_abs)
3309 {
3310   scalar_int_mode imode;
3311   enum insn_code icode;
3312   rtx sign;
3313   rtx_code_label *label;
3314 
3315   if (target == op1)
3316     target = NULL_RTX;
3317 
3318   /* Check if the back end provides an insn that handles signbit for the
3319      argument's mode. */
3320   icode = optab_handler (signbit_optab, mode);
3321   if (icode != CODE_FOR_nothing)
3322     {
3323       imode = as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
3324       sign = gen_reg_rtx (imode);
3325       emit_unop_insn (icode, sign, op1, UNKNOWN);
3326     }
3327   else
3328     {
3329       if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3330 	{
3331 	  if (!int_mode_for_mode (mode).exists (&imode))
3332 	    return NULL_RTX;
3333 	  op1 = gen_lowpart (imode, op1);
3334 	}
3335       else
3336 	{
3337 	  int word;
3338 
3339 	  imode = word_mode;
3340 	  if (FLOAT_WORDS_BIG_ENDIAN)
3341 	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3342 	  else
3343 	    word = bitpos / BITS_PER_WORD;
3344 	  bitpos = bitpos % BITS_PER_WORD;
3345 	  op1 = operand_subword_force (op1, word, mode);
3346 	}
3347 
3348       wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3349       sign = expand_binop (imode, and_optab, op1,
3350 			   immed_wide_int_const (mask, imode),
3351 			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3352     }
3353 
3354   if (!op0_is_abs)
3355     {
3356       op0 = expand_unop (mode, abs_optab, op0, target, 0);
3357       if (op0 == NULL)
3358 	return NULL_RTX;
3359       target = op0;
3360     }
3361   else
3362     {
3363       if (target == NULL_RTX)
3364         target = copy_to_reg (op0);
3365       else
3366 	emit_move_insn (target, op0);
3367     }
3368 
3369   label = gen_label_rtx ();
3370   emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3371 
3372   if (CONST_DOUBLE_AS_FLOAT_P (op0))
3373     op0 = simplify_unary_operation (NEG, mode, op0, mode);
3374   else
3375     op0 = expand_unop (mode, neg_optab, op0, target, 0);
3376   if (op0 != target)
3377     emit_move_insn (target, op0);
3378 
3379   emit_label (label);
3380 
3381   return target;
3382 }
3383 
3384 
3385 /* A subroutine of expand_copysign, perform the entire copysign operation
3386    with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
3387    is true if op0 is known to have its sign bit clear.  */
3388 
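/* In essence, for a single-word mode this computes
   (op0 & ~signmask) | (op1 & signmask) in the equivalent integer mode,
   where signmask has only the bit at BITPOS set; for multiword modes
   only the word holding the sign bit is masked and the remaining words
   of op0 are copied through.  */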
3389 static rtx
3390 expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
3391 		     int bitpos, bool op0_is_abs)
3392 {
3393   scalar_int_mode imode;
3394   int word, nwords, i;
3395   rtx temp;
3396   rtx_insn *insns;
3397 
3398   if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3399     {
3400       if (!int_mode_for_mode (mode).exists (&imode))
3401 	return NULL_RTX;
3402       word = 0;
3403       nwords = 1;
3404     }
3405   else
3406     {
3407       imode = word_mode;
3408 
3409       if (FLOAT_WORDS_BIG_ENDIAN)
3410 	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3411       else
3412 	word = bitpos / BITS_PER_WORD;
3413       bitpos = bitpos % BITS_PER_WORD;
3414       nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3415     }
3416 
3417   wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
3418 
3419   if (target == 0
3420       || target == op0
3421       || target == op1
3422       || (nwords > 1 && !valid_multiword_target_p (target)))
3423     target = gen_reg_rtx (mode);
3424 
3425   if (nwords > 1)
3426     {
3427       start_sequence ();
3428 
3429       for (i = 0; i < nwords; ++i)
3430 	{
3431 	  rtx targ_piece = operand_subword (target, i, 1, mode);
3432 	  rtx op0_piece = operand_subword_force (op0, i, mode);
3433 
3434 	  if (i == word)
3435 	    {
3436 	      if (!op0_is_abs)
3437 		op0_piece
3438 		  = expand_binop (imode, and_optab, op0_piece,
3439 				  immed_wide_int_const (~mask, imode),
3440 				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
3441 	      op1 = expand_binop (imode, and_optab,
3442 				  operand_subword_force (op1, i, mode),
3443 				  immed_wide_int_const (mask, imode),
3444 				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
3445 
3446 	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
3447 				   targ_piece, 1, OPTAB_LIB_WIDEN);
3448 	      if (temp != targ_piece)
3449 		emit_move_insn (targ_piece, temp);
3450 	    }
3451 	  else
3452 	    emit_move_insn (targ_piece, op0_piece);
3453 	}
3454 
3455       insns = get_insns ();
3456       end_sequence ();
3457 
3458       emit_insn (insns);
3459     }
3460   else
3461     {
3462       op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3463 		          immed_wide_int_const (mask, imode),
3464 		          NULL_RTX, 1, OPTAB_LIB_WIDEN);
3465 
3466       op0 = gen_lowpart (imode, op0);
3467       if (!op0_is_abs)
3468 	op0 = expand_binop (imode, and_optab, op0,
3469 			    immed_wide_int_const (~mask, imode),
3470 			    NULL_RTX, 1, OPTAB_LIB_WIDEN);
3471 
3472       temp = expand_binop (imode, ior_optab, op0, op1,
3473 			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3474       target = lowpart_subreg_maybe_copy (mode, temp, imode);
3475     }
3476 
3477   return target;
3478 }
3479 
3480 /* Expand the C99 copysign operation.  OP0 and OP1 must be the same
3481    scalar floating point mode.  Return NULL if we do not know how to
3482    expand the operation inline.  */
3483 
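/* A sketch of the fallback order used below: first a direct copysign
   insn, then, for formats with a sign bit, an abs/neg sequence guarded
   by a sign test (expand_copysign_absneg), and finally explicit bit
   manipulation (expand_copysign_bit).  */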
3484 rtx
3485 expand_copysign (rtx op0, rtx op1, rtx target)
3486 {
3487   scalar_float_mode mode;
3488   const struct real_format *fmt;
3489   bool op0_is_abs;
3490   rtx temp;
3491 
3492   mode = as_a <scalar_float_mode> (GET_MODE (op0));
3493   gcc_assert (GET_MODE (op1) == mode);
3494 
3495   /* First try to do it with a special instruction.  */
3496   temp = expand_binop (mode, copysign_optab, op0, op1,
3497 		       target, 0, OPTAB_DIRECT);
3498   if (temp)
3499     return temp;
3500 
3501   fmt = REAL_MODE_FORMAT (mode);
3502   if (fmt == NULL || !fmt->has_signed_zero)
3503     return NULL_RTX;
3504 
3505   op0_is_abs = false;
3506   if (CONST_DOUBLE_AS_FLOAT_P (op0))
3507     {
3508       if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3509 	op0 = simplify_unary_operation (ABS, mode, op0, mode);
3510       op0_is_abs = true;
3511     }
3512 
3513   if (fmt->signbit_ro >= 0
3514       && (CONST_DOUBLE_AS_FLOAT_P (op0)
3515 	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
3516 	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
3517     {
3518       temp = expand_copysign_absneg (mode, op0, op1, target,
3519 				     fmt->signbit_ro, op0_is_abs);
3520       if (temp)
3521 	return temp;
3522     }
3523 
3524   if (fmt->signbit_rw < 0)
3525     return NULL_RTX;
3526   return expand_copysign_bit (mode, op0, op1, target,
3527 			      fmt->signbit_rw, op0_is_abs);
3528 }
3529 
3530 /* Generate an instruction whose insn-code is INSN_CODE,
3531    with two operands: an output TARGET and an input OP0.
3532    TARGET *must* be nonzero, and the output is always stored there.
3533    CODE is an rtx code such that (CODE OP0) is an rtx that describes
3534    the value that is stored into TARGET.
3535 
3536    Return false if expansion failed.  */
3537 
3538 bool
3539 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3540 		      enum rtx_code code)
3541 {
3542   struct expand_operand ops[2];
3543   rtx_insn *pat;
3544 
3545   create_output_operand (&ops[0], target, GET_MODE (target));
3546   create_input_operand (&ops[1], op0, GET_MODE (op0));
3547   pat = maybe_gen_insn (icode, 2, ops);
3548   if (!pat)
3549     return false;
3550 
3551   if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3552       && code != UNKNOWN)
3553     add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
3554 
3555   emit_insn (pat);
3556 
3557   if (ops[0].value != target)
3558     emit_move_insn (target, ops[0].value);
3559   return true;
3560 }
3561 /* Generate an instruction whose insn-code is INSN_CODE,
3562    with two operands: an output TARGET and an input OP0.
3563    TARGET *must* be nonzero, and the output is always stored there.
3564    CODE is an rtx code such that (CODE OP0) is an rtx that describes
3565    the value that is stored into TARGET.  */
3566 
3567 void
3568 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3569 {
3570   bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3571   gcc_assert (ok);
3572 }
3573 
3574 struct no_conflict_data
3575 {
3576   rtx target;
3577   rtx_insn *first, *insn;
3578   bool must_stay;
3579 };
3580 
3581 /* Called via note_stores by emit_libcall_block.  Set P->must_stay if
3582    the currently examined clobber / store has to stay in the list of
3583    insns that constitute the actual libcall block.  */
3584 static void
3585 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3586 {
3587   struct no_conflict_data *p = (struct no_conflict_data *) p0;
3588 
3589   /* If this insn directly contributes to setting the target, it must stay.  */
3590   if (reg_overlap_mentioned_p (p->target, dest))
3591     p->must_stay = true;
3592   /* If we haven't committed to keeping any other insns in the list yet,
3593      there is nothing more to check.  */
3594   else if (p->insn == p->first)
3595     return;
3596   /* If this insn sets / clobbers a register that feeds one of the insns
3597      already in the list, this insn has to stay too.  */
3598   else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3599 	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3600 	   || reg_used_between_p (dest, p->first, p->insn)
3601 	   /* Likewise if this insn depends on a register set by a previous
3602 	      insn in the list, or if it sets a result (presumably a hard
3603 	      register) that is set or clobbered by a previous insn.
3604 	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3605 	      SET_DEST perform the former check on the address, and the latter
3606 	      check on the MEM.  */
3607 	   || (GET_CODE (set) == SET
3608 	       && (modified_in_p (SET_SRC (set), p->first)
3609 		   || modified_in_p (SET_DEST (set), p->first)
3610 		   || modified_between_p (SET_SRC (set), p->first, p->insn)
3611 		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
3612     p->must_stay = true;
3613 }
3614 
3615 
3616 /* Emit code to make a call to a constant function or a library call.
3617 
3618    INSNS is a list containing all insns emitted in the call.
3619    These insns leave the result in RESULT.  Our block is to copy RESULT
3620    These insns leave the result in RESULT.  Our job is to copy RESULT
3621 
3622    We first emit any insns that set a pseudo on the assumption that these are
3623    loading constants into registers; doing so allows them to be safely cse'ed
3624    between blocks.  Then we emit all the other insns in the block, followed by
3625    an insn to move RESULT to TARGET.  This last insn will have a REQ_EQUAL
3626    an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
3627 
3628 static void
3629 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3630 		      bool equiv_may_trap)
3631 {
3632   rtx final_dest = target;
3633   rtx_insn *next, *last, *insn;
3634 
3635   /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3636      into a MEM later.  Protect the libcall block from this change.  */
3637   if (! REG_P (target) || REG_USERVAR_P (target))
3638     target = gen_reg_rtx (GET_MODE (target));
3639 
3640   /* If we're using non-call exceptions, a libcall corresponding to an
3641      operation that may trap may also trap.  */
3642   /* ??? See the comment in front of make_reg_eh_region_note.  */
3643   if (cfun->can_throw_non_call_exceptions
3644       && (equiv_may_trap || may_trap_p (equiv)))
3645     {
3646       for (insn = insns; insn; insn = NEXT_INSN (insn))
3647 	if (CALL_P (insn))
3648 	  {
3649 	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3650 	    if (note)
3651 	      {
3652 		int lp_nr = INTVAL (XEXP (note, 0));
3653 		if (lp_nr == 0 || lp_nr == INT_MIN)
3654 		  remove_note (insn, note);
3655 	      }
3656 	  }
3657     }
3658   else
3659     {
3660       /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3661 	 reg note to indicate that this call cannot throw or execute a nonlocal
3662 	 goto (unless there is already a REG_EH_REGION note, in which case
3663 	 we update it).  */
3664       for (insn = insns; insn; insn = NEXT_INSN (insn))
3665 	if (CALL_P (insn))
3666 	  make_reg_eh_region_note_nothrow_nononlocal (insn);
3667     }
3668 
3669   /* First emit all insns that set pseudos.  Remove them from the list as
3670      we go.  Avoid insns that set pseudos which were referenced in previous
3671      insns.  These can be generated by move_by_pieces, for example,
3672      to update an address.  Similarly, avoid insns that reference things
3673      set in previous insns.  */
3674 
3675   for (insn = insns; insn; insn = next)
3676     {
3677       rtx set = single_set (insn);
3678 
3679       next = NEXT_INSN (insn);
3680 
3681       if (set != 0 && REG_P (SET_DEST (set))
3682 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3683 	{
3684 	  struct no_conflict_data data;
3685 
3686 	  data.target = const0_rtx;
3687 	  data.first = insns;
3688 	  data.insn = insn;
3689 	  data.must_stay = 0;
3690 	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
3691 	  if (! data.must_stay)
3692 	    {
3693 	      if (PREV_INSN (insn))
3694 		SET_NEXT_INSN (PREV_INSN (insn)) = next;
3695 	      else
3696 		insns = next;
3697 
3698 	      if (next)
3699 		SET_PREV_INSN (next) = PREV_INSN (insn);
3700 
3701 	      add_insn (insn);
3702 	    }
3703 	}
3704 
3705       /* Some ports use a loop to copy large arguments onto the stack.
3706 	 Don't move anything outside such a loop.  */
3707       if (LABEL_P (insn))
3708 	break;
3709     }
3710 
3711   /* Write the remaining insns followed by the final copy.  */
3712   for (insn = insns; insn; insn = next)
3713     {
3714       next = NEXT_INSN (insn);
3715 
3716       add_insn (insn);
3717     }
3718 
3719   last = emit_move_insn (target, result);
3720   if (equiv)
3721     set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
3722 
3723   if (final_dest != target)
3724     emit_move_insn (final_dest, target);
3725 }
3726 
3727 void
3728 emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv)
3729 {
3730   emit_libcall_block_1 (insns, target, result, equiv, false);
3731 }
3732 
3733 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3734    PURPOSE describes how this comparison will be used.  CODE is the rtx
3735    comparison code we will be using.
3736 
3737    ??? Actually, CODE is slightly weaker than that.  A target is still
3738    required to implement all of the normal bcc operations, but not
3739    required to implement all (or any) of the unordered bcc operations.  */
3740 
3741 int
3742 can_compare_p (enum rtx_code code, machine_mode mode,
3743 	       enum can_compare_purpose purpose)
3744 {
3745   rtx test;
3746   test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3747   do
3748     {
3749       enum insn_code icode;
3750 
3751       if (purpose == ccp_jump
3752           && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
3753           && insn_operand_matches (icode, 0, test))
3754         return 1;
3755       if (purpose == ccp_store_flag
3756           && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
3757           && insn_operand_matches (icode, 1, test))
3758         return 1;
3759       if (purpose == ccp_cmov
3760 	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
3761 	return 1;
3762 
3763       mode = GET_MODE_WIDER_MODE (mode).else_void ();
3764       PUT_MODE (test, mode);
3765     }
3766   while (mode != VOIDmode);
3767 
3768   return 0;
3769 }
3770 
3771 /* This function is called when we are going to emit a compare instruction that
3772    compares the values found in X and Y, using the rtl operator COMPARISON.
3773 
3774    If they have mode BLKmode, then SIZE specifies the size of both operands.
3775 
3776    UNSIGNEDP nonzero says that the operands are unsigned;
3777    this matters if they need to be widened (as given by METHODS).
3778 
3779    *PTEST is where the resulting comparison RTX is returned or NULL_RTX
3780    if we failed to produce one.
3781 
3782    *PMODE is the mode of the inputs (in case they are const_int).
3783 
3784    This function performs all the setup necessary so that the caller only has
3785    to emit a single comparison insn.  This setup can involve doing a BLKmode
3786    comparison or emitting a library call to perform the comparison if no insn
3787    is available to handle it.
3788    The values which are passed in through pointers can be modified; the caller
3789    should perform the comparison on the modified values.  Constant
3790    comparisons must have already been folded.  */
3791 
3792 static void
3793 prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3794 		  int unsignedp, enum optab_methods methods,
3795 		  rtx *ptest, machine_mode *pmode)
3796 {
3797   machine_mode mode = *pmode;
3798   rtx libfunc, test;
3799   machine_mode cmp_mode;
3800   enum mode_class mclass;
3801 
3802   /* The other methods are not needed.  */
3803   gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
3804 	      || methods == OPTAB_LIB_WIDEN);
3805 
3806   /* If we are optimizing, force expensive constants into a register.  */
3807   if (CONSTANT_P (x) && optimize
3808       && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())
3809           > COSTS_N_INSNS (1)))
3810     x = force_reg (mode, x);
3811 
3812   if (CONSTANT_P (y) && optimize
3813       && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ())
3814           > COSTS_N_INSNS (1)))
3815     y = force_reg (mode, y);
3816 
3817 #if HAVE_cc0
3818   /* Make sure we have a canonical comparison.  The RTL
3819      documentation states that canonical comparisons are required only
3820      for targets which have cc0.  */
3821   gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3822 #endif
3823 
3824   /* Don't let both operands fail to indicate the mode.  */
3825   if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3826     x = force_reg (mode, x);
3827   if (mode == VOIDmode)
3828     mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);
3829 
3830   /* Handle all BLKmode compares.  */
3831 
3832   if (mode == BLKmode)
3833     {
3834       machine_mode result_mode;
3835       enum insn_code cmp_code;
3836       rtx result;
3837       rtx opalign
3838 	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3839 
3840       gcc_assert (size);
3841 
3842       /* Try to use a memory block compare insn - any of cmpmem,
3843 	 cmpstr or cmpstrn will do.  */
3844       opt_scalar_int_mode cmp_mode_iter;
3845       FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
3846 	{
3847 	  scalar_int_mode cmp_mode = cmp_mode_iter.require ();
3848 	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
3849 	  if (cmp_code == CODE_FOR_nothing)
3850 	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
3851 	  if (cmp_code == CODE_FOR_nothing)
3852 	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
3853 	  if (cmp_code == CODE_FOR_nothing)
3854 	    continue;
3855 
3856 	  /* Must make sure the size fits the insn's mode.  */
3857 	  if (CONST_INT_P (size)
3858 	      ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
3859 	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
3860 		 > GET_MODE_BITSIZE (cmp_mode)))
3861 	    continue;
3862 
3863 	  result_mode = insn_data[cmp_code].operand[0].mode;
3864 	  result = gen_reg_rtx (result_mode);
3865 	  size = convert_to_mode (cmp_mode, size, 1);
3866 	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3867 
3868           *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
3869           *pmode = result_mode;
3870 	  return;
3871 	}
3872 
3873       if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
3874 	goto fail;
3875 
3876       /* Otherwise call a library function.  */
3877       result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
3878 
3879       x = result;
3880       y = const0_rtx;
3881       mode = TYPE_MODE (integer_type_node);
3882       methods = OPTAB_LIB_WIDEN;
3883       unsignedp = false;
3884     }
3885 
3886   /* Don't allow operands to the compare to trap, as that can put the
3887      compare and branch in different basic blocks.  */
3888   if (cfun->can_throw_non_call_exceptions)
3889     {
3890       if (may_trap_p (x))
3891 	x = copy_to_reg (x);
3892       if (may_trap_p (y))
3893 	y = copy_to_reg (y);
3894     }
3895 
3896   if (GET_MODE_CLASS (mode) == MODE_CC)
3897     {
3898       enum insn_code icode = optab_handler (cbranch_optab, CCmode);
3899       test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3900       gcc_assert (icode != CODE_FOR_nothing
3901                   && insn_operand_matches (icode, 0, test));
3902       *ptest = test;
3903       return;
3904     }
3905 
3906   mclass = GET_MODE_CLASS (mode);
3907   test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
3908   FOR_EACH_MODE_FROM (cmp_mode, mode)
3909     {
3910       enum insn_code icode;
3911       icode = optab_handler (cbranch_optab, cmp_mode);
3912       if (icode != CODE_FOR_nothing
3913 	  && insn_operand_matches (icode, 0, test))
3914 	{
3915 	  rtx_insn *last = get_last_insn ();
3916 	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
3917 	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
3918 	  if (op0 && op1
3919 	      && insn_operand_matches (icode, 1, op0)
3920 	      && insn_operand_matches (icode, 2, op1))
3921 	    {
3922 	      XEXP (test, 0) = op0;
3923 	      XEXP (test, 1) = op1;
3924 	      *ptest = test;
3925 	      *pmode = cmp_mode;
3926 	      return;
3927 	    }
3928 	  delete_insns_since (last);
3929 	}
3930 
3931       if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
3932 	break;
3933     }
3934 
3935   if (methods != OPTAB_LIB_WIDEN)
3936     goto fail;
3937 
3938   if (SCALAR_FLOAT_MODE_P (mode))
3939     {
3940       /* Small trick if UNORDERED isn't implemented by the hardware.  */
3941       if (comparison == UNORDERED && rtx_equal_p (x, y))
3942 	{
3943 	  prepare_cmp_insn (x, y, UNLT, NULL_RTX, unsignedp, OPTAB_WIDEN,
3944 			    ptest, pmode);
3945 	  if (*ptest)
3946 	    return;
3947 	}
3948 
3949       prepare_float_lib_cmp (x, y, comparison, ptest, pmode);
3950     }
3951   else
3952     {
3953       rtx result;
3954       machine_mode ret_mode;
3955 
3956       /* Handle a libcall just for the mode we are using.  */
3957       libfunc = optab_libfunc (cmp_optab, mode);
3958       gcc_assert (libfunc);
3959 
3960       /* If we want unsigned, and this mode has a distinct unsigned
3961 	 comparison routine, use that.  */
3962       if (unsignedp)
3963 	{
3964 	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
3965 	  if (ulibfunc)
3966 	    libfunc = ulibfunc;
3967 	}
3968 
3969       ret_mode = targetm.libgcc_cmp_return_mode ();
3970       result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3971 					ret_mode, x, mode, y, mode);
3972 
3973       /* There are two kinds of comparison routines. Biased routines
3974 	 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3975 	 of gcc expect that the comparison operation is equivalent
3976 	 to the modified comparison. For signed comparisons compare the
3977 	 result against 1 in the biased case, and zero in the unbiased
3978 	 case. For unsigned comparisons always compare against 1 after
3979 	 biasing the unbiased result by adding 1. This gives us a way to
3980 	 represent LTU.
3981 	 The comparisons in the fixed-point helper library are always
3982 	 biased.  */
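      /* For example, with an unbiased routine returning -1/0/1 and an
	 unsigned comparison, the result is biased by adding 1 to give
	 0/1/2, and LTU then becomes (result + 1) LTU 1, which holds
	 exactly when the routine returned -1.  */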
3983       x = result;
3984       y = const1_rtx;
3985 
3986       if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
3987 	{
3988 	  if (unsignedp)
3989 	    x = plus_constant (ret_mode, result, 1);
3990 	  else
3991 	    y = const0_rtx;
3992 	}
3993 
3994       *pmode = ret_mode;
3995       prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
3996 			ptest, pmode);
3997     }
3998 
3999   return;
4000 
4001  fail:
4002   *ptest = NULL_RTX;
4003 }
4004 
4005 /* Before emitting an insn with code ICODE, make sure that X, which is going
4006    to be used for operand OPNUM of the insn, is converted from mode MODE to
4007    WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4008    that it is accepted by the operand predicate.  Return the new value.  */
4009 
4010 rtx
4011 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4012 		 machine_mode wider_mode, int unsignedp)
4013 {
4014   if (mode != wider_mode)
4015     x = convert_modes (wider_mode, mode, x, unsignedp);
4016 
4017   if (!insn_operand_matches (icode, opnum, x))
4018     {
4019       machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4020       if (reload_completed)
4021 	return NULL_RTX;
4022       if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4023 	return NULL_RTX;
4024       x = copy_to_mode_reg (op_mode, x);
4025     }
4026 
4027   return x;
4028 }
4029 
4030 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4031    we can do the branch.  */
4032 
4033 static void
4034 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label,
4035 			  profile_probability prob)
4036 {
4037   machine_mode optab_mode;
4038   enum mode_class mclass;
4039   enum insn_code icode;
4040   rtx_insn *insn;
4041 
4042   mclass = GET_MODE_CLASS (mode);
4043   optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4044   icode = optab_handler (cbranch_optab, optab_mode);
4045 
4046   gcc_assert (icode != CODE_FOR_nothing);
4047   gcc_assert (insn_operand_matches (icode, 0, test));
4048   insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4049                                           XEXP (test, 1), label));
4050   if (prob.initialized_p ()
4051       && profile_status_for_fn (cfun) != PROFILE_ABSENT
4052       && insn
4053       && JUMP_P (insn)
4054       && any_condjump_p (insn)
4055       && !find_reg_note (insn, REG_BR_PROB, 0))
4056     add_reg_br_prob_note (insn, prob);
4057 }
4058 
4059 /* Generate code to compare X with Y so that the condition codes are
4060    set and to jump to LABEL if the condition is true.  If X is a
4061    constant and Y is not a constant, then the comparison is swapped to
4062    ensure that the comparison RTL has the canonical form.
4063 
4064    UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4065    need to be widened.  UNSIGNEDP is also used to select the proper
4066    branch condition code.
4067 
4068    If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4069 
4070    MODE is the mode of the inputs (in case they are const_int).
4071 
4072    COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4073    It will be potentially converted into an unsigned variant based on
4074    UNSIGNEDP to select a proper jump instruction.
4075 
4076    PROB is the probability of jumping to LABEL.  */
4077 
4078 void
4079 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4080 			 machine_mode mode, int unsignedp, rtx label,
4081                          profile_probability prob)
4082 {
4083   rtx op0 = x, op1 = y;
4084   rtx test;
4085 
4086   /* Swap operands and condition to ensure canonical RTL.  */
4087   if (swap_commutative_operands_p (x, y)
4088       && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4089     {
4090       op0 = y, op1 = x;
4091       comparison = swap_condition (comparison);
4092     }
4093 
4094   /* If OP0 is still a constant, then both X and Y must be constants
4095      or the opposite comparison is not supported.  Force X into a register
4096      to create canonical RTL.  */
4097   if (CONSTANT_P (op0))
4098     op0 = force_reg (mode, op0);
4099 
4100   if (unsignedp)
4101     comparison = unsigned_condition (comparison);
4102 
4103   prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4104 		    &test, &mode);
4105   emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4106 }
4107 
4108 
4109 /* Emit a library call comparison between floating point X and Y.
4110    COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).  */
4111 
4112 static void
4113 prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
4114 		       rtx *ptest, machine_mode *pmode)
4115 {
4116   enum rtx_code swapped = swap_condition (comparison);
4117   enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4118   machine_mode orig_mode = GET_MODE (x);
4119   machine_mode mode;
4120   rtx true_rtx, false_rtx;
4121   rtx value, target, equiv;
4122   rtx_insn *insns;
4123   rtx libfunc = 0;
4124   bool reversed_p = false;
4125   scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode ();
4126 
4127   FOR_EACH_MODE_FROM (mode, orig_mode)
4128     {
4129       if (code_to_optab (comparison)
4130 	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
4131 	break;
4132 
4133       if (code_to_optab (swapped)
4134 	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
4135 	{
4136 	  std::swap (x, y);
4137 	  comparison = swapped;
4138 	  break;
4139 	}
4140 
4141       if (code_to_optab (reversed)
4142 	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
4143 	{
4144 	  comparison = reversed;
4145 	  reversed_p = true;
4146 	  break;
4147 	}
4148     }
4149 
4150   gcc_assert (mode != VOIDmode);
4151 
4152   if (mode != orig_mode)
4153     {
4154       x = convert_to_mode (mode, x, 0);
4155       y = convert_to_mode (mode, y, 0);
4156     }
4157 
4158   /* Attach a REG_EQUAL note describing the semantics of the libcall to
4159      the RTL.  The allows the RTL optimizers to delete the libcall if the
4160      the RTL.  This allows the RTL optimizers to delete the libcall if the
4161   if (comparison == UNORDERED
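  /* E.g., for UNORDERED the equivalence built below is effectively
     (if_then_else (ne x x) true (ne y y)), i.e. true iff either
     operand is a NaN.  */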
4162       || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4163     {
4164       true_rtx = const_true_rtx;
4165       false_rtx = const0_rtx;
4166     }
4167   else
4168     {
4169       switch (comparison)
4170         {
4171         case EQ:
4172           true_rtx = const0_rtx;
4173           false_rtx = const_true_rtx;
4174           break;
4175 
4176         case NE:
4177           true_rtx = const_true_rtx;
4178           false_rtx = const0_rtx;
4179           break;
4180 
4181         case GT:
4182           true_rtx = const1_rtx;
4183           false_rtx = const0_rtx;
4184           break;
4185 
4186         case GE:
4187           true_rtx = const0_rtx;
4188           false_rtx = constm1_rtx;
4189           break;
4190 
4191         case LT:
4192           true_rtx = constm1_rtx;
4193           false_rtx = const0_rtx;
4194           break;
4195 
4196         case LE:
4197           true_rtx = const0_rtx;
4198           false_rtx = const1_rtx;
4199           break;
4200 
4201         default:
4202           gcc_unreachable ();
4203         }
4204     }
4205 
4206   if (comparison == UNORDERED)
4207     {
4208       rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4209       equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4210       equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4211 				    temp, const_true_rtx, equiv);
4212     }
4213   else
4214     {
4215       equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4216       if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4217         equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4218                                       equiv, true_rtx, false_rtx);
4219     }
4220 
4221   start_sequence ();
4222   value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4223 				   cmp_mode, x, mode, y, mode);
4224   insns = get_insns ();
4225   end_sequence ();
4226 
4227   target = gen_reg_rtx (cmp_mode);
4228   emit_libcall_block (insns, target, value, equiv);
4229 
4230   if (comparison == UNORDERED
4231       || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
4232       || reversed_p)
4233     *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
4234   else
4235     *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);
4236 
4237   *pmode = cmp_mode;
4238 }
4239 
4240 /* Generate code to indirectly jump to a location given in the rtx LOC.  */
4241 
4242 void
4243 emit_indirect_jump (rtx loc)
4244 {
4245   if (!targetm.have_indirect_jump ())
4246     sorry ("indirect jumps are not available on this target");
4247   else
4248     {
4249       struct expand_operand ops[1];
4250       create_address_operand (&ops[0], loc);
4251       expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
4252       emit_barrier ();
4253     }
4254 }
4255 
4256 
4257 /* Emit a conditional move instruction if the machine supports one for that
4258    condition and machine mode.
4259 
4260    OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
4261    the mode to use should they be constants.  If it is VOIDmode, they cannot
4262    both be constants.
4263 
4264    OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4265    should be stored there.  MODE is the mode to use should they be constants.
4266    If it is VOIDmode, they cannot both be constants.
4267 
4268    The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4269    is not supported.  */
4270 
4271 rtx
4272 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4273 		       machine_mode cmode, rtx op2, rtx op3,
4274 		       machine_mode mode, int unsignedp)
4275 {
4276   rtx comparison;
4277   rtx_insn *last;
4278   enum insn_code icode;
4279   enum rtx_code reversed;
4280 
4281   /* If the two source operands are identical, that's just a move.  */
4282 
4283   if (rtx_equal_p (op2, op3))
4284     {
4285       if (!target)
4286 	target = gen_reg_rtx (mode);
4287 
4288       emit_move_insn (target, op3);
4289       return target;
4290     }
4291 
4292   /* If one operand is constant, make it the second one.  Only do this
4293      if the other operand is not constant as well.  */
4294 
4295   if (swap_commutative_operands_p (op0, op1))
4296     {
4297       std::swap (op0, op1);
4298       code = swap_condition (code);
4299     }
4300 
4301   /* get_condition will prefer to generate LT and GT even if the old
4302      comparison was against zero, so undo that canonicalization here since
4303      comparisons against zero are cheaper.  */
4304   if (code == LT && op1 == const1_rtx)
4305     code = LE, op1 = const0_rtx;
4306   else if (code == GT && op1 == constm1_rtx)
4307     code = GE, op1 = const0_rtx;
4308 
4309   if (cmode == VOIDmode)
4310     cmode = GET_MODE (op0);
4311 
4312   enum rtx_code orig_code = code;
4313   bool swapped = false;
4314   if (swap_commutative_operands_p (op2, op3)
4315       && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4316           != UNKNOWN))
4317     {
4318       std::swap (op2, op3);
4319       code = reversed;
4320       swapped = true;
4321     }
4322 
4323   if (mode == VOIDmode)
4324     mode = GET_MODE (op2);
4325 
4326   icode = direct_optab_handler (movcc_optab, mode);
4327 
4328   if (icode == CODE_FOR_nothing)
4329     return NULL_RTX;
4330 
4331   if (!target)
4332     target = gen_reg_rtx (mode);
4333 
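  /* Two attempts, as a sketch: first with the op2/op3 order chosen
     above, then, if that fails to expand, with op2/op3 swapped and the
     condition reversed (or restored, if it was already reversed).  */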
4334   for (int pass = 0; ; pass++)
4335     {
4336       code = unsignedp ? unsigned_condition (code) : code;
4337       comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4338 
4339       /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
4340 	 punt and let the caller figure out how best to deal with this
4341 	 situation.  */
4342       if (COMPARISON_P (comparison))
4343 	{
4344 	  saved_pending_stack_adjust save;
4345 	  save_pending_stack_adjust (&save);
4346 	  last = get_last_insn ();
4347 	  do_pending_stack_adjust ();
4348 	  machine_mode cmpmode = cmode;
4349 	  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4350 			    GET_CODE (comparison), NULL_RTX, unsignedp,
4351 			    OPTAB_WIDEN, &comparison, &cmpmode);
4352 	  if (comparison)
4353 	    {
4354 	      struct expand_operand ops[4];
4355 
4356 	      create_output_operand (&ops[0], target, mode);
4357 	      create_fixed_operand (&ops[1], comparison);
4358 	      create_input_operand (&ops[2], op2, mode);
4359 	      create_input_operand (&ops[3], op3, mode);
4360 	      if (maybe_expand_insn (icode, 4, ops))
4361 		{
4362 		  if (ops[0].value != target)
4363 		    convert_move (target, ops[0].value, false);
4364 		  return target;
4365 		}
4366 	    }
4367 	  delete_insns_since (last);
4368 	  restore_pending_stack_adjust (&save);
4369 	}
4370 
4371       if (pass == 1)
4372 	return NULL_RTX;
4373 
4374       /* If the preferred op2/op3 order is not usable, retry with other
4375 	 operand order, perhaps it will expand successfully.  */
4376       if (swapped)
4377 	code = orig_code;
4378       else if ((reversed = reversed_comparison_code_parts (orig_code, op0, op1,
4379 							   NULL))
4380 	       != UNKNOWN)
4381 	code = reversed;
4382       else
4383 	return NULL_RTX;
4384       std::swap (op2, op3);
4385     }
4386 }
4387 
4388 
4389 /* Emit a conditional negate or bitwise complement using the
4390    negcc or notcc optabs if available.  Return NULL_RTX if such operations
4391    are not available.  Otherwise return the RTX holding the result.
4392    TARGET is the desired destination of the result.  COND is the comparison
4393    to operate on.  If COND is true, move into TARGET the negation
4394    or bitwise complement of OP1.  Otherwise move OP2 into TARGET.
4395    CODE is either NEG or NOT.  MODE is the machine mode in which the
4396    operation is performed.  */
4397 
4398 rtx
4399 emit_conditional_neg_or_complement (rtx target, rtx_code code,
4400 				     machine_mode mode, rtx cond, rtx op1,
4401 				     rtx op2)
4402 {
4403   optab op = unknown_optab;
4404   if (code == NEG)
4405     op = negcc_optab;
4406   else if (code == NOT)
4407     op = notcc_optab;
4408   else
4409     gcc_unreachable ();
4410 
4411   insn_code icode = direct_optab_handler (op, mode);
4412 
4413   if (icode == CODE_FOR_nothing)
4414     return NULL_RTX;
4415 
4416   if (!target)
4417     target = gen_reg_rtx (mode);
4418 
4419   rtx_insn *last = get_last_insn ();
4420   struct expand_operand ops[4];
4421 
4422   create_output_operand (&ops[0], target, mode);
4423   create_fixed_operand (&ops[1], cond);
4424   create_input_operand (&ops[2], op1, mode);
4425   create_input_operand (&ops[3], op2, mode);
4426 
4427   if (maybe_expand_insn (icode, 4, ops))
4428     {
4429       if (ops[0].value != target)
4430 	convert_move (target, ops[0].value, false);
4431 
4432       return target;
4433     }
4434   delete_insns_since (last);
4435   return NULL_RTX;
4436 }
4437 
4438 /* Emit a conditional addition instruction if the machine supports one for that
4439    condition and machine mode.
4440 
4441    OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
4442    the mode to use should they be constants.  If it is VOIDmode, they cannot
4443    both be constants.
4444 
4445    OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4446    should be stored there.  MODE is the mode to use should they be constants.
4447    If it is VOIDmode, they cannot both be constants.
4448 
4449    The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4450    is not supported.  */
4451 
4452 rtx
4453 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4454 		      machine_mode cmode, rtx op2, rtx op3,
4455 		      machine_mode mode, int unsignedp)
4456 {
4457   rtx comparison;
4458   rtx_insn *last;
4459   enum insn_code icode;
4460 
4461   /* If one operand is constant, make it the second one.  Only do this
4462      if the other operand is not constant as well.  */
4463 
4464   if (swap_commutative_operands_p (op0, op1))
4465     {
4466       std::swap (op0, op1);
4467       code = swap_condition (code);
4468     }
4469 
4470   /* get_condition will prefer to generate LT and GT even if the old
4471      comparison was against zero, so undo that canonicalization here since
4472      comparisons against zero are cheaper.  */
4473   if (code == LT && op1 == const1_rtx)
4474     code = LE, op1 = const0_rtx;
4475   else if (code == GT && op1 == constm1_rtx)
4476     code = GE, op1 = const0_rtx;
4477 
4478   if (cmode == VOIDmode)
4479     cmode = GET_MODE (op0);
4480 
4481   if (mode == VOIDmode)
4482     mode = GET_MODE (op2);
4483 
4484   icode = optab_handler (addcc_optab, mode);
4485 
4486   if (icode == CODE_FOR_nothing)
4487     return 0;
4488 
4489   if (!target)
4490     target = gen_reg_rtx (mode);
4491 
4492   code = unsignedp ? unsigned_condition (code) : code;
4493   comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);
4494 
4495   /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
4496      return NULL and let the caller figure out how best to deal with this
4497      situation.  */
4498   if (!COMPARISON_P (comparison))
4499     return NULL_RTX;
4500 
4501   do_pending_stack_adjust ();
4502   last = get_last_insn ();
4503   prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
4504                     GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
4505                     &comparison, &cmode);
4506   if (comparison)
4507     {
4508       struct expand_operand ops[4];
4509 
4510       create_output_operand (&ops[0], target, mode);
4511       create_fixed_operand (&ops[1], comparison);
4512       create_input_operand (&ops[2], op2, mode);
4513       create_input_operand (&ops[3], op3, mode);
4514       if (maybe_expand_insn (icode, 4, ops))
4515 	{
4516 	  if (ops[0].value != target)
4517 	    convert_move (target, ops[0].value, false);
4518 	  return target;
4519 	}
4520     }
4521   delete_insns_since (last);
4522   return NULL_RTX;
4523 }
4524 
4525 /* These functions attempt to generate an insn body, rather than
4526    emitting the insn, but if the gen function already emits them, we
4527    make no attempt to turn them back into naked patterns.  */
4528 
4529 /* Generate and return an insn body to add Y to X.  */
4530 
4531 rtx_insn *
4532 gen_add2_insn (rtx x, rtx y)
4533 {
4534   enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4535 
4536   gcc_assert (insn_operand_matches (icode, 0, x));
4537   gcc_assert (insn_operand_matches (icode, 1, x));
4538   gcc_assert (insn_operand_matches (icode, 2, y));
4539 
4540   return GEN_FCN (icode) (x, x, y);
4541 }
4542 
4543 /* Generate and return an insn body to add r1 and c,
4544    storing the result in r0.  */
4545 
4546 rtx_insn *
4547 gen_add3_insn (rtx r0, rtx r1, rtx c)
4548 {
4549   enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4550 
4551   if (icode == CODE_FOR_nothing
4552       || !insn_operand_matches (icode, 0, r0)
4553       || !insn_operand_matches (icode, 1, r1)
4554       || !insn_operand_matches (icode, 2, c))
4555     return NULL;
4556 
4557   return GEN_FCN (icode) (r0, r1, c);
4558 }
4559 
4560 int
4561 have_add2_insn (rtx x, rtx y)
4562 {
4563   enum insn_code icode;
4564 
4565   gcc_assert (GET_MODE (x) != VOIDmode);
4566 
4567   icode = optab_handler (add_optab, GET_MODE (x));
4568 
4569   if (icode == CODE_FOR_nothing)
4570     return 0;
4571 
4572   if (!insn_operand_matches (icode, 0, x)
4573       || !insn_operand_matches (icode, 1, x)
4574       || !insn_operand_matches (icode, 2, y))
4575     return 0;
4576 
4577   return 1;
4578 }
4579 
4580 /* Generate and return an insn body to add Y to X.  */
4581 /* Generate and return an insn body to add Y and Z, storing the result in X.  */
4582 rtx_insn *
4583 gen_addptr3_insn (rtx x, rtx y, rtx z)
4584 {
4585   enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4586 
4587   gcc_assert (insn_operand_matches (icode, 0, x));
4588   gcc_assert (insn_operand_matches (icode, 1, y));
4589   gcc_assert (insn_operand_matches (icode, 2, z));
4590 
4591   return GEN_FCN (icode) (x, y, z);
4592 }
4593 
4594 /* Return true if the target implements an addptr pattern and X, Y,
4595    and Z are valid for the pattern predicates.  */
4596 
4597 int
4598 have_addptr3_insn (rtx x, rtx y, rtx z)
4599 {
4600   enum insn_code icode;
4601 
4602   gcc_assert (GET_MODE (x) != VOIDmode);
4603 
4604   icode = optab_handler (addptr3_optab, GET_MODE (x));
4605 
4606   if (icode == CODE_FOR_nothing)
4607     return 0;
4608 
4609   if (!insn_operand_matches (icode, 0, x)
4610       || !insn_operand_matches (icode, 1, y)
4611       || !insn_operand_matches (icode, 2, z))
4612     return 0;
4613 
4614   return 1;
4615 }
4616 
4617 /* Generate and return an insn body to subtract Y from X.  */
4618 
4619 rtx_insn *
4620 gen_sub2_insn (rtx x, rtx y)
4621 {
4622   enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4623 
4624   gcc_assert (insn_operand_matches (icode, 0, x));
4625   gcc_assert (insn_operand_matches (icode, 1, x));
4626   gcc_assert (insn_operand_matches (icode, 2, y));
4627 
4628   return GEN_FCN (icode) (x, x, y);
4629 }
4630 
4631 /* Generate and return an insn body to subtract c from r1,
4632    storing the result in r0.  */
4633 
4634 rtx_insn *
4635 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4636 {
4637   enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4638 
4639   if (icode == CODE_FOR_nothing
4640       || !insn_operand_matches (icode, 0, r0)
4641       || !insn_operand_matches (icode, 1, r1)
4642       || !insn_operand_matches (icode, 2, c))
4643     return NULL;
4644 
4645   return GEN_FCN (icode) (r0, r1, c);
4646 }
4647 
4648 int
4649 have_sub2_insn (rtx x, rtx y)
4650 {
4651   enum insn_code icode;
4652 
4653   gcc_assert (GET_MODE (x) != VOIDmode);
4654 
4655   icode = optab_handler (sub_optab, GET_MODE (x));
4656 
4657   if (icode == CODE_FOR_nothing)
4658     return 0;
4659 
4660   if (!insn_operand_matches (icode, 0, x)
4661       || !insn_operand_matches (icode, 1, x)
4662       || !insn_operand_matches (icode, 2, y))
4663     return 0;
4664 
4665   return 1;
4666 }
4667 
4668 /* Generate the body of an insn to extend Y (with mode MFROM)
4669    into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */
4670 
4671 rtx_insn *
4672 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4673 		 machine_mode mfrom, int unsignedp)
4674 {
4675   enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4676   return GEN_FCN (icode) (x, y);
4677 }
4678 
4679 /* Generate code to convert FROM to floating point
4680    and store in TO.  FROM must be fixed point and not VOIDmode.
4681    UNSIGNEDP nonzero means regard FROM as unsigned.
4682    Normally this is done by correcting the final value
4683    if it is negative.  */
4684 
4685 void
4686 expand_float (rtx to, rtx from, int unsignedp)
4687 {
4688   enum insn_code icode;
4689   rtx target = to;
4690   scalar_mode from_mode, to_mode;
4691   machine_mode fmode, imode;
4692   bool can_do_signed = false;
4693 
4694   /* Crash now, because we won't be able to decide which mode to use.  */
4695   gcc_assert (GET_MODE (from) != VOIDmode);
4696 
4697   /* Look for an insn to do the conversion.  Do it in the specified
4698      modes if possible; otherwise convert either input, output or both to
4699      wider mode.  If the integer mode is wider than the mode of FROM,
4700      we can do the conversion signed even if the input is unsigned.  */
4701 
4702   FOR_EACH_MODE_FROM (fmode, GET_MODE (to))
4703     FOR_EACH_MODE_FROM (imode, GET_MODE (from))
4704       {
4705 	int doing_unsigned = unsignedp;
4706 
4707 	if (fmode != GET_MODE (to)
4708 	    && (significand_size (fmode)
4709 		< GET_MODE_UNIT_PRECISION (GET_MODE (from))))
4710 	  continue;
4711 
4712 	icode = can_float_p (fmode, imode, unsignedp);
4713 	if (icode == CODE_FOR_nothing && unsignedp)
4714 	  {
4715 	    enum insn_code scode = can_float_p (fmode, imode, 0);
4716 	    if (scode != CODE_FOR_nothing)
4717 	      can_do_signed = true;
4718 	    if (imode != GET_MODE (from))
4719 	      icode = scode, doing_unsigned = 0;
4720 	  }
4721 
4722 	if (icode != CODE_FOR_nothing)
4723 	  {
4724 	    if (imode != GET_MODE (from))
4725 	      from = convert_to_mode (imode, from, unsignedp);
4726 
4727 	    if (fmode != GET_MODE (to))
4728 	      target = gen_reg_rtx (fmode);
4729 
4730 	    emit_unop_insn (icode, target, from,
4731 			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4732 
4733 	    if (target != to)
4734 	      convert_move (to, target, 0);
4735 	    return;
4736 	  }
4737       }
4738 
4739   /* Unsigned integer, and no way to convert directly.  Convert as signed,
4740      then adjust the result if the operand was negative.  */
4741   if (unsignedp
4742       && can_do_signed
4743       && is_a <scalar_mode> (GET_MODE (to), &to_mode)
4744       && is_a <scalar_mode> (GET_MODE (from), &from_mode))
4745     {
4746       opt_scalar_mode fmode_iter;
4747       rtx_code_label *label = gen_label_rtx ();
4748       rtx temp;
4749       REAL_VALUE_TYPE offset;
4750 
4751       /* Look for a usable floating mode FMODE wider than the source and at
4752 	 least as wide as the target.  Using FMODE will avoid rounding woes
4753 	 with unsigned values greater than the signed maximum value.  */
4754 
4755       FOR_EACH_MODE_FROM (fmode_iter, to_mode)
4756 	{
4757 	  scalar_mode fmode = fmode_iter.require ();
4758 	  if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
4759 	      && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
4760 	    break;
4761 	}
4762 
4763       if (!fmode_iter.exists (&fmode))
4764 	{
4765 	  /* There is no such mode.  Pretend the target is wide enough.  */
4766 	  fmode = to_mode;
4767 
4768 	  /* Avoid double-rounding when TO is narrower than FROM.  */
4769 	  if ((significand_size (fmode) + 1)
4770 	      < GET_MODE_PRECISION (from_mode))
4771 	    {
4772 	      rtx temp1;
4773 	      rtx_code_label *neglabel = gen_label_rtx ();
4774 
4775 	      /* Don't use TARGET if it isn't a register, is a hard register,
4776 		 or is the wrong mode.  */
4777 	      if (!REG_P (target)
4778 		  || REGNO (target) < FIRST_PSEUDO_REGISTER
4779 		  || GET_MODE (target) != fmode)
4780 		target = gen_reg_rtx (fmode);
4781 
4782 	      imode = from_mode;
4783 	      do_pending_stack_adjust ();
4784 
4785 	      /* Test whether the sign bit is set.  */
4786 	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4787 				       0, neglabel);
4788 
4789 	      /* The sign bit is not set.  Convert as signed.  */
4790 	      expand_float (target, from, 0);
4791 	      emit_jump_insn (targetm.gen_jump (label));
4792 	      emit_barrier ();
4793 
4794 	      /* The sign bit is set.
4795 		 Convert to a usable (positive signed) value by shifting right
4796 		 one bit, while remembering if a nonzero bit was shifted
4797 		 out; i.e., compute  (from & 1) | (from >> 1).  */
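	      /* Sketch with 32-bit FROM (an illustration): 0x80000001
		 becomes (0x80000001 >> 1) | (0x80000001 & 1)
		 == 0x40000001; the OR-ed in low bit acts as a sticky
		 bit, so converting and then doubling below rounds as if
		 the full value had been converted directly.  */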
4798 
4799 	      emit_label (neglabel);
4800 	      temp = expand_binop (imode, and_optab, from, const1_rtx,
4801 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
4802 	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
4803 	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4804 				   OPTAB_LIB_WIDEN);
4805 	      expand_float (target, temp, 0);
4806 
4807 	      /* Multiply by 2 to undo the shift above.  */
4808 	      temp = expand_binop (fmode, add_optab, target, target,
4809 				   target, 0, OPTAB_LIB_WIDEN);
4810 	      if (temp != target)
4811 		emit_move_insn (target, temp);
4812 
4813 	      do_pending_stack_adjust ();
4814 	      emit_label (label);
4815 	      goto done;
4816 	    }
4817 	}
4818 
4819       /* If we are about to do some arithmetic to correct for an
4820 	 unsigned operand, do it in a pseudo-register.  */
4821 
4822       if (to_mode != fmode
4823 	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4824 	target = gen_reg_rtx (fmode);
4825 
4826       /* Convert as signed integer to floating.  */
4827       expand_float (target, from, 0);
4828 
4829       /* If FROM is negative (and therefore TO is negative),
4830 	 correct its value by 2**bitwidth.  */
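      /* For example (illustration only): with an 8-bit FROM, the input
	 0xff is converted as the signed value -1, giving -1.0; adding
	 2**8 == 256.0 then yields the correct unsigned result 255.0.  */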
4831 
4832       do_pending_stack_adjust ();
4833       emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, from_mode,
4834 			       0, label);
4835 
4836 
4837       real_2expN (&offset, GET_MODE_PRECISION (from_mode), fmode);
4838       temp = expand_binop (fmode, add_optab, target,
4839 			   const_double_from_real_value (offset, fmode),
4840 			   target, 0, OPTAB_LIB_WIDEN);
4841       if (temp != target)
4842 	emit_move_insn (target, temp);
4843 
4844       do_pending_stack_adjust ();
4845       emit_label (label);
4846       goto done;
4847     }
4848 
4849   /* No hardware instruction available; call a library routine.  */
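  /* For example (illustration only): a QImode source is first widened to
     SImode, after which the libcall is e.g. __floatsisf for a signed or
     __floatunsisf for an unsigned SImode-to-SFmode conversion.  */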
4850     {
4851       rtx libfunc;
4852       rtx_insn *insns;
4853       rtx value;
4854       convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4855 
4856       if (is_narrower_int_mode (GET_MODE (from), SImode))
4857 	from = convert_to_mode (SImode, from, unsignedp);
4858 
4859       libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
4860       gcc_assert (libfunc);
4861 
4862       start_sequence ();
4863 
4864       value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4865 				       GET_MODE (to), from, GET_MODE (from));
4866       insns = get_insns ();
4867       end_sequence ();
4868 
4869       emit_libcall_block (insns, target, value,
4870 			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
4871 					 GET_MODE (to), from));
4872     }
4873 
4874  done:
4875 
4876   /* Copy result to requested destination
4877      if we have been computing in a temp location.  */
4878 
4879   if (target != to)
4880     {
4881       if (GET_MODE (target) == GET_MODE (to))
4882 	emit_move_insn (to, target);
4883       else
4884 	convert_move (to, target, 0);
4885     }
4886 }
4887 
4888 /* Generate code to convert FROM to fixed point and store in TO.  FROM
4889    must be floating point.  */
4890 
4891 void
4892 expand_fix (rtx to, rtx from, int unsignedp)
4893 {
4894   enum insn_code icode;
4895   rtx target = to;
4896   machine_mode fmode, imode;
4897   opt_scalar_mode fmode_iter;
4898   bool must_trunc = false;
4899 
4900   /* We first try to find a pair of modes, one real and one integer, at
4901      least as wide as FROM and TO, respectively, in which we can open-code
4902      this conversion.  If the integer mode is wider than the mode of TO,
4903      we can do the conversion either signed or unsigned.  */
4904 
4905   FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
4906     FOR_EACH_MODE_FROM (imode, GET_MODE (to))
4907       {
4908 	int doing_unsigned = unsignedp;
4909 
4910 	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4911 	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4912 	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4913 
4914 	if (icode != CODE_FOR_nothing)
4915 	  {
4916 	    rtx_insn *last = get_last_insn ();
4917 	    if (fmode != GET_MODE (from))
4918 	      from = convert_to_mode (fmode, from, 0);
4919 
4920 	    if (must_trunc)
4921 	      {
4922 		rtx temp = gen_reg_rtx (GET_MODE (from));
4923 		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4924 				    temp, 0);
4925 	      }
4926 
4927 	    if (imode != GET_MODE (to))
4928 	      target = gen_reg_rtx (imode);
4929 
4930 	    if (maybe_emit_unop_insn (icode, target, from,
4931 				      doing_unsigned ? UNSIGNED_FIX : FIX))
4932 	      {
4933 		if (target != to)
4934 		  convert_move (to, target, unsignedp);
4935 		return;
4936 	      }
4937 	    delete_insns_since (last);
4938 	  }
4939       }
4940 
4941   /* For an unsigned conversion, there is one more way to do it.
4942      If we only have a signed conversion, we generate code that compares
4943      the real value to the largest representable positive number.  If it
4944      is smaller, the conversion is done normally.  Otherwise, subtract
4945      one plus the highest signed number, convert, and add it back.
4946 
4947      We only need to check all real modes, since we know we didn't find
4948      anything with a wider integer mode.
4949 
4950      This code used to extend the FP value into a mode wider than the
4951      destination.  That is needed for decimal float modes, which cannot
4952      accurately represent one plus the highest signed number of the same
4953      size, but it is not needed for binary modes.  Consider, for instance,
4954      conversion from SFmode into DImode.
4955 
4956      The hot path through the code deals with inputs smaller than 2^63
4957      and does just the conversion, so there are no bits to lose.
4958 
4959      On the other path we know the value is in the range 2^63..2^64-1
4960      inclusive (for any other input, overflow happens and the result is
4961      undefined), so the most significant bit set in the mantissa
4962      corresponds to 2^63.  The subtraction of 2^63 should not generate
4963      any rounding, as it simply clears out that bit.  The rest is trivial.  */
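  /* A plain C sketch of that fallback (illustration only, for DImode TO
     and IEEE double FROM):

       uint64_t fix (double d)
       {
	 if (d < 0x1p63)
	   return (uint64_t) (int64_t) d;
	 return (uint64_t) (int64_t) (d - 0x1p63) ^ (1ULL << 63);
       }

     The XOR adds 2^63 back by setting the sign bit, which the signed
     conversion is known to have left clear.  */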
4964 
4965   scalar_int_mode to_mode;
4966   if (unsignedp
4967       && is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
4968       && HWI_COMPUTABLE_MODE_P (to_mode))
4969     FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
4970       {
4971 	scalar_mode fmode = fmode_iter.require ();
4972 	if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
4973 					   0, &must_trunc)
4974 	    && (!DECIMAL_FLOAT_MODE_P (fmode)
4975 		|| (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
4976 	  {
4977 	    int bitsize;
4978 	    REAL_VALUE_TYPE offset;
4979 	    rtx limit;
4980 	    rtx_code_label *lab1, *lab2;
4981 	    rtx_insn *insn;
4982 
4983 	    bitsize = GET_MODE_PRECISION (to_mode);
4984 	    real_2expN (&offset, bitsize - 1, fmode);
4985 	    limit = const_double_from_real_value (offset, fmode);
4986 	    lab1 = gen_label_rtx ();
4987 	    lab2 = gen_label_rtx ();
4988 
4989 	    if (fmode != GET_MODE (from))
4990 	      from = convert_to_mode (fmode, from, 0);
4991 
4992 	    /* See if we need to do the subtraction.  */
4993 	    do_pending_stack_adjust ();
4994 	    emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
4995 				     GET_MODE (from), 0, lab1);
4996 
4997 	    /* If not, do the signed "fix" and branch around fixup code.  */
4998 	    expand_fix (to, from, 0);
4999 	    emit_jump_insn (targetm.gen_jump (lab2));
5000 	    emit_barrier ();
5001 
5002 	    /* Otherwise, subtract 2**(N-1), convert to signed number,
5003 	       then add 2**(N-1).  Do the addition using XOR since this
5004 	       will often generate better code.  */
5005 	    emit_label (lab1);
5006 	    target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5007 				   NULL_RTX, 0, OPTAB_LIB_WIDEN);
5008 	    expand_fix (to, target, 0);
5009 	    target = expand_binop (to_mode, xor_optab, to,
5010 				   gen_int_mode
5011 				   (HOST_WIDE_INT_1 << (bitsize - 1),
5012 				    to_mode),
5013 				   to, 1, OPTAB_LIB_WIDEN);
5014 
5015 	    if (target != to)
5016 	      emit_move_insn (to, target);
5017 
5018 	    emit_label (lab2);
5019 
5020 	    if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
5021 	      {
5022 		/* Make a place for a REG_NOTE and add it.  */
5023 		insn = emit_move_insn (to, to);
5024 		set_dst_reg_note (insn, REG_EQUAL,
5025 				  gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
5026 						 copy_rtx (from)),
5027 				  to);
5028 	      }
5029 
5030 	    return;
5031 	  }
5032       }
5033 
5034   /* We can't do it with an insn, so use a library call.  But first ensure
5035      that the mode of TO is at least as wide as SImode, since those are the
5036      only library calls we know about.  */
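  /* For example (illustration only): fixing an SFmode value into DImode
     falls back to __fixsfdi, or to __fixunssfdi in the unsigned case.  */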
5037 
5038   if (is_narrower_int_mode (GET_MODE (to), SImode))
5039     {
5040       target = gen_reg_rtx (SImode);
5041 
5042       expand_fix (target, from, unsignedp);
5043     }
5044   else
5045     {
5046       rtx_insn *insns;
5047       rtx value;
5048       rtx libfunc;
5049 
5050       convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5051       libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5052       gcc_assert (libfunc);
5053 
5054       start_sequence ();
5055 
5056       value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5057 				       GET_MODE (to), from, GET_MODE (from));
5058       insns = get_insns ();
5059       end_sequence ();
5060 
5061       emit_libcall_block (insns, target, value,
5062 			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5063 					 GET_MODE (to), from));
5064     }
5065 
5066   if (target != to)
5067     {
5068       if (GET_MODE (to) == GET_MODE (target))
5069         emit_move_insn (to, target);
5070       else
5071         convert_move (to, target, 0);
5072     }
5073 }
5074 
5075 
5076 /* Promote integer arguments for a libcall if necessary.
5077    emit_library_call_value cannot do the promotion because it does not
5078    know if it should do a signed or unsigned promotion.  This is because
5079    there are no tree types defined for libcalls.  */
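/* For example (illustration only): on a target whose ABI widens sub-word
   scalars to word mode, an HImode ARG is widened here, with UINTP choosing
   between zero- and sign-extension.  */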
5080 
5081 static rtx
5082 prepare_libcall_arg (rtx arg, int uintp)
5083 {
5084   scalar_int_mode mode;
5085   machine_mode arg_mode;
5086   if (is_a <scalar_int_mode> (GET_MODE (arg), &mode))
5087     {
5088       /* If we need to promote the integer function argument, we must do
5089 	  it here instead of inside emit_library_call_value, because inside
5090 	  emit_library_call_value we don't know whether to do a signed or
5091 	  an unsigned promotion.  */
5092 
5093       int unsigned_p = 0;
5094       arg_mode = promote_function_mode (NULL_TREE, mode,
5095 					&unsigned_p, NULL_TREE, 0);
5096       if (arg_mode != mode)
5097 	return convert_to_mode (arg_mode, arg, uintp);
5098     }
5099   return arg;
5100 }
5101 
5102 /* Generate code to convert FROM to TO, where at least one of them is a
5103    fixed-point value.  If UINTP is true, either TO or FROM is an unsigned
5104    integer.  If SATP is true, we need to saturate the result.  */
5105 
5106 void
5107 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5108 {
5109   machine_mode to_mode = GET_MODE (to);
5110   machine_mode from_mode = GET_MODE (from);
5111   convert_optab tab;
5112   enum rtx_code this_code;
5113   enum insn_code code;
5114   rtx_insn *insns;
5115   rtx value;
5116   rtx libfunc;
5117 
5118   if (to_mode == from_mode)
5119     {
5120       emit_move_insn (to, from);
5121       return;
5122     }
5123 
5124   if (uintp)
5125     {
5126       tab = satp ? satfractuns_optab : fractuns_optab;
5127       this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5128     }
5129   else
5130     {
5131       tab = satp ? satfract_optab : fract_optab;
5132       this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5133     }
5134   code = convert_optab_handler (tab, to_mode, from_mode);
5135   if (code != CODE_FOR_nothing)
5136     {
5137       emit_unop_insn (code, to, from, this_code);
5138       return;
5139     }
5140 
5141   libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5142   gcc_assert (libfunc);
5143 
5144   from = prepare_libcall_arg (from, uintp);
5145   from_mode = GET_MODE (from);
5146 
5147   start_sequence ();
5148   value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5149 				   from, from_mode);
5150   insns = get_insns ();
5151   end_sequence ();
5152 
5153   emit_libcall_block (insns, to, value,
5154 		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
5155 }
5156 
5157 /* Generate code to convert FROM to fixed point and store in TO.  FROM
5158    must be floating point, TO must be signed.  Use the conversion optab
5159    TAB to do the conversion.  */
5160 
5161 bool
5162 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5163 {
5164   enum insn_code icode;
5165   rtx target = to;
5166   machine_mode fmode, imode;
5167 
5168   /* We first try to find a pair of modes, one real and one integer, at
5169      least as wide as FROM and TO, respectively, in which we can open-code
5170      this conversion.  If the integer mode is wider than the mode of TO,
5171      we can do the conversion either signed or unsigned.  */
5172 
5173   FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
5174     FOR_EACH_MODE_FROM (imode, GET_MODE (to))
5175       {
5176 	icode = convert_optab_handler (tab, imode, fmode);
5177 	if (icode != CODE_FOR_nothing)
5178 	  {
5179 	    rtx_insn *last = get_last_insn ();
5180 	    if (fmode != GET_MODE (from))
5181 	      from = convert_to_mode (fmode, from, 0);
5182 
5183 	    if (imode != GET_MODE (to))
5184 	      target = gen_reg_rtx (imode);
5185 
5186 	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5187 	      {
5188 	        delete_insns_since (last);
5189 		continue;
5190 	      }
5191 	    if (target != to)
5192 	      convert_move (to, target, 0);
5193 	    return true;
5194 	  }
5195       }
5196 
5197   return false;
5198 }
5199 
5200 /* Report whether we have an instruction to perform the operation
5201    specified by CODE on operands of mode MODE.  */
5202 int
5203 have_insn_for (enum rtx_code code, machine_mode mode)
5204 {
5205   return (code_to_optab (code)
5206 	  && (optab_handler (code_to_optab (code), mode)
5207 	      != CODE_FOR_nothing));
5208 }
5209 
5210 /* Print information about the current contents of the optabs on
5211    STDERR.  */
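/* A sample line of the arithmetic dump on a soft-float target might look
   like (illustration only):  plus	SF:	__addsf3  */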
5212 
5213 DEBUG_FUNCTION void
5214 debug_optab_libfuncs (void)
5215 {
5216   int i, j, k;
5217 
5218   /* Dump the arithmetic optabs.  */
5219   for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
5220     for (j = 0; j < NUM_MACHINE_MODES; ++j)
5221       {
5222 	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
5223 	if (l)
5224 	  {
5225 	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
5226 	    fprintf (stderr, "%s\t%s:\t%s\n",
5227 		     GET_RTX_NAME (optab_to_code ((optab) i)),
5228 		     GET_MODE_NAME (j),
5229 		     XSTR (l, 0));
5230 	  }
5231       }
5232 
5233   /* Dump the conversion optabs.  */
5234   for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
5235     for (j = 0; j < NUM_MACHINE_MODES; ++j)
5236       for (k = 0; k < NUM_MACHINE_MODES; ++k)
5237 	{
5238 	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
5239 					 (machine_mode) k);
5240 	  if (l)
5241 	    {
5242 	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
5243 	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5244 		       GET_RTX_NAME (optab_to_code ((optab) i)),
5245 		       GET_MODE_NAME (j),
5246 		       GET_MODE_NAME (k),
5247 		       XSTR (l, 0));
5248 	    }
5249 	}
5250 }
5251 
5252 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5253    CODE.  Return 0 on failure.  */
5254 
5255 rtx_insn *
5256 gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
5257 {
5258   machine_mode mode = GET_MODE (op1);
5259   enum insn_code icode;
5260   rtx_insn *insn;
5261   rtx trap_rtx;
5262 
5263   if (mode == VOIDmode)
5264     return 0;
5265 
5266   icode = optab_handler (ctrap_optab, mode);
5267   if (icode == CODE_FOR_nothing)
5268     return 0;
5269 
5270   /* Some targets only accept a zero trap code.  */
5271   if (!insn_operand_matches (icode, 3, tcode))
5272     return 0;
5273 
5274   do_pending_stack_adjust ();
5275   start_sequence ();
5276   prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
5277 		    &trap_rtx, &mode);
5278   if (!trap_rtx)
5279     insn = NULL;
5280   else
5281     insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
5282 			    tcode);
5283 
5284   /* If that failed, then give up.  */
5285   if (insn == 0)
5286     {
5287       end_sequence ();
5288       return 0;
5289     }
5290 
5291   emit_insn (insn);
5292   insn = get_insns ();
5293   end_sequence ();
5294   return insn;
5295 }
5296 
5297 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5298    or unsigned operation code.  */
5299 
5300 enum rtx_code
5301 get_rtx_code (enum tree_code tcode, bool unsignedp)
5302 {
5303   enum rtx_code code;
5304   switch (tcode)
5305     {
5306     case EQ_EXPR:
5307       code = EQ;
5308       break;
5309     case NE_EXPR:
5310       code = NE;
5311       break;
5312     case LT_EXPR:
5313       code = unsignedp ? LTU : LT;
5314       break;
5315     case LE_EXPR:
5316       code = unsignedp ? LEU : LE;
5317       break;
5318     case GT_EXPR:
5319       code = unsignedp ? GTU : GT;
5320       break;
5321     case GE_EXPR:
5322       code = unsignedp ? GEU : GE;
5323       break;
5324 
5325     case UNORDERED_EXPR:
5326       code = UNORDERED;
5327       break;
5328     case ORDERED_EXPR:
5329       code = ORDERED;
5330       break;
5331     case UNLT_EXPR:
5332       code = UNLT;
5333       break;
5334     case UNLE_EXPR:
5335       code = UNLE;
5336       break;
5337     case UNGT_EXPR:
5338       code = UNGT;
5339       break;
5340     case UNGE_EXPR:
5341       code = UNGE;
5342       break;
5343     case UNEQ_EXPR:
5344       code = UNEQ;
5345       break;
5346     case LTGT_EXPR:
5347       code = LTGT;
5348       break;
5349 
5350     case BIT_AND_EXPR:
5351       code = AND;
5352       break;
5353 
5354     case BIT_IOR_EXPR:
5355       code = IOR;
5356       break;
5357 
5358     default:
5359       gcc_unreachable ();
5360     }
5361   return code;
5362 }
5363 
5364 /* Return a comparison rtx of mode CMP_MODE for COND.  Use UNSIGNEDP to
5365    select signed or unsigned operators.  OPNO holds the index of the
5366    first comparison operand for insn ICODE.  Do not generate the
5367    compare instruction itself.  */
5368 
5369 static rtx
5370 vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
5371 		    tree t_op0, tree t_op1, bool unsignedp,
5372 		    enum insn_code icode, unsigned int opno)
5373 {
5374   struct expand_operand ops[2];
5375   rtx rtx_op0, rtx_op1;
5376   machine_mode m0, m1;
5377   enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
5378 
5379   gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);
5380 
5381   /* Expand operands.  For vector types with scalar modes, e.g. where int64x1_t
5382      has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5383      cases, use the original mode.  */
5384   rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
5385 			 EXPAND_STACK_PARM);
5386   m0 = GET_MODE (rtx_op0);
5387   if (m0 == VOIDmode)
5388     m0 = TYPE_MODE (TREE_TYPE (t_op0));
5389 
5390   rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
5391 			 EXPAND_STACK_PARM);
5392   m1 = GET_MODE (rtx_op1);
5393   if (m1 == VOIDmode)
5394     m1 = TYPE_MODE (TREE_TYPE (t_op1));
5395 
5396   create_input_operand (&ops[0], rtx_op0, m0);
5397   create_input_operand (&ops[1], rtx_op1, m1);
5398   if (!maybe_legitimize_operands (icode, opno, 2, ops))
5399     gcc_unreachable ();
5400   return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value);
5401 }
5402 
5403 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
5404    the first vec_perm operand, assuming the second operand is a constant
5405    vector of zeros.  Return the shift distance in bits if so, or NULL_RTX
5406    if the vec_perm is not a shift.  MODE is the mode of the value being
5407    shifted.  */
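/* For instance (illustration only): in V4SImode with a zero second
   operand, the selector {1, 2, 3, 4} moves every element down one slot
   and shifts in a zero, i.e. a shift by 1 * 32 bits.  */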
5408 static rtx
5409 shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel)
5410 {
5411   unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode);
5412   poly_int64 first = sel[0];
5413   if (maybe_ge (sel[0], GET_MODE_NUNITS (mode)))
5414     return NULL_RTX;
5415 
5416   if (!sel.series_p (0, 1, first, 1))
5417     {
5418       unsigned int nelt;
5419       if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5420 	return NULL_RTX;
5421       for (unsigned int i = 1; i < nelt; i++)
5422 	{
5423 	  poly_int64 expected = i + first;
5424 	  /* Indices into the second vector are all equivalent.  */
5425 	  if (maybe_lt (sel[i], nelt)
5426 	      ? maybe_ne (sel[i], expected)
5427 	      : maybe_lt (expected, nelt))
5428 	    return NULL_RTX;
5429 	}
5430     }
5431 
5432   return gen_int_shift_amount (mode, first * bitsize);
5433 }
5434 
5435 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn.  */
5436 
5437 static rtx
5438 expand_vec_perm_1 (enum insn_code icode, rtx target,
5439 		   rtx v0, rtx v1, rtx sel)
5440 {
5441   machine_mode tmode = GET_MODE (target);
5442   machine_mode smode = GET_MODE (sel);
5443   struct expand_operand ops[4];
5444 
5445   gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
5446 	      || mode_for_int_vector (tmode).require () == smode);
5447   create_output_operand (&ops[0], target, tmode);
5448   create_input_operand (&ops[3], sel, smode);
5449 
5450   /* Make an effort to preserve v0 == v1.  The target expander is able to
5451      rely on this to determine if we're permuting a single input operand.  */
5452   if (rtx_equal_p (v0, v1))
5453     {
5454       if (!insn_operand_matches (icode, 1, v0))
5455         v0 = force_reg (tmode, v0);
5456       gcc_checking_assert (insn_operand_matches (icode, 1, v0));
5457       gcc_checking_assert (insn_operand_matches (icode, 2, v0));
5458 
5459       create_fixed_operand (&ops[1], v0);
5460       create_fixed_operand (&ops[2], v0);
5461     }
5462   else
5463     {
5464       create_input_operand (&ops[1], v0, tmode);
5465       create_input_operand (&ops[2], v1, tmode);
5466     }
5467 
5468   if (maybe_expand_insn (icode, 4, ops))
5469     return ops[0].value;
5470   return NULL_RTX;
5471 }
5472 
5473 /* Implement a permutation of vectors v0 and v1 using the permutation
5474    vector in SEL and return the result.  Use TARGET to hold the result
5475    if nonnull and convenient.
5476 
5477    MODE is the mode of the vectors being permuted (V0 and V1).  SEL_MODE
5478    is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
5479    to have a particular mode.  */
5480 
5481 rtx
5482 expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
5483 		       const vec_perm_builder &sel, machine_mode sel_mode,
5484 		       rtx target)
5485 {
5486   if (!target || !register_operand (target, mode))
5487     target = gen_reg_rtx (mode);
5488 
5489   /* Set QIMODE to a different vector mode with byte elements.
5490      If no such mode, or if MODE already has byte elements, use VOIDmode.  */
5491   machine_mode qimode;
5492   if (!qimode_for_vec_perm (mode).exists (&qimode))
5493     qimode = VOIDmode;
5494 
5495   rtx_insn *last = get_last_insn ();
5496 
5497   bool single_arg_p = rtx_equal_p (v0, v1);
5498   /* Always specify two input vectors here and leave the target to handle
5499      cases in which the inputs are equal.  Not all backends can cope with
5500      the single-input representation when testing for a double-input
5501      target instruction.  */
5502   vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode));
5503 
5504   /* See if this can be handled with a vec_shr.  We only do this if the
5505      second vector is all zeroes.  */
5506   insn_code shift_code = optab_handler (vec_shr_optab, mode);
5507   insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
5508 			     ? optab_handler (vec_shr_optab, qimode)
5509 			     : CODE_FOR_nothing);
5510 
5511   if (v1 == CONST0_RTX (GET_MODE (v1))
5512       && (shift_code != CODE_FOR_nothing
5513 	  || shift_code_qi != CODE_FOR_nothing))
5514     {
5515       rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices);
5516       if (shift_amt)
5517 	{
5518 	  struct expand_operand ops[3];
5519 	  if (shift_code != CODE_FOR_nothing)
5520 	    {
5521 	      create_output_operand (&ops[0], target, mode);
5522 	      create_input_operand (&ops[1], v0, mode);
5523 	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
5524 	      if (maybe_expand_insn (shift_code, 3, ops))
5525 		return ops[0].value;
5526 	    }
5527 	  if (shift_code_qi != CODE_FOR_nothing)
5528 	    {
5529 	      rtx tmp = gen_reg_rtx (qimode);
5530 	      create_output_operand (&ops[0], tmp, qimode);
5531 	      create_input_operand (&ops[1], gen_lowpart (qimode, v0), qimode);
5532 	      create_convert_operand_from_type (&ops[2], shift_amt, sizetype);
5533 	      if (maybe_expand_insn (shift_code_qi, 3, ops))
5534 		return gen_lowpart (mode, ops[0].value);
5535 	    }
5536 	}
5537     }
5538 
5539   if (targetm.vectorize.vec_perm_const != NULL)
5540     {
5541       v0 = force_reg (mode, v0);
5542       if (single_arg_p)
5543 	v1 = v0;
5544       else
5545 	v1 = force_reg (mode, v1);
5546 
5547       if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
5548 	return target;
5549     }
5550 
5551   /* Fall back to a constant byte-based permutation.  */
5552   vec_perm_indices qimode_indices;
5553   rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX;
5554   if (qimode != VOIDmode)
5555     {
5556       qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode));
5557       target_qi = gen_reg_rtx (qimode);
5558       v0_qi = gen_lowpart (qimode, v0);
5559       v1_qi = gen_lowpart (qimode, v1);
5560       if (targetm.vectorize.vec_perm_const != NULL
5561 	  && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
5562 					       v1_qi, qimode_indices))
5563 	return gen_lowpart (mode, target_qi);
5564     }
5565 
5566   /* Otherwise expand as a fully variable permutation.  */
5567 
5568   /* The optabs are only defined for selectors with the same width
5569      as the values being permuted.  */
5570   machine_mode required_sel_mode;
5571   if (!mode_for_int_vector (mode).exists (&required_sel_mode)
5572       || !VECTOR_MODE_P (required_sel_mode))
5573     {
5574       delete_insns_since (last);
5575       return NULL_RTX;
5576     }
5577 
5578   /* We know that it is semantically valid to treat SEL as having SEL_MODE.
5579      If that isn't the mode we want then we need to prove that using
5580      REQUIRED_SEL_MODE is OK.  */
5581   if (sel_mode != required_sel_mode)
5582     {
5583       if (!selector_fits_mode_p (required_sel_mode, indices))
5584 	{
5585 	  delete_insns_since (last);
5586 	  return NULL_RTX;
5587 	}
5588       sel_mode = required_sel_mode;
5589     }
5590 
5591   insn_code icode = direct_optab_handler (vec_perm_optab, mode);
5592   if (icode != CODE_FOR_nothing)
5593     {
5594       rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
5595       rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel_rtx);
5596       if (tmp)
5597 	return tmp;
5598     }
5599 
5600   if (qimode != VOIDmode
5601       && selector_fits_mode_p (qimode, qimode_indices))
5602     {
5603       icode = direct_optab_handler (vec_perm_optab, qimode);
5604       if (icode != CODE_FOR_nothing)
5605 	{
5606 	  rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices);
5607 	  rtx tmp = expand_vec_perm_1 (icode, target_qi, v0_qi, v1_qi, sel_qi);
5608 	  if (tmp)
5609 	    return gen_lowpart (mode, tmp);
5610 	}
5611     }
5612 
5613   delete_insns_since (last);
5614   return NULL_RTX;
5615 }
5616 
5617 /* Implement a permutation of vectors v0 and v1 using the permutation
5618    vector in SEL and return the result.  Use TARGET to hold the result
5619    if nonnull and convenient.
5620 
5621    MODE is the mode of the vectors being permuted (V0 and V1).
5622    SEL must have the integer equivalent of MODE and is known to be
5623    unsuitable for permutes with a constant permutation vector.  */
5624 
5625 rtx
5626 expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
5627 {
5628   enum insn_code icode;
5629   unsigned int i, u;
5630   rtx tmp, sel_qi;
5631 
5632   u = GET_MODE_UNIT_SIZE (mode);
5633 
5634   if (!target || GET_MODE (target) != mode)
5635     target = gen_reg_rtx (mode);
5636 
5637   icode = direct_optab_handler (vec_perm_optab, mode);
5638   if (icode != CODE_FOR_nothing)
5639     {
5640       tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
5641       if (tmp)
5642 	return tmp;
5643     }
5644 
5645   /* As a special case to aid several targets, lower the element-based
5646      permutation to a byte-based permutation and try again.  */
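  /* For instance (illustration only): for little-endian V4HImode, where
     each element is u == 2 bytes, the element selector {2, 0, 3, 1} is
     lowered in three steps: scale to byte indices {4, 0, 6, 2}, broadcast
     the low byte of each element to get {4, 4, 0, 0, 6, 6, 2, 2}, then
     add the per-byte offsets {0, 1} to get {4, 5, 0, 1, 6, 7, 2, 3}.  */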
5647   machine_mode qimode;
5648   if (!qimode_for_vec_perm (mode).exists (&qimode)
5649       || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
5650     return NULL_RTX;
5651   icode = direct_optab_handler (vec_perm_optab, qimode);
5652   if (icode == CODE_FOR_nothing)
5653     return NULL_RTX;
5654 
5655   /* Multiply each element by its byte size.  */
5656   machine_mode selmode = GET_MODE (sel);
5657   if (u == 2)
5658     sel = expand_simple_binop (selmode, PLUS, sel, sel,
5659 			       NULL, 0, OPTAB_DIRECT);
5660   else
5661     sel = expand_simple_binop (selmode, ASHIFT, sel,
5662 			       gen_int_shift_amount (selmode, exact_log2 (u)),
5663 			       NULL, 0, OPTAB_DIRECT);
5664   gcc_assert (sel != NULL);
5665 
5666   /* Broadcast the low byte of each element into each of its bytes.
5667      The encoding has U interleaved stepped patterns, one for each
5668      byte of an element.  */
5669   vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3);
5670   unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0;
5671   for (i = 0; i < 3; ++i)
5672     for (unsigned int j = 0; j < u; ++j)
5673       const_sel.quick_push (i * u + low_byte_in_u);
5674   sel = gen_lowpart (qimode, sel);
5675   sel = expand_vec_perm_const (qimode, sel, sel, const_sel, qimode, NULL);
5676   gcc_assert (sel != NULL);
5677 
5678   /* Add the byte offset to each byte element.  */
5679   /* Note that the definition of the indices here follows memory ordering,
5680      so there should be no difference between big and little endian.  */
5681   rtx_vector_builder byte_indices (qimode, u, 1);
5682   for (i = 0; i < u; ++i)
5683     byte_indices.quick_push (GEN_INT (i));
5684   tmp = byte_indices.build ();
5685   sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
5686 				sel, 0, OPTAB_DIRECT);
5687   gcc_assert (sel_qi != NULL);
5688 
5689   tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
5690   tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
5691 			   gen_lowpart (qimode, v1), sel_qi);
5692   if (tmp)
5693     tmp = gen_lowpart (mode, tmp);
5694   return tmp;
5695 }
5696 
5697 /* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
5698    three operands.  */
5699 
5700 rtx
5701 expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5702 			   rtx target)
5703 {
5704   struct expand_operand ops[4];
5705   machine_mode mode = TYPE_MODE (vec_cond_type);
5706   machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
5707   enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
5708   rtx mask, rtx_op1, rtx_op2;
5709 
5710   if (icode == CODE_FOR_nothing)
5711     return 0;
5712 
5713   mask = expand_normal (op0);
5714   rtx_op1 = expand_normal (op1);
5715   rtx_op2 = expand_normal (op2);
5716 
5717   mask = force_reg (mask_mode, mask);
5718   rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5719 
5720   create_output_operand (&ops[0], target, mode);
5721   create_input_operand (&ops[1], rtx_op1, mode);
5722   create_input_operand (&ops[2], rtx_op2, mode);
5723   create_input_operand (&ops[3], mask, mask_mode);
5724   expand_insn (icode, 4, ops);
5725 
5726   return ops[0].value;
5727 }
5728 
5729 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
5730    three operands.  */
5731 
5732 rtx
5733 expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
5734 		      rtx target)
5735 {
5736   struct expand_operand ops[6];
5737   enum insn_code icode;
5738   rtx comparison, rtx_op1, rtx_op2;
5739   machine_mode mode = TYPE_MODE (vec_cond_type);
5740   machine_mode cmp_op_mode;
5741   bool unsignedp;
5742   tree op0a, op0b;
5743   enum tree_code tcode;
5744 
5745   if (COMPARISON_CLASS_P (op0))
5746     {
5747       op0a = TREE_OPERAND (op0, 0);
5748       op0b = TREE_OPERAND (op0, 1);
5749       tcode = TREE_CODE (op0);
5750     }
5751   else
5752     {
5753       gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
5754       if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
5755 	  != CODE_FOR_nothing)
5756 	return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
5757 					  op2, target);
5758       /* Fake op0 < 0, exploiting that vector mask elements are 0 or -1.  */
5759       else
5760 	{
5761 	  gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
5762 		      == MODE_VECTOR_INT);
5763 	  op0a = op0;
5764 	  op0b = build_zero_cst (TREE_TYPE (op0));
5765 	  tcode = LT_EXPR;
5766 	}
5767     }
5768   cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
5769   unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5770 
5771 
5772   gcc_assert (known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (cmp_op_mode))
5773 	      && known_eq (GET_MODE_NUNITS (mode),
5774 			   GET_MODE_NUNITS (cmp_op_mode)));
5775 
5776   icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
5777   if (icode == CODE_FOR_nothing)
5778     {
5779       if (tcode == EQ_EXPR || tcode == NE_EXPR)
5780 	icode = get_vcond_eq_icode (mode, cmp_op_mode);
5781       if (icode == CODE_FOR_nothing)
5782 	return 0;
5783     }
5784 
5785   comparison = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp,
5786 				   icode, 4);
5787   rtx_op1 = expand_normal (op1);
5788   rtx_op2 = expand_normal (op2);
5789 
5790   create_output_operand (&ops[0], target, mode);
5791   create_input_operand (&ops[1], rtx_op1, mode);
5792   create_input_operand (&ops[2], rtx_op2, mode);
5793   create_fixed_operand (&ops[3], comparison);
5794   create_fixed_operand (&ops[4], XEXP (comparison, 0));
5795   create_fixed_operand (&ops[5], XEXP (comparison, 1));
5796   expand_insn (icode, 6, ops);
5797   return ops[0].value;
5798 }
5799 
5800 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
5801    Use TARGET for the result if nonnull and convenient.  */
5802 
5803 rtx
5804 expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
5805 {
5806   struct expand_operand ops[3];
5807   enum insn_code icode;
5808   machine_mode emode = GET_MODE_INNER (vmode);
5809 
5810   icode = direct_optab_handler (vec_series_optab, vmode);
5811   gcc_assert (icode != CODE_FOR_nothing);
5812 
5813   create_output_operand (&ops[0], target, vmode);
5814   create_input_operand (&ops[1], op0, emode);
5815   create_input_operand (&ops[2], op1, emode);
5816 
5817   expand_insn (icode, 3, ops);
5818   return ops[0].value;
5819 }
5820 
5821 /* Generate insns for a vector comparison into a mask.  */
5822 
5823 rtx
5824 expand_vec_cmp_expr (tree type, tree exp, rtx target)
5825 {
5826   struct expand_operand ops[4];
5827   enum insn_code icode;
5828   rtx comparison;
5829   machine_mode mask_mode = TYPE_MODE (type);
5830   machine_mode vmode;
5831   bool unsignedp;
5832   tree op0a, op0b;
5833   enum tree_code tcode;
5834 
5835   op0a = TREE_OPERAND (exp, 0);
5836   op0b = TREE_OPERAND (exp, 1);
5837   tcode = TREE_CODE (exp);
5838 
5839   unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
5840   vmode = TYPE_MODE (TREE_TYPE (op0a));
5841 
5842   icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
5843   if (icode == CODE_FOR_nothing)
5844     {
5845       if (tcode == EQ_EXPR || tcode == NE_EXPR)
5846 	icode = get_vec_cmp_eq_icode (vmode, mask_mode);
5847       if (icode == CODE_FOR_nothing)
5848 	return 0;
5849     }
5850 
5851   comparison = vector_compare_rtx (mask_mode, tcode, op0a, op0b,
5852 				   unsignedp, icode, 2);
5853   create_output_operand (&ops[0], target, mask_mode);
5854   create_fixed_operand (&ops[1], comparison);
5855   create_fixed_operand (&ops[2], XEXP (comparison, 0));
5856   create_fixed_operand (&ops[3], XEXP (comparison, 1));
5857   expand_insn (icode, 4, ops);
5858   return ops[0].value;
5859 }
5860 
5861 /* Expand a highpart multiply.  */
5862 
5863 rtx
5864 expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
5865 		      rtx target, bool uns_p)
5866 {
5867   struct expand_operand eops[3];
5868   enum insn_code icode;
5869   int method, i;
5870   machine_mode wmode;
5871   rtx m1, m2;
5872   optab tab1, tab2;
5873 
5874   method = can_mult_highpart_p (mode, uns_p);
5875   switch (method)
5876     {
5877     case 0:
5878       return NULL_RTX;
5879     case 1:
5880       tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
5881       return expand_binop (mode, tab1, op0, op1, target, uns_p,
5882 			   OPTAB_LIB_WIDEN);
5883     case 2:
5884       tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
5885       tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
5886       break;
5887     case 3:
5888       tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
5889       tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
5890       if (BYTES_BIG_ENDIAN)
5891 	std::swap (tab1, tab2);
5892       break;
5893     default:
5894       gcc_unreachable ();
5895     }
5896 
5897   icode = optab_handler (tab1, mode);
5898   wmode = insn_data[icode].operand[0].mode;
5899   gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode),
5900 				 GET_MODE_NUNITS (mode)));
5901   gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode)));
5902 
5903   create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5904   create_input_operand (&eops[1], op0, mode);
5905   create_input_operand (&eops[2], op1, mode);
5906   expand_insn (icode, 3, eops);
5907   m1 = gen_lowpart (mode, eops[0].value);
5908 
5909   create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
5910   create_input_operand (&eops[1], op0, mode);
5911   create_input_operand (&eops[2], op1, mode);
5912   expand_insn (optab_handler (tab2, mode), 3, eops);
5913   m2 = gen_lowpart (mode, eops[0].value);
5914 
5915   vec_perm_builder sel;
5916   if (method == 2)
5917     {
5918       /* The encoding has 2 interleaved stepped patterns.  */
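      /* For example (illustration only): for little-endian V4SImode this
	 yields the selector {1, 5, 3, 7}, picking the high SImode half of
	 each even product in M1 and each odd product in M2.  */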
5919       sel.new_vector (GET_MODE_NUNITS (mode), 2, 3);
5920       for (i = 0; i < 6; ++i)
5921 	sel.quick_push (!BYTES_BIG_ENDIAN + (i & ~1)
5922 			+ ((i & 1) ? GET_MODE_NUNITS (mode) : 0));
5923     }
5924   else
5925     {
5926       /* The encoding has a single interleaved stepped pattern.  */
5927       sel.new_vector (GET_MODE_NUNITS (mode), 1, 3);
5928       for (i = 0; i < 3; ++i)
5929 	sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
5930     }
5931 
5932   return expand_vec_perm_const (mode, m1, m2, sel, BLKmode, target);
5933 }
5934 
5935 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
5936    pattern.  */
5937 
5938 static void
5939 find_cc_set (rtx x, const_rtx pat, void *data)
5940 {
5941   if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
5942       && GET_CODE (pat) == SET)
5943     {
5944       rtx *p_cc_reg = (rtx *) data;
5945       gcc_assert (!*p_cc_reg);
5946       *p_cc_reg = x;
5947     }
5948 }
5949 
5950 /* This is a helper function for the other atomic operations.  This function
5951    emits a loop that contains SEQ that iterates until a compare-and-swap
5952    operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
5953    a set of instructions that takes a value from OLD_REG as an input and
5954    produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
5955    set to the current contents of MEM.  After SEQ, a compare-and-swap will
5956    attempt to update MEM with NEW_REG.  The function returns true when the
5957    loop was generated successfully.  */
5958 
5959 static bool
5960 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5961 {
5962   machine_mode mode = GET_MODE (mem);
5963   rtx_code_label *label;
5964   rtx cmp_reg, success, oldval;
5965 
5966   /* The loop we want to generate looks like
5967 
5968 	cmp_reg = mem;
5969       label:
5970         old_reg = cmp_reg;
5971 	seq;
5972 	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
5973 	if (!success)
5974 	  goto label;
5975 
5976      Note that we only do the plain load from memory once.  Subsequent
5977      iterations use the value loaded by the compare-and-swap pattern.  */
5978 
5979   label = gen_label_rtx ();
5980   cmp_reg = gen_reg_rtx (mode);
5981 
5982   emit_move_insn (cmp_reg, mem);
5983   emit_label (label);
5984   emit_move_insn (old_reg, cmp_reg);
5985   if (seq)
5986     emit_insn (seq);
5987 
5988   success = NULL_RTX;
5989   oldval = cmp_reg;
5990   if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
5991 				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
5992 				       MEMMODEL_RELAXED))
5993     return false;
5994 
5995   if (oldval != cmp_reg)
5996     emit_move_insn (cmp_reg, oldval);
5997 
5998   /* Mark this jump predicted not taken.  */
5999   emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
6000 			   GET_MODE (success), 1, label,
6001 			   profile_probability::guessed_never ());
6002   return true;
6003 }
6004 
6005 
6006 /* This function tries to emit an atomic_exchange instruction.  VAL is written
6007    to *MEM using memory model MODEL.  The previous contents of *MEM are returned,
6008    using TARGET if possible.  */
6009 
6010 static rtx
6011 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6012 {
6013   machine_mode mode = GET_MODE (mem);
6014   enum insn_code icode;
6015 
6016   /* If the target supports the exchange directly, great.  */
6017   icode = direct_optab_handler (atomic_exchange_optab, mode);
6018   if (icode != CODE_FOR_nothing)
6019     {
6020       struct expand_operand ops[4];
6021 
6022       create_output_operand (&ops[0], target, mode);
6023       create_fixed_operand (&ops[1], mem);
6024       create_input_operand (&ops[2], val, mode);
6025       create_integer_operand (&ops[3], model);
6026       if (maybe_expand_insn (icode, 4, ops))
6027 	return ops[0].value;
6028     }
6029 
6030   return NULL_RTX;
6031 }
6032 
6033 /* This function tries to implement an atomic exchange operation using
6034    __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6035    The previous contents of *MEM are returned, using TARGET if possible.
6036    Since this instruction is an acquire barrier only, stronger memory
6037    models may require additional barriers to be emitted.  */
6038 
6039 static rtx
6040 maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
6041 				   enum memmodel model)
6042 {
6043   machine_mode mode = GET_MODE (mem);
6044   enum insn_code icode;
6045   rtx_insn *last_insn = get_last_insn ();
6046 
6047   icode = optab_handler (sync_lock_test_and_set_optab, mode);
6048 
6049   /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
6050      exists, and the memory model is stronger than acquire, add a release
6051      barrier before the instruction.  */
6052 
6053   if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
6054     expand_mem_thread_fence (model);
6055 
6056   if (icode != CODE_FOR_nothing)
6057     {
6058       struct expand_operand ops[3];
6059       create_output_operand (&ops[0], target, mode);
6060       create_fixed_operand (&ops[1], mem);
6061       create_input_operand (&ops[2], val, mode);
6062       if (maybe_expand_insn (icode, 3, ops))
6063 	return ops[0].value;
6064     }
6065 
6066   /* If an external test-and-set libcall is provided, use that instead of
6067      any external compare-and-swap that we might get from the compare-and-
6068      swap-loop expansion later.  */
6069   if (!can_compare_and_swap_p (mode, false))
6070     {
6071       rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
6072       if (libfunc != NULL)
6073 	{
6074 	  rtx addr;
6075 
6076 	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6077 	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6078 					  mode, addr, ptr_mode,
6079 					  val, mode);
6080 	}
6081     }
6082 
6083   /* If the test_and_set can't be emitted, eliminate any barrier that might
6084      have been emitted.  */
6085   delete_insns_since (last_insn);
6086   return NULL_RTX;
6087 }
6088 
6089 /* This function tries to implement an atomic exchange operation using a
6090    compare_and_swap loop. VAL is written to *MEM.  The previous contents of
6091    *MEM are returned, using TARGET if possible.  No memory model is required
6092    since a compare_and_swap loop is seq-cst.  */
6093 
6094 static rtx
6095 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
6096 {
6097   machine_mode mode = GET_MODE (mem);
6098 
6099   if (can_compare_and_swap_p (mode, true))
6100     {
6101       if (!target || !register_operand (target, mode))
6102 	target = gen_reg_rtx (mode);
6103       if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6104 	return target;
6105     }
6106 
6107   return NULL_RTX;
6108 }
6109 
6110 /* This function tries to implement an atomic test-and-set operation
6111    using the atomic_test_and_set instruction pattern.  A boolean value
6112    is returned from the operation, using TARGET if possible.  */
6113 
6114 static rtx
6115 maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6116 {
6117   machine_mode pat_bool_mode;
6118   struct expand_operand ops[3];
6119 
6120   if (!targetm.have_atomic_test_and_set ())
6121     return NULL_RTX;
6122 
6123   /* While we always get QImode from __atomic_test_and_set, we get
6124      other memory modes from __sync_lock_test_and_set.  Note that we
6125      use no endian adjustment here.  This matches the 4.6 behavior
6126      in the Sparc backend.  */
6127   enum insn_code icode = targetm.code_for_atomic_test_and_set;
6128   gcc_checking_assert (insn_data[icode].operand[1].mode == QImode);
6129   if (GET_MODE (mem) != QImode)
6130     mem = adjust_address_nv (mem, QImode, 0);
6131 
6132   pat_bool_mode = insn_data[icode].operand[0].mode;
6133   create_output_operand (&ops[0], target, pat_bool_mode);
6134   create_fixed_operand (&ops[1], mem);
6135   create_integer_operand (&ops[2], model);
6136 
6137   if (maybe_expand_insn (icode, 3, ops))
6138     return ops[0].value;
6139   return NULL_RTX;
6140 }
6141 
6142 /* This function expands the legacy __sync_lock_test_and_set operation, which
6143    is generally an atomic exchange.  Some limited targets only allow the
6144    constant 1 to be stored.  This is an ACQUIRE operation.
6145 
6146    TARGET is an optional place to stick the return value.
6147    MEM is where VAL is stored.  */
6148 
6149 rtx
6150 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
6151 {
6152   rtx ret;
6153 
6154   /* Try an atomic_exchange first.  */
6155   ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
6156   if (ret)
6157     return ret;
6158 
6159   ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
6160 					   MEMMODEL_SYNC_ACQUIRE);
6161   if (ret)
6162     return ret;
6163 
6164   ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6165   if (ret)
6166     return ret;
6167 
6168   /* If there are no other options, try atomic_test_and_set if the value
6169      being stored is 1.  */
6170   if (val == const1_rtx)
6171     ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
6172 
6173   return ret;
6174 }
6175 
6176 /* This function expands the atomic test_and_set operation:
6177    atomically store a boolean TRUE into MEM and return the previous value.
6178 
6179    MEMMODEL is the memory model variant to use.
6180    TARGET is an optional place to stick the return value.  */
6181 
6182 rtx
6183 expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
6184 {
6185   machine_mode mode = GET_MODE (mem);
6186   rtx ret, trueval, subtarget;
6187 
6188   ret = maybe_emit_atomic_test_and_set (target, mem, model);
6189   if (ret)
6190     return ret;
6191 
6192   /* Be binary compatible with non-default settings of trueval and with
6193      different cpu revisions.  E.g. one revision may have atomic-test-and-set,
6194      but another only has atomic-exchange.  */
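  /* For instance (illustration only): SPARC's ldstub stores all-ones, so
     such a target defines atomic_test_and_set_trueval as 0xff; the
     emit_store_flag_force at the bottom then normalizes the result to the
     0/1 boolean that callers expect.  */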
6195   if (targetm.atomic_test_and_set_trueval == 1)
6196     {
6197       trueval = const1_rtx;
6198       subtarget = target ? target : gen_reg_rtx (mode);
6199     }
6200   else
6201     {
6202       trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
6203       subtarget = gen_reg_rtx (mode);
6204     }
6205 
6206   /* Try the atomic-exchange optab...  */
6207   ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);
6208 
6209   /* ... then an atomic-compare-and-swap loop ... */
6210   if (!ret)
6211     ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);
6212 
6213   /* ... before trying the vaguely defined legacy lock_test_and_set. */
6214   if (!ret)
6215     ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);
6216 
6217   /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6218      things with the value 1.  Thus we try again without trueval.  */
6219   if (!ret && targetm.atomic_test_and_set_trueval != 1)
6220     ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);
6221 
6222   /* Failing all else, assume a single threaded environment and simply
6223      perform the operation.  */
6224   if (!ret)
6225     {
6226       /* If the result is ignored, skip the move to target.  */
6227       if (subtarget != const0_rtx)
6228         emit_move_insn (subtarget, mem);
6229 
6230       emit_move_insn (mem, trueval);
6231       ret = subtarget;
6232     }
6233 
6234   /* Recall that we have to return a boolean value; rectify if trueval
6235      is not exactly one.  */
6236   if (targetm.atomic_test_and_set_trueval != 1)
6237     ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);
6238 
6239   return ret;
6240 }
6241 
6242 /* This function expands the atomic exchange operation:
6243    atomically store VAL in MEM and return the previous value in MEM.
6244 
6245    MEMMODEL is the memory model variant to use.
6246    TARGET is an optional place to stick the return value.  */
6247 
6248 rtx
6249 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
6250 {
6251   machine_mode mode = GET_MODE (mem);
6252   rtx ret;
6253 
6254   /* If loads are not atomic for the required size and we are not called to
6255      provide a __sync builtin, do not do anything so that we stay consistent
6256      with atomic loads of the same size.  */
6257   if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6258     return NULL_RTX;
6259 
6260   ret = maybe_emit_atomic_exchange (target, mem, val, model);
6261 
6262   /* Next try a compare-and-swap loop for the exchange.  */
6263   if (!ret)
6264     ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
6265 
6266   return ret;
6267 }
6268 
6269 /* This function expands the atomic compare exchange operation:
6270 
6271    *PTARGET_BOOL is an optional place to store the boolean success/failure.
6272    *PTARGET_OVAL is an optional place to store the old value from memory.
6273    Both target parameters may be NULL or const0_rtx to indicate that we do
6274    not care about that return value.  Both target parameters are updated on
6275    success to the actual location of the corresponding result.
6276 
6277    MEMMODEL is the memory model variant to use.
6278 
6279    The return value of the function is true for success.  */
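/* In C-level terms (illustration only), this is the expansion behind
   __atomic_compare_exchange_n (mem, &expected, desired, is_weak,
   succ_model, fail_model).  */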
6280 
6281 bool
6282 expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
6283 				rtx mem, rtx expected, rtx desired,
6284 				bool is_weak, enum memmodel succ_model,
6285 				enum memmodel fail_model)
6286 {
6287   machine_mode mode = GET_MODE (mem);
6288   struct expand_operand ops[8];
6289   enum insn_code icode;
6290   rtx target_oval, target_bool = NULL_RTX;
6291   rtx libfunc;
6292 
6293   /* If loads are not atomic for the required size and we are not called to
6294      provide a __sync builtin, do not do anything so that we stay consistent
6295      with atomic loads of the same size.  */
6296   if (!can_atomic_load_p (mode) && !is_mm_sync (succ_model))
6297     return false;
6298 
6299   /* Load expected into a register for the compare and swap.  */
6300   if (MEM_P (expected))
6301     expected = copy_to_reg (expected);
6302 
6303   /* Make sure we always have some place to put the return oldval.
6304      Further, make sure that place is distinct from the input expected,
6305      just in case we need that path down below.  */
6306   if (ptarget_oval && *ptarget_oval == const0_rtx)
6307     ptarget_oval = NULL;
6308 
6309   if (ptarget_oval == NULL
6310       || (target_oval = *ptarget_oval) == NULL
6311       || reg_overlap_mentioned_p (expected, target_oval))
6312     target_oval = gen_reg_rtx (mode);
6313 
6314   icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
6315   if (icode != CODE_FOR_nothing)
6316     {
6317       machine_mode bool_mode = insn_data[icode].operand[0].mode;
6318 
6319       if (ptarget_bool && *ptarget_bool == const0_rtx)
6320 	ptarget_bool = NULL;
6321 
6322       /* Make sure we always have a place for the bool operand.  */
6323       if (ptarget_bool == NULL
6324 	  || (target_bool = *ptarget_bool) == NULL
6325 	  || GET_MODE (target_bool) != bool_mode)
6326 	target_bool = gen_reg_rtx (bool_mode);
6327 
6328       /* Emit the compare_and_swap.  */
6329       create_output_operand (&ops[0], target_bool, bool_mode);
6330       create_output_operand (&ops[1], target_oval, mode);
6331       create_fixed_operand (&ops[2], mem);
6332       create_input_operand (&ops[3], expected, mode);
6333       create_input_operand (&ops[4], desired, mode);
6334       create_integer_operand (&ops[5], is_weak);
6335       create_integer_operand (&ops[6], succ_model);
6336       create_integer_operand (&ops[7], fail_model);
6337       if (maybe_expand_insn (icode, 8, ops))
6338 	{
6339 	  /* Return success/failure.  */
6340 	  target_bool = ops[0].value;
6341 	  target_oval = ops[1].value;
6342 	  goto success;
6343 	}
6344     }
6345 
6346   /* Otherwise fall back to the original __sync_val_compare_and_swap
6347      which is always seq-cst.  */
6348   icode = optab_handler (sync_compare_and_swap_optab, mode);
6349   if (icode != CODE_FOR_nothing)
6350     {
6351       rtx cc_reg;
6352 
6353       create_output_operand (&ops[0], target_oval, mode);
6354       create_fixed_operand (&ops[1], mem);
6355       create_input_operand (&ops[2], expected, mode);
6356       create_input_operand (&ops[3], desired, mode);
6357       if (!maybe_expand_insn (icode, 4, ops))
6358 	return false;
6359 
6360       target_oval = ops[0].value;
6361 
6362       /* If the caller isn't interested in the boolean return value,
6363 	 skip the computation of it.  */
6364       if (ptarget_bool == NULL)
6365 	goto success;
6366 
6367       /* Otherwise, work out if the compare-and-swap succeeded.  */
6368       cc_reg = NULL_RTX;
6369       if (have_insn_for (COMPARE, CCmode))
6370 	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
6371       if (cc_reg)
6372 	{
6373 	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
6374 					       const0_rtx, VOIDmode, 0, 1);
6375 	  goto success;
6376 	}
6377       goto success_bool_from_val;
6378     }
6379 
6380   /* Also check for library support for __sync_val_compare_and_swap.  */
6381   libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
6382   if (libfunc != NULL)
6383     {
6384       rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6385       rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
6386 					    mode, addr, ptr_mode,
6387 					    expected, mode, desired, mode);
6388       emit_move_insn (target_oval, target);
6389 
6390       /* Compute the boolean return value only if requested.  */
6391       if (ptarget_bool)
6392 	goto success_bool_from_val;
6393       else
6394 	goto success;
6395     }
6396 
6397   /* Failure.  */
6398   return false;
6399 
6400  success_bool_from_val:
6401    target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
6402 					expected, VOIDmode, 1, 1);
6403  success:
6404   /* Make sure that the oval output winds up where the caller asked.  */
6405   if (ptarget_oval)
6406     *ptarget_oval = target_oval;
6407   if (ptarget_bool)
6408     *ptarget_bool = target_bool;
6409   return true;
6410 }
6411 
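/* A minimal usage sketch for the expander above (hypothetical caller,
   not taken from elsewhere in GCC; assumes MEM, EXPECTED and DESIRED
   are valid rtxes of the same mode).  Passing a pointer whose target
   is const0_rtx marks that output as unused.  */

static bool
example_expand_strong_cas (rtx mem, rtx expected, rtx desired)
{
  rtx bool_result = NULL_RTX;	/* Let the expander pick registers.  */
  rtx oldval = NULL_RTX;
  return expand_atomic_compare_and_swap (&bool_result, &oldval,
					 mem, expected, desired,
					 false /* is_weak */,
					 MEMMODEL_SEQ_CST,
					 MEMMODEL_SEQ_CST);
}
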
6412 /* Generate asm volatile("" : : : "memory") as the memory blockage.  */
6413 
6414 static void
6415 expand_asm_memory_blockage (void)
6416 {
6417   rtx asm_op, clob;
6418 
6419   asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
6420 				 rtvec_alloc (0), rtvec_alloc (0),
6421 				 rtvec_alloc (0), UNKNOWN_LOCATION);
6422   MEM_VOLATILE_P (asm_op) = 1;
6423 
6424   clob = gen_rtx_SCRATCH (VOIDmode);
6425   clob = gen_rtx_MEM (BLKmode, clob);
6426   clob = gen_rtx_CLOBBER (VOIDmode, clob);
6427 
6428   emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
6429 }
6430 
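/* For illustration, the PARALLEL built above is the RTL form of the
   classic compiler barrier idiom:

     __asm__ volatile ("" : : : "memory");

   an empty volatile asm whose memory clobber keeps the optimizers from
   moving memory accesses across it, without emitting any machine
   instruction.  */
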
6431 /* Do not propagate memory accesses across this point.  */
6432 
6433 static void
6434 expand_memory_blockage (void)
6435 {
6436   if (targetm.have_memory_blockage ())
6437     emit_insn (targetm.gen_memory_blockage ());
6438   else
6439     expand_asm_memory_blockage ();
6440 }
6441 
6442 /* This routine will either emit the mem_thread_fence pattern or issue a
6443    sync_synchronize to generate a fence for memory model MEMMODEL.  */
6444 
6445 void
6446 expand_mem_thread_fence (enum memmodel model)
6447 {
6448   if (is_mm_relaxed (model))
6449     return;
6450   if (targetm.have_mem_thread_fence ())
6451     {
6452       emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
6453       expand_memory_blockage ();
6454     }
6455   else if (targetm.have_memory_barrier ())
6456     emit_insn (targetm.gen_memory_barrier ());
6457   else if (synchronize_libfunc != NULL_RTX)
6458     emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
6459   else
6460     expand_memory_blockage ();
6461 }
6462 
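/* A usage sketch (hypothetical caller; MEMMODEL_RELEASE comes from
   memmodel.h).  A relaxed model would emit nothing at all.  */

static void
example_emit_release_fence (void)
{
  /* Emits a hardware fence where the target provides one, else a
     __sync_synchronize libcall or a compiler-only blockage.  */
  expand_mem_thread_fence (MEMMODEL_RELEASE);
}
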
6463 /* Emit a signal fence with given memory model.  */
6464 
6465 void
6466 expand_mem_signal_fence (enum memmodel model)
6467 {
6468   /* No machine barrier is required to implement a signal fence, but
6469      a compiler memory barrier must be issued, except for relaxed MM.  */
6470   if (!is_mm_relaxed (model))
6471     expand_memory_blockage ();
6472 }
6473 
6474 /* This function expands the atomic load operation:
6475    return the atomically loaded value in MEM.
6476 
6477    MEMMODEL is the memory model variant to use.
6478    TARGET is an optional place to stick the return value.  */
6479 
6480 rtx
6481 expand_atomic_load (rtx target, rtx mem, enum memmodel model)
6482 {
6483   machine_mode mode = GET_MODE (mem);
6484   enum insn_code icode;
6485 
6486   /* If the target supports the load directly, great.  */
6487   icode = direct_optab_handler (atomic_load_optab, mode);
6488   if (icode != CODE_FOR_nothing)
6489     {
6490       struct expand_operand ops[3];
6491       rtx_insn *last = get_last_insn ();
6492       if (is_mm_seq_cst (model))
6493 	expand_memory_blockage ();
6494 
6495       create_output_operand (&ops[0], target, mode);
6496       create_fixed_operand (&ops[1], mem);
6497       create_integer_operand (&ops[2], model);
6498       if (maybe_expand_insn (icode, 3, ops))
6499 	{
6500 	  if (!is_mm_relaxed (model))
6501 	    expand_memory_blockage ();
6502 	  return ops[0].value;
6503 	}
6504       delete_insns_since (last);
6505     }
6506 
6507   /* If the size of the object is greater than word size on this target,
6508      then we assume that a load will not be atomic.  We could try to
6509      emulate a load with a compare-and-swap operation, but the store such
6510      an emulation performs would be incorrect if this is a volatile
6511      atomic load or targets read-only-mapped memory.  */
6512   if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
6513     /* If there is no atomic load, leave the library call.  */
6514     return NULL_RTX;
6515 
6516   /* Otherwise assume loads are atomic, and emit the proper barriers.  */
6517   if (!target || target == const0_rtx)
6518     target = gen_reg_rtx (mode);
6519 
6520   /* For SEQ_CST, emit a barrier before the load.  */
6521   if (is_mm_seq_cst (model))
6522     expand_mem_thread_fence (model);
6523 
6524   emit_move_insn (target, mem);
6525 
6526   /* Emit the appropriate barrier after the load.  */
6527   expand_mem_thread_fence (model);
6528 
6529   return target;
6530 }
6531 
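/* A usage sketch for the routine above (hypothetical caller; assumes
   PTR is a Pmode register holding a suitably aligned address, and that
   real code would also set the memory attributes).  NULL_RTX lets the
   expander allocate the result register.  */

static rtx
example_expand_seq_cst_load (rtx ptr)
{
  rtx mem = gen_rtx_MEM (SImode, ptr);
  return expand_atomic_load (NULL_RTX, mem, MEMMODEL_SEQ_CST);
}
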
6532 /* This function expands the atomic store operation:
6533    Atomically store VAL in MEM.
6534    MEMMODEL is the memory model variant to use.
6535    USE_RELEASE is true if __sync_lock_release can be used as a fallback.
6536    The function returns const0_rtx if a pattern was emitted.  */
6537 
6538 rtx
6539 expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
6540 {
6541   machine_mode mode = GET_MODE (mem);
6542   enum insn_code icode;
6543   struct expand_operand ops[3];
6544 
6545   /* If the target supports the store directly, great.  */
6546   icode = direct_optab_handler (atomic_store_optab, mode);
6547   if (icode != CODE_FOR_nothing)
6548     {
6549       rtx_insn *last = get_last_insn ();
6550       if (!is_mm_relaxed (model))
6551 	expand_memory_blockage ();
6552       create_fixed_operand (&ops[0], mem);
6553       create_input_operand (&ops[1], val, mode);
6554       create_integer_operand (&ops[2], model);
6555       if (maybe_expand_insn (icode, 3, ops))
6556 	{
6557 	  if (is_mm_seq_cst (model))
6558 	    expand_memory_blockage ();
6559 	  return const0_rtx;
6560 	}
6561       delete_insns_since (last);
6562     }
6563 
6564   /* If using __sync_lock_release is a viable alternative, try it.
6565      Note that this will not be set to true if we are expanding a generic
6566      __atomic_store_n.  */
6567   if (use_release)
6568     {
6569       icode = direct_optab_handler (sync_lock_release_optab, mode);
6570       if (icode != CODE_FOR_nothing)
6571 	{
6572 	  create_fixed_operand (&ops[0], mem);
6573 	  create_input_operand (&ops[1], const0_rtx, mode);
6574 	  if (maybe_expand_insn (icode, 2, ops))
6575 	    {
6576 	      /* lock_release is only a release barrier.  */
6577 	      if (is_mm_seq_cst (model))
6578 		expand_mem_thread_fence (model);
6579 	      return const0_rtx;
6580 	    }
6581 	}
6582     }
6583 
6584   /* If the size of the object is greater than word size on this target,
6585      a default store will not be atomic.  */
6586   if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD))
6587     {
6588       /* If loads are atomic or we are called to provide a __sync builtin,
6589	 we can try an atomic_exchange and throw away the result.  Otherwise,
6590 	 don't do anything so that we do not create an inconsistency between
6591 	 loads and stores.  */
6592       if (can_atomic_load_p (mode) || is_mm_sync (model))
6593 	{
6594 	  rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
6595 	  if (!target)
6596 	    target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem,
6597 								val);
6598 	  if (target)
6599 	    return const0_rtx;
6600 	}
6601       return NULL_RTX;
6602     }
6603 
6604   /* Otherwise assume stores are atomic, and emit the proper barriers.  */
6605   expand_mem_thread_fence (model);
6606 
6607   emit_move_insn (mem, val);
6608 
6609   /* For SEQ_CST, also emit a barrier after the store.  */
6610   if (is_mm_seq_cst (model))
6611     expand_mem_thread_fence (model);
6612 
6613   return const0_rtx;
6614 }
6615 
6616 
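/* A usage sketch (hypothetical caller; assumes MEM and VAL share the
   same mode).  USE_RELEASE is false here, as it would be for a generic
   __atomic_store_n expansion; a NULL_RTX result means the caller must
   fall back to a library call.  */

static bool
example_expand_release_store (rtx mem, rtx val)
{
  return expand_atomic_store (mem, val, MEMMODEL_RELEASE, false)
	 != NULL_RTX;
}
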
6617 /* Structure containing the pointers and values required to process the
6618    various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */
6619 
6620 struct atomic_op_functions
6621 {
6622   direct_optab mem_fetch_before;
6623   direct_optab mem_fetch_after;
6624   direct_optab mem_no_result;
6625   optab fetch_before;
6626   optab fetch_after;
6627   direct_optab no_result;
6628   enum rtx_code reverse_code;
6629 };
6630 
6631 
6632 /* Fill in structure pointed to by OP with the various optab entries for an
6633    operation of type CODE.  */
6634 
6635 static void
6636 get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
6637 {
6638   gcc_assert (op != NULL);
6639 
6640   /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
6641      in the source code during compilation, and the optab entries are not
6642      computable until runtime.  Fill in the values at runtime.  */
6643   switch (code)
6644     {
6645     case PLUS:
6646       op->mem_fetch_before = atomic_fetch_add_optab;
6647       op->mem_fetch_after = atomic_add_fetch_optab;
6648       op->mem_no_result = atomic_add_optab;
6649       op->fetch_before = sync_old_add_optab;
6650       op->fetch_after = sync_new_add_optab;
6651       op->no_result = sync_add_optab;
6652       op->reverse_code = MINUS;
6653       break;
6654     case MINUS:
6655       op->mem_fetch_before = atomic_fetch_sub_optab;
6656       op->mem_fetch_after = atomic_sub_fetch_optab;
6657       op->mem_no_result = atomic_sub_optab;
6658       op->fetch_before = sync_old_sub_optab;
6659       op->fetch_after = sync_new_sub_optab;
6660       op->no_result = sync_sub_optab;
6661       op->reverse_code = PLUS;
6662       break;
6663     case XOR:
6664       op->mem_fetch_before = atomic_fetch_xor_optab;
6665       op->mem_fetch_after = atomic_xor_fetch_optab;
6666       op->mem_no_result = atomic_xor_optab;
6667       op->fetch_before = sync_old_xor_optab;
6668       op->fetch_after = sync_new_xor_optab;
6669       op->no_result = sync_xor_optab;
6670       op->reverse_code = XOR;
6671       break;
6672     case AND:
6673       op->mem_fetch_before = atomic_fetch_and_optab;
6674       op->mem_fetch_after = atomic_and_fetch_optab;
6675       op->mem_no_result = atomic_and_optab;
6676       op->fetch_before = sync_old_and_optab;
6677       op->fetch_after = sync_new_and_optab;
6678       op->no_result = sync_and_optab;
6679       op->reverse_code = UNKNOWN;
6680       break;
6681     case IOR:
6682       op->mem_fetch_before = atomic_fetch_or_optab;
6683       op->mem_fetch_after = atomic_or_fetch_optab;
6684       op->mem_no_result = atomic_or_optab;
6685       op->fetch_before = sync_old_ior_optab;
6686       op->fetch_after = sync_new_ior_optab;
6687       op->no_result = sync_ior_optab;
6688       op->reverse_code = UNKNOWN;
6689       break;
6690     case NOT:
6691       op->mem_fetch_before = atomic_fetch_nand_optab;
6692       op->mem_fetch_after = atomic_nand_fetch_optab;
6693       op->mem_no_result = atomic_nand_optab;
6694       op->fetch_before = sync_old_nand_optab;
6695       op->fetch_after = sync_new_nand_optab;
6696       op->no_result = sync_nand_optab;
6697       op->reverse_code = UNKNOWN;
6698       break;
6699     default:
6700       gcc_unreachable ();
6701     }
6702 }
6703 
6704 /* See if there is a cheaper way to implement the operation "*MEM CODE VAL"
6705    using memory order MODEL.  If AFTER is true the operation needs to return
6706    the value of *MEM after the operation, otherwise the previous value.
6707    TARGET is an optional place to place the result.  The result is unused if
6708    it is const0_rtx.
6709    Return the result if there is a better sequence, otherwise NULL_RTX.  */
6710 
6711 static rtx
6712 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6713 			 enum memmodel model, bool after)
6714 {
6715   /* If the value is prefetched, or not used, it may be possible to replace
6716      the sequence with a native exchange operation.  */
6717   if (!after || target == const0_rtx)
6718     {
6719       /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
6720       if (code == AND && val == const0_rtx)
6721         {
6722 	  if (target == const0_rtx)
6723 	    target = gen_reg_rtx (GET_MODE (mem));
6724 	  return maybe_emit_atomic_exchange (target, mem, val, model);
6725 	}
6726 
6727       /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
6728       if (code == IOR && val == constm1_rtx)
6729         {
6730 	  if (target == const0_rtx)
6731 	    target = gen_reg_rtx (GET_MODE (mem));
6732 	  return maybe_emit_atomic_exchange (target, mem, val, model);
6733 	}
6734     }
6735 
6736   return NULL_RTX;
6737 }
6738 
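/* The two identities used above, written out at the builtin level
   (illustration only):

     __atomic_fetch_and (&x, 0, m)   ==  __atomic_exchange_n (&x, 0, m)
     __atomic_fetch_or  (&x, -1, m)  ==  __atomic_exchange_n (&x, -1, m)

   ANDing with zero always stores zero and ORing with -1 always stores
   all-ones, so when only the old value (or no value) is wanted, a plain
   exchange of the constant is equivalent and usually cheaper.  */
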
6739 /* Try to emit an instruction for a specific operation variation.
6740    OPTAB contains the OP functions.
6741    TARGET is an optional place to return the result. const0_rtx means unused.
6742    MEM is the memory location to operate on.
6743    VAL is the value to use in the operation.
6744    USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
6745    MODEL is the memory model, if used.
6746    AFTER is true if the returned result is the value after the operation.  */
6747 
6748 static rtx
6749 maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
6750 	       rtx val, bool use_memmodel, enum memmodel model, bool after)
6751 {
6752   machine_mode mode = GET_MODE (mem);
6753   struct expand_operand ops[4];
6754   enum insn_code icode;
6755   int op_counter = 0;
6756   int num_ops;
6757 
6758   /* Check to see if there is a result returned.  */
6759   if (target == const0_rtx)
6760     {
6761       if (use_memmodel)
6762         {
6763 	  icode = direct_optab_handler (optab->mem_no_result, mode);
6764 	  create_integer_operand (&ops[2], model);
6765 	  num_ops = 3;
6766 	}
6767       else
6768         {
6769 	  icode = direct_optab_handler (optab->no_result, mode);
6770 	  num_ops = 2;
6771 	}
6772     }
6773   /* Otherwise, we need to generate a result.  */
6774   else
6775     {
6776       if (use_memmodel)
6777         {
6778 	  icode = direct_optab_handler (after ? optab->mem_fetch_after
6779 					: optab->mem_fetch_before, mode);
6780 	  create_integer_operand (&ops[3], model);
6781 	  num_ops = 4;
6782 	}
6783       else
6784 	{
6785 	  icode = optab_handler (after ? optab->fetch_after
6786 				 : optab->fetch_before, mode);
6787 	  num_ops = 3;
6788 	}
6789       create_output_operand (&ops[op_counter++], target, mode);
6790     }
6791   if (icode == CODE_FOR_nothing)
6792     return NULL_RTX;
6793 
6794   create_fixed_operand (&ops[op_counter++], mem);
6795   /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
6796   create_convert_operand_to (&ops[op_counter++], val, mode, true);
6797 
6798   if (maybe_expand_insn (icode, num_ops, ops))
6799     return (target == const0_rtx ? const0_rtx : ops[0].value);
6800 
6801   return NULL_RTX;
6802 }
6803 
6804 
6805 /* This function expands an atomic fetch_OP or OP_fetch operation:
6806    TARGET is an optional place to stick the return value.  const0_rtx
6807    indicates the result is unused.
6808    Atomically fetch MEM, perform the operation with VAL, and store
6809    the result back to MEM.  CODE is the operation being performed (OP).
6810    MEMMODEL is the memory model variant to use.
6811    AFTER is true to return the result of the operation (OP_fetch).
6812    AFTER is false to return the value before the operation (fetch_OP).
6813 
6814    This function will *only* generate instructions if there is a direct
6815    optab.  No compare-and-swap loops or libcalls will be generated.  */
6816 
6817 static rtx
6818 expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
6819 				    enum rtx_code code, enum memmodel model,
6820 				    bool after)
6821 {
6822   machine_mode mode = GET_MODE (mem);
6823   struct atomic_op_functions optab;
6824   rtx result;
6825   bool unused_result = (target == const0_rtx);
6826 
6827   get_atomic_op_for_code (&optab, code);
6828 
6829   /* Check to see if there are any better instructions.  */
6830   result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
6831   if (result)
6832     return result;
6833 
6834   /* Check for the case where the result isn't used and try those patterns.  */
6835   if (unused_result)
6836     {
6837       /* Try the memory model variant first.  */
6838       result = maybe_emit_op (&optab, target, mem, val, true, model, true);
6839       if (result)
6840         return result;
6841 
6842       /* Next try the old style without a memory model.  */
6843       result = maybe_emit_op (&optab, target, mem, val, false, model, true);
6844       if (result)
6845         return result;
6846 
6847       /* There is no no-result pattern, so try patterns with a result.  */
6848       target = NULL_RTX;
6849     }
6850 
6851   /* Try the __atomic version.  */
6852   result = maybe_emit_op (&optab, target, mem, val, true, model, after);
6853   if (result)
6854     return result;
6855 
6856   /* Try the older __sync version.  */
6857   result = maybe_emit_op (&optab, target, mem, val, false, model, after);
6858   if (result)
6859     return result;
6860 
6861   /* If the fetch value can be calculated from the other variation of fetch,
6862      try that operation.  */
6863   if (after || unused_result || optab.reverse_code != UNKNOWN)
6864     {
6865       /* Try the __atomic version, then the older __sync version.  */
6866       result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
6867       if (!result)
6868 	result = maybe_emit_op (&optab, target, mem, val, false, model, !after);
6869 
6870       if (result)
6871 	{
6872 	  /* If the result isn't used, no need to do compensation code.  */
6873 	  if (unused_result)
6874 	    return result;
6875 
6876 	  /* Issue compensation code.  fetch_after == fetch_before OP val.
6877 	     fetch_before == fetch_after REVERSE_OP val.  */
6878 	  if (!after)
6879 	    code = optab.reverse_code;
6880 	  if (code == NOT)
6881 	    {
6882 	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
6883 					    true, OPTAB_LIB_WIDEN);
6884 	      result = expand_simple_unop (mode, NOT, result, target, true);
6885 	    }
6886 	  else
6887 	    result = expand_simple_binop (mode, code, result, val, target,
6888 					  true, OPTAB_LIB_WIDEN);
6889 	  return result;
6890 	}
6891     }
6892 
6893   /* No direct opcode can be generated.  */
6894   return NULL_RTX;
6895 }
6896 
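/* The compensation step above relies on these identities (OP is the
   operation, REVERSE_OP its inverse from get_atomic_op_for_code):

     op_fetch (mem, val)  ==  fetch_op (mem, val) OP val
     fetch_op (mem, val)  ==  op_fetch (mem, val) REVERSE_OP val

   For example, the new value of an atomic add is old + val, and the old
   value can be recovered from the new one as new - val.  NAND has no
   single reverse operation, so only the first direction is compensated,
   as ~(result & val).  */
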
6897 
6898 
6899 /* This function expands an atomic fetch_OP or OP_fetch operation:
6900    TARGET is an optional place to stick the return value.  const0_rtx
6901    indicates the result is unused.
6902    Atomically fetch MEM, perform the operation with VAL, and store
6903    the result back to MEM.  CODE is the operation being performed (OP).
6904    MEMMODEL is the memory model variant to use.
6905    AFTER is true to return the result of the operation (OP_fetch).
6906    AFTER is false to return the value before the operation (fetch_OP).  */
6907 rtx
6908 expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
6909 			enum memmodel model, bool after)
6910 {
6911   machine_mode mode = GET_MODE (mem);
6912   rtx result;
6913   bool unused_result = (target == const0_rtx);
6914 
6915   /* If loads are not atomic for the required size and we are not called to
6916      provide a __sync builtin, do not do anything so that we stay consistent
6917      with atomic loads of the same size.  */
6918   if (!can_atomic_load_p (mode) && !is_mm_sync (model))
6919     return NULL_RTX;
6920 
6921   result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
6922 					       after);
6923 
6924   if (result)
6925     return result;
6926 
6927   /* Add/sub can be implemented by doing the reverse operation with -(val).  */
6928   if (code == PLUS || code == MINUS)
6929     {
6930       rtx tmp;
6931       enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);
6932 
6933       start_sequence ();
6934       tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
6935       result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
6936 						   model, after);
6937       if (result)
6938 	{
6939 	  /* The reversed operation worked, so emit the insns and return.  */
6940 	  tmp = get_insns ();
6941 	  end_sequence ();
6942 	  emit_insn (tmp);
6943           return result;
6944 	}
6945 
6946       /* That failed too, so throw away the negation code and continue.  */
6947       end_sequence ();
6948     }
6949 
6950   /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
6951   if (!can_compare_and_swap_p (mode, false))
6952     {
6953       rtx libfunc;
6954       bool fixup = false;
6955       enum rtx_code orig_code = code;
6956       struct atomic_op_functions optab;
6957 
6958       get_atomic_op_for_code (&optab, code);
6959       libfunc = optab_libfunc (after ? optab.fetch_after
6960 			       : optab.fetch_before, mode);
6961       if (libfunc == NULL
6962 	  && (after || unused_result || optab.reverse_code != UNKNOWN))
6963 	{
6964 	  fixup = true;
6965 	  if (!after)
6966 	    code = optab.reverse_code;
6967 	  libfunc = optab_libfunc (after ? optab.fetch_before
6968 				   : optab.fetch_after, mode);
6969 	}
6970       if (libfunc != NULL)
6971 	{
6972 	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
6973 	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
6974 					    addr, ptr_mode, val, mode);
6975 
6976 	  if (!unused_result && fixup)
6977 	    result = expand_simple_binop (mode, code, result, val, target,
6978 					  true, OPTAB_LIB_WIDEN);
6979 	  return result;
6980 	}
6981 
6982       /* We need the original code for any further attempts.  */
6983       code = orig_code;
6984     }
6985 
6986   /* If nothing else has succeeded, default to a compare-and-swap loop.  */
6987   if (can_compare_and_swap_p (mode, true))
6988     {
6989       rtx_insn *insn;
6990       rtx t0 = gen_reg_rtx (mode), t1;
6991 
6992       start_sequence ();
6993 
6994       /* If the result is used, get a register for it.  */
6995       if (!unused_result)
6996         {
6997 	  if (!target || !register_operand (target, mode))
6998 	    target = gen_reg_rtx (mode);
6999 	  /* If fetch_before, copy the value now.  */
7000 	  if (!after)
7001 	    emit_move_insn (target, t0);
7002 	}
7003       else
7004         target = const0_rtx;
7005 
7006       t1 = t0;
7007       if (code == NOT)
7008         {
7009 	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7010 				    true, OPTAB_LIB_WIDEN);
7011 	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7012 	}
7013       else
7014 	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
7015 				  OPTAB_LIB_WIDEN);
7016 
7017       /* For after, copy the value now.  */
7018       if (!unused_result && after)
7019         emit_move_insn (target, t1);
7020       insn = get_insns ();
7021       end_sequence ();
7022 
7023       if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7024         return target;
7025     }
7026 
7027   return NULL_RTX;
7028 }
7029 
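/* The compare-and-swap fallback above amounts to the classic loop
   (pseudo-code, for illustration):

     old = *mem;
     do
       new = old OP val;
     while (!compare_and_swap (mem, &old, new));
     return after ? new : old;

   The sequence built here computes NEW (T1) from T0 and copies the
   requested value to TARGET; expand_compare_and_swap_loop supplies the
   loop itself, updating T0 on each failed iteration.  */
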
7030 /* Return true if OPERAND is suitable for operand number OPNO of
7031    instruction ICODE.  */
7032 
7033 bool
7034 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
7035 {
7036   return (!insn_data[(int) icode].operand[opno].predicate
7037 	  || (insn_data[(int) icode].operand[opno].predicate
7038 	      (operand, insn_data[(int) icode].operand[opno].mode)));
7039 }
7040 
7041 /* TARGET is a target of a multiword operation that we are going to
7042    implement as a series of word-mode operations.  Return true if
7043    TARGET is suitable for this purpose.  */
7044 
7045 bool
7046 valid_multiword_target_p (rtx target)
7047 {
7048   machine_mode mode;
7049   int i, size;
7050 
7051   mode = GET_MODE (target);
7052   if (!GET_MODE_SIZE (mode).is_constant (&size))
7053     return false;
7054   for (i = 0; i < size; i += UNITS_PER_WORD)
7055     if (!validate_subreg (word_mode, mode, target, i))
7056       return false;
7057   return true;
7058 }
7059 
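/* For illustration, code that splits a multiword operation typically
   extracts the word-sized pieces validated above along these lines
   (a sketch; operand_subword_force is the helper used for this
   elsewhere in GCC):  */

static rtx
example_nth_word (rtx target, unsigned int word_index)
{
  /* Return word WORD_INDEX of TARGET as a word_mode rtx, copying
     through a register if a direct subreg is not possible.  */
  return operand_subword_force (target, word_index, GET_MODE (target));
}
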
7060 /* Make OP describe an input operand that has value INTVAL and that has
7061    no inherent mode.  This function should only be used for operands that
7062    are always expand-time constants.  The backend may request that INTVAL
7063    be copied into a different kind of rtx, but it must specify the mode
7064    of that rtx if so.  */
7065 
7066 void
7067 create_integer_operand (struct expand_operand *op, poly_int64 intval)
7068 {
7069   create_expand_operand (op, EXPAND_INTEGER,
7070 			 gen_int_mode (intval, MAX_MODE_INT),
7071 			 VOIDmode, false, intval);
7072 }
7073 
7074 /* Like maybe_legitimize_operand, but do not change the code of the
7075    current rtx value.  */
7076 
7077 static bool
7078 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
7079 				    struct expand_operand *op)
7080 {
7081   /* See if the operand matches in its current form.  */
7082   if (insn_operand_matches (icode, opno, op->value))
7083     return true;
7084 
7085   /* If the operand is a memory whose address has no side effects,
7086      try forcing the address into a non-virtual pseudo register.
7087      The check for side effects is important because copy_to_mode_reg
7088      cannot handle things like auto-modified addresses.  */
7089   if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
7090     {
7091       rtx addr, mem;
7092 
7093       mem = op->value;
7094       addr = XEXP (mem, 0);
7095       if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
7096 	  && !side_effects_p (addr))
7097 	{
7098 	  rtx_insn *last;
7099 	  machine_mode mode;
7100 
7101 	  last = get_last_insn ();
7102 	  mode = get_address_mode (mem);
7103 	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
7104 	  if (insn_operand_matches (icode, opno, mem))
7105 	    {
7106 	      op->value = mem;
7107 	      return true;
7108 	    }
7109 	  delete_insns_since (last);
7110 	}
7111     }
7112 
7113   return false;
7114 }
7115 
7116 /* Try to make OP match operand OPNO of instruction ICODE.  Return true
7117    on success, storing the new operand value back in OP.  */
7118 
7119 static bool
7120 maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
7121 			  struct expand_operand *op)
7122 {
7123   machine_mode mode, imode;
7124   bool old_volatile_ok, result;
7125 
7126   mode = op->mode;
7127   switch (op->type)
7128     {
7129     case EXPAND_FIXED:
7130       old_volatile_ok = volatile_ok;
7131       volatile_ok = true;
7132       result = maybe_legitimize_operand_same_code (icode, opno, op);
7133       volatile_ok = old_volatile_ok;
7134       return result;
7135 
7136     case EXPAND_OUTPUT:
7137       gcc_assert (mode != VOIDmode);
7138       if (op->value
7139 	  && op->value != const0_rtx
7140 	  && GET_MODE (op->value) == mode
7141 	  && maybe_legitimize_operand_same_code (icode, opno, op))
7142 	return true;
7143 
7144       op->value = gen_reg_rtx (mode);
7145       op->target = 0;
7146       break;
7147 
7148     case EXPAND_INPUT:
7149     input:
7150       gcc_assert (mode != VOIDmode);
7151       gcc_assert (GET_MODE (op->value) == VOIDmode
7152 		  || GET_MODE (op->value) == mode);
7153       if (maybe_legitimize_operand_same_code (icode, opno, op))
7154 	return true;
7155 
7156       op->value = copy_to_mode_reg (mode, op->value);
7157       break;
7158 
7159     case EXPAND_CONVERT_TO:
7160       gcc_assert (mode != VOIDmode);
7161       op->value = convert_to_mode (mode, op->value, op->unsigned_p);
7162       goto input;
7163 
7164     case EXPAND_CONVERT_FROM:
7165       if (GET_MODE (op->value) != VOIDmode)
7166 	mode = GET_MODE (op->value);
7167       else
7168 	/* The caller must tell us what mode this value has.  */
7169 	gcc_assert (mode != VOIDmode);
7170 
7171       imode = insn_data[(int) icode].operand[opno].mode;
7172       if (imode != VOIDmode && imode != mode)
7173 	{
7174 	  op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
7175 	  mode = imode;
7176 	}
7177       goto input;
7178 
7179     case EXPAND_ADDRESS:
7180       op->value = convert_memory_address (as_a <scalar_int_mode> (mode),
7181 					  op->value);
7182       goto input;
7183 
7184     case EXPAND_INTEGER:
7185       mode = insn_data[(int) icode].operand[opno].mode;
7186       if (mode != VOIDmode
7187 	  && known_eq (trunc_int_for_mode (op->int_value, mode),
7188 		       op->int_value))
7189 	{
7190 	  op->value = gen_int_mode (op->int_value, mode);
7191 	  goto input;
7192 	}
7193       break;
7194     }
7195   return insn_operand_matches (icode, opno, op->value);
7196 }
7197 
7198 /* Make OP describe an input operand that should have the same value
7199    as VALUE, after any mode conversion that the target might request.
7200    TYPE is the type of VALUE.  */
7201 
7202 void
7203 create_convert_operand_from_type (struct expand_operand *op,
7204 				  rtx value, tree type)
7205 {
7206   create_convert_operand_from (op, value, TYPE_MODE (type),
7207 			       TYPE_UNSIGNED (type));
7208 }
7209 
7210 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7211    of instruction ICODE.  Return true on success, leaving the new operand
7212    values in the OPS themselves.  Emit no code on failure.  */
7213 
7214 bool
7215 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
7216 			   unsigned int nops, struct expand_operand *ops)
7217 {
7218   rtx_insn *last;
7219   unsigned int i;
7220 
7221   last = get_last_insn ();
7222   for (i = 0; i < nops; i++)
7223     if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
7224       {
7225 	delete_insns_since (last);
7226 	return false;
7227       }
7228   return true;
7229 }
7230 
7231 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7232    as its operands.  Return the instruction pattern on success,
7233    and emit any necessary set-up code.  Return null and emit no
7234    code on failure.  */
7235 
7236 rtx_insn *
7237 maybe_gen_insn (enum insn_code icode, unsigned int nops,
7238 		struct expand_operand *ops)
7239 {
7240   gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
7241   if (!maybe_legitimize_operands (icode, 0, nops, ops))
7242     return NULL;
7243 
7244   switch (nops)
7245     {
7246     case 1:
7247       return GEN_FCN (icode) (ops[0].value);
7248     case 2:
7249       return GEN_FCN (icode) (ops[0].value, ops[1].value);
7250     case 3:
7251       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
7252     case 4:
7253       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7254 			      ops[3].value);
7255     case 5:
7256       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7257 			      ops[3].value, ops[4].value);
7258     case 6:
7259       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7260 			      ops[3].value, ops[4].value, ops[5].value);
7261     case 7:
7262       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7263 			      ops[3].value, ops[4].value, ops[5].value,
7264 			      ops[6].value);
7265     case 8:
7266       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7267 			      ops[3].value, ops[4].value, ops[5].value,
7268 			      ops[6].value, ops[7].value);
7269     case 9:
7270       return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
7271 			      ops[3].value, ops[4].value, ops[5].value,
7272 			      ops[6].value, ops[7].value, ops[8].value);
7273     }
7274   gcc_unreachable ();
7275 }
7276 
7277 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7278    as its operands.  Return true on success and emit no code on failure.  */
7279 
7280 bool
7281 maybe_expand_insn (enum insn_code icode, unsigned int nops,
7282 		   struct expand_operand *ops)
7283 {
7284   rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7285   if (pat)
7286     {
7287       emit_insn (pat);
7288       return true;
7289     }
7290   return false;
7291 }
7292 
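/* The create_*_operand / maybe_expand_insn idiom used throughout this
   file, condensed into one place (a sketch; ICODE would come from
   optab_handler or direct_optab_handler):  */

static rtx
example_expand_unary_pattern (enum insn_code icode, rtx target, rtx src,
			      machine_mode mode)
{
  struct expand_operand ops[2];

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  /* On success the legitimized result is ops[0].value, which need not
     be TARGET itself.  On failure no code has been emitted.  */
  if (maybe_expand_insn (icode, 2, ops))
    return ops[0].value;
  return NULL_RTX;
}
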
7293 /* Like maybe_expand_insn, but for jumps.  */
7294 
7295 bool
7296 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
7297 			struct expand_operand *ops)
7298 {
7299   rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
7300   if (pat)
7301     {
7302       emit_jump_insn (pat);
7303       return true;
7304     }
7305   return false;
7306 }
7307 
7308 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7309    as its operands.  */
7310 
7311 void
7312 expand_insn (enum insn_code icode, unsigned int nops,
7313 	     struct expand_operand *ops)
7314 {
7315   if (!maybe_expand_insn (icode, nops, ops))
7316     gcc_unreachable ();
7317 }
7318 
7319 /* Like expand_insn, but for jumps.  */
7320 
7321 void
7322 expand_jump_insn (enum insn_code icode, unsigned int nops,
7323 		  struct expand_operand *ops)
7324 {
7325   if (!maybe_expand_jump_insn (icode, nops, ops))
7326     gcc_unreachable ();
7327 }
7328