1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 
39 /* Simplification and canonicalization of RTL.  */
40 
41 /* Much code operates on (low, high) pairs; the low value is an
42    unsigned wide int, the high value a signed wide int.  We
43    occasionally need to sign extend from low to high as if low were a
44    signed wide int.  */
45 #define HWI_SIGN_EXTEND(low) \
46   ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
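/* For example, HWI_SIGN_EXTEND of a low word whose most significant
   bit is set yields HOST_WIDE_INT_M1 (all ones), while HWI_SIGN_EXTEND
   of a small nonnegative value such as 1 yields HOST_WIDE_INT_0.  */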
47 
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 					   rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 					    machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 					rtx, rtx, rtx, rtx);
58 
59 /* Negate a CONST_INT rtx.  */
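/* For example, when MODE is wider than a host wide int and I is the
   most negative HOST_WIDE_INT, negating within a host wide int gives
   back the same value, so we let simplify_const_unary_operation build
   the correct wide-int result instead.  */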
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
62 {
63   unsigned HOST_WIDE_INT val = -UINTVAL (i);
64 
65   if (!HWI_COMPUTABLE_MODE_P (mode)
66       && val == UINTVAL (i))
67     return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 					   mode);
69   return gen_int_mode (val, mode);
70 }
71 
72 /* Test whether expression, X, is an immediate constant that represents
73    the most significant bit of machine mode MODE.  */
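/* For example, in SImode the only constant for which this returns
   true is (const_int -2147483648), i.e. 0x80000000, the SImode sign
   bit.  */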
74 
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
77 {
78   unsigned HOST_WIDE_INT val;
79   unsigned int width;
80   scalar_int_mode int_mode;
81 
82   if (!is_int_mode (mode, &int_mode))
83     return false;
84 
85   width = GET_MODE_PRECISION (int_mode);
86   if (width == 0)
87     return false;
88 
89   if (width <= HOST_BITS_PER_WIDE_INT
90       && CONST_INT_P (x))
91     val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93   else if (CONST_WIDE_INT_P (x))
94     {
95       unsigned int i;
96       unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97       if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 	return false;
99       for (i = 0; i < elts - 1; i++)
100 	if (CONST_WIDE_INT_ELT (x, i) != 0)
101 	  return false;
102       val = CONST_WIDE_INT_ELT (x, elts - 1);
103       width %= HOST_BITS_PER_WIDE_INT;
104       if (width == 0)
105 	width = HOST_BITS_PER_WIDE_INT;
106     }
107 #else
108   else if (width <= HOST_BITS_PER_DOUBLE_INT
109 	   && CONST_DOUBLE_AS_INT_P (x)
110 	   && CONST_DOUBLE_LOW (x) == 0)
111     {
112       val = CONST_DOUBLE_HIGH (x);
113       width -= HOST_BITS_PER_WIDE_INT;
114     }
115 #endif
116   else
117     /* X is not an integer constant.  */
118     return false;
119 
120   if (width < HOST_BITS_PER_WIDE_INT)
121     val &= (HOST_WIDE_INT_1U << width) - 1;
122   return val == (HOST_WIDE_INT_1U << (width - 1));
123 }
124 
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126    (after masking with the mode mask of MODE).  Returns false if the
127    precision of MODE is too large to handle.  */
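/* For example, val_signbit_p (QImode, 0x80) and
   val_signbit_p (QImode, 0x180) are both true, since masking with the
   QImode mode mask leaves 0x80 in each case.  */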
128 
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132   unsigned int width;
133   scalar_int_mode int_mode;
134 
135   if (!is_int_mode (mode, &int_mode))
136     return false;
137 
138   width = GET_MODE_PRECISION (int_mode);
139   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140     return false;
141 
142   val &= GET_MODE_MASK (int_mode);
143   return val == (HOST_WIDE_INT_1U << (width - 1));
144 }
145 
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147    Returns false if the precision of MODE is too large to handle.  */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150 {
151   unsigned int width;
152 
153   scalar_int_mode int_mode;
154   if (!is_int_mode (mode, &int_mode))
155     return false;
156 
157   width = GET_MODE_PRECISION (int_mode);
158   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159     return false;
160 
161   val &= HOST_WIDE_INT_1U << (width - 1);
162   return val != 0;
163 }
164 
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166    Returns false if the precision of MODE is too large to handle.  */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169 {
170   unsigned int width;
171 
172   scalar_int_mode int_mode;
173   if (!is_int_mode (mode, &int_mode))
174     return false;
175 
176   width = GET_MODE_PRECISION (int_mode);
177   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178     return false;
179 
180   val &= HOST_WIDE_INT_1U << (width - 1);
181   return val == 0;
182 }
183 
/* Make a binary operation, first seeing if the expression folds and
   otherwise ordering the operands canonically before building it.  */
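/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, R) for a
   REG operand R does not fold, so it returns (plus:SI R (const_int 1))
   with the operands in canonical order (constant second).  */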
186 
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 		     rtx op1)
190 {
191   rtx tem;
192 
193   /* If this simplifies, do it.  */
194   tem = simplify_binary_operation (code, mode, op0, op1);
195   if (tem)
196     return tem;
197 
198   /* Put complex operands first and constants second if commutative.  */
199   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200       && swap_commutative_operands_p (op0, op1))
201     std::swap (op0, op1);
202 
203   return gen_rtx_fmt_ee (code, mode, op0, op1);
204 }
205 
206 /* If X is a MEM referencing the constant pool, return the real value.
207    Otherwise return X.  */
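/* For example, if X is a DFmode MEM whose address is a constant pool
   SYMBOL_REF for the value 1.0, the CONST_DOUBLE for 1.0 is returned
   directly.  */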
208 rtx
209 avoid_constant_pool_reference (rtx x)
210 {
211   rtx c, tmp, addr;
212   machine_mode cmode;
213   HOST_WIDE_INT offset = 0;
214 
215   switch (GET_CODE (x))
216     {
217     case MEM:
218       break;
219 
220     case FLOAT_EXTEND:
221       /* Handle float extensions of constant pool references.  */
222       tmp = XEXP (x, 0);
223       c = avoid_constant_pool_reference (tmp);
224       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 					     GET_MODE (x));
227       return x;
228 
229     default:
230       return x;
231     }
232 
233   if (GET_MODE (x) == BLKmode)
234     return x;
235 
236   addr = XEXP (x, 0);
237 
238   /* Call target hook to avoid the effects of -fpic etc....  */
239   addr = targetm.delegitimize_address (addr);
240 
241   /* Split the address into a base and integer offset.  */
242   if (GET_CODE (addr) == CONST
243       && GET_CODE (XEXP (addr, 0)) == PLUS
244       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245     {
246       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247       addr = XEXP (XEXP (addr, 0), 0);
248     }
249 
250   if (GET_CODE (addr) == LO_SUM)
251     addr = XEXP (addr, 1);
252 
253   /* If this is a constant pool reference, we can turn it into its
254      constant and hope that simplifications happen.  */
255   if (GET_CODE (addr) == SYMBOL_REF
256       && CONSTANT_POOL_ADDRESS_P (addr))
257     {
258       c = get_pool_constant (addr);
259       cmode = get_pool_mode (addr);
260 
261       /* If we're accessing the constant in a different mode than it was
262          originally stored, attempt to fix that up via subreg simplifications.
263          If that fails we have no choice but to return the original memory.  */
264       if (offset == 0 && cmode == GET_MODE (x))
265 	return c;
266       else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
267         {
268           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269           if (tem && CONSTANT_P (tem))
270             return tem;
271         }
272     }
273 
274   return x;
275 }
276 
277 /* Simplify a MEM based on its attributes.  This is the default
278    delegitimize_address target hook, and it's recommended that every
279    overrider call it.  */
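/* For example, a MEM whose MEM_EXPR is a COMPONENT_REF of a static
   variable can be rewritten in terms of the variable's DECL_RTL,
   adjusted by the field's byte offset, provided the modes and offsets
   agree as checked below.  */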
280 
281 rtx
282 delegitimize_mem_from_attrs (rtx x)
283 {
284   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285      use their base addresses as equivalent.  */
286   if (MEM_P (x)
287       && MEM_EXPR (x)
288       && MEM_OFFSET_KNOWN_P (x))
289     {
290       tree decl = MEM_EXPR (x);
291       machine_mode mode = GET_MODE (x);
292       poly_int64 offset = 0;
293 
294       switch (TREE_CODE (decl))
295 	{
296 	default:
297 	  decl = NULL;
298 	  break;
299 
300 	case VAR_DECL:
301 	  break;
302 
303 	case ARRAY_REF:
304 	case ARRAY_RANGE_REF:
305 	case COMPONENT_REF:
306 	case BIT_FIELD_REF:
307 	case REALPART_EXPR:
308 	case IMAGPART_EXPR:
309 	case VIEW_CONVERT_EXPR:
310 	  {
311 	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
312 	    tree toffset;
313 	    int unsignedp, reversep, volatilep = 0;
314 
315 	    decl
316 	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 				     &unsignedp, &reversep, &volatilep);
318 	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
319 		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
320 		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
321 	      decl = NULL;
322 	    else
323 	      offset += bytepos + toffset_val;
324 	    break;
325 	  }
326 	}
327 
328       if (decl
329 	  && mode == GET_MODE (x)
330 	  && VAR_P (decl)
331 	  && (TREE_STATIC (decl)
332 	      || DECL_THREAD_LOCAL_P (decl))
333 	  && DECL_RTL_SET_P (decl)
334 	  && MEM_P (DECL_RTL (decl)))
335 	{
336 	  rtx newx;
337 
338 	  offset += MEM_OFFSET (x);
339 
340 	  newx = DECL_RTL (decl);
341 
342 	  if (MEM_P (newx))
343 	    {
344 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 	      poly_int64 n_offset, o_offset;
346 
347 	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do so if there's no OFFSET and the
349 		 old address X is identical to NEWX, or if X is of the
350 		 form (plus NEWX OFFSET), or the NEWX is of the form
351 		 (plus Y (const_int Z)) and X is that with the offset
352 		 added: (plus Y (const_int Z+OFFSET)).  */
353 	      n = strip_offset (n, &n_offset);
354 	      o = strip_offset (o, &o_offset);
355 	      if (!(known_eq (o_offset, n_offset + offset)
356 		    && rtx_equal_p (o, n)))
357 		x = adjust_address_nv (newx, mode, offset);
358 	    }
359 	  else if (GET_MODE (x) == GET_MODE (newx)
360 		   && known_eq (offset, 0))
361 	    x = newx;
362 	}
363     }
364 
365   return x;
366 }
367 
368 /* Make a unary operation by first seeing if it folds and otherwise making
369    the specified operation.  */
370 
371 rtx
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 		    machine_mode op_mode)
374 {
375   rtx tem;
376 
377   /* If this simplifies, use it.  */
378   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379     return tem;
380 
381   return gen_rtx_fmt_e (code, mode, op);
382 }
383 
384 /* Likewise for ternary operations.  */
385 
386 rtx
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
389 {
390   rtx tem;
391 
392   /* If this simplifies, use it.  */
393   if ((tem = simplify_ternary_operation (code, mode, op0_mode,
394 					 op0, op1, op2)) != 0)
395     return tem;
396 
397   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
398 }
399 
400 /* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */
402 
403 rtx
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 			 machine_mode cmp_mode, rtx op0, rtx op1)
406 {
407   rtx tem;
408 
409   if ((tem = simplify_relational_operation (code, mode, cmp_mode,
410 					    op0, op1)) != 0)
411     return tem;
412 
413   return gen_rtx_fmt_ee (code, mode, op0, op1);
414 }
415 
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417    and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
419    result.  */
420 
421 rtx
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
424 {
425   enum rtx_code code = GET_CODE (x);
426   machine_mode mode = GET_MODE (x);
427   machine_mode op_mode;
428   const char *fmt;
429   rtx op0, op1, op2, newx, op;
430   rtvec vec, newvec;
431   int i, j;
432 
433   if (__builtin_expect (fn != NULL, 0))
434     {
435       newx = fn (x, old_rtx, data);
436       if (newx)
437 	return newx;
438     }
439   else if (rtx_equal_p (x, old_rtx))
440     return copy_rtx ((rtx) data);
441 
442   switch (GET_RTX_CLASS (code))
443     {
444     case RTX_UNARY:
445       op0 = XEXP (x, 0);
446       op_mode = GET_MODE (op0);
447       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448       if (op0 == XEXP (x, 0))
449 	return x;
450       return simplify_gen_unary (code, mode, op0, op_mode);
451 
452     case RTX_BIN_ARITH:
453     case RTX_COMM_ARITH:
454       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 	return x;
458       return simplify_gen_binary (code, mode, op0, op1);
459 
460     case RTX_COMPARE:
461     case RTX_COMM_COMPARE:
462       op0 = XEXP (x, 0);
463       op1 = XEXP (x, 1);
464       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 	return x;
469       return simplify_gen_relational (code, mode, op_mode, op0, op1);
470 
471     case RTX_TERNARY:
472     case RTX_BITFIELD_OPS:
473       op0 = XEXP (x, 0);
474       op_mode = GET_MODE (op0);
475       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 	return x;
480       if (op_mode == VOIDmode)
481 	op_mode = GET_MODE (op0);
482       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
483 
484     case RTX_EXTRA:
485       if (code == SUBREG)
486 	{
487 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 	  if (op0 == SUBREG_REG (x))
489 	    return x;
490 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 				     GET_MODE (SUBREG_REG (x)),
492 				     SUBREG_BYTE (x));
493 	  return op0 ? op0 : x;
494 	}
495       break;
496 
497     case RTX_OBJ:
498       if (code == MEM)
499 	{
500 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 	  if (op0 == XEXP (x, 0))
502 	    return x;
503 	  return replace_equiv_address_nv (x, op0);
504 	}
505       else if (code == LO_SUM)
506 	{
507 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
509 
510 	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
511 	  if (GET_CODE (op0) == HIGH)
512 	    {
513 	      rtx base0, base1, offset0, offset1;
514 	      split_const (XEXP (op0, 0), &base0, &offset0);
515 	      split_const (op1, &base1, &offset1);
516 	      if (rtx_equal_p (base0, base1))
517 		return op1;
518 	    }
519 
520 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 	    return x;
522 	  return gen_rtx_LO_SUM (mode, op0, op1);
523 	}
524       break;
525 
526     default:
527       break;
528     }
529 
530   newx = x;
531   fmt = GET_RTX_FORMAT (code);
532   for (i = 0; fmt[i]; i++)
533     switch (fmt[i])
534       {
535       case 'E':
536 	vec = XVEC (x, i);
537 	newvec = XVEC (newx, i);
538 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
539 	  {
540 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 					  old_rtx, fn, data);
542 	    if (op != RTVEC_ELT (vec, j))
543 	      {
544 		if (newvec == vec)
545 		  {
546 		    newvec = shallow_copy_rtvec (vec);
547 		    if (x == newx)
548 		      newx = shallow_copy_rtx (x);
549 		    XVEC (newx, i) = newvec;
550 		  }
551 		RTVEC_ELT (newvec, j) = op;
552 	      }
553 	  }
554 	break;
555 
556       case 'e':
557 	if (XEXP (x, i))
558 	  {
559 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 	    if (op != XEXP (x, i))
561 	      {
562 		if (x == newx)
563 		  newx = shallow_copy_rtx (x);
564 		XEXP (newx, i) = op;
565 	      }
566 	  }
567 	break;
568       }
569   return newx;
570 }
571 
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573    resulting RTX.  Return a new RTX which is as simplified as possible.  */
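/* For example, replacing (reg:SI R) with (const_int 8) in
   (plus:SI (reg:SI R) (const_int 4)) yields (const_int 12), because the
   substituted expression is refolded by simplify_gen_binary.  */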
574 
575 rtx
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
577 {
578   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
579 }
580 
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582    Only handle cases where the truncated value is inherently an rvalue.
583 
584    RTL provides two ways of truncating a value:
585 
586    1. a lowpart subreg.  This form is only a truncation when both
587       the outer and inner modes (here MODE and OP_MODE respectively)
588       are scalar integers, and only then when the subreg is used as
589       an rvalue.
590 
591       It is only valid to form such truncating subregs if the
592       truncation requires no action by the target.  The onus for
593       proving this is on the creator of the subreg -- e.g. the
594       caller to simplify_subreg or simplify_gen_subreg -- and typically
595       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
596 
597    2. a TRUNCATE.  This form handles both scalar and compound integers.
598 
599    The first form is preferred where valid.  However, the TRUNCATE
600    handling in simplify_unary_operation turns the second form into the
601    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602    so it is generally safe to form rvalue truncations using:
603 
604       simplify_gen_unary (TRUNCATE, ...)
605 
606    and leave simplify_unary_operation to work out which representation
607    should be used.
608 
609    Because of the proof requirements on (1), simplify_truncation must
610    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611    regardless of whether the outer truncation came from a SUBREG or a
612    TRUNCATE.  For example, if the caller has proven that an SImode
613    truncation of:
614 
615       (and:DI X Y)
616 
617    is a no-op and can be represented as a subreg, it does not follow
618    that SImode truncations of X and Y are also no-ops.  On a target
619    like 64-bit MIPS that requires SImode values to be stored in
620    sign-extended form, an SImode truncation of:
621 
622       (and:DI (reg:DI X) (const_int 63))
623 
624    is trivially a no-op because only the lower 6 bits can be set.
625    However, X is still an arbitrary 64-bit number and so we cannot
626    assume that truncating it too is a no-op.  */
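/* For example, an SImode truncation of (zero_extend:DI (reg:SI R))
   simplifies straight back to (reg:SI R) via the first transformation
   below.  */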
627 
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 		     machine_mode op_mode)
631 {
632   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634   scalar_int_mode int_mode, int_op_mode, subreg_mode;
635 
636   gcc_assert (precision <= op_precision);
637 
638   /* Optimize truncations of zero and sign extended values.  */
639   if (GET_CODE (op) == ZERO_EXTEND
640       || GET_CODE (op) == SIGN_EXTEND)
641     {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the truncation.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if MODE is larger
	 than the origmode, we can just extend to the appropriate
	 mode.  */
648       machine_mode origmode = GET_MODE (XEXP (op, 0));
649       if (mode == origmode)
650 	return XEXP (op, 0);
651       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 	return simplify_gen_unary (TRUNCATE, mode,
653 				   XEXP (op, 0), origmode);
654       else
655 	return simplify_gen_unary (GET_CODE (op), mode,
656 				   XEXP (op, 0), origmode);
657     }
658 
659   /* If the machine can perform operations in the truncated mode, distribute
660      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
665 	  || GET_CODE (op) == MINUS
666 	  || GET_CODE (op) == MULT))
667     {
668       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
669       if (op0)
670 	{
671 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
672 	  if (op1)
673 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
674 	}
675     }
676 
677   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679      the outer subreg is effectively a truncation to the original mode.  */
680   if ((GET_CODE (op) == LSHIFTRT
681        || GET_CODE (op) == ASHIFTRT)
682       /* Ensure that OP_MODE is at least twice as wide as MODE
683 	 to avoid the possibility that an outer LSHIFTRT shifts by more
684 	 than the sign extension's sign_bit_copies and introduces zeros
685 	 into the high bits of the result.  */
686       && 2 * precision <= op_precision
687       && CONST_INT_P (XEXP (op, 1))
688       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
689       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
690       && UINTVAL (XEXP (op, 1)) < precision)
691     return simplify_gen_binary (ASHIFTRT, mode,
692 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
693 
694   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696      the outer subreg is effectively a truncation to the original mode.  */
697   if ((GET_CODE (op) == LSHIFTRT
698        || GET_CODE (op) == ASHIFTRT)
699       && CONST_INT_P (XEXP (op, 1))
700       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702       && UINTVAL (XEXP (op, 1)) < precision)
703     return simplify_gen_binary (LSHIFTRT, mode,
704 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
705 
706   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
708      the outer subreg is effectively a truncation to the original mode.  */
709   if (GET_CODE (op) == ASHIFT
710       && CONST_INT_P (XEXP (op, 1))
711       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
713       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714       && UINTVAL (XEXP (op, 1)) < precision)
715     return simplify_gen_binary (ASHIFT, mode,
716 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
717 
718   /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719      (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
720      and C2.  */
721   if (GET_CODE (op) == AND
722       && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
723 	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
724       && CONST_INT_P (XEXP (XEXP (op, 0), 1))
725       && CONST_INT_P (XEXP (op, 1)))
726     {
727       rtx op0 = (XEXP (XEXP (op, 0), 0));
728       rtx shift_op = XEXP (XEXP (op, 0), 1);
729       rtx mask_op = XEXP (op, 1);
730       unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
731       unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
732 
733       if (shift < precision
734 	  /* If doing this transform works for an X with all bits set,
735 	     it works for any X.  */
736 	  && ((GET_MODE_MASK (mode) >> shift) & mask)
737 	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
738 	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
739 	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
740 	{
741 	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
742 	  return simplify_gen_binary (AND, mode, op0, mask_op);
743 	}
744     }
745 
746   /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747      (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
748      changing len.  */
749   if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
750       && REG_P (XEXP (op, 0))
751       && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
752       && CONST_INT_P (XEXP (op, 1))
753       && CONST_INT_P (XEXP (op, 2)))
754     {
755       rtx op0 = XEXP (op, 0);
756       unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
757       unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
758       if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
759 	{
760 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
761 	  if (op0)
762 	    {
763 	      pos -= op_precision - precision;
764 	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
765 					   XEXP (op, 1), GEN_INT (pos));
766 	    }
767 	}
768       else if (!BITS_BIG_ENDIAN && precision >= len + pos)
769 	{
770 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 	  if (op0)
772 	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 					 XEXP (op, 1), XEXP (op, 2));
774 	}
775     }
776 
777   /* Recognize a word extraction from a multi-word subreg.  */
778   if ((GET_CODE (op) == LSHIFTRT
779        || GET_CODE (op) == ASHIFTRT)
780       && SCALAR_INT_MODE_P (mode)
781       && SCALAR_INT_MODE_P (op_mode)
782       && precision >= BITS_PER_WORD
783       && 2 * precision <= op_precision
784       && CONST_INT_P (XEXP (op, 1))
785       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
786       && UINTVAL (XEXP (op, 1)) < op_precision)
787     {
788       poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
789       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
790       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
791 				  (WORDS_BIG_ENDIAN
792 				   ? byte - shifted_bytes
793 				   : byte + shifted_bytes));
794     }
795 
796   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797      and try replacing the TRUNCATE and shift with it.  Don't do this
798      if the MEM has a mode-dependent address.  */
799   if ((GET_CODE (op) == LSHIFTRT
800        || GET_CODE (op) == ASHIFTRT)
801       && is_a <scalar_int_mode> (mode, &int_mode)
802       && is_a <scalar_int_mode> (op_mode, &int_op_mode)
803       && MEM_P (XEXP (op, 0))
804       && CONST_INT_P (XEXP (op, 1))
805       && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
806       && INTVAL (XEXP (op, 1)) > 0
807       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
808       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
809 				     MEM_ADDR_SPACE (XEXP (op, 0)))
810       && ! MEM_VOLATILE_P (XEXP (op, 0))
811       && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
812 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
813     {
814       poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
815       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
816       return adjust_address_nv (XEXP (op, 0), int_mode,
817 				(WORDS_BIG_ENDIAN
818 				 ? byte - shifted_bytes
819 				 : byte + shifted_bytes));
820     }
821 
822   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823      (OP:SI foo:SI) if OP is NEG or ABS.  */
824   if ((GET_CODE (op) == ABS
825        || GET_CODE (op) == NEG)
826       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
827 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
828       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
829     return simplify_gen_unary (GET_CODE (op), mode,
830 			       XEXP (XEXP (op, 0), 0), mode);
831 
832   /* (truncate:A (subreg:B (truncate:C X) 0)) is
833      (truncate:A X).  */
834   if (GET_CODE (op) == SUBREG
835       && is_a <scalar_int_mode> (mode, &int_mode)
836       && SCALAR_INT_MODE_P (op_mode)
837       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
838       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
839       && subreg_lowpart_p (op))
840     {
841       rtx inner = XEXP (SUBREG_REG (op), 0);
842       if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
843 	return simplify_gen_unary (TRUNCATE, int_mode, inner,
844 				   GET_MODE (inner));
845       else
846 	/* If subreg above is paradoxical and C is narrower
847 	   than A, return (subreg:A (truncate:C X) 0).  */
848 	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
849     }
850 
851   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
852   if (GET_CODE (op) == TRUNCATE)
853     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
854 			       GET_MODE (XEXP (op, 0)));
855 
  /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A,
     is already all ones.  */
858   if (GET_CODE (op) == IOR
859       && SCALAR_INT_MODE_P (mode)
860       && SCALAR_INT_MODE_P (op_mode)
861       && CONST_INT_P (XEXP (op, 1))
862       && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
863     return constm1_rtx;
864 
865   return NULL_RTX;
866 }
867 
868 /* Try to simplify a unary operation CODE whose output mode is to be
869    MODE with input operand OP whose mode was originally OP_MODE.
870    Return zero if no simplification can be made.  */
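/* For example, simplify_unary_operation (NEG, SImode, (const_int 5),
   SImode) folds to (const_int -5) via simplify_const_unary_operation,
   whereas non-constant operands are handled by
   simplify_unary_operation_1.  */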
871 rtx
872 simplify_unary_operation (enum rtx_code code, machine_mode mode,
873 			  rtx op, machine_mode op_mode)
874 {
875   rtx trueop, tem;
876 
877   trueop = avoid_constant_pool_reference (op);
878 
879   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
880   if (tem)
881     return tem;
882 
883   return simplify_unary_operation_1 (code, mode, op);
884 }
885 
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
887    to be exact.  */
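/* For example, (float:SF (reg:HI R)) is always exact on targets where
   SFmode has a 24-bit significand, since any 16-bit integer fits,
   whereas (float:SF (reg:SI R)) is exact only if nonzero_bits and
   num_sign_bit_copies show that at most 24 significant bits remain.  */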
888 
889 static bool
890 exact_int_to_float_conversion_p (const_rtx op)
891 {
892   int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
893   machine_mode op0_mode = GET_MODE (XEXP (op, 0));
894   /* Constants shouldn't reach here.  */
895   gcc_assert (op0_mode != VOIDmode);
896   int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
897   int in_bits = in_prec;
898   if (HWI_COMPUTABLE_MODE_P (op0_mode))
899     {
900       unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
901       if (GET_CODE (op) == FLOAT)
902 	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
903       else if (GET_CODE (op) == UNSIGNED_FLOAT)
904 	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
905       else
906 	gcc_unreachable ();
907       in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
908     }
909   return in_bits <= out_bits;
910 }
911 
912 /* Perform some simplifications we can do even if the operands
913    aren't constant.  */
914 static rtx
915 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
916 {
917   enum rtx_code reversed;
918   rtx temp, elt, base, step;
919   scalar_int_mode inner, int_mode, op_mode, op0_mode;
920 
921   switch (code)
922     {
923     case NOT:
924       /* (not (not X)) == X.  */
925       if (GET_CODE (op) == NOT)
926 	return XEXP (op, 0);
927 
928       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 	 comparison is all ones.   */
930       if (COMPARISON_P (op)
931 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
932 	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
933 	return simplify_gen_relational (reversed, mode, VOIDmode,
934 					XEXP (op, 0), XEXP (op, 1));
935 
936       /* (not (plus X -1)) can become (neg X).  */
937       if (GET_CODE (op) == PLUS
938 	  && XEXP (op, 1) == constm1_rtx)
939 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
940 
941       /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
942 	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 	 and MODE_VECTOR_INT.  */
944       if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
945 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
946 				    CONSTM1_RTX (mode));
947 
948       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
949       if (GET_CODE (op) == XOR
950 	  && CONST_INT_P (XEXP (op, 1))
951 	  && (temp = simplify_unary_operation (NOT, mode,
952 					       XEXP (op, 1), mode)) != 0)
953 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
954 
955       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
956       if (GET_CODE (op) == PLUS
957 	  && CONST_INT_P (XEXP (op, 1))
958 	  && mode_signbit_p (mode, XEXP (op, 1))
959 	  && (temp = simplify_unary_operation (NOT, mode,
960 					       XEXP (op, 1), mode)) != 0)
961 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
962 
963 
964       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
965 	 operands other than 1, but that is not valid.  We could do a
966 	 similar simplification for (not (lshiftrt C X)) where C is
967 	 just the sign bit, but this doesn't seem common enough to
968 	 bother with.  */
969       if (GET_CODE (op) == ASHIFT
970 	  && XEXP (op, 0) == const1_rtx)
971 	{
972 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
973 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
974 	}
975 
976       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 	 so we can perform the above simplification.  */
979       if (STORE_FLAG_VALUE == -1
980 	  && is_a <scalar_int_mode> (mode, &int_mode)
981 	  && GET_CODE (op) == ASHIFTRT
982 	  && CONST_INT_P (XEXP (op, 1))
983 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
984 	return simplify_gen_relational (GE, int_mode, VOIDmode,
985 					XEXP (op, 0), const0_rtx);
986 
987 
      /* As with (not (ashift 1 X)) above, a NOT of a lowpart SUBREG of
	 (ashift 1 X) becomes the lowpart of (rotate ~1 X) computed in
	 the wider inner mode.  */
      if (partial_subreg_p (op)
989 	  && subreg_lowpart_p (op)
990 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
991 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
992 	{
993 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
994 	  rtx x;
995 
996 	  x = gen_rtx_ROTATE (inner_mode,
997 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
998 						  inner_mode),
999 			      XEXP (SUBREG_REG (op), 1));
1000 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1001 	  if (temp)
1002 	    return temp;
1003 	}
1004 
1005       /* Apply De Morgan's laws to reduce number of patterns for machines
1006 	 with negating logical insns (and-not, nand, etc.).  If result has
1007 	 only one NOT, put it first, since that is how the patterns are
1008 	 coded.  */
1009       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1010 	{
1011 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1012 	  machine_mode op_mode;
1013 
1014 	  op_mode = GET_MODE (in1);
1015 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1016 
1017 	  op_mode = GET_MODE (in2);
1018 	  if (op_mode == VOIDmode)
1019 	    op_mode = mode;
1020 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1021 
1022 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1023 	    std::swap (in1, in2);
1024 
1025 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1026 				 mode, in1, in2);
1027 	}
1028 
1029       /* (not (bswap x)) -> (bswap (not x)).  */
1030       if (GET_CODE (op) == BSWAP)
1031 	{
1032 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1033 	  return simplify_gen_unary (BSWAP, mode, x, mode);
1034 	}
1035       break;
1036 
1037     case NEG:
1038       /* (neg (neg X)) == X.  */
1039       if (GET_CODE (op) == NEG)
1040 	return XEXP (op, 0);
1041 
      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If the comparison cannot be reversed, use
	 x ? y : (neg y) instead.  */
1045       if (GET_CODE (op) == IF_THEN_ELSE)
1046 	{
1047 	  rtx cond = XEXP (op, 0);
1048 	  rtx true_rtx = XEXP (op, 1);
1049 	  rtx false_rtx = XEXP (op, 2);
1050 
1051 	  if ((GET_CODE (true_rtx) == NEG
1052 	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1053 	       || (GET_CODE (false_rtx) == NEG
1054 		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1055 	    {
1056 	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1057 		temp = reversed_comparison (cond, mode);
1058 	      else
1059 		{
1060 		  temp = cond;
1061 		  std::swap (true_rtx, false_rtx);
1062 		}
1063 	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
1064 					    mode, temp, true_rtx, false_rtx);
1065 	    }
1066 	}
1067 
1068       /* (neg (plus X 1)) can become (not X).  */
1069       if (GET_CODE (op) == PLUS
1070 	  && XEXP (op, 1) == const1_rtx)
1071 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1072 
1073       /* Similarly, (neg (not X)) is (plus X 1).  */
1074       if (GET_CODE (op) == NOT)
1075 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1076 				    CONST1_RTX (mode));
1077 
1078       /* (neg (minus X Y)) can become (minus Y X).  This transformation
1079 	 isn't safe for modes with signed zeros, since if X and Y are
1080 	 both +0, (minus Y X) is the same as (minus X Y).  If the
1081 	 rounding mode is towards +infinity (or -infinity) then the two
1082 	 expressions will be rounded differently.  */
1083       if (GET_CODE (op) == MINUS
1084 	  && !HONOR_SIGNED_ZEROS (mode)
1085 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1087 
1088       if (GET_CODE (op) == PLUS
1089 	  && !HONOR_SIGNED_ZEROS (mode)
1090 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1091 	{
1092 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
1093 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
1094 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1095 	    {
1096 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1097 	      if (temp)
1098 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1099 	    }
1100 
1101 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
1102 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1103 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1104 	}
1105 
1106       /* (neg (mult A B)) becomes (mult A (neg B)).
1107 	 This works even for floating-point values.  */
1108       if (GET_CODE (op) == MULT
1109 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1110 	{
1111 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1112 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1113 	}
1114 
1115       /* NEG commutes with ASHIFT since it is multiplication.  Only do
1116 	 this if we can then eliminate the NEG (e.g., if the operand
1117 	 is a constant).  */
1118       if (GET_CODE (op) == ASHIFT)
1119 	{
1120 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1121 	  if (temp)
1122 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1123 	}
1124 
1125       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 	 C is equal to the width of MODE minus 1.  */
1127       if (GET_CODE (op) == ASHIFTRT
1128 	  && CONST_INT_P (XEXP (op, 1))
1129 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1130 	return simplify_gen_binary (LSHIFTRT, mode,
1131 				    XEXP (op, 0), XEXP (op, 1));
1132 
1133       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 	 C is equal to the width of MODE minus 1.  */
1135       if (GET_CODE (op) == LSHIFTRT
1136 	  && CONST_INT_P (XEXP (op, 1))
1137 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1138 	return simplify_gen_binary (ASHIFTRT, mode,
1139 				    XEXP (op, 0), XEXP (op, 1));
1140 
1141       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1142       if (GET_CODE (op) == XOR
1143 	  && XEXP (op, 1) == const1_rtx
1144 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1145 	return plus_constant (mode, XEXP (op, 0), -1);
1146 
1147       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1148       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1149       if (GET_CODE (op) == LT
1150 	  && XEXP (op, 1) == const0_rtx
1151 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1152 	{
1153 	  int_mode = as_a <scalar_int_mode> (mode);
1154 	  int isize = GET_MODE_PRECISION (inner);
1155 	  if (STORE_FLAG_VALUE == 1)
1156 	    {
1157 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1158 					  gen_int_shift_amount (inner,
1159 								isize - 1));
1160 	      if (int_mode == inner)
1161 		return temp;
1162 	      if (GET_MODE_PRECISION (int_mode) > isize)
1163 		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1164 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1165 	    }
1166 	  else if (STORE_FLAG_VALUE == -1)
1167 	    {
1168 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1169 					  gen_int_shift_amount (inner,
1170 								isize - 1));
1171 	      if (int_mode == inner)
1172 		return temp;
1173 	      if (GET_MODE_PRECISION (int_mode) > isize)
1174 		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1175 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1176 	    }
1177 	}
1178 
1179       if (vec_series_p (op, &base, &step))
1180 	{
1181 	  /* Only create a new series if we can simplify both parts.  In other
1182 	     cases this isn't really a simplification, and it's not necessarily
1183 	     a win to replace a vector operation with a scalar operation.  */
1184 	  scalar_mode inner_mode = GET_MODE_INNER (mode);
1185 	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1186 	  if (base)
1187 	    {
1188 	      step = simplify_unary_operation (NEG, inner_mode,
1189 					       step, inner_mode);
1190 	      if (step)
1191 		return gen_vec_series (mode, base, step);
1192 	    }
1193 	}
1194       break;
1195 
1196     case TRUNCATE:
1197       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 	 with the umulXi3_highpart patterns.  */
1199       if (GET_CODE (op) == LSHIFTRT
1200 	  && GET_CODE (XEXP (op, 0)) == MULT)
1201 	break;
1202 
1203       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1204 	{
1205 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1206 	    {
1207 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 	      if (temp)
1209 		return temp;
1210 	    }
1211 	  /* We can't handle truncation to a partial integer mode here
1212 	     because we don't know the real bitsize of the partial
1213 	     integer mode.  */
1214 	  break;
1215 	}
1216 
1217       if (GET_MODE (op) != VOIDmode)
1218 	{
1219 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1220 	  if (temp)
1221 	    return temp;
1222 	}
1223 
1224       /* If we know that the value is already truncated, we can
1225 	 replace the TRUNCATE with a SUBREG.  */
1226       if (known_eq (GET_MODE_NUNITS (mode), 1)
1227 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1228 	      || truncated_to_mode (mode, op)))
1229 	{
1230 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 	  if (temp)
1232 	    return temp;
1233 	}
1234 
1235       /* A truncate of a comparison can be replaced with a subreg if
1236          STORE_FLAG_VALUE permits.  This is like the previous test,
1237          but it works even if the comparison is done in a mode larger
1238          than HOST_BITS_PER_WIDE_INT.  */
1239       if (HWI_COMPUTABLE_MODE_P (mode)
1240 	  && COMPARISON_P (op)
1241 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1242 	{
1243 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 	  if (temp)
1245 	    return temp;
1246 	}
1247 
1248       /* A truncate of a memory is just loading the low part of the memory
1249 	 if we are not changing the meaning of the address. */
1250       if (GET_CODE (op) == MEM
1251 	  && !VECTOR_MODE_P (mode)
1252 	  && !MEM_VOLATILE_P (op)
1253 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1254 	{
1255 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 	  if (temp)
1257 	    return temp;
1258 	}
1259 
1260       break;
1261 
1262     case FLOAT_TRUNCATE:
1263       if (DECIMAL_FLOAT_MODE_P (mode))
1264 	break;
1265 
1266       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1267       if (GET_CODE (op) == FLOAT_EXTEND
1268 	  && GET_MODE (XEXP (op, 0)) == mode)
1269 	return XEXP (op, 0);
1270 
1271       /* (float_truncate:SF (float_truncate:DF foo:XF))
1272          = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is only done when
	 flag_unsafe_math_optimizations is set.
1274 
1275          (float_truncate:SF (float_extend:XF foo:DF))
1276          = (float_truncate:SF foo:DF).
1277 
1278          (float_truncate:DF (float_extend:XF foo:SF))
1279          = (float_extend:DF foo:SF).  */
1280       if ((GET_CODE (op) == FLOAT_TRUNCATE
1281 	   && flag_unsafe_math_optimizations)
1282 	  || GET_CODE (op) == FLOAT_EXTEND)
1283 	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1284 	  			   > GET_MODE_UNIT_SIZE (mode)
1285 	  			   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1286 				   mode,
1287 				   XEXP (op, 0), mode);
1288 
1289       /*  (float_truncate (float x)) is (float x)  */
1290       if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1291 	  && (flag_unsafe_math_optimizations
1292 	      || exact_int_to_float_conversion_p (op)))
1293 	return simplify_gen_unary (GET_CODE (op), mode,
1294 				   XEXP (op, 0),
1295 				   GET_MODE (XEXP (op, 0)));
1296 
1297       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1299       if ((GET_CODE (op) == ABS
1300 	   || GET_CODE (op) == NEG)
1301 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1302 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1303 	return simplify_gen_unary (GET_CODE (op), mode,
1304 				   XEXP (XEXP (op, 0), 0), mode);
1305 
1306       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 	 is (float_truncate:SF x).  */
1308       if (GET_CODE (op) == SUBREG
1309 	  && subreg_lowpart_p (op)
1310 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1311 	return SUBREG_REG (op);
1312       break;
1313 
1314     case FLOAT_EXTEND:
1315       if (DECIMAL_FLOAT_MODE_P (mode))
1316 	break;
1317 
1318       /*  (float_extend (float_extend x)) is (float_extend x)
1319 
1320 	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.  */
1323       if (GET_CODE (op) == FLOAT_EXTEND
1324 	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1325 	      && exact_int_to_float_conversion_p (op)))
1326 	return simplify_gen_unary (GET_CODE (op), mode,
1327 				   XEXP (op, 0),
1328 				   GET_MODE (XEXP (op, 0)));
1329 
1330       break;
1331 
1332     case ABS:
1333       /* (abs (neg <foo>)) -> (abs <foo>) */
1334       if (GET_CODE (op) == NEG)
1335 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1336 				   GET_MODE (XEXP (op, 0)));
1337 
1338       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1339          do nothing.  */
1340       if (GET_MODE (op) == VOIDmode)
1341 	break;
1342 
1343       /* If operand is something known to be positive, ignore the ABS.  */
1344       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1345 	  || val_signbit_known_clear_p (GET_MODE (op),
1346 					nonzero_bits (op, GET_MODE (op))))
1347 	return op;
1348 
1349       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1350       if (is_a <scalar_int_mode> (mode, &int_mode)
1351 	  && (num_sign_bit_copies (op, int_mode)
1352 	      == GET_MODE_PRECISION (int_mode)))
1353 	return gen_rtx_NEG (int_mode, op);
1354 
1355       break;
1356 
1357     case FFS:
1358       /* (ffs (*_extend <X>)) = (ffs <X>) */
1359       if (GET_CODE (op) == SIGN_EXTEND
1360 	  || GET_CODE (op) == ZERO_EXTEND)
1361 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1362 				   GET_MODE (XEXP (op, 0)));
1363       break;
1364 
1365     case POPCOUNT:
1366       switch (GET_CODE (op))
1367 	{
1368 	case BSWAP:
1369 	case ZERO_EXTEND:
1370 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 				     GET_MODE (XEXP (op, 0)));
1373 
1374 	case ROTATE:
1375 	case ROTATERT:
1376 	  /* Rotations don't affect popcount.  */
1377 	  if (!side_effects_p (XEXP (op, 1)))
1378 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1379 				       GET_MODE (XEXP (op, 0)));
1380 	  break;
1381 
1382 	default:
1383 	  break;
1384 	}
1385       break;
1386 
1387     case PARITY:
1388       switch (GET_CODE (op))
1389 	{
1390 	case NOT:
1391 	case BSWAP:
1392 	case ZERO_EXTEND:
1393 	case SIGN_EXTEND:
1394 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 				     GET_MODE (XEXP (op, 0)));
1396 
1397 	case ROTATE:
1398 	case ROTATERT:
1399 	  /* Rotations don't affect parity.  */
1400 	  if (!side_effects_p (XEXP (op, 1)))
1401 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1402 				       GET_MODE (XEXP (op, 0)));
1403 	  break;
1404 
1405 	default:
1406 	  break;
1407 	}
1408       break;
1409 
1410     case BSWAP:
1411       /* (bswap (bswap x)) -> x.  */
1412       if (GET_CODE (op) == BSWAP)
1413 	return XEXP (op, 0);
1414       break;
1415 
1416     case FLOAT:
1417       /* (float (sign_extend <X>)) = (float <X>).  */
1418       if (GET_CODE (op) == SIGN_EXTEND)
1419 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1420 				   GET_MODE (XEXP (op, 0)));
1421       break;
1422 
1423     case SIGN_EXTEND:
1424       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 	 becomes just the MINUS if its mode is MODE.  This allows
1426 	 folding switch statements on machines using casesi (such as
1427 	 the VAX).  */
1428       if (GET_CODE (op) == TRUNCATE
1429 	  && GET_MODE (XEXP (op, 0)) == mode
1430 	  && GET_CODE (XEXP (op, 0)) == MINUS
1431 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1432 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1433 	return XEXP (op, 0);
1434 
1435       /* Extending a widening multiplication should be canonicalized to
1436 	 a wider widening multiplication.  */
1437       if (GET_CODE (op) == MULT)
1438 	{
1439 	  rtx lhs = XEXP (op, 0);
1440 	  rtx rhs = XEXP (op, 1);
1441 	  enum rtx_code lcode = GET_CODE (lhs);
1442 	  enum rtx_code rcode = GET_CODE (rhs);
1443 
1444 	  /* Widening multiplies usually extend both operands, but sometimes
1445 	     they use a shift to extract a portion of a register.  */
1446 	  if ((lcode == SIGN_EXTEND
1447 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1448 	      && (rcode == SIGN_EXTEND
1449 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1450 	    {
1451 	      machine_mode lmode = GET_MODE (lhs);
1452 	      machine_mode rmode = GET_MODE (rhs);
1453 	      int bits;
1454 
1455 	      if (lcode == ASHIFTRT)
1456 		/* Number of bits not shifted off the end.  */
1457 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1458 			- INTVAL (XEXP (lhs, 1)));
1459 	      else /* lcode == SIGN_EXTEND */
1460 		/* Size of inner mode.  */
1461 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1462 
1463 	      if (rcode == ASHIFTRT)
1464 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1465 			 - INTVAL (XEXP (rhs, 1)));
1466 	      else /* rcode == SIGN_EXTEND */
1467 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1468 
	      /* We can only widen multiplies if the result is mathematically
1470 		 equivalent.  I.e. if overflow was impossible.  */
1471 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1472 		return simplify_gen_binary
1473 			 (MULT, mode,
1474 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1475 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1476 	    }
1477 	}
1478 
1479       /* Check for a sign extension of a subreg of a promoted
1480 	 variable, where the promotion is sign-extended, and the
1481 	 target mode is the same as the variable's promotion.  */
1482       if (GET_CODE (op) == SUBREG
1483 	  && SUBREG_PROMOTED_VAR_P (op)
1484 	  && SUBREG_PROMOTED_SIGNED_P (op)
1485 	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1486 	{
1487 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1488 	  if (temp)
1489 	    return temp;
1490 	}
1491 
1492       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1494       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1495 	{
1496 	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1497 		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1498 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1499 				     GET_MODE (XEXP (op, 0)));
1500 	}
1501 
1502       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1504 	 GET_MODE_BITSIZE (N) - I bits.
1505 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1507       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1508 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1509 	  && is_a <scalar_int_mode> (mode, &int_mode)
1510 	  && CONST_INT_P (XEXP (op, 1))
1511 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1512 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1513 	      GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1514 	{
1515 	  scalar_int_mode tmode;
1516 	  gcc_assert (GET_MODE_BITSIZE (int_mode)
1517 		      > GET_MODE_BITSIZE (op_mode));
1518 	  if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1519 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1520 	    {
1521 	      rtx inner =
1522 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1523 	      if (inner)
1524 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1525 					   ? SIGN_EXTEND : ZERO_EXTEND,
1526 					   int_mode, inner, tmode);
1527 	    }
1528 	}
1529 
1530       /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531          (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
1532       if (GET_CODE (op) == LSHIFTRT
1533 	  && CONST_INT_P (XEXP (op, 1))
1534 	  && XEXP (op, 1) != const0_rtx)
1535 	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1536 
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538       /* As we do not know which address space the pointer is referring to,
1539 	 we can do this only if the target does not support different pointer
1540 	 or address modes depending on the address space.  */
1541       if (target_default_pointer_address_modes_p ()
1542 	  && ! POINTERS_EXTEND_UNSIGNED
1543 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1544 	  && (CONSTANT_P (op)
1545 	      || (GET_CODE (op) == SUBREG
1546 		  && REG_P (SUBREG_REG (op))
1547 		  && REG_POINTER (SUBREG_REG (op))
1548 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1549 	  && !targetm.have_ptr_extend ())
1550 	{
1551 	  temp
1552 	    = convert_memory_address_addr_space_1 (Pmode, op,
1553 						   ADDR_SPACE_GENERIC, false,
1554 						   true);
1555 	  if (temp)
1556 	    return temp;
1557 	}
1558 #endif
1559       break;
1560 
1561     case ZERO_EXTEND:
1562       /* Check for a zero extension of a subreg of a promoted
1563 	 variable, where the promotion is zero-extended, and the
1564 	 target mode is the same as the variable's promotion.  */
1565       if (GET_CODE (op) == SUBREG
1566 	  && SUBREG_PROMOTED_VAR_P (op)
1567 	  && SUBREG_PROMOTED_UNSIGNED_P (op)
1568 	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1569 	{
1570 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1571 	  if (temp)
1572 	    return temp;
1573 	}
1574 
1575       /* Extending a widening multiplication should be canonicalized to
1576 	 a wider widening multiplication.  */
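      /* For example (illustrative), (zero_extend:DI (mult:SI
	 (zero_extend:SI (reg:HI A)) (zero_extend:SI (reg:HI B)))) can
	 become (mult:DI (zero_extend:DI (reg:HI A))
	 (zero_extend:DI (reg:HI B))), since a 16-bit by 16-bit product
	 cannot overflow 32 bits.  */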
1577       if (GET_CODE (op) == MULT)
1578 	{
1579 	  rtx lhs = XEXP (op, 0);
1580 	  rtx rhs = XEXP (op, 1);
1581 	  enum rtx_code lcode = GET_CODE (lhs);
1582 	  enum rtx_code rcode = GET_CODE (rhs);
1583 
1584 	  /* Widening multiplies usually extend both operands, but sometimes
1585 	     they use a shift to extract a portion of a register.  */
1586 	  if ((lcode == ZERO_EXTEND
1587 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1588 	      && (rcode == ZERO_EXTEND
1589 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1590 	    {
1591 	      machine_mode lmode = GET_MODE (lhs);
1592 	      machine_mode rmode = GET_MODE (rhs);
1593 	      int bits;
1594 
1595 	      if (lcode == LSHIFTRT)
1596 		/* Number of bits not shifted off the end.  */
1597 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1598 			- INTVAL (XEXP (lhs, 1)));
1599 	      else /* lcode == ZERO_EXTEND */
1600 		/* Size of inner mode.  */
1601 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1602 
1603 	      if (rcode == LSHIFTRT)
1604 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1605 			 - INTVAL (XEXP (rhs, 1)));
1606 	      else /* rcode == ZERO_EXTEND */
1607 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1608 
	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
1611 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1612 		return simplify_gen_binary
1613 			 (MULT, mode,
1614 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1615 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1616 	    }
1617 	}
1618 
1619       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1620       if (GET_CODE (op) == ZERO_EXTEND)
1621 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1622 				   GET_MODE (XEXP (op, 0)));
1623 
1624       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is a mode O with
	 GET_MODE_PRECISION (N) - I bits.  */
1627       if (GET_CODE (op) == LSHIFTRT
1628 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1629 	  && is_a <scalar_int_mode> (mode, &int_mode)
1630 	  && CONST_INT_P (XEXP (op, 1))
1631 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1632 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1633 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1634 	{
1635 	  scalar_int_mode tmode;
1636 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1637 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1638 	    {
1639 	      rtx inner =
1640 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1641 	      if (inner)
1642 		return simplify_gen_unary (ZERO_EXTEND, int_mode,
1643 					   inner, tmode);
1644 	    }
1645 	}
1646 
1647       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1649 	 of mode N.  E.g.
1650 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 	 (and:SI (reg:SI) (const_int 63)).  */
1652       if (partial_subreg_p (op)
1653 	  && is_a <scalar_int_mode> (mode, &int_mode)
1654 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1655 	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1656 	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1657 	  && subreg_lowpart_p (op)
1658 	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
1659 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1660 	{
1661 	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1662 	    return SUBREG_REG (op);
1663 	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1664 				     op0_mode);
1665 	}
1666 
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668       /* As we do not know which address space the pointer is referring to,
1669 	 we can do this only if the target does not support different pointer
1670 	 or address modes depending on the address space.  */
1671       if (target_default_pointer_address_modes_p ()
1672 	  && POINTERS_EXTEND_UNSIGNED > 0
1673 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1674 	  && (CONSTANT_P (op)
1675 	      || (GET_CODE (op) == SUBREG
1676 		  && REG_P (SUBREG_REG (op))
1677 		  && REG_POINTER (SUBREG_REG (op))
1678 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1679 	  && !targetm.have_ptr_extend ())
1680 	{
1681 	  temp
1682 	    = convert_memory_address_addr_space_1 (Pmode, op,
1683 						   ADDR_SPACE_GENERIC, false,
1684 						   true);
1685 	  if (temp)
1686 	    return temp;
1687 	}
1688 #endif
1689       break;
1690 
1691     default:
1692       break;
1693     }
1694 
1695   if (VECTOR_MODE_P (mode)
1696       && vec_duplicate_p (op, &elt)
1697       && code != VEC_DUPLICATE)
1698     {
1699       /* Try applying the operator to ELT and see if that simplifies.
1700 	 We can duplicate the result if so.
1701 
1702 	 The reason we don't use simplify_gen_unary is that it isn't
1703 	 necessarily a win to convert things like:
1704 
1705 	   (neg:V (vec_duplicate:V (reg:S R)))
1706 
1707 	 to:
1708 
1709 	   (vec_duplicate:V (neg:S (reg:S R)))
1710 
1711 	 The first might be done entirely in vector registers while the
1712 	 second might need a move between register files.  */
1713       temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1714 				       elt, GET_MODE_INNER (GET_MODE (op)));
1715       if (temp)
1716 	return gen_vec_duplicate (mode, temp);
1717     }
1718 
1719   return 0;
1720 }
1721 
1722 /* Try to compute the value of a unary operation CODE whose output mode is to
1723    be MODE with input operand OP whose mode was originally OP_MODE.
1724    Return zero if the value cannot be computed.  */
1725 rtx
1726 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1727 				rtx op, machine_mode op_mode)
1728 {
1729   scalar_int_mode result_mode;
1730 
1731   if (code == VEC_DUPLICATE)
1732     {
1733       gcc_assert (VECTOR_MODE_P (mode));
1734       if (GET_MODE (op) != VOIDmode)
1735       {
1736 	if (!VECTOR_MODE_P (GET_MODE (op)))
1737 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1738 	else
1739 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1740 						(GET_MODE (op)));
1741       }
1742       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1743 	return gen_const_vec_duplicate (mode, op);
1744       unsigned int n_elts;
1745       if (GET_CODE (op) == CONST_VECTOR
1746 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1747 	{
1748 	  /* This must be constant if we're duplicating it to a constant
1749 	     number of elements.  */
1750 	  unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1751 	  gcc_assert (in_n_elts < n_elts);
1752 	  gcc_assert ((n_elts % in_n_elts) == 0);
1753 	  rtvec v = rtvec_alloc (n_elts);
1754 	  for (unsigned i = 0; i < n_elts; i++)
1755 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1756 	  return gen_rtx_CONST_VECTOR (mode, v);
1757 	}
1758     }
1759 
1760   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1761     {
1762       unsigned int n_elts;
1763       if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
1764 	return NULL_RTX;
1765 
1766       machine_mode opmode = GET_MODE (op);
1767       gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
1768       gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));
1769 
1770       rtvec v = rtvec_alloc (n_elts);
1771       unsigned int i;
1772 
1773       for (i = 0; i < n_elts; i++)
1774 	{
1775 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1776 					    CONST_VECTOR_ELT (op, i),
1777 					    GET_MODE_INNER (opmode));
1778 	  if (!x || !valid_for_const_vector_p (mode, x))
1779 	    return 0;
1780 	  RTVEC_ELT (v, i) = x;
1781 	}
1782       return gen_rtx_CONST_VECTOR (mode, v);
1783     }
1784 
1785   /* The order of these tests is critical so that, for example, we don't
1786      check the wrong mode (input vs. output) for a conversion operation,
1787      such as FIX.  At some point, this should be simplified.  */
1788 
1789   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1790     {
1791       REAL_VALUE_TYPE d;
1792 
1793       if (op_mode == VOIDmode)
1794 	{
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption: CONST_INTs are often created and used
	     with garbage in the bits outside of the precision of the
	     implied mode of the const_int.  */
1800 	  op_mode = MAX_MODE_INT;
1801 	}
1802 
1803       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1804 
1805       /* Avoid the folding if flag_signaling_nans is on and
1806          operand is a signaling NaN.  */
1807       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1808         return 0;
1809 
1810       d = real_value_truncate (mode, d);
1811       return const_double_from_real_value (d, mode);
1812     }
1813   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1814     {
1815       REAL_VALUE_TYPE d;
1816 
1817       if (op_mode == VOIDmode)
1818 	{
	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
	     the bits of the constant are significant, though this is a
	     dangerous assumption: CONST_INTs are often created and used
	     with garbage in the bits outside of the precision of the
	     implied mode of the const_int.  */
1824 	  op_mode = MAX_MODE_INT;
1825 	}
1826 
1827       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1828 
1829       /* Avoid the folding if flag_signaling_nans is on and
1830          operand is a signaling NaN.  */
1831       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1832         return 0;
1833 
1834       d = real_value_truncate (mode, d);
1835       return const_double_from_real_value (d, mode);
1836     }
1837 
1838   if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1839     {
1840       unsigned int width = GET_MODE_PRECISION (result_mode);
1841       wide_int result;
1842       scalar_int_mode imode = (op_mode == VOIDmode
1843 			       ? result_mode
1844 			       : as_a <scalar_int_mode> (op_mode));
1845       rtx_mode_t op0 = rtx_mode_t (op, imode);
1846       int int_value;
1847 
1848 #if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE.  A lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this check to the test
	 above, the code would die later anyway.  If this assert
	 fires, you just need to make the port support wide ints.  */
1855       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1856 #endif
1857 
1858       switch (code)
1859 	{
1860 	case NOT:
1861 	  result = wi::bit_not (op0);
1862 	  break;
1863 
1864 	case NEG:
1865 	  result = wi::neg (op0);
1866 	  break;
1867 
1868 	case ABS:
1869 	  result = wi::abs (op0);
1870 	  break;
1871 
1872 	case FFS:
1873 	  result = wi::shwi (wi::ffs (op0), result_mode);
1874 	  break;
1875 
1876 	case CLZ:
1877 	  if (wi::ne_p (op0, 0))
1878 	    int_value = wi::clz (op0);
1879 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1880 	    return NULL_RTX;
1881 	  result = wi::shwi (int_value, result_mode);
1882 	  break;
1883 
1884 	case CLRSB:
1885 	  result = wi::shwi (wi::clrsb (op0), result_mode);
1886 	  break;
1887 
1888 	case CTZ:
1889 	  if (wi::ne_p (op0, 0))
1890 	    int_value = wi::ctz (op0);
1891 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1892 	    return NULL_RTX;
1893 	  result = wi::shwi (int_value, result_mode);
1894 	  break;
1895 
1896 	case POPCOUNT:
1897 	  result = wi::shwi (wi::popcount (op0), result_mode);
1898 	  break;
1899 
1900 	case PARITY:
1901 	  result = wi::shwi (wi::parity (op0), result_mode);
1902 	  break;
1903 
1904 	case BSWAP:
1905 	  result = wide_int (op0).bswap ();
1906 	  break;
1907 
1908 	case TRUNCATE:
1909 	case ZERO_EXTEND:
1910 	  result = wide_int::from (op0, width, UNSIGNED);
1911 	  break;
1912 
1913 	case SIGN_EXTEND:
1914 	  result = wide_int::from (op0, width, SIGNED);
1915 	  break;
1916 
1917 	case SQRT:
1918 	default:
1919 	  return 0;
1920 	}
1921 
1922       return immed_wide_int_const (result, result_mode);
1923     }
1924 
1925   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1926 	   && SCALAR_FLOAT_MODE_P (mode)
1927 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1928     {
1929       REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1930       switch (code)
1931 	{
1932 	case SQRT:
1933 	  return 0;
1934 	case ABS:
1935 	  d = real_value_abs (&d);
1936 	  break;
1937 	case NEG:
1938 	  d = real_value_negate (&d);
1939 	  break;
1940 	case FLOAT_TRUNCATE:
1941 	  /* Don't perform the operation if flag_signaling_nans is on
1942 	     and the operand is a signaling NaN.  */
1943 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1944 	    return NULL_RTX;
1945 	  d = real_value_truncate (mode, d);
1946 	  break;
1947 	case FLOAT_EXTEND:
1948 	  /* Don't perform the operation if flag_signaling_nans is on
1949 	     and the operand is a signaling NaN.  */
1950 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1951 	    return NULL_RTX;
1952 	  /* All this does is change the mode, unless changing
1953 	     mode class.  */
1954 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1955 	    real_convert (&d, mode, &d);
1956 	  break;
1957 	case FIX:
1958 	  /* Don't perform the operation if flag_signaling_nans is on
1959 	     and the operand is a signaling NaN.  */
1960 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1961 	    return NULL_RTX;
1962 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1963 	  break;
1964 	case NOT:
1965 	  {
1966 	    long tmp[4];
1967 	    int i;
1968 
1969 	    real_to_target (tmp, &d, GET_MODE (op));
1970 	    for (i = 0; i < 4; i++)
1971 	      tmp[i] = ~tmp[i];
1972 	    real_from_target (&d, tmp, mode);
1973 	    break;
1974 	  }
1975 	default:
1976 	  gcc_unreachable ();
1977 	}
1978       return const_double_from_real_value (d, mode);
1979     }
1980   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1981 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1982 	   && is_int_mode (mode, &result_mode))
1983     {
1984       unsigned int width = GET_MODE_PRECISION (result_mode);
1985       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1986 	 operators are intentionally left unspecified (to ease implementation
1987 	 by target backends), for consistency, this routine implements the
1988 	 same semantics for constant folding as used by the middle-end.  */
1989 
1990       /* This was formerly used only for non-IEEE float.
1991 	 eggert@twinsun.com says it is safe for IEEE also.  */
1992       REAL_VALUE_TYPE t;
1993       const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1994       wide_int wmax, wmin;
      /* This is part of the ABI of real_to_integer, but we check
	 things before making this call.  */
1997       bool fail;
1998 
1999       switch (code)
2000 	{
2001 	case FIX:
2002 	  if (REAL_VALUE_ISNAN (*x))
2003 	    return const0_rtx;
2004 
2005 	  /* Test against the signed upper bound.  */
2006 	  wmax = wi::max_value (width, SIGNED);
2007 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
2008 	  if (real_less (&t, x))
2009 	    return immed_wide_int_const (wmax, mode);
2010 
2011 	  /* Test against the signed lower bound.  */
2012 	  wmin = wi::min_value (width, SIGNED);
2013 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
2014 	  if (real_less (x, &t))
2015 	    return immed_wide_int_const (wmin, mode);
2016 
2017 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2018 				       mode);
2019 
2020 	case UNSIGNED_FIX:
2021 	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2022 	    return const0_rtx;
2023 
2024 	  /* Test against the unsigned upper bound.  */
2025 	  wmax = wi::max_value (width, UNSIGNED);
2026 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2027 	  if (real_less (&t, x))
2028 	    return immed_wide_int_const (wmax, mode);
2029 
2030 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2031 				       mode);
2032 
2033 	default:
2034 	  gcc_unreachable ();
2035 	}
2036     }
2037 
2038   /* Handle polynomial integers.  */
2039   else if (CONST_POLY_INT_P (op))
2040     {
2041       poly_wide_int result;
2042       switch (code)
2043 	{
2044 	case NEG:
2045 	  result = -const_poly_int_value (op);
2046 	  break;
2047 
2048 	case NOT:
2049 	  result = ~const_poly_int_value (op);
2050 	  break;
2051 
2052 	default:
2053 	  return NULL_RTX;
2054 	}
2055       return immed_wide_int_const (result, mode);
2056     }
2057 
2058   return NULL_RTX;
2059 }
2060 
2061 /* Subroutine of simplify_binary_operation to simplify a binary operation
2062    CODE that can commute with byte swapping, with result mode MODE and
2063    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
2064    Return zero if no simplification or canonicalization is possible.  */
2065 
2066 static rtx
2067 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2068 				  rtx op0, rtx op1)
2069 {
2070   rtx tem;
2071 
  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
2073   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2074     {
2075       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2076 				 simplify_gen_unary (BSWAP, mode, op1, mode));
2077       return simplify_gen_unary (BSWAP, mode, tem, mode);
2078     }
2079 
2080   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
2081   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2082     {
2083       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2084       return simplify_gen_unary (BSWAP, mode, tem, mode);
2085     }
2086 
2087   return NULL_RTX;
2088 }
2089 
2090 /* Subroutine of simplify_binary_operation to simplify a commutative,
2091    associative binary operation CODE with result mode MODE, operating
2092    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2093    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2094    canonicalization is possible.  */
2095 
2096 static rtx
2097 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2098 				rtx op0, rtx op1)
2099 {
2100   rtx tem;
2101 
2102   /* Linearize the operator to the left.  */
2103   if (GET_CODE (op1) == code)
2104     {
2105       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
2106       if (GET_CODE (op0) == code)
2107 	{
2108 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2109 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2110 	}
2111 
2112       /* "a op (b op c)" becomes "(b op c) op a".  */
2113       if (! swap_commutative_operands_p (op1, op0))
2114 	return simplify_gen_binary (code, mode, op1, op0);
2115 
2116       std::swap (op0, op1);
2117     }
2118 
2119   if (GET_CODE (op0) == code)
2120     {
2121       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2122       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2123 	{
2124 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2125 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2126 	}
2127 
2128       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2129       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2130       if (tem != 0)
2131         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2132 
2133       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2134       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2135       if (tem != 0)
2136         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2137     }
2138 
2139   return 0;
2140 }
2141 
2142 
2143 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2144    and OP1.  Return 0 if no simplification is possible.
2145 
2146    Don't use this for relational operations such as EQ or LT.
2147    Use simplify_relational_operation instead.  */
2148 rtx
2149 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2150 			   rtx op0, rtx op1)
2151 {
2152   rtx trueop0, trueop1;
2153   rtx tem;
2154 
2155   /* Relational operations don't work here.  We must know the mode
2156      of the operands in order to do the comparison correctly.
2157      Assuming a full word can give incorrect results.
2158      Consider comparing 128 with -128 in QImode.  */
2159   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2160   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2161 
2162   /* Make sure the constant is second.  */
2163   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2164       && swap_commutative_operands_p (op0, op1))
2165     std::swap (op0, op1);
2166 
2167   trueop0 = avoid_constant_pool_reference (op0);
2168   trueop1 = avoid_constant_pool_reference (op1);
2169 
2170   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2171   if (tem)
2172     return tem;
2173   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2174 
2175   if (tem)
2176     return tem;
2177 
2178   /* If the above steps did not result in a simplification and op0 or op1
2179      were constant pool references, use the referenced constants directly.  */
2180   if (trueop0 != op0 || trueop1 != op1)
2181     return simplify_gen_binary (code, mode, trueop0, trueop1);
2182 
2183   return NULL_RTX;
2184 }
2185 
2186 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2187    which OP0 and OP1 are both vector series or vector duplicates
2188    (which are really just series with a step of 0).  If so, try to
2189    form a new series by applying CODE to the bases and to the steps.
2190    Return null if no simplification is possible.
2191 
2192    MODE is the mode of the operation and is known to be a vector
2193    integer mode.  */
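/* For example (illustrative), (plus:V (vec_series:V A S) (vec_duplicate:V B))
   can become (vec_series:V (plus A B) S), treating the duplicate as a series
   with step zero, provided both the base and step additions simplify.  */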
2194 
2195 static rtx
2196 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2197 				  rtx op0, rtx op1)
2198 {
2199   rtx base0, step0;
2200   if (vec_duplicate_p (op0, &base0))
2201     step0 = const0_rtx;
2202   else if (!vec_series_p (op0, &base0, &step0))
2203     return NULL_RTX;
2204 
2205   rtx base1, step1;
2206   if (vec_duplicate_p (op1, &base1))
2207     step1 = const0_rtx;
2208   else if (!vec_series_p (op1, &base1, &step1))
2209     return NULL_RTX;
2210 
2211   /* Only create a new series if we can simplify both parts.  In other
2212      cases this isn't really a simplification, and it's not necessarily
2213      a win to replace a vector operation with a scalar operation.  */
2214   scalar_mode inner_mode = GET_MODE_INNER (mode);
2215   rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2216   if (!new_base)
2217     return NULL_RTX;
2218 
2219   rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2220   if (!new_step)
2221     return NULL_RTX;
2222 
2223   return gen_vec_series (mode, new_base, new_step);
2224 }
2225 
2226 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2227    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2228    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2229    actual constants.  */
2230 
2231 static rtx
2232 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2233 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2234 {
2235   rtx tem, reversed, opleft, opright, elt0, elt1;
2236   HOST_WIDE_INT val;
2237   scalar_int_mode int_mode, inner_mode;
2238   poly_int64 offset;
2239 
2240   /* Even if we can't compute a constant result,
2241      there are some cases worth simplifying.  */
2242 
2243   switch (code)
2244     {
2245     case PLUS:
2246       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2247 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2248 	 when x is -0 and the rounding mode is not towards -infinity,
2249 	 since (-0) + 0 is then 0.  */
2250       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2251 	return op0;
2252 
2253       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2254 	 transformations are safe even for IEEE.  */
2255       if (GET_CODE (op0) == NEG)
2256 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2257       else if (GET_CODE (op1) == NEG)
2258 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2259 
2260       /* (~a) + 1 -> -a */
2261       if (INTEGRAL_MODE_P (mode)
2262 	  && GET_CODE (op0) == NOT
2263 	  && trueop1 == const1_rtx)
2264 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2265 
2266       /* Handle both-operands-constant cases.  We can only add
2267 	 CONST_INTs to constants since the sum of relocatable symbols
2268 	 can't be handled by most assemblers.  Don't add CONST_INT
2269 	 to CONST_INT since overflow won't be computed properly if wider
2270 	 than HOST_BITS_PER_WIDE_INT.  */
2271 
2272       if ((GET_CODE (op0) == CONST
2273 	   || GET_CODE (op0) == SYMBOL_REF
2274 	   || GET_CODE (op0) == LABEL_REF)
2275 	  && CONST_INT_P (op1))
2276 	return plus_constant (mode, op0, INTVAL (op1));
2277       else if ((GET_CODE (op1) == CONST
2278 		|| GET_CODE (op1) == SYMBOL_REF
2279 		|| GET_CODE (op1) == LABEL_REF)
2280 	       && CONST_INT_P (op0))
2281 	return plus_constant (mode, op1, INTVAL (op0));
2282 
2283       /* See if this is something like X * C - X or vice versa or
2284 	 if the multiplication is written as a shift.  If so, we can
2285 	 distribute and make a new multiply, shift, or maybe just
2286 	 have X (if C is 2 in the example above).  But don't make
2287 	 something more expensive than we had before.  */
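      /* For example, (plus (mult X (const_int 3)) X) can become
	 (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X)
	 can become (mult X (const_int 5)), provided the new form is no
	 more expensive than the old one.  */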
2288 
2289       if (is_a <scalar_int_mode> (mode, &int_mode))
2290 	{
2291 	  rtx lhs = op0, rhs = op1;
2292 
2293 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2294 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2295 
2296 	  if (GET_CODE (lhs) == NEG)
2297 	    {
2298 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2299 	      lhs = XEXP (lhs, 0);
2300 	    }
2301 	  else if (GET_CODE (lhs) == MULT
2302 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2303 	    {
2304 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2305 	      lhs = XEXP (lhs, 0);
2306 	    }
2307 	  else if (GET_CODE (lhs) == ASHIFT
2308 		   && CONST_INT_P (XEXP (lhs, 1))
2309                    && INTVAL (XEXP (lhs, 1)) >= 0
2310 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2311 	    {
2312 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2313 					    GET_MODE_PRECISION (int_mode));
2314 	      lhs = XEXP (lhs, 0);
2315 	    }
2316 
2317 	  if (GET_CODE (rhs) == NEG)
2318 	    {
2319 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2320 	      rhs = XEXP (rhs, 0);
2321 	    }
2322 	  else if (GET_CODE (rhs) == MULT
2323 		   && CONST_INT_P (XEXP (rhs, 1)))
2324 	    {
2325 	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2326 	      rhs = XEXP (rhs, 0);
2327 	    }
2328 	  else if (GET_CODE (rhs) == ASHIFT
2329 		   && CONST_INT_P (XEXP (rhs, 1))
2330 		   && INTVAL (XEXP (rhs, 1)) >= 0
2331 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2332 	    {
2333 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2334 					    GET_MODE_PRECISION (int_mode));
2335 	      rhs = XEXP (rhs, 0);
2336 	    }
2337 
2338 	  if (rtx_equal_p (lhs, rhs))
2339 	    {
2340 	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2341 	      rtx coeff;
2342 	      bool speed = optimize_function_for_speed_p (cfun);
2343 
2344 	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2345 
2346 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2347 	      return (set_src_cost (tem, int_mode, speed)
2348 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2349 	    }
2350 	}
2351 
2352       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
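      /* Adding the sign bit is equivalent to xoring it in, since any
	 carry out of the most significant bit is discarded, so the two
	 constants can simply be combined with XOR.  */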
2353       if (CONST_SCALAR_INT_P (op1)
2354 	  && GET_CODE (op0) == XOR
2355 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2356 	  && mode_signbit_p (mode, op1))
2357 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2358 				    simplify_gen_binary (XOR, mode, op1,
2359 							 XEXP (op0, 1)));
2360 
2361       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2362       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2363 	  && GET_CODE (op0) == MULT
2364 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2365 	{
2366 	  rtx in1, in2;
2367 
2368 	  in1 = XEXP (XEXP (op0, 0), 0);
2369 	  in2 = XEXP (op0, 1);
2370 	  return simplify_gen_binary (MINUS, mode, op1,
2371 				      simplify_gen_binary (MULT, mode,
2372 							   in1, in2));
2373 	}
2374 
2375       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2376 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2377 	 is 1.  */
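      /* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
	 can become (neg (ne A B)): both are 0 when A == B and -1
	 otherwise.  */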
2378       if (COMPARISON_P (op0)
2379 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2380 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2381 	  && (reversed = reversed_comparison (op0, mode)))
2382 	return
2383 	  simplify_gen_unary (NEG, mode, reversed, mode);
2384 
2385       /* If one of the operands is a PLUS or a MINUS, see if we can
2386 	 simplify this by the associative law.
2387 	 Don't use the associative law for floating point.
2388 	 The inaccuracy makes it nonassociative,
2389 	 and subtle programs can break if operations are associated.  */
2390 
2391       if (INTEGRAL_MODE_P (mode)
2392 	  && (plus_minus_operand_p (op0)
2393 	      || plus_minus_operand_p (op1))
2394 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2395 	return tem;
2396 
2397       /* Reassociate floating point addition only when the user
2398 	 specifies associative math operations.  */
2399       if (FLOAT_MODE_P (mode)
2400 	  && flag_associative_math)
2401 	{
2402 	  tem = simplify_associative_operation (code, mode, op0, op1);
2403 	  if (tem)
2404 	    return tem;
2405 	}
2406 
2407       /* Handle vector series.  */
2408       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2409 	{
2410 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2411 	  if (tem)
2412 	    return tem;
2413 	}
2414       break;
2415 
2416     case COMPARE:
2417       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2418       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2419 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2420 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2421 	{
2422 	  rtx xop00 = XEXP (op0, 0);
2423 	  rtx xop10 = XEXP (op1, 0);
2424 
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE (xop00) == mode
	      && GET_MODE (xop10) == mode
	      && GET_MODE_CLASS (mode) == MODE_CC)
	    return xop00;
2434 	}
2435       break;
2436 
2437     case MINUS:
2438       /* We can't assume x-x is 0 even with non-IEEE floating point,
2439 	 but since it is zero except in very strange circumstances, we
2440 	 will treat it as zero with -ffinite-math-only.  */
2441       if (rtx_equal_p (trueop0, trueop1)
2442 	  && ! side_effects_p (op0)
2443 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2444 	return CONST0_RTX (mode);
2445 
2446       /* Change subtraction from zero into negation.  (0 - x) is the
2447 	 same as -x when x is NaN, infinite, or finite and nonzero.
2448 	 But if the mode has signed zeros, and does not round towards
2449 	 -infinity, then 0 - 0 is 0, not -0.  */
2450       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2451 	return simplify_gen_unary (NEG, mode, op1, mode);
2452 
2453       /* (-1 - a) is ~a, unless the expression contains symbolic
2454 	 constants, in which case not retaining additions and
2455 	 subtractions could cause invalid assembly to be produced.  */
2456       if (trueop0 == constm1_rtx
2457 	  && !contains_symbolic_reference_p (op1))
2458 	return simplify_gen_unary (NOT, mode, op1, mode);
2459 
2460       /* Subtracting 0 has no effect unless the mode has signed zeros
2461 	 and supports rounding towards -infinity.  In such a case,
2462 	 0 - 0 is -0.  */
2463       if (!(HONOR_SIGNED_ZEROS (mode)
2464 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2465 	  && trueop1 == CONST0_RTX (mode))
2466 	return op0;
2467 
2468       /* See if this is something like X * C - X or vice versa or
2469 	 if the multiplication is written as a shift.  If so, we can
2470 	 distribute and make a new multiply, shift, or maybe just
2471 	 have X (if C is 2 in the example above).  But don't make
2472 	 something more expensive than we had before.  */
2473 
2474       if (is_a <scalar_int_mode> (mode, &int_mode))
2475 	{
2476 	  rtx lhs = op0, rhs = op1;
2477 
2478 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2479 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2480 
2481 	  if (GET_CODE (lhs) == NEG)
2482 	    {
2483 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2484 	      lhs = XEXP (lhs, 0);
2485 	    }
2486 	  else if (GET_CODE (lhs) == MULT
2487 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2488 	    {
2489 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2490 	      lhs = XEXP (lhs, 0);
2491 	    }
2492 	  else if (GET_CODE (lhs) == ASHIFT
2493 		   && CONST_INT_P (XEXP (lhs, 1))
2494 		   && INTVAL (XEXP (lhs, 1)) >= 0
2495 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2496 	    {
2497 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2498 					    GET_MODE_PRECISION (int_mode));
2499 	      lhs = XEXP (lhs, 0);
2500 	    }
2501 
2502 	  if (GET_CODE (rhs) == NEG)
2503 	    {
2504 	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2505 	      rhs = XEXP (rhs, 0);
2506 	    }
2507 	  else if (GET_CODE (rhs) == MULT
2508 		   && CONST_INT_P (XEXP (rhs, 1)))
2509 	    {
2510 	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2511 	      rhs = XEXP (rhs, 0);
2512 	    }
2513 	  else if (GET_CODE (rhs) == ASHIFT
2514 		   && CONST_INT_P (XEXP (rhs, 1))
2515 		   && INTVAL (XEXP (rhs, 1)) >= 0
2516 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2517 	    {
2518 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2519 					       GET_MODE_PRECISION (int_mode));
2520 	      negcoeff1 = -negcoeff1;
2521 	      rhs = XEXP (rhs, 0);
2522 	    }
2523 
2524 	  if (rtx_equal_p (lhs, rhs))
2525 	    {
2526 	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2527 	      rtx coeff;
2528 	      bool speed = optimize_function_for_speed_p (cfun);
2529 
2530 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2531 
2532 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2533 	      return (set_src_cost (tem, int_mode, speed)
2534 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2535 	    }
2536 	}
2537 
2538       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2539       if (GET_CODE (op1) == NEG)
2540 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2541 
2542       /* (-x - c) may be simplified as (-c - x).  */
2543       if (GET_CODE (op0) == NEG
2544 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2545 	{
2546 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2547 	  if (tem)
2548 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2549 	}
2550 
2551       if ((GET_CODE (op0) == CONST
2552 	   || GET_CODE (op0) == SYMBOL_REF
2553 	   || GET_CODE (op0) == LABEL_REF)
2554 	  && poly_int_rtx_p (op1, &offset))
2555 	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2556 
2557       /* Don't let a relocatable value get a negative coeff.  */
2558       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2559 	return simplify_gen_binary (PLUS, mode,
2560 				    op0,
2561 				    neg_const_int (mode, op1));
2562 
2563       /* (x - (x & y)) -> (x & ~y) */
2564       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2565 	{
2566 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2567 	    {
2568 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2569 					GET_MODE (XEXP (op1, 1)));
2570 	      return simplify_gen_binary (AND, mode, op0, tem);
2571 	    }
2572 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2573 	    {
2574 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2575 					GET_MODE (XEXP (op1, 0)));
2576 	      return simplify_gen_binary (AND, mode, op0, tem);
2577 	    }
2578 	}
2579 
2580       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2581 	 by reversing the comparison code if valid.  */
2582       if (STORE_FLAG_VALUE == 1
2583 	  && trueop0 == const1_rtx
2584 	  && COMPARISON_P (op1)
2585 	  && (reversed = reversed_comparison (op1, mode)))
2586 	return reversed;
2587 
2588       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2589       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2590 	  && GET_CODE (op1) == MULT
2591 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2592 	{
2593 	  rtx in1, in2;
2594 
2595 	  in1 = XEXP (XEXP (op1, 0), 0);
2596 	  in2 = XEXP (op1, 1);
2597 	  return simplify_gen_binary (PLUS, mode,
2598 				      simplify_gen_binary (MULT, mode,
2599 							   in1, in2),
2600 				      op0);
2601 	}
2602 
2603       /* Canonicalize (minus (neg A) (mult B C)) to
2604 	 (minus (mult (neg B) C) A).  */
2605       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2606 	  && GET_CODE (op1) == MULT
2607 	  && GET_CODE (op0) == NEG)
2608 	{
2609 	  rtx in1, in2;
2610 
2611 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2612 	  in2 = XEXP (op1, 1);
2613 	  return simplify_gen_binary (MINUS, mode,
2614 				      simplify_gen_binary (MULT, mode,
2615 							   in1, in2),
2616 				      XEXP (op0, 0));
2617 	}
2618 
2619       /* If one of the operands is a PLUS or a MINUS, see if we can
2620 	 simplify this by the associative law.  This will, for example,
2621          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2622 	 Don't use the associative law for floating point.
2623 	 The inaccuracy makes it nonassociative,
2624 	 and subtle programs can break if operations are associated.  */
2625 
2626       if (INTEGRAL_MODE_P (mode)
2627 	  && (plus_minus_operand_p (op0)
2628 	      || plus_minus_operand_p (op1))
2629 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2630 	return tem;
2631 
2632       /* Handle vector series.  */
2633       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2634 	{
2635 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2636 	  if (tem)
2637 	    return tem;
2638 	}
2639       break;
2640 
2641     case MULT:
2642       if (trueop1 == constm1_rtx)
2643 	return simplify_gen_unary (NEG, mode, op0, mode);
2644 
2645       if (GET_CODE (op0) == NEG)
2646 	{
2647 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2652 	  if (temp
2653 	      && GET_CODE (op1) == MULT
2654 	      && GET_CODE (temp) == MULT
2655 	      && XEXP (op1, 0) == XEXP (temp, 0)
2656 	      && GET_CODE (XEXP (temp, 1)) == NEG
2657 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2658 	    temp = NULL_RTX;
2659 	  if (temp)
2660 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2661 	}
2662       if (GET_CODE (op1) == NEG)
2663 	{
2664 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, through simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2669 	  if (temp
2670 	      && GET_CODE (op0) == MULT
2671 	      && GET_CODE (temp) == MULT
2672 	      && XEXP (op0, 0) == XEXP (temp, 0)
2673 	      && GET_CODE (XEXP (temp, 1)) == NEG
2674 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2675 	    temp = NULL_RTX;
2676 	  if (temp)
2677 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2678 	}
2679 
2680       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2681 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2682 	 when the mode has signed zeros, since multiplying a negative
2683 	 number by 0 will give -0, not 0.  */
2684       if (!HONOR_NANS (mode)
2685 	  && !HONOR_SIGNED_ZEROS (mode)
2686 	  && trueop1 == CONST0_RTX (mode)
2687 	  && ! side_effects_p (op0))
2688 	return op1;
2689 
2690       /* In IEEE floating point, x*1 is not equivalent to x for
2691 	 signalling NaNs.  */
2692       if (!HONOR_SNANS (mode)
2693 	  && trueop1 == CONST1_RTX (mode))
2694 	return op0;
2695 
2696       /* Convert multiply by constant power of two into shift.  */
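      /* E.g. (mult:SI X (const_int 8)) becomes
	 (ashift:SI X (const_int 3)).  */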
2697       if (CONST_SCALAR_INT_P (trueop1))
2698 	{
2699 	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2700 	  if (val >= 0)
2701 	    return simplify_gen_binary (ASHIFT, mode, op0,
2702 					gen_int_shift_amount (mode, val));
2703 	}
2704 
2705       /* x*2 is x+x and x*(-1) is -x */
2706       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2707 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2708 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2709 	  && GET_MODE (op0) == mode)
2710 	{
2711 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2712 
2713 	  if (real_equal (d1, &dconst2))
2714 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2715 
2716 	  if (!HONOR_SNANS (mode)
2717 	      && real_equal (d1, &dconstm1))
2718 	    return simplify_gen_unary (NEG, mode, op0, mode);
2719 	}
2720 
2721       /* Optimize -x * -x as x * x.  */
2722       if (FLOAT_MODE_P (mode)
2723 	  && GET_CODE (op0) == NEG
2724 	  && GET_CODE (op1) == NEG
2725 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2726 	  && !side_effects_p (XEXP (op0, 0)))
2727 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2728 
2729       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2730       if (SCALAR_FLOAT_MODE_P (mode)
2731 	  && GET_CODE (op0) == ABS
2732 	  && GET_CODE (op1) == ABS
2733 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2734 	  && !side_effects_p (XEXP (op0, 0)))
2735 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2736 
2737       /* Reassociate multiplication, but for floating point MULTs
2738 	 only when the user specifies unsafe math optimizations.  */
2739       if (! FLOAT_MODE_P (mode)
2740 	  || flag_unsafe_math_optimizations)
2741 	{
2742 	  tem = simplify_associative_operation (code, mode, op0, op1);
2743 	  if (tem)
2744 	    return tem;
2745 	}
2746       break;
2747 
2748     case IOR:
2749       if (trueop1 == CONST0_RTX (mode))
2750 	return op0;
2751       if (INTEGRAL_MODE_P (mode)
2752 	  && trueop1 == CONSTM1_RTX (mode)
2753 	  && !side_effects_p (op0))
2754 	return op1;
2755       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2756 	return op0;
2757       /* A | (~A) -> -1 */
2758       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2759 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2760 	  && ! side_effects_p (op0)
2761 	  && SCALAR_INT_MODE_P (mode))
2762 	return constm1_rtx;
2763 
2764       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2765       if (CONST_INT_P (op1)
2766 	  && HWI_COMPUTABLE_MODE_P (mode)
2767 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2768 	  && !side_effects_p (op0))
2769 	return op1;
2770 
2771       /* Canonicalize (X & C1) | C2.  */
2772       if (GET_CODE (op0) == AND
2773 	  && CONST_INT_P (trueop1)
2774 	  && CONST_INT_P (XEXP (op0, 1)))
2775 	{
2776 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2777 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2778 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2779 
2780 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2781 	  if ((c1 & c2) == c1
2782 	      && !side_effects_p (XEXP (op0, 0)))
2783 	    return trueop1;
2784 
2785 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2786 	  if (((c1|c2) & mask) == mask)
2787 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2788 	}
2789 
2790       /* Convert (A & B) | A to A.  */
2791       if (GET_CODE (op0) == AND
2792 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2793 	      || rtx_equal_p (XEXP (op0, 1), op1))
2794 	  && ! side_effects_p (XEXP (op0, 0))
2795 	  && ! side_effects_p (XEXP (op0, 1)))
2796 	return op1;
2797 
2798       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2799          mode size to (rotate A CX).  */
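      /* For example (illustrative), (ior (ashift:SI X (const_int 8))
	 (lshiftrt:SI X (const_int 24))) becomes
	 (rotate:SI X (const_int 8)), since 8 + 24 equals the SImode
	 precision.  */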
2800 
2801       if (GET_CODE (op1) == ASHIFT
2802           || GET_CODE (op1) == SUBREG)
2803         {
2804 	  opleft = op1;
2805 	  opright = op0;
2806 	}
2807       else
2808         {
2809 	  opright = op1;
2810 	  opleft = op0;
2811 	}
2812 
2813       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2814           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2815           && CONST_INT_P (XEXP (opleft, 1))
2816           && CONST_INT_P (XEXP (opright, 1))
2817           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2818 	      == GET_MODE_UNIT_PRECISION (mode)))
2819         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2820 
2821       /* Same, but for ashift that has been "simplified" to a wider mode
2822         by simplify_shift_const.  */
2823 
2824       if (GET_CODE (opleft) == SUBREG
2825 	  && is_a <scalar_int_mode> (mode, &int_mode)
2826 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2827 				     &inner_mode)
2828           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2829           && GET_CODE (opright) == LSHIFTRT
2830           && GET_CODE (XEXP (opright, 0)) == SUBREG
2831 	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2832 	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2833           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2834                           SUBREG_REG (XEXP (opright, 0)))
2835           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2836           && CONST_INT_P (XEXP (opright, 1))
2837 	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2838 	      + INTVAL (XEXP (opright, 1))
2839 	      == GET_MODE_PRECISION (int_mode)))
2840 	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2841 			       XEXP (SUBREG_REG (opleft), 1));
2842 
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  If so, and if OP1 is a CONST_INT
	 whose bits are unaffected by the PLUS, we can do the IOR as a
	 PLUS and then associate.  This is valid if OP1 can be safely
	 shifted left C bits.  */
2848       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2849           && GET_CODE (XEXP (op0, 0)) == PLUS
2850           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2851           && CONST_INT_P (XEXP (op0, 1))
2852           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2853         {
2854 	  int count = INTVAL (XEXP (op0, 1));
2855 	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2856 
2857           if (mask >> count == INTVAL (trueop1)
2858 	      && trunc_int_for_mode (mask, mode) == mask
2859               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2860 	    return simplify_gen_binary (ASHIFTRT, mode,
2861 					plus_constant (mode, XEXP (op0, 0),
2862 						       mask),
2863 					XEXP (op0, 1));
2864         }
2865 
2866       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2867       if (tem)
2868 	return tem;
2869 
2870       tem = simplify_associative_operation (code, mode, op0, op1);
2871       if (tem)
2872 	return tem;
2873       break;
2874 
2875     case XOR:
2876       if (trueop1 == CONST0_RTX (mode))
2877 	return op0;
2878       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2879 	return simplify_gen_unary (NOT, mode, op0, mode);
2880       if (rtx_equal_p (trueop0, trueop1)
2881 	  && ! side_effects_p (op0)
2882 	  && GET_MODE_CLASS (mode) != MODE_CC)
2883 	 return CONST0_RTX (mode);
2884 
2885       /* Canonicalize XOR of the most significant bit to PLUS.  */
2886       if (CONST_SCALAR_INT_P (op1)
2887 	  && mode_signbit_p (mode, op1))
2888 	return simplify_gen_binary (PLUS, mode, op0, op1);
2889       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2890       if (CONST_SCALAR_INT_P (op1)
2891 	  && GET_CODE (op0) == PLUS
2892 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2893 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2894 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2895 				    simplify_gen_binary (XOR, mode, op1,
2896 							 XEXP (op0, 1)));
2897 
2898       /* If we are XORing two things that have no bits in common,
2899 	 convert them into an IOR.  This helps to detect rotation encoded
2900 	 using those methods and possibly other simplifications.  */
2901 
2902       if (HWI_COMPUTABLE_MODE_P (mode)
2903 	  && (nonzero_bits (op0, mode)
2904 	      & nonzero_bits (op1, mode)) == 0)
2905 	return (simplify_gen_binary (IOR, mode, op0, op1));
2906 
2907       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2908 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2909 	 (NOT y).  */
2910       {
2911 	int num_negated = 0;
2912 
2913 	if (GET_CODE (op0) == NOT)
2914 	  num_negated++, op0 = XEXP (op0, 0);
2915 	if (GET_CODE (op1) == NOT)
2916 	  num_negated++, op1 = XEXP (op1, 0);
2917 
2918 	if (num_negated == 2)
2919 	  return simplify_gen_binary (XOR, mode, op0, op1);
2920 	else if (num_negated == 1)
2921 	  return simplify_gen_unary (NOT, mode,
2922 				     simplify_gen_binary (XOR, mode, op0, op1),
2923 				     mode);
2924       }
2925 
2926       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2927 	 correspond to a machine insn or result in further simplifications
2928 	 if B is a constant.  */
2929 
2930       if (GET_CODE (op0) == AND
2931 	  && rtx_equal_p (XEXP (op0, 1), op1)
2932 	  && ! side_effects_p (op1))
2933 	return simplify_gen_binary (AND, mode,
2934 				    simplify_gen_unary (NOT, mode,
2935 							XEXP (op0, 0), mode),
2936 				    op1);
2937 
2938       else if (GET_CODE (op0) == AND
2939 	       && rtx_equal_p (XEXP (op0, 0), op1)
2940 	       && ! side_effects_p (op1))
2941 	return simplify_gen_binary (AND, mode,
2942 				    simplify_gen_unary (NOT, mode,
2943 							XEXP (op0, 1), mode),
2944 				    op1);
2945 
2946       /* Given (xor (ior (xor A B) C) D), where B, C and D are
2947 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2948 	 out bits inverted twice and not set by C.  Similarly, given
2949 	 (xor (and (xor A B) C) D), simplify without inverting C in
2950 	 the xor operand: (xor (and A C) (B&C)^D).
2951       */
2952       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2953 	       && GET_CODE (XEXP (op0, 0)) == XOR
2954 	       && CONST_INT_P (op1)
2955 	       && CONST_INT_P (XEXP (op0, 1))
2956 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2957 	{
2958 	  enum rtx_code op = GET_CODE (op0);
2959 	  rtx a = XEXP (XEXP (op0, 0), 0);
2960 	  rtx b = XEXP (XEXP (op0, 0), 1);
2961 	  rtx c = XEXP (op0, 1);
2962 	  rtx d = op1;
2963 	  HOST_WIDE_INT bval = INTVAL (b);
2964 	  HOST_WIDE_INT cval = INTVAL (c);
2965 	  HOST_WIDE_INT dval = INTVAL (d);
2966 	  HOST_WIDE_INT xcval;
2967 
2968 	  if (op == IOR)
2969 	    xcval = ~cval;
2970 	  else
2971 	    xcval = cval;
2972 
2973 	  return simplify_gen_binary (XOR, mode,
2974 				      simplify_gen_binary (op, mode, a, c),
2975 				      gen_int_mode ((bval & xcval) ^ dval,
2976 						    mode));
2977 	}
2978 
2979       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2980 	 we can transform like this:
2981             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2982                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2983                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2984 	 Attempt a few simplifications when B and C are both constants.  */
2985       if (GET_CODE (op0) == AND
2986 	  && CONST_INT_P (op1)
2987 	  && CONST_INT_P (XEXP (op0, 1)))
2988 	{
2989 	  rtx a = XEXP (op0, 0);
2990 	  rtx b = XEXP (op0, 1);
2991 	  rtx c = op1;
2992 	  HOST_WIDE_INT bval = INTVAL (b);
2993 	  HOST_WIDE_INT cval = INTVAL (c);
2994 
	  /* Instead of computing ~A&C, we compute its negation,
	     (A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
3001 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3002 						  GEN_INT (~cval));
3003 
3004 	  if ((~cval & bval) == 0)
3005 	    {
3006 	      rtx na_c = NULL_RTX;
3007 	      if (n_na_c)
3008 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3009 	      else
3010 		{
3011 		  /* If ~A does not simplify, don't bother: we don't
3012 		     want to simplify 2 operations into 3, and if na_c
3013 		     were to simplify with na, n_na_c would have
3014 		     simplified as well.  */
3015 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
3016 		  if (na)
3017 		    na_c = simplify_gen_binary (AND, mode, na, c);
3018 		}
3019 
3020 	      /* Try to simplify ~A&C | ~B&C.  */
3021 	      if (na_c != NULL_RTX)
3022 		return simplify_gen_binary (IOR, mode, na_c,
3023 					    gen_int_mode (~bval & cval, mode));
3024 	    }
3025 	  else
3026 	    {
3027 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
3028 	      if (n_na_c == CONSTM1_RTX (mode))
3029 		{
3030 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3031 						    gen_int_mode (~cval & bval,
3032 								  mode));
3033 		  return simplify_gen_binary (IOR, mode, a_nc_b,
3034 					      gen_int_mode (~bval & cval,
3035 							    mode));
3036 		}
3037 	    }
3038 	}
3039 
      /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
	 do (ior (and A ~C) (and B C)), which matches a machine instruction
	 on some targets and also has a shorter instruction path length.  */
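      /* The identity can be checked bit by bit: where C is 1 the result
	 is B, matching (and B C); where C is 0 it is A, matching
	 (and A ~C).  */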
3043       if (GET_CODE (op0) == AND
3044 	  && GET_CODE (XEXP (op0, 0)) == XOR
3045 	  && CONST_INT_P (XEXP (op0, 1))
3046 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3047 	{
3048 	  rtx a = trueop1;
3049 	  rtx b = XEXP (XEXP (op0, 0), 1);
3050 	  rtx c = XEXP (op0, 1);
3051 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3052 	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3053 	  rtx bc = simplify_gen_binary (AND, mode, b, c);
3054 	  return simplify_gen_binary (IOR, mode, a_nc, bc);
3055 	}
3056       /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
3057       else if (GET_CODE (op0) == AND
3058 	  && GET_CODE (XEXP (op0, 0)) == XOR
3059 	  && CONST_INT_P (XEXP (op0, 1))
3060 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3061 	{
3062 	  rtx a = XEXP (XEXP (op0, 0), 0);
3063 	  rtx b = trueop1;
3064 	  rtx c = XEXP (op0, 1);
3065 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3066 	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3067 	  rtx ac = simplify_gen_binary (AND, mode, a, c);
3068 	  return simplify_gen_binary (IOR, mode, ac, b_nc);
3069 	}
3070 
3071       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3072 	 comparison if STORE_FLAG_VALUE is 1.  */
3073       if (STORE_FLAG_VALUE == 1
3074 	  && trueop1 == const1_rtx
3075 	  && COMPARISON_P (op0)
3076 	  && (reversed = reversed_comparison (op0, mode)))
3077 	return reversed;
3078 
3079       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3080 	 is (lt foo (const_int 0)), so we can perform the above
3081 	 simplification if STORE_FLAG_VALUE is 1.  */
3082 
3083       if (is_a <scalar_int_mode> (mode, &int_mode)
3084 	  && STORE_FLAG_VALUE == 1
3085 	  && trueop1 == const1_rtx
3086 	  && GET_CODE (op0) == LSHIFTRT
3087 	  && CONST_INT_P (XEXP (op0, 1))
3088 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3089 	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3090 
3091       /* (xor (comparison foo bar) (const_int sign-bit))
3092 	 when STORE_FLAG_VALUE is the sign bit.  */
3093       if (is_a <scalar_int_mode> (mode, &int_mode)
3094 	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3095 	  && trueop1 == const_true_rtx
3096 	  && COMPARISON_P (op0)
3097 	  && (reversed = reversed_comparison (op0, int_mode)))
3098 	return reversed;
3099 
3100       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3101       if (tem)
3102 	return tem;
3103 
3104       tem = simplify_associative_operation (code, mode, op0, op1);
3105       if (tem)
3106 	return tem;
3107       break;
3108 
3109     case AND:
3110       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3111 	return trueop1;
3112       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3113 	return op0;
3114       if (HWI_COMPUTABLE_MODE_P (mode))
3115 	{
3116 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3117 	  HOST_WIDE_INT nzop1;
3118 	  if (CONST_INT_P (trueop1))
3119 	    {
3120 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
3121 	      /* If we are turning off bits already known off in OP0, we need
3122 		 not do an AND.  */
3123 	      if ((nzop0 & ~val1) == 0)
3124 		return op0;
3125 	    }
3126 	  nzop1 = nonzero_bits (trueop1, mode);
3127 	  /* If we are clearing all the nonzero bits, the result is zero.  */
3128 	  if ((nzop1 & nzop0) == 0
3129 	      && !side_effects_p (op0) && !side_effects_p (op1))
3130 	    return CONST0_RTX (mode);
3131 	}
3132       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3133 	  && GET_MODE_CLASS (mode) != MODE_CC)
3134 	return op0;
3135       /* A & (~A) -> 0 */
3136       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3137 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3138 	  && ! side_effects_p (op0)
3139 	  && GET_MODE_CLASS (mode) != MODE_CC)
3140 	return CONST0_RTX (mode);
3141 
3142       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3143 	 there are no nonzero bits of C outside of X's mode.  */
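      /* For instance (illustrative): (and:SI (sign_extend:SI (reg:QI X))
	 (const_int 0x7f)) can become (zero_extend:SI (and:QI (reg:QI X)
	 (const_int 0x7f))), since 0x7f has no bits set outside QImode's
	 mask.  */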
3144       if ((GET_CODE (op0) == SIGN_EXTEND
3145 	   || GET_CODE (op0) == ZERO_EXTEND)
3146 	  && CONST_INT_P (trueop1)
3147 	  && HWI_COMPUTABLE_MODE_P (mode)
3148 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3149 	      & UINTVAL (trueop1)) == 0)
3150 	{
3151 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3152 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3153 				     gen_int_mode (INTVAL (trueop1),
3154 						   imode));
3155 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3156 	}
3157 
3158       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
3159 	 we might be able to further simplify the AND with X and potentially
3160 	 remove the truncation altogether.  */
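      /* For instance (illustrative): (and:QI (truncate:QI (reg:SI X))
	 (const_int 0x0f)) can become (truncate:QI (and:SI (reg:SI X)
	 (const_int 0x0f))).  */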
3161       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3162 	{
3163 	  rtx x = XEXP (op0, 0);
3164 	  machine_mode xmode = GET_MODE (x);
3165 	  tem = simplify_gen_binary (AND, xmode, x,
3166 				     gen_int_mode (INTVAL (trueop1), xmode));
3167 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3168 	}
3169 
3170       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3171       if (GET_CODE (op0) == IOR
3172 	  && CONST_INT_P (trueop1)
3173 	  && CONST_INT_P (XEXP (op0, 1)))
3174 	{
3175 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3176 	  return simplify_gen_binary (IOR, mode,
3177 				      simplify_gen_binary (AND, mode,
3178 							   XEXP (op0, 0), op1),
3179 				      gen_int_mode (tmp, mode));
3180 	}
3181 
3182       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3183 	 insn (and may simplify more).  */
3184       if (GET_CODE (op0) == XOR
3185 	  && rtx_equal_p (XEXP (op0, 0), op1)
3186 	  && ! side_effects_p (op1))
3187 	return simplify_gen_binary (AND, mode,
3188 				    simplify_gen_unary (NOT, mode,
3189 							XEXP (op0, 1), mode),
3190 				    op1);
3191 
3192       if (GET_CODE (op0) == XOR
3193 	  && rtx_equal_p (XEXP (op0, 1), op1)
3194 	  && ! side_effects_p (op1))
3195 	return simplify_gen_binary (AND, mode,
3196 				    simplify_gen_unary (NOT, mode,
3197 							XEXP (op0, 0), mode),
3198 				    op1);
3199 
3200       /* Similarly for (~(A ^ B)) & A.  */
3201       if (GET_CODE (op0) == NOT
3202 	  && GET_CODE (XEXP (op0, 0)) == XOR
3203 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3204 	  && ! side_effects_p (op1))
3205 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3206 
3207       if (GET_CODE (op0) == NOT
3208 	  && GET_CODE (XEXP (op0, 0)) == XOR
3209 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3210 	  && ! side_effects_p (op1))
3211 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3212 
3213       /* Convert (A | B) & A to A.  */
3214       if (GET_CODE (op0) == IOR
3215 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3216 	      || rtx_equal_p (XEXP (op0, 1), op1))
3217 	  && ! side_effects_p (XEXP (op0, 0))
3218 	  && ! side_effects_p (XEXP (op0, 1)))
3219 	return op1;
3220 
3221       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3222 	 ((A & N) + B) & M -> (A + B) & M
3223 	 Similarly if (N & M) == 0,
3224 	 ((A | N) + B) & M -> (A + B) & M
3225 	 and for - instead of + and/or ^ instead of |.
3226          Also, if (N & M) == 0, then
3227 	 (A +- N) & M -> A & M.  */
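      /* Worked examples (illustrative): with M == 0xff and N == 0xffff,
	 (N & M) == M, so ((A & 0xffff) + B) & 0xff can become (A + B) & 0xff;
	 with N == 0x100, (N & M) == 0, so (A + 0x100) & 0xff can become
	 A & 0xff.  */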
3228       if (CONST_INT_P (trueop1)
3229 	  && HWI_COMPUTABLE_MODE_P (mode)
3230 	  && ~UINTVAL (trueop1)
3231 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3232 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3233 	{
3234 	  rtx pmop[2];
3235 	  int which;
3236 
3237 	  pmop[0] = XEXP (op0, 0);
3238 	  pmop[1] = XEXP (op0, 1);
3239 
3240 	  if (CONST_INT_P (pmop[1])
3241 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3242 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3243 
3244 	  for (which = 0; which < 2; which++)
3245 	    {
3246 	      tem = pmop[which];
3247 	      switch (GET_CODE (tem))
3248 		{
3249 		case AND:
3250 		  if (CONST_INT_P (XEXP (tem, 1))
3251 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3252 		      == UINTVAL (trueop1))
3253 		    pmop[which] = XEXP (tem, 0);
3254 		  break;
3255 		case IOR:
3256 		case XOR:
3257 		  if (CONST_INT_P (XEXP (tem, 1))
3258 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3259 		    pmop[which] = XEXP (tem, 0);
3260 		  break;
3261 		default:
3262 		  break;
3263 		}
3264 	    }
3265 
3266 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3267 	    {
3268 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3269 					 pmop[0], pmop[1]);
3270 	      return simplify_gen_binary (code, mode, tem, op1);
3271 	    }
3272 	}
3273 
3274       /* (and X (ior (not X) Y)) -> (and X Y) */
3275       if (GET_CODE (op1) == IOR
3276 	  && GET_CODE (XEXP (op1, 0)) == NOT
3277 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3278        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3279 
3280       /* (and (ior (not X) Y) X) -> (and X Y) */
3281       if (GET_CODE (op0) == IOR
3282 	  && GET_CODE (XEXP (op0, 0)) == NOT
3283 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3284 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3285 
3286       /* (and X (ior Y (not X))) -> (and X Y) */
3287       if (GET_CODE (op1) == IOR
3288 	  && GET_CODE (XEXP (op1, 1)) == NOT
3289 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3290        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3291 
3292       /* (and (ior Y (not X)) X) -> (and X Y) */
3293       if (GET_CODE (op0) == IOR
3294 	  && GET_CODE (XEXP (op0, 1)) == NOT
3295 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3296 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3297 
3298       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3299       if (tem)
3300 	return tem;
3301 
3302       tem = simplify_associative_operation (code, mode, op0, op1);
3303       if (tem)
3304 	return tem;
3305       break;
3306 
3307     case UDIV:
3308       /* 0/x is 0 (or x&0 if x has side-effects).  */
3309       if (trueop0 == CONST0_RTX (mode)
3310 	  && !cfun->can_throw_non_call_exceptions)
3311 	{
3312 	  if (side_effects_p (op1))
3313 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3314 	  return trueop0;
3315 	}
3316       /* x/1 is x.  */
3317       if (trueop1 == CONST1_RTX (mode))
3318 	{
3319 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3320 	  if (tem)
3321 	    return tem;
3322 	}
3323       /* Convert divide by power of two into shift.  */
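      /* E.g. (udiv:SI X (const_int 8)) can become
	 (lshiftrt:SI X (const_int 3)), since exact_log2 (8) == 3
	 (illustrative).  */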
3324       if (CONST_INT_P (trueop1)
3325 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3326 	return simplify_gen_binary (LSHIFTRT, mode, op0,
3327 				    gen_int_shift_amount (mode, val));
3328       break;
3329 
3330     case DIV:
3331       /* Handle floating point and integers separately.  */
3332       if (SCALAR_FLOAT_MODE_P (mode))
3333 	{
3334 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3335 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3336 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3337 	     zeros, since dividing 0 by a negative number gives -0.0.  */
3338 	  if (trueop0 == CONST0_RTX (mode)
3339 	      && !HONOR_NANS (mode)
3340 	      && !HONOR_SIGNED_ZEROS (mode)
3341 	      && ! side_effects_p (op1))
3342 	    return op0;
3343 	  /* x/1.0 is x.  */
3344 	  if (trueop1 == CONST1_RTX (mode)
3345 	      && !HONOR_SNANS (mode))
3346 	    return op0;
3347 
3348 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3349 	      && trueop1 != CONST0_RTX (mode))
3350 	    {
3351 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3352 
3353 	      /* x/-1.0 is -x.  */
3354 	      if (real_equal (d1, &dconstm1)
3355 		  && !HONOR_SNANS (mode))
3356 		return simplify_gen_unary (NEG, mode, op0, mode);
3357 
3358 	      /* Change FP division by a constant into multiplication.
3359 		 Only do this with -freciprocal-math.  */
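	      /* For instance (illustrative): with -freciprocal-math,
		 X / 4.0 becomes X * 0.25, 0.25 being exactly
		 representable.  */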
3360 	      if (flag_reciprocal_math
3361 		  && !real_equal (d1, &dconst0))
3362 		{
3363 		  REAL_VALUE_TYPE d;
3364 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3365 		  tem = const_double_from_real_value (d, mode);
3366 		  return simplify_gen_binary (MULT, mode, op0, tem);
3367 		}
3368 	    }
3369 	}
3370       else if (SCALAR_INT_MODE_P (mode))
3371 	{
3372 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3373 	  if (trueop0 == CONST0_RTX (mode)
3374 	      && !cfun->can_throw_non_call_exceptions)
3375 	    {
3376 	      if (side_effects_p (op1))
3377 		return simplify_gen_binary (AND, mode, op1, trueop0);
3378 	      return trueop0;
3379 	    }
3380 	  /* x/1 is x.  */
3381 	  if (trueop1 == CONST1_RTX (mode))
3382 	    {
3383 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3384 	      if (tem)
3385 		return tem;
3386 	    }
3387 	  /* x/-1 is -x.  */
3388 	  if (trueop1 == constm1_rtx)
3389 	    {
3390 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3391 	      if (x)
3392 		return simplify_gen_unary (NEG, mode, x, mode);
3393 	    }
3394 	}
3395       break;
3396 
3397     case UMOD:
3398       /* 0%x is 0 (or x&0 if x has side-effects).  */
3399       if (trueop0 == CONST0_RTX (mode))
3400 	{
3401 	  if (side_effects_p (op1))
3402 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3403 	  return trueop0;
3404 	}
3405       /* x%1 is 0 (or x&0 if x has side-effects).  */
3406       if (trueop1 == CONST1_RTX (mode))
3407 	{
3408 	  if (side_effects_p (op0))
3409 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3410 	  return CONST0_RTX (mode);
3411 	}
3412       /* Implement modulus by power of two as AND.  */
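      /* E.g. (umod:SI X (const_int 8)) can become
	 (and:SI X (const_int 7)) (illustrative).  */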
3413       if (CONST_INT_P (trueop1)
3414 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3415 	return simplify_gen_binary (AND, mode, op0,
3416 				    gen_int_mode (UINTVAL (trueop1) - 1,
3417 						  mode));
3418       break;
3419 
3420     case MOD:
3421       /* 0%x is 0 (or x&0 if x has side-effects).  */
3422       if (trueop0 == CONST0_RTX (mode))
3423 	{
3424 	  if (side_effects_p (op1))
3425 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3426 	  return trueop0;
3427 	}
3428       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3429       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3430 	{
3431 	  if (side_effects_p (op0))
3432 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3433 	  return CONST0_RTX (mode);
3434 	}
3435       break;
3436 
3437     case ROTATERT:
3438     case ROTATE:
3439       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3440 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3441 	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3442 	 amount instead.  */
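      /* For instance (illustrative, SImode): (rotate X (const_int 20))
	 would become (rotatert X (const_int 12)), and
	 (rotatert X (const_int 16)) would become
	 (rotate X (const_int 16)).  */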
3443 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3444       if (CONST_INT_P (trueop1)
3445 	  && IN_RANGE (INTVAL (trueop1),
3446 		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3447 		       GET_MODE_UNIT_PRECISION (mode) - 1))
3448 	{
3449 	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3450 	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3451 	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3452 				      mode, op0, new_amount_rtx);
3453 	}
3454 #endif
3455       /* FALLTHRU */
3456     case ASHIFTRT:
3457       if (trueop1 == CONST0_RTX (mode))
3458 	return op0;
3459       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3460 	return op0;
3461       /* Rotating ~0 always results in ~0.  */
3462       if (CONST_INT_P (trueop0)
3463 	  && HWI_COMPUTABLE_MODE_P (mode)
3464 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3465 	  && ! side_effects_p (op1))
3466 	return op0;
3467 
3468     canonicalize_shift:
3469       /* Given:
3470 	 scalar modes M1, M2
3471 	 scalar constants c1, c2
3472 	 size (M2) > size (M1)
3473 	 c1 == size (M2) - size (M1)
3474 	 optimize:
3475 	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3476 				 <low_part>)
3477 		      (const_int <c2>))
3478 	 to:
3479 	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3480 		    <low_part>).  */
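      /* A concrete instance (illustrative): with M2 == DImode and
	 M1 == SImode, so that c1 == 32,
	 (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0)
		      (const_int 3))
	 becomes
	 (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 35)) 0).  */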
3481       if ((code == ASHIFTRT || code == LSHIFTRT)
3482 	  && is_a <scalar_int_mode> (mode, &int_mode)
3483 	  && SUBREG_P (op0)
3484 	  && CONST_INT_P (op1)
3485 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3486 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3487 				     &inner_mode)
3488 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3489 	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3490 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3491 	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3492 	  && subreg_lowpart_p (op0))
3493 	{
3494 	  rtx tmp = gen_int_shift_amount
3495 	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3496 	  tmp = simplify_gen_binary (code, inner_mode,
3497 				     XEXP (SUBREG_REG (op0), 0),
3498 				     tmp);
3499 	  return lowpart_subreg (int_mode, tmp, inner_mode);
3500 	}
3501 
3502       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3503 	{
3504 	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3505 	  if (val != INTVAL (op1))
3506 	    return simplify_gen_binary (code, mode, op0,
3507 					gen_int_shift_amount (mode, val));
3508 	}
3509       break;
3510 
3511     case ASHIFT:
3512     case SS_ASHIFT:
3513     case US_ASHIFT:
3514       if (trueop1 == CONST0_RTX (mode))
3515 	return op0;
3516       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3517 	return op0;
3518       goto canonicalize_shift;
3519 
3520     case LSHIFTRT:
3521       if (trueop1 == CONST0_RTX (mode))
3522 	return op0;
3523       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3524 	return op0;
3525       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
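      /* Illustrative: assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
	 SImode, (lshiftrt (clz:SI X) (const_int 5)) is nonzero only when
	 clz (X) == 32, i.e. when X == 0, so with STORE_FLAG_VALUE == 1 it
	 becomes (eq:SI X (const_int 0)).  */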
3526       if (GET_CODE (op0) == CLZ
3527 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3528 	  && CONST_INT_P (trueop1)
3529 	  && STORE_FLAG_VALUE == 1
3530 	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3531 	{
3532 	  unsigned HOST_WIDE_INT zero_val = 0;
3533 
3534 	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3535 	      && zero_val == GET_MODE_PRECISION (inner_mode)
3536 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3537 	    return simplify_gen_relational (EQ, mode, inner_mode,
3538 					    XEXP (op0, 0), const0_rtx);
3539 	}
3540       goto canonicalize_shift;
3541 
3542     case SMIN:
3543       if (HWI_COMPUTABLE_MODE_P (mode)
3544 	  && mode_signbit_p (mode, trueop1)
3545 	  && ! side_effects_p (op0))
3546 	return op1;
3547       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3548 	return op0;
3549       tem = simplify_associative_operation (code, mode, op0, op1);
3550       if (tem)
3551 	return tem;
3552       break;
3553 
3554     case SMAX:
3555       if (HWI_COMPUTABLE_MODE_P (mode)
3556 	  && CONST_INT_P (trueop1)
3557 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3558 	  && ! side_effects_p (op0))
3559 	return op1;
3560       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3561 	return op0;
3562       tem = simplify_associative_operation (code, mode, op0, op1);
3563       if (tem)
3564 	return tem;
3565       break;
3566 
3567     case UMIN:
3568       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3569 	return op1;
3570       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3571 	return op0;
3572       tem = simplify_associative_operation (code, mode, op0, op1);
3573       if (tem)
3574 	return tem;
3575       break;
3576 
3577     case UMAX:
3578       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3579 	return op1;
3580       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3581 	return op0;
3582       tem = simplify_associative_operation (code, mode, op0, op1);
3583       if (tem)
3584 	return tem;
3585       break;
3586 
3587     case SS_PLUS:
3588     case US_PLUS:
3589     case SS_MINUS:
3590     case US_MINUS:
3591     case SS_MULT:
3592     case US_MULT:
3593     case SS_DIV:
3594     case US_DIV:
3595       /* ??? There are simplifications that can be done.  */
3596       return 0;
3597 
3598     case VEC_SERIES:
3599       if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3600 	return gen_vec_duplicate (mode, op0);
3601       if (valid_for_const_vector_p (mode, op0)
3602 	  && valid_for_const_vector_p (mode, op1))
3603 	return gen_const_vec_series (mode, op0, op1);
3604       return 0;
3605 
3606     case VEC_SELECT:
3607       if (!VECTOR_MODE_P (mode))
3608 	{
3609 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3610 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3611 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3612 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3613 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3614 
3615 	  if (vec_duplicate_p (trueop0, &elt0))
3616 	    return elt0;
3617 
3618 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3619 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3620 						      (trueop1, 0, 0)));
3621 
3622 	  /* Extract a scalar element from a nested VEC_SELECT expression
3623 	     (with optional nested VEC_CONCAT expression).  Some targets
3624 	     (i386) extract a scalar element from a vector using a chain of
3625 	     nested VEC_SELECT expressions.  When the input operand is a memory
3626 	     operand, this operation can be simplified to a simple scalar
3627 	     load from a suitably offset memory address.  */
3628 	  int n_elts;
3629 	  if (GET_CODE (trueop0) == VEC_SELECT
3630 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3631 		  .is_constant (&n_elts)))
3632 	    {
3633 	      rtx op0 = XEXP (trueop0, 0);
3634 	      rtx op1 = XEXP (trueop0, 1);
3635 
3636 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3637 	      int elem;
3638 
3639 	      rtvec vec;
3640 	      rtx tmp_op, tmp;
3641 
3642 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3643 	      gcc_assert (i < n_elts);
3644 
3645 	      /* Select the element pointed to by the nested selector.  */
3646 	      elem = INTVAL (XVECEXP (op1, 0, i));
3647 
3648 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3649 	      if (GET_CODE (op0) == VEC_CONCAT)
3650 		{
3651 		  rtx op00 = XEXP (op0, 0);
3652 		  rtx op01 = XEXP (op0, 1);
3653 
3654 		  machine_mode mode00, mode01;
3655 		  int n_elts00, n_elts01;
3656 
3657 		  mode00 = GET_MODE (op00);
3658 		  mode01 = GET_MODE (op01);
3659 
3660 		  /* Find out the number of elements of each operand.
3661 		     Since the concatenated result has a constant number
3662 		     of elements, the operands must too.  */
3663 		  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3664 		  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3665 
3666 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3667 
3668 		  /* Select correct operand of VEC_CONCAT
3669 		     and adjust selector. */
3670 		  if (elem < n_elts01)
3671 		    tmp_op = op00;
3672 		  else
3673 		    {
3674 		      tmp_op = op01;
3675 		      elem -= n_elts00;
3676 		    }
3677 		}
3678 	      else
3679 		tmp_op = op0;
3680 
3681 	      vec = rtvec_alloc (1);
3682 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3683 
3684 	      tmp = gen_rtx_fmt_ee (code, mode,
3685 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3686 	      return tmp;
3687 	    }
3688 	}
3689       else
3690 	{
3691 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3692 	  gcc_assert (GET_MODE_INNER (mode)
3693 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3694 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3695 
3696 	  if (vec_duplicate_p (trueop0, &elt0))
3697 	    /* It doesn't matter which elements are selected by trueop1,
3698 	       because they are all the same.  */
3699 	    return gen_vec_duplicate (mode, elt0);
3700 
3701 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3702 	    {
3703 	      unsigned n_elts = XVECLEN (trueop1, 0);
3704 	      rtvec v = rtvec_alloc (n_elts);
3705 	      unsigned int i;
3706 
3707 	      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3708 	      for (i = 0; i < n_elts; i++)
3709 		{
3710 		  rtx x = XVECEXP (trueop1, 0, i);
3711 
3712 		  gcc_assert (CONST_INT_P (x));
3713 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3714 						       INTVAL (x));
3715 		}
3716 
3717 	      return gen_rtx_CONST_VECTOR (mode, v);
3718 	    }
3719 
3720 	  /* Recognize the identity.  */
3721 	  if (GET_MODE (trueop0) == mode)
3722 	    {
3723 	      bool maybe_ident = true;
3724 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3725 		{
3726 		  rtx j = XVECEXP (trueop1, 0, i);
3727 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3728 		    {
3729 		      maybe_ident = false;
3730 		      break;
3731 		    }
3732 		}
3733 	      if (maybe_ident)
3734 		return trueop0;
3735 	    }
3736 
3737 	  /* If we build {a,b} then permute it, build the result directly.  */
3738 	  if (XVECLEN (trueop1, 0) == 2
3739 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3740 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3741 	      && GET_CODE (trueop0) == VEC_CONCAT
3742 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3743 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3744 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3745 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3746 	    {
3747 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3748 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3749 	      rtx subop0, subop1;
3750 
3751 	      gcc_assert (i0 < 4 && i1 < 4);
3752 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3753 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3754 
3755 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3756 	    }
3757 
3758 	  if (XVECLEN (trueop1, 0) == 2
3759 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3760 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3761 	      && GET_CODE (trueop0) == VEC_CONCAT
3762 	      && GET_MODE (trueop0) == mode)
3763 	    {
3764 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3765 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3766 	      rtx subop0, subop1;
3767 
3768 	      gcc_assert (i0 < 2 && i1 < 2);
3769 	      subop0 = XEXP (trueop0, i0);
3770 	      subop1 = XEXP (trueop0, i1);
3771 
3772 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3773 	    }
3774 
3775 	  /* If we select one half of a vec_concat, return that.  */
3776 	  int l0, l1;
3777 	  if (GET_CODE (trueop0) == VEC_CONCAT
3778 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3779 		  .is_constant (&l0))
3780 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3781 		  .is_constant (&l1))
3782 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3783 	    {
3784 	      rtx subop0 = XEXP (trueop0, 0);
3785 	      rtx subop1 = XEXP (trueop0, 1);
3786 	      machine_mode mode0 = GET_MODE (subop0);
3787 	      machine_mode mode1 = GET_MODE (subop1);
3788 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3789 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3790 		{
3791 		  bool success = true;
3792 		  for (int i = 1; i < l0; ++i)
3793 		    {
3794 		      rtx j = XVECEXP (trueop1, 0, i);
3795 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
3796 			{
3797 			  success = false;
3798 			  break;
3799 			}
3800 		    }
3801 		  if (success)
3802 		    return subop0;
3803 		}
3804 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3805 		{
3806 		  bool success = true;
3807 		  for (int i = 1; i < l1; ++i)
3808 		    {
3809 		      rtx j = XVECEXP (trueop1, 0, i);
3810 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3811 			{
3812 			  success = false;
3813 			  break;
3814 			}
3815 		    }
3816 		  if (success)
3817 		    return subop1;
3818 		}
3819 	    }
3820 	}
3821 
3822       if (XVECLEN (trueop1, 0) == 1
3823 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3824 	  && GET_CODE (trueop0) == VEC_CONCAT)
3825 	{
3826 	  rtx vec = trueop0;
3827 	  offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3828 
3829 	  /* Try to find the element in the VEC_CONCAT.  */
3830 	  while (GET_MODE (vec) != mode
3831 		 && GET_CODE (vec) == VEC_CONCAT)
3832 	    {
3833 	      poly_int64 vec_size;
3834 
3835 	      if (CONST_INT_P (XEXP (vec, 0)))
3836 	        {
3837 	          /* vec_concat of two const_ints doesn't make sense with
3838 	             respect to modes.  */
3839 	          if (CONST_INT_P (XEXP (vec, 1)))
3840 	            return 0;
3841 
3842 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3843 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3844 	        }
3845 	      else
3846 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3847 
3848 	      if (known_lt (offset, vec_size))
3849 		vec = XEXP (vec, 0);
3850 	      else if (known_ge (offset, vec_size))
3851 		{
3852 		  offset -= vec_size;
3853 		  vec = XEXP (vec, 1);
3854 		}
3855 	      else
3856 		break;
3857 	      vec = avoid_constant_pool_reference (vec);
3858 	    }
3859 
3860 	  if (GET_MODE (vec) == mode)
3861 	    return vec;
3862 	}
3863 
3864       /* If we select elements in a vec_merge that all come from the same
3865 	 operand, select from that operand directly.  */
3866       if (GET_CODE (op0) == VEC_MERGE)
3867 	{
3868 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3869 	  if (CONST_INT_P (trueop02))
3870 	    {
3871 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3872 	      bool all_operand0 = true;
3873 	      bool all_operand1 = true;
3874 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3875 		{
3876 		  rtx j = XVECEXP (trueop1, 0, i);
3877 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3878 		    all_operand1 = false;
3879 		  else
3880 		    all_operand0 = false;
3881 		}
3882 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3883 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3884 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3885 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3886 	    }
3887 	}
3888 
3889       /* If we have two nested selects that are inverses of each
3890 	 other, replace them with the source operand.  */
3891       if (GET_CODE (trueop0) == VEC_SELECT
3892 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
3893 	{
3894 	  rtx op0_subop1 = XEXP (trueop0, 1);
3895 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3896 	  gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3897 
3898 	  /* Apply the outer ordering vector to the inner one.  (The inner
3899 	     ordering vector is expressly permitted to be of a different
3900 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
3901 	     then the two VEC_SELECTs cancel.  */
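	  /* For instance (illustrative): an outer selector [1 0] applied to
	     an inner selector [1 0] yields [0 1], so the two VEC_SELECTs
	     cancel and the underlying vector is returned unchanged.  */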
3902 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3903 	    {
3904 	      rtx x = XVECEXP (trueop1, 0, i);
3905 	      if (!CONST_INT_P (x))
3906 		return 0;
3907 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3908 	      if (!CONST_INT_P (y) || i != INTVAL (y))
3909 		return 0;
3910 	    }
3911 	  return XEXP (trueop0, 0);
3912 	}
3913 
3914       return 0;
3915     case VEC_CONCAT:
3916       {
3917 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3918 				      ? GET_MODE (trueop0)
3919 				      : GET_MODE_INNER (mode));
3920 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3921 				      ? GET_MODE (trueop1)
3922 				      : GET_MODE_INNER (mode));
3923 
3924 	gcc_assert (VECTOR_MODE_P (mode));
3925 	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3926 			      + GET_MODE_SIZE (op1_mode),
3927 			      GET_MODE_SIZE (mode)));
3928 
3929 	if (VECTOR_MODE_P (op0_mode))
3930 	  gcc_assert (GET_MODE_INNER (mode)
3931 		      == GET_MODE_INNER (op0_mode));
3932 	else
3933 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3934 
3935 	if (VECTOR_MODE_P (op1_mode))
3936 	  gcc_assert (GET_MODE_INNER (mode)
3937 		      == GET_MODE_INNER (op1_mode));
3938 	else
3939 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3940 
3941 	unsigned int n_elts, in_n_elts;
3942 	if ((GET_CODE (trueop0) == CONST_VECTOR
3943 	     || CONST_SCALAR_INT_P (trueop0)
3944 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3945 	    && (GET_CODE (trueop1) == CONST_VECTOR
3946 		|| CONST_SCALAR_INT_P (trueop1)
3947 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
3948 	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3949 	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3950 	  {
3951 	    rtvec v = rtvec_alloc (n_elts);
3952 	    unsigned int i;
3953 	    for (i = 0; i < n_elts; i++)
3954 	      {
3955 		if (i < in_n_elts)
3956 		  {
3957 		    if (!VECTOR_MODE_P (op0_mode))
3958 		      RTVEC_ELT (v, i) = trueop0;
3959 		    else
3960 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3961 		  }
3962 		else
3963 		  {
3964 		    if (!VECTOR_MODE_P (op1_mode))
3965 		      RTVEC_ELT (v, i) = trueop1;
3966 		    else
3967 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3968 							   i - in_n_elts);
3969 		  }
3970 	      }
3971 
3972 	    return gen_rtx_CONST_VECTOR (mode, v);
3973 	  }
3974 
3975 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
3976 	   Restrict the transformation to avoid generating a VEC_SELECT with a
3977 	   mode unrelated to its operand.  */
3978 	if (GET_CODE (trueop0) == VEC_SELECT
3979 	    && GET_CODE (trueop1) == VEC_SELECT
3980 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3981 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
3982 	  {
3983 	    rtx par0 = XEXP (trueop0, 1);
3984 	    rtx par1 = XEXP (trueop1, 1);
3985 	    int len0 = XVECLEN (par0, 0);
3986 	    int len1 = XVECLEN (par1, 0);
3987 	    rtvec vec = rtvec_alloc (len0 + len1);
3988 	    for (int i = 0; i < len0; i++)
3989 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3990 	    for (int i = 0; i < len1; i++)
3991 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3992 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3993 					gen_rtx_PARALLEL (VOIDmode, vec));
3994 	  }
3995       }
3996       return 0;
3997 
3998     default:
3999       gcc_unreachable ();
4000     }
4001 
4002   if (mode == GET_MODE (op0)
4003       && mode == GET_MODE (op1)
4004       && vec_duplicate_p (op0, &elt0)
4005       && vec_duplicate_p (op1, &elt1))
4006     {
4007       /* Try applying the operator to ELT and see if that simplifies.
4008 	 We can duplicate the result if so.
4009 
4010 	 The reason we don't use simplify_gen_binary is that it isn't
4011 	 necessarily a win to convert things like:
4012 
4013 	   (plus:V (vec_duplicate:V (reg:S R1))
4014 		   (vec_duplicate:V (reg:S R2)))
4015 
4016 	 to:
4017 
4018 	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4019 
4020 	 The first might be done entirely in vector registers while the
4021 	 second might need a move between register files.  */
4022       tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4023 				       elt0, elt1);
4024       if (tem)
4025 	return gen_vec_duplicate (mode, tem);
4026     }
4027 
4028   return 0;
4029 }
4030 
4031 rtx
4032 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4033 				 rtx op0, rtx op1)
4034 {
4035   if (VECTOR_MODE_P (mode)
4036       && code != VEC_CONCAT
4037       && GET_CODE (op0) == CONST_VECTOR
4038       && GET_CODE (op1) == CONST_VECTOR)
4039     {
4040       unsigned int n_elts;
4041       if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
4042 	return NULL_RTX;
4043 
4044       gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
4045       gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4046       rtvec v = rtvec_alloc (n_elts);
4047       unsigned int i;
4048 
4049       for (i = 0; i < n_elts; i++)
4050 	{
4051 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4052 					     CONST_VECTOR_ELT (op0, i),
4053 					     CONST_VECTOR_ELT (op1, i));
4054 	  if (!x || !valid_for_const_vector_p (mode, x))
4055 	    return 0;
4056 	  RTVEC_ELT (v, i) = x;
4057 	}
4058 
4059       return gen_rtx_CONST_VECTOR (mode, v);
4060     }
4061 
4062   if (VECTOR_MODE_P (mode)
4063       && code == VEC_CONCAT
4064       && (CONST_SCALAR_INT_P (op0)
4065 	  || CONST_FIXED_P (op0)
4066 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
4067       && (CONST_SCALAR_INT_P (op1)
4068 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
4069 	  || CONST_FIXED_P (op1)))
4070     {
4071       /* Both inputs have a constant number of elements, so the result
4072 	 must too.  */
4073       unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4074       rtvec v = rtvec_alloc (n_elts);
4075 
4076       gcc_assert (n_elts >= 2);
4077       if (n_elts == 2)
4078 	{
4079 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4080 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4081 
4082 	  RTVEC_ELT (v, 0) = op0;
4083 	  RTVEC_ELT (v, 1) = op1;
4084 	}
4085       else
4086 	{
4087 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4088 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4089 	  unsigned i;
4090 
4091 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4092 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4093 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4094 
4095 	  for (i = 0; i < op0_n_elts; ++i)
4096 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4097 	  for (i = 0; i < op1_n_elts; ++i)
4098 	    RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4099 	}
4100 
4101       return gen_rtx_CONST_VECTOR (mode, v);
4102     }
4103 
4104   if (SCALAR_FLOAT_MODE_P (mode)
4105       && CONST_DOUBLE_AS_FLOAT_P (op0)
4106       && CONST_DOUBLE_AS_FLOAT_P (op1)
4107       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4108     {
4109       if (code == AND
4110 	  || code == IOR
4111 	  || code == XOR)
4112 	{
4113 	  long tmp0[4];
4114 	  long tmp1[4];
4115 	  REAL_VALUE_TYPE r;
4116 	  int i;
4117 
4118 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4119 			  GET_MODE (op0));
4120 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4121 			  GET_MODE (op1));
4122 	  for (i = 0; i < 4; i++)
4123 	    {
4124 	      switch (code)
4125 	      {
4126 	      case AND:
4127 		tmp0[i] &= tmp1[i];
4128 		break;
4129 	      case IOR:
4130 		tmp0[i] |= tmp1[i];
4131 		break;
4132 	      case XOR:
4133 		tmp0[i] ^= tmp1[i];
4134 		break;
4135 	      default:
4136 		gcc_unreachable ();
4137 	      }
4138 	    }
4139 	   real_from_target (&r, tmp0, mode);
4140 	   return const_double_from_real_value (r, mode);
4141 	}
4142       else
4143 	{
4144 	  REAL_VALUE_TYPE f0, f1, value, result;
4145 	  const REAL_VALUE_TYPE *opr0, *opr1;
4146 	  bool inexact;
4147 
4148 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4149 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4150 
4151 	  if (HONOR_SNANS (mode)
4152 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4153 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4154 	    return 0;
4155 
4156 	  real_convert (&f0, mode, opr0);
4157 	  real_convert (&f1, mode, opr1);
4158 
4159 	  if (code == DIV
4160 	      && real_equal (&f1, &dconst0)
4161 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4162 	    return 0;
4163 
4164 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4165 	      && flag_trapping_math
4166 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4167 	    {
4168 	      int s0 = REAL_VALUE_NEGATIVE (f0);
4169 	      int s1 = REAL_VALUE_NEGATIVE (f1);
4170 
4171 	      switch (code)
4172 		{
4173 		case PLUS:
4174 		  /* Inf + -Inf = NaN plus exception.  */
4175 		  if (s0 != s1)
4176 		    return 0;
4177 		  break;
4178 		case MINUS:
4179 		  /* Inf - Inf = NaN plus exception.  */
4180 		  if (s0 == s1)
4181 		    return 0;
4182 		  break;
4183 		case DIV:
4184 		  /* Inf / Inf = NaN plus exception.  */
4185 		  return 0;
4186 		default:
4187 		  break;
4188 		}
4189 	    }
4190 
4191 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4192 	      && flag_trapping_math
4193 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4194 		  || (REAL_VALUE_ISINF (f1)
4195 		      && real_equal (&f0, &dconst0))))
4196 	    /* Inf * 0 = NaN plus exception.  */
4197 	    return 0;
4198 
4199 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4200 				     &f0, &f1);
4201 	  real_convert (&result, mode, &value);
4202 
4203 	  /* Don't constant fold this floating point operation if
4204 	     the result has overflowed and flag_trapping_math is set.  */
4205 
4206 	  if (flag_trapping_math
4207 	      && MODE_HAS_INFINITIES (mode)
4208 	      && REAL_VALUE_ISINF (result)
4209 	      && !REAL_VALUE_ISINF (f0)
4210 	      && !REAL_VALUE_ISINF (f1))
4211 	    /* Overflow plus exception.  */
4212 	    return 0;
4213 
4214 	  /* Don't constant fold this floating point operation if the
4215 	     result may depend upon the run-time rounding mode and
4216 	     flag_rounding_math is set, or if GCC's software emulation
4217 	     is unable to accurately represent the result.  */
4218 
4219 	  if ((flag_rounding_math
4220 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4221 	      && (inexact || !real_identical (&result, &value)))
4222 	    return NULL_RTX;
4223 
4224 	  return const_double_from_real_value (result, mode);
4225 	}
4226     }
4227 
4228   /* We can fold some multi-word operations.  */
4229   scalar_int_mode int_mode;
4230   if (is_a <scalar_int_mode> (mode, &int_mode)
4231       && CONST_SCALAR_INT_P (op0)
4232       && CONST_SCALAR_INT_P (op1))
4233     {
4234       wide_int result;
4235       bool overflow;
4236       rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4237       rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4238 
4239 #if TARGET_SUPPORTS_WIDE_INT == 0
4240       /* This assert keeps the simplification from producing a result
4241 	 that cannot be represented in a CONST_DOUBLE, but a lot of
4242 	 upstream callers expect that this function never fails to
4243 	 simplify something, and so if you added this to the test
4244 	 above, the code would die later anyway.  If this assert
4245 	 triggers, you just need to make the port support wide int.  */
4246       gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4247 #endif
4248       switch (code)
4249 	{
4250 	case MINUS:
4251 	  result = wi::sub (pop0, pop1);
4252 	  break;
4253 
4254 	case PLUS:
4255 	  result = wi::add (pop0, pop1);
4256 	  break;
4257 
4258 	case MULT:
4259 	  result = wi::mul (pop0, pop1);
4260 	  break;
4261 
4262 	case DIV:
4263 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4264 	  if (overflow)
4265 	    return NULL_RTX;
4266 	  break;
4267 
4268 	case MOD:
4269 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4270 	  if (overflow)
4271 	    return NULL_RTX;
4272 	  break;
4273 
4274 	case UDIV:
4275 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4276 	  if (overflow)
4277 	    return NULL_RTX;
4278 	  break;
4279 
4280 	case UMOD:
4281 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4282 	  if (overflow)
4283 	    return NULL_RTX;
4284 	  break;
4285 
4286 	case AND:
4287 	  result = wi::bit_and (pop0, pop1);
4288 	  break;
4289 
4290 	case IOR:
4291 	  result = wi::bit_or (pop0, pop1);
4292 	  break;
4293 
4294 	case XOR:
4295 	  result = wi::bit_xor (pop0, pop1);
4296 	  break;
4297 
4298 	case SMIN:
4299 	  result = wi::smin (pop0, pop1);
4300 	  break;
4301 
4302 	case SMAX:
4303 	  result = wi::smax (pop0, pop1);
4304 	  break;
4305 
4306 	case UMIN:
4307 	  result = wi::umin (pop0, pop1);
4308 	  break;
4309 
4310 	case UMAX:
4311 	  result = wi::umax (pop0, pop1);
4312 	  break;
4313 
4314 	case LSHIFTRT:
4315 	case ASHIFTRT:
4316 	case ASHIFT:
4317 	  {
4318 	    wide_int wop1 = pop1;
4319 	    if (SHIFT_COUNT_TRUNCATED)
4320 	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4321 	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4322 	      return NULL_RTX;
4323 
4324 	    switch (code)
4325 	      {
4326 	      case LSHIFTRT:
4327 		result = wi::lrshift (pop0, wop1);
4328 		break;
4329 
4330 	      case ASHIFTRT:
4331 		result = wi::arshift (pop0, wop1);
4332 		break;
4333 
4334 	      case ASHIFT:
4335 		result = wi::lshift (pop0, wop1);
4336 		break;
4337 
4338 	      default:
4339 		gcc_unreachable ();
4340 	      }
4341 	    break;
4342 	  }
4343 	case ROTATE:
4344 	case ROTATERT:
4345 	  {
4346 	    if (wi::neg_p (pop1))
4347 	      return NULL_RTX;
4348 
4349 	    switch (code)
4350 	      {
4351 	      case ROTATE:
4352 		result = wi::lrotate (pop0, pop1);
4353 		break;
4354 
4355 	      case ROTATERT:
4356 		result = wi::rrotate (pop0, pop1);
4357 		break;
4358 
4359 	      default:
4360 		gcc_unreachable ();
4361 	      }
4362 	    break;
4363 	  }
4364 	default:
4365 	  return NULL_RTX;
4366 	}
4367       return immed_wide_int_const (result, int_mode);
4368     }
4369 
4370   /* Handle polynomial integers.  */
4371   if (NUM_POLY_INT_COEFFS > 1
4372       && is_a <scalar_int_mode> (mode, &int_mode)
4373       && poly_int_rtx_p (op0)
4374       && poly_int_rtx_p (op1))
4375     {
4376       poly_wide_int result;
4377       switch (code)
4378 	{
4379 	case PLUS:
4380 	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4381 	  break;
4382 
4383 	case MINUS:
4384 	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4385 	  break;
4386 
4387 	case MULT:
4388 	  if (CONST_SCALAR_INT_P (op1))
4389 	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4390 	  else
4391 	    return NULL_RTX;
4392 	  break;
4393 
4394 	case ASHIFT:
4395 	  if (CONST_SCALAR_INT_P (op1))
4396 	    {
4397 	      wide_int shift = rtx_mode_t (op1, mode);
4398 	      if (SHIFT_COUNT_TRUNCATED)
4399 		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4400 	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4401 		return NULL_RTX;
4402 	      result = wi::to_poly_wide (op0, mode) << shift;
4403 	    }
4404 	  else
4405 	    return NULL_RTX;
4406 	  break;
4407 
4408 	case IOR:
4409 	  if (!CONST_SCALAR_INT_P (op1)
4410 	      || !can_ior_p (wi::to_poly_wide (op0, mode),
4411 			     rtx_mode_t (op1, mode), &result))
4412 	    return NULL_RTX;
4413 	  break;
4414 
4415 	default:
4416 	  return NULL_RTX;
4417 	}
4418       return immed_wide_int_const (result, int_mode);
4419     }
4420 
4421   return NULL_RTX;
4422 }
4423 
4424 
4425 
4426 /* Return a positive integer if X should sort after Y.  The value
4427    returned is 1 if and only if X and Y are both regs.  */
4428 
4429 static int
4430 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4431 {
4432   int result;
4433 
4434   result = (commutative_operand_precedence (y)
4435 	    - commutative_operand_precedence (x));
4436   if (result)
4437     return result + result;
4438 
4439   /* Group together equal REGs to do more simplification.  */
4440   if (REG_P (x) && REG_P (y))
4441     return REGNO (x) > REGNO (y);
4442 
4443   return 0;
4444 }
4445 
4446 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4447    operands may be another PLUS or MINUS.
4448 
4449    Rather than testing for specific cases, we do this by a brute-force method
4450    and do all possible simplifications until no more changes occur.  Then
4451    we rebuild the operation.
4452 
4453    May return NULL_RTX when no changes were made.  */
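/* As an illustration (not exhaustive): (minus (plus A B) C) is first
   flattened into the operand list { A, B, -C }; pairs of operands are then
   simplified against each other, and the surviving operands are rebuilt
   into a chain of PLUS or MINUS expressions.  */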
4454 
4455 static rtx
4456 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4457 		     rtx op1)
4458 {
4459   struct simplify_plus_minus_op_data
4460   {
4461     rtx op;
4462     short neg;
4463   } ops[16];
4464   rtx result, tem;
4465   int n_ops = 2;
4466   int changed, n_constants, canonicalized = 0;
4467   int i, j;
4468 
4469   memset (ops, 0, sizeof ops);
4470 
4471   /* Set up the two operands and then expand them until nothing has been
4472      changed.  If we run out of room in our array, give up; this should
4473      almost never happen.  */
4474 
4475   ops[0].op = op0;
4476   ops[0].neg = 0;
4477   ops[1].op = op1;
4478   ops[1].neg = (code == MINUS);
4479 
4480   do
4481     {
4482       changed = 0;
4483       n_constants = 0;
4484 
4485       for (i = 0; i < n_ops; i++)
4486 	{
4487 	  rtx this_op = ops[i].op;
4488 	  int this_neg = ops[i].neg;
4489 	  enum rtx_code this_code = GET_CODE (this_op);
4490 
4491 	  switch (this_code)
4492 	    {
4493 	    case PLUS:
4494 	    case MINUS:
4495 	      if (n_ops == ARRAY_SIZE (ops))
4496 		return NULL_RTX;
4497 
4498 	      ops[n_ops].op = XEXP (this_op, 1);
4499 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4500 	      n_ops++;
4501 
4502 	      ops[i].op = XEXP (this_op, 0);
4503 	      changed = 1;
4504 	      /* If this operand was negated then we will potentially
4505 		 canonicalize the expression.  Similarly if we don't
4506 		 place the operands adjacent we're re-ordering the
4507 		 expression and thus might be performing a
4508 		 canonicalization.  Ignore register re-ordering.
4509 		 ??? It might be better to shuffle the ops array here,
4510 		 but then (plus (plus A B) (plus C D)) wouldn't
4511 		 be seen as non-canonical.  */
4512 	      if (this_neg
4513 		  || (i != n_ops - 2
4514 		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4515 		canonicalized = 1;
4516 	      break;
4517 
4518 	    case NEG:
4519 	      ops[i].op = XEXP (this_op, 0);
4520 	      ops[i].neg = ! this_neg;
4521 	      changed = 1;
4522 	      canonicalized = 1;
4523 	      break;
4524 
4525 	    case CONST:
4526 	      if (n_ops != ARRAY_SIZE (ops)
4527 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4528 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4529 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4530 		{
4531 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4532 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4533 		  ops[n_ops].neg = this_neg;
4534 		  n_ops++;
4535 		  changed = 1;
4536 		  canonicalized = 1;
4537 		}
4538 	      break;
4539 
4540 	    case NOT:
4541 	      /* ~a -> (-a - 1) */
4542 	      if (n_ops != ARRAY_SIZE (ops))
4543 		{
4544 		  ops[n_ops].op = CONSTM1_RTX (mode);
4545 		  ops[n_ops++].neg = this_neg;
4546 		  ops[i].op = XEXP (this_op, 0);
4547 		  ops[i].neg = !this_neg;
4548 		  changed = 1;
4549 		  canonicalized = 1;
4550 		}
4551 	      break;
4552 
4553 	    case CONST_INT:
4554 	      n_constants++;
4555 	      if (this_neg)
4556 		{
4557 		  ops[i].op = neg_const_int (mode, this_op);
4558 		  ops[i].neg = 0;
4559 		  changed = 1;
4560 		  canonicalized = 1;
4561 		}
4562 	      break;
4563 
4564 	    default:
4565 	      break;
4566 	    }
4567 	}
4568     }
4569   while (changed);
4570 
4571   if (n_constants > 1)
4572     canonicalized = 1;
4573 
4574   gcc_assert (n_ops >= 2);
4575 
4576   /* If we only have two operands, we can avoid the loops.  */
4577   if (n_ops == 2)
4578     {
4579       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4580       rtx lhs, rhs;
4581 
4582       /* Get the two operands.  Be careful with the order, especially for
4583 	 the cases where code == MINUS.  */
4584       if (ops[0].neg && ops[1].neg)
4585 	{
4586 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4587 	  rhs = ops[1].op;
4588 	}
4589       else if (ops[0].neg)
4590 	{
4591 	  lhs = ops[1].op;
4592 	  rhs = ops[0].op;
4593 	}
4594       else
4595 	{
4596 	  lhs = ops[0].op;
4597 	  rhs = ops[1].op;
4598 	}
4599 
4600       return simplify_const_binary_operation (code, mode, lhs, rhs);
4601     }
4602 
4603   /* Now simplify each pair of operands until nothing changes.  */
4604   while (1)
4605     {
4606       /* Insertion sort is good enough for a small array.  */
4607       for (i = 1; i < n_ops; i++)
4608 	{
4609 	  struct simplify_plus_minus_op_data save;
4610 	  int cmp;
4611 
4612 	  j = i - 1;
4613 	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4614 	  if (cmp <= 0)
4615 	    continue;
4616 	  /* Just swapping registers doesn't count as canonicalization.  */
4617 	  if (cmp != 1)
4618 	    canonicalized = 1;
4619 
4620 	  save = ops[i];
4621 	  do
4622 	    ops[j + 1] = ops[j];
4623 	  while (j--
4624 		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4625 	  ops[j + 1] = save;
4626 	}
4627 
4628       changed = 0;
4629       for (i = n_ops - 1; i > 0; i--)
4630 	for (j = i - 1; j >= 0; j--)
4631 	  {
4632 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4633 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4634 
4635 	    if (lhs != 0 && rhs != 0)
4636 	      {
4637 		enum rtx_code ncode = PLUS;
4638 
4639 		if (lneg != rneg)
4640 		  {
4641 		    ncode = MINUS;
4642 		    if (lneg)
4643 		      std::swap (lhs, rhs);
4644 		  }
4645 		else if (swap_commutative_operands_p (lhs, rhs))
4646 		  std::swap (lhs, rhs);
4647 
4648 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4649 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4650 		  {
4651 		    rtx tem_lhs, tem_rhs;
4652 
4653 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4654 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4655 		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
4656 						     tem_rhs);
4657 
4658 		    if (tem && !CONSTANT_P (tem))
4659 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4660 		  }
4661 		else
4662 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4663 
4664 		if (tem)
4665 		  {
4666 		    /* Reject "simplifications" that just wrap the two
4667 		       arguments in a CONST.  Failure to do so can result
4668 		       in infinite recursion with simplify_binary_operation
4669 		       when it calls us to simplify CONST operations.
4670 		       Also, if we find such a simplification, don't try
4671 		       any more combinations with this rhs:  We must have
4672 		       something like symbol+offset, i.e. one of the
4673 		       trivial CONST expressions we handle later.  */
4674 		    if (GET_CODE (tem) == CONST
4675 			&& GET_CODE (XEXP (tem, 0)) == ncode
4676 			&& XEXP (XEXP (tem, 0), 0) == lhs
4677 			&& XEXP (XEXP (tem, 0), 1) == rhs)
4678 		      break;
4679 		    lneg &= rneg;
4680 		    if (GET_CODE (tem) == NEG)
4681 		      tem = XEXP (tem, 0), lneg = !lneg;
4682 		    if (CONST_INT_P (tem) && lneg)
4683 		      tem = neg_const_int (mode, tem), lneg = 0;
4684 
4685 		    ops[i].op = tem;
4686 		    ops[i].neg = lneg;
4687 		    ops[j].op = NULL_RTX;
4688 		    changed = 1;
4689 		    canonicalized = 1;
4690 		  }
4691 	      }
4692 	  }
4693 
4694       if (!changed)
4695 	break;
4696 
4697       /* Pack all the operands to the lower-numbered entries.  */
4698       for (i = 0, j = 0; j < n_ops; j++)
4699 	if (ops[j].op)
4700 	  {
4701 	    ops[i] = ops[j];
4702 	    i++;
4703 	  }
4704       n_ops = i;
4705     }
4706 
4707   /* If nothing changed, check that rematerialization of rtl instructions
4708      is still required.  */
4709   if (!canonicalized)
4710     {
4711       /* Perform rematerialization only if all operands are registers and
4712 	 all operations are PLUS.  */
4713       /* ??? Also disallow (non-global, non-frame) fixed registers to work
4714 	 around rs6000 and how it uses the CA register.  See PR67145.  */
4715       for (i = 0; i < n_ops; i++)
4716 	if (ops[i].neg
4717 	    || !REG_P (ops[i].op)
4718 	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4719 		&& fixed_regs[REGNO (ops[i].op)]
4720 		&& !global_regs[REGNO (ops[i].op)]
4721 		&& ops[i].op != frame_pointer_rtx
4722 		&& ops[i].op != arg_pointer_rtx
4723 		&& ops[i].op != stack_pointer_rtx))
4724 	  return NULL_RTX;
4725       goto gen_result;
4726     }
4727 
4728   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4729   if (n_ops == 2
4730       && CONST_INT_P (ops[1].op)
4731       && CONSTANT_P (ops[0].op)
4732       && ops[0].neg)
4733     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4734 
4735   /* We suppressed creation of trivial CONST expressions in the
4736      combination loop to avoid recursion.  Create one manually now.
4737      The combination loop should have ensured that there is exactly
4738      one CONST_INT, and the sort will have ensured that it is last
4739      in the array and that any other constant will be next-to-last.  */
4740 
4741   if (n_ops > 1
4742       && CONST_INT_P (ops[n_ops - 1].op)
4743       && CONSTANT_P (ops[n_ops - 2].op))
4744     {
4745       rtx value = ops[n_ops - 1].op;
4746       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4747 	value = neg_const_int (mode, value);
4748       if (CONST_INT_P (value))
4749 	{
4750 	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4751 					     INTVAL (value));
4752 	  n_ops--;
4753 	}
4754     }
4755 
4756   /* Put a non-negated operand first, if possible.  */
4757 
4758   for (i = 0; i < n_ops && ops[i].neg; i++)
4759     continue;
4760   if (i == n_ops)
4761     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4762   else if (i != 0)
4763     {
4764       tem = ops[0].op;
4765       ops[0] = ops[i];
4766       ops[i].op = tem;
4767       ops[i].neg = 1;
4768     }
4769 
4770   /* Now make the result by performing the requested operations.  */
4771  gen_result:
4772   result = ops[0].op;
4773   for (i = 1; i < n_ops; i++)
4774     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4775 			     mode, result, ops[i].op);
4776 
4777   return result;
4778 }
4779 
4780 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4781 static bool
4782 plus_minus_operand_p (const_rtx x)
4783 {
4784   return GET_CODE (x) == PLUS
4785          || GET_CODE (x) == MINUS
4786 	 || (GET_CODE (x) == CONST
4787 	     && GET_CODE (XEXP (x, 0)) == PLUS
4788 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4789 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4790 }
4791 
4792 /* Like simplify_binary_operation except used for relational operators.
4793    MODE is the mode of the result.  If MODE is VOIDmode, the two operands
4794    must not both be VOIDmode as well.
4795 
4796    CMP_MODE specifies the mode in which the comparison is done, so it is
4797    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4798    the operands or, if both are VOIDmode, the operands are compared in
4799    "infinite precision".  */
4800 rtx
4801 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4802 			       machine_mode cmp_mode, rtx op0, rtx op1)
4803 {
4804   rtx tem, trueop0, trueop1;
4805 
4806   if (cmp_mode == VOIDmode)
4807     cmp_mode = GET_MODE (op0);
4808   if (cmp_mode == VOIDmode)
4809     cmp_mode = GET_MODE (op1);
4810 
4811   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4812   if (tem)
4813     {
4814       if (SCALAR_FLOAT_MODE_P (mode))
4815 	{
4816           if (tem == const0_rtx)
4817             return CONST0_RTX (mode);
4818 #ifdef FLOAT_STORE_FLAG_VALUE
4819 	  {
4820 	    REAL_VALUE_TYPE val;
4821 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4822 	    return const_double_from_real_value (val, mode);
4823 	  }
4824 #else
4825 	  return NULL_RTX;
4826 #endif
4827 	}
4828       if (VECTOR_MODE_P (mode))
4829 	{
4830 	  if (tem == const0_rtx)
4831 	    return CONST0_RTX (mode);
4832 #ifdef VECTOR_STORE_FLAG_VALUE
4833 	  {
4834 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4835 	    if (val == NULL_RTX)
4836 	      return NULL_RTX;
4837 	    if (val == const1_rtx)
4838 	      return CONST1_RTX (mode);
4839 
4840 	    return gen_const_vec_duplicate (mode, val);
4841 	  }
4842 #else
4843 	  return NULL_RTX;
4844 #endif
4845 	}
4846 
4847       return tem;
4848     }
4849 
4850   /* For the following tests, ensure const0_rtx is op1.  */
4851   if (swap_commutative_operands_p (op0, op1)
4852       || (op0 == const0_rtx && op1 != const0_rtx))
4853     std::swap (op0, op1), code = swap_condition (code);
4854 
4855   /* If op0 is a compare, extract the comparison arguments from it.  */
4856   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4857     return simplify_gen_relational (code, mode, VOIDmode,
4858 				    XEXP (op0, 0), XEXP (op0, 1));
4859 
4860   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4861       || CC0_P (op0))
4862     return NULL_RTX;
4863 
4864   trueop0 = avoid_constant_pool_reference (op0);
4865   trueop1 = avoid_constant_pool_reference (op1);
4866   return simplify_relational_operation_1 (code, mode, cmp_mode,
4867 		  			  trueop0, trueop1);
4868 }
4869 
4870 /* This part of simplify_relational_operation is only used when CMP_MODE
4871    is not in class MODE_CC (i.e. it is a real comparison).
4872 
   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
4875 
4876 static rtx
4877 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4878 				 machine_mode cmp_mode, rtx op0, rtx op1)
4879 {
4880   enum rtx_code op0code = GET_CODE (op0);
4881 
4882   if (op1 == const0_rtx && COMPARISON_P (op0))
4883     {
4884       /* If op0 is a comparison, extract the comparison arguments
4885          from it.  */
4886       if (code == NE)
4887 	{
4888 	  if (GET_MODE (op0) == mode)
4889 	    return simplify_rtx (op0);
4890 	  else
4891 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4892 					    XEXP (op0, 0), XEXP (op0, 1));
4893 	}
4894       else if (code == EQ)
4895 	{
4896 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4897 	  if (new_code != UNKNOWN)
4898 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4899 					    XEXP (op0, 0), XEXP (op0, 1));
4900 	}
4901     }
4902 
4903   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4904      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
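  /* For instance, (ltu:SI (plus:SI a (const_int 4)) (const_int 4)) becomes
     (geu:SI a (const_int -4)); both forms test whether adding 4 to A
     wrapped around.  */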
4905   if ((code == LTU || code == GEU)
4906       && GET_CODE (op0) == PLUS
4907       && CONST_INT_P (XEXP (op0, 1))
4908       && (rtx_equal_p (op1, XEXP (op0, 0))
4909 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4910       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4911       && XEXP (op0, 1) != const0_rtx)
4912     {
4913       rtx new_cmp
4914 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4915       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4916 				      cmp_mode, XEXP (op0, 0), new_cmp);
4917     }
4918 
4919   /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4920      transformed into (LTU a -C).  */
4921   if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4922       && CONST_INT_P (XEXP (op0, 1))
4923       && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4924       && XEXP (op0, 1) != const0_rtx)
4925     {
4926       rtx new_cmp
4927 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4928       return simplify_gen_relational (LTU, mode, cmp_mode,
4929 				       XEXP (op0, 0), new_cmp);
4930     }
4931 
4932   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4933   if ((code == LTU || code == GEU)
4934       && GET_CODE (op0) == PLUS
4935       && rtx_equal_p (op1, XEXP (op0, 1))
4936       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4937       && !rtx_equal_p (op1, XEXP (op0, 0)))
4938     return simplify_gen_relational (code, mode, cmp_mode, op0,
4939 				    copy_rtx (XEXP (op0, 0)));
4940 
4941   if (op1 == const0_rtx)
4942     {
4943       /* Canonicalize (GTU x 0) as (NE x 0).  */
4944       if (code == GTU)
4945         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4946       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4947       if (code == LEU)
4948         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4949     }
4950   else if (op1 == const1_rtx)
4951     {
4952       switch (code)
4953         {
4954         case GE:
4955 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4956 	  return simplify_gen_relational (GT, mode, cmp_mode,
4957 					  op0, const0_rtx);
4958 	case GEU:
4959 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4960 	  return simplify_gen_relational (NE, mode, cmp_mode,
4961 					  op0, const0_rtx);
4962 	case LT:
4963 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4964 	  return simplify_gen_relational (LE, mode, cmp_mode,
4965 					  op0, const0_rtx);
4966 	case LTU:
4967 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4968 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4969 					  op0, const0_rtx);
4970 	default:
4971 	  break;
4972 	}
4973     }
4974   else if (op1 == constm1_rtx)
4975     {
4976       /* Canonicalize (LE x -1) as (LT x 0).  */
4977       if (code == LE)
4978         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4979       /* Canonicalize (GT x -1) as (GE x 0).  */
4980       if (code == GT)
4981         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4982     }
4983 
4984   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
4985   if ((code == EQ || code == NE)
4986       && (op0code == PLUS || op0code == MINUS)
4987       && CONSTANT_P (op1)
4988       && CONSTANT_P (XEXP (op0, 1))
4989       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4990     {
4991       rtx x = XEXP (op0, 0);
4992       rtx c = XEXP (op0, 1);
4993       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4994       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4995 
4996       /* Detect an infinite recursive condition, where we oscillate at this
4997 	 simplification case between:
4998 	    A + B == C  <--->  C - B == A,
4999 	 where A, B, and C are all constants with non-simplifiable expressions,
5000 	 usually SYMBOL_REFs.  */
5001       if (GET_CODE (tem) == invcode
5002 	  && CONSTANT_P (x)
5003 	  && rtx_equal_p (c, XEXP (tem, 1)))
5004 	return NULL_RTX;
5005 
5006       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5007     }
5008 
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5010      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
5011   scalar_int_mode int_mode, int_cmp_mode;
5012   if (code == NE
5013       && op1 == const0_rtx
5014       && is_int_mode (mode, &int_mode)
5015       && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5016       /* ??? Work-around BImode bugs in the ia64 backend.  */
5017       && int_mode != BImode
5018       && int_cmp_mode != BImode
5019       && nonzero_bits (op0, int_cmp_mode) == 1
5020       && STORE_FLAG_VALUE == 1)
5021     return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5022 	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5023 	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
5024 
5025   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
5026   if ((code == EQ || code == NE)
5027       && op1 == const0_rtx
5028       && op0code == XOR)
5029     return simplify_gen_relational (code, mode, cmp_mode,
5030 				    XEXP (op0, 0), XEXP (op0, 1));
5031 
5032   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
5033   if ((code == EQ || code == NE)
5034       && op0code == XOR
5035       && rtx_equal_p (XEXP (op0, 0), op1)
5036       && !side_effects_p (XEXP (op0, 0)))
5037     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5038 				    CONST0_RTX (mode));
5039 
5040   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
5041   if ((code == EQ || code == NE)
5042       && op0code == XOR
5043       && rtx_equal_p (XEXP (op0, 1), op1)
5044       && !side_effects_p (XEXP (op0, 1)))
5045     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5046 				    CONST0_RTX (mode));
5047 
5048   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
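  /* E.g. (eq (xor x (const_int 5)) (const_int 3)) becomes
     (eq x (const_int 6)), since 5 ^ 3 == 6.  */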
5049   if ((code == EQ || code == NE)
5050       && op0code == XOR
5051       && CONST_SCALAR_INT_P (op1)
5052       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5053     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5054 				    simplify_gen_binary (XOR, cmp_mode,
5055 							 XEXP (op0, 1), op1));
5056 
  /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5058      constant folding if x/y is a constant.  */
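  /* E.g. with Y == (const_int 15), (eq (and x (const_int 15)) x) becomes
     (eq (and x (const_int -16)) (const_int 0)), i.e. a test that x fits
     in the low four bits.  */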
5059   if ((code == EQ || code == NE)
5060       && (op0code == AND || op0code == IOR)
5061       && !side_effects_p (op1)
5062       && op1 != CONST0_RTX (cmp_mode))
5063     {
5064       /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5065 	 (eq/ne (and (not y) x) 0).  */
5066       if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5067 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5068 	{
5069 	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5070 					  cmp_mode);
5071 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5072 
5073 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5074 					  CONST0_RTX (cmp_mode));
5075 	}
5076 
5077       /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5078 	 (eq/ne (and (not x) y) 0).  */
5079       if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5080 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5081 	{
5082 	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5083 					  cmp_mode);
5084 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5085 
5086 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5087 					  CONST0_RTX (cmp_mode));
5088 	}
5089     }
5090 
5091   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
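  /* E.g. (eq:SI (bswap:SI x) (const_int 0x12345678)) becomes
     (eq:SI x (const_int 0x78563412)).  */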
5092   if ((code == EQ || code == NE)
5093       && GET_CODE (op0) == BSWAP
5094       && CONST_SCALAR_INT_P (op1))
5095     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5096 				    simplify_gen_unary (BSWAP, cmp_mode,
5097 							op1, cmp_mode));
5098 
5099   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
5100   if ((code == EQ || code == NE)
5101       && GET_CODE (op0) == BSWAP
5102       && GET_CODE (op1) == BSWAP)
5103     return simplify_gen_relational (code, mode, cmp_mode,
5104 				    XEXP (op0, 0), XEXP (op1, 0));
5105 
5106   if (op0code == POPCOUNT && op1 == const0_rtx)
5107     switch (code)
5108       {
5109       case EQ:
5110       case LE:
5111       case LEU:
5112 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
5113 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5114 					XEXP (op0, 0), const0_rtx);
5115 
5116       case NE:
5117       case GT:
5118       case GTU:
5119 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
5120 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5121 					XEXP (op0, 0), const0_rtx);
5122 
5123       default:
5124 	break;
5125       }
5126 
5127   return NULL_RTX;
5128 }
5129 
5130 enum
5131 {
5132   CMP_EQ = 1,
5133   CMP_LT = 2,
5134   CMP_GT = 4,
5135   CMP_LTU = 8,
5136   CMP_GTU = 16
5137 };
5138 
5139 
5140 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5143    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5144    For floating-point comparisons, assume that the operands were ordered.  */
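/* For example, comparison_result (LE, CMP_LT | CMP_LTU) returns
   const_true_rtx, since a value known to be less than another is
   certainly less than or equal to it.  */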
5145 
5146 static rtx
5147 comparison_result (enum rtx_code code, int known_results)
5148 {
5149   switch (code)
5150     {
5151     case EQ:
5152     case UNEQ:
5153       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5154     case NE:
5155     case LTGT:
5156       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5157 
5158     case LT:
5159     case UNLT:
5160       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5161     case GE:
5162     case UNGE:
5163       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5164 
5165     case GT:
5166     case UNGT:
5167       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5168     case LE:
5169     case UNLE:
5170       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5171 
5172     case LTU:
5173       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5174     case GEU:
5175       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5176 
5177     case GTU:
5178       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5179     case LEU:
5180       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5181 
5182     case ORDERED:
5183       return const_true_rtx;
5184     case UNORDERED:
5185       return const0_rtx;
5186     default:
5187       gcc_unreachable ();
5188     }
5189 }
5190 
5191 /* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
5193    comparison is done in "infinite precision".  If no simplification
5194    is possible, this function returns zero.  Otherwise, it returns
5195    either const_true_rtx or const0_rtx.  */
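/* For instance, when compared in "infinite precision" (VOIDmode),
   (lt (const_int 1) (const_int 2)) folds to const_true_rtx, while
   (ltu (const_int -1) (const_int 1)) folds to const0_rtx because -1
   acts as the largest unsigned value.  */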
5196 
5197 rtx
5198 simplify_const_relational_operation (enum rtx_code code,
5199 				     machine_mode mode,
5200 				     rtx op0, rtx op1)
5201 {
5202   rtx tem;
5203   rtx trueop0;
5204   rtx trueop1;
5205 
5206   gcc_assert (mode != VOIDmode
5207 	      || (GET_MODE (op0) == VOIDmode
5208 		  && GET_MODE (op1) == VOIDmode));
5209 
5210   /* If op0 is a compare, extract the comparison arguments from it.  */
5211   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5212     {
5213       op1 = XEXP (op0, 1);
5214       op0 = XEXP (op0, 0);
5215 
5216       if (GET_MODE (op0) != VOIDmode)
5217 	mode = GET_MODE (op0);
5218       else if (GET_MODE (op1) != VOIDmode)
5219 	mode = GET_MODE (op1);
5220       else
5221 	return 0;
5222     }
5223 
5224   /* We can't simplify MODE_CC values since we don't know what the
5225      actual comparison is.  */
5226   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5227     return 0;
5228 
5229   /* Make sure the constant is second.  */
5230   if (swap_commutative_operands_p (op0, op1))
5231     {
5232       std::swap (op0, op1);
5233       code = swap_condition (code);
5234     }
5235 
5236   trueop0 = avoid_constant_pool_reference (op0);
5237   trueop1 = avoid_constant_pool_reference (op1);
5238 
5239   /* For integer comparisons of A and B maybe we can simplify A - B and can
5240      then simplify a comparison of that with zero.  If A and B are both either
5241      a register or a CONST_INT, this can't help; testing for these cases will
5242      prevent infinite recursion here and speed things up.
5243 
5244      We can only do this for EQ and NE comparisons as otherwise we may
5245      lose or introduce overflow which we cannot disregard as undefined as
5246      we do not know the signedness of the operation on either the left or
5247      the right hand side of the comparison.  */
5248 
5249   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5250       && (code == EQ || code == NE)
5251       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5252 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
5253       && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5254       /* We cannot do this if tem is a nonzero address.  */
5255       && ! nonzero_address_p (tem))
5256     return simplify_const_relational_operation (signed_condition (code),
5257 						mode, tem, const0_rtx);
5258 
5259   if (! HONOR_NANS (mode) && code == ORDERED)
5260     return const_true_rtx;
5261 
5262   if (! HONOR_NANS (mode) && code == UNORDERED)
5263     return const0_rtx;
5264 
5265   /* For modes without NaNs, if the two operands are equal, we know the
5266      result except if they have side-effects.  Even with NaNs we know
5267      the result of unordered comparisons and, if signaling NaNs are
5268      irrelevant, also the result of LT/GT/LTGT.  */
5269   if ((! HONOR_NANS (trueop0)
5270        || code == UNEQ || code == UNLE || code == UNGE
5271        || ((code == LT || code == GT || code == LTGT)
5272 	   && ! HONOR_SNANS (trueop0)))
5273       && rtx_equal_p (trueop0, trueop1)
5274       && ! side_effects_p (trueop0))
5275     return comparison_result (code, CMP_EQ);
5276 
5277   /* If the operands are floating-point constants, see if we can fold
5278      the result.  */
5279   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5280       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5281       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5282     {
5283       const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5284       const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5285 
5286       /* Comparisons are unordered iff at least one of the values is NaN.  */
5287       if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5288 	switch (code)
5289 	  {
5290 	  case UNEQ:
5291 	  case UNLT:
5292 	  case UNGT:
5293 	  case UNLE:
5294 	  case UNGE:
5295 	  case NE:
5296 	  case UNORDERED:
5297 	    return const_true_rtx;
5298 	  case EQ:
5299 	  case LT:
5300 	  case GT:
5301 	  case LE:
5302 	  case GE:
5303 	  case LTGT:
5304 	  case ORDERED:
5305 	    return const0_rtx;
5306 	  default:
5307 	    return 0;
5308 	  }
5309 
5310       return comparison_result (code,
5311 				(real_equal (d0, d1) ? CMP_EQ :
5312 				 real_less (d0, d1) ? CMP_LT : CMP_GT));
5313     }
5314 
5315   /* Otherwise, see if the operands are both integers.  */
5316   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5317       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5318     {
5319       /* It would be nice if we really had a mode here.  However, the
5320 	 largest int representable on the target is as good as
5321 	 infinite.  */
5322       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5323       rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5324       rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5325 
5326       if (wi::eq_p (ptrueop0, ptrueop1))
5327 	return comparison_result (code, CMP_EQ);
5328       else
5329 	{
5330 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5331 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5332 	  return comparison_result (code, cr);
5333 	}
5334     }
5335 
5336   /* Optimize comparisons with upper and lower bounds.  */
5337   scalar_int_mode int_mode;
5338   if (CONST_INT_P (trueop1)
5339       && is_a <scalar_int_mode> (mode, &int_mode)
5340       && HWI_COMPUTABLE_MODE_P (int_mode)
5341       && !side_effects_p (trueop0))
5342     {
5343       int sign;
5344       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5345       HOST_WIDE_INT val = INTVAL (trueop1);
5346       HOST_WIDE_INT mmin, mmax;
5347 
5348       if (code == GEU
5349 	  || code == LEU
5350 	  || code == GTU
5351 	  || code == LTU)
5352 	sign = 0;
5353       else
5354 	sign = 1;
5355 
5356       /* Get a reduced range if the sign bit is zero.  */
5357       if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5358 	{
5359 	  mmin = 0;
5360 	  mmax = nonzero;
5361 	}
5362       else
5363 	{
5364 	  rtx mmin_rtx, mmax_rtx;
5365 	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5366 
5367 	  mmin = INTVAL (mmin_rtx);
5368 	  mmax = INTVAL (mmax_rtx);
5369 	  if (sign)
5370 	    {
5371 	      unsigned int sign_copies
5372 		= num_sign_bit_copies (trueop0, int_mode);
5373 
5374 	      mmin >>= (sign_copies - 1);
5375 	      mmax >>= (sign_copies - 1);
5376 	    }
5377 	}
5378 
5379       switch (code)
5380 	{
5381 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5382 	case GEU:
5383 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5384 	    return const_true_rtx;
5385 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5386 	    return const0_rtx;
5387 	  break;
5388 	case GE:
5389 	  if (val <= mmin)
5390 	    return const_true_rtx;
5391 	  if (val > mmax)
5392 	    return const0_rtx;
5393 	  break;
5394 
5395 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5396 	case LEU:
5397 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5398 	    return const_true_rtx;
5399 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5400 	    return const0_rtx;
5401 	  break;
5402 	case LE:
5403 	  if (val >= mmax)
5404 	    return const_true_rtx;
5405 	  if (val < mmin)
5406 	    return const0_rtx;
5407 	  break;
5408 
5409 	case EQ:
5410 	  /* x == y is always false for y out of range.  */
5411 	  if (val < mmin || val > mmax)
5412 	    return const0_rtx;
5413 	  break;
5414 
5415 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5416 	case GTU:
5417 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5418 	    return const0_rtx;
5419 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5420 	    return const_true_rtx;
5421 	  break;
5422 	case GT:
5423 	  if (val >= mmax)
5424 	    return const0_rtx;
5425 	  if (val < mmin)
5426 	    return const_true_rtx;
5427 	  break;
5428 
5429 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5430 	case LTU:
5431 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5432 	    return const0_rtx;
5433 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5434 	    return const_true_rtx;
5435 	  break;
5436 	case LT:
5437 	  if (val <= mmin)
5438 	    return const0_rtx;
5439 	  if (val > mmax)
5440 	    return const_true_rtx;
5441 	  break;
5442 
5443 	case NE:
5444 	  /* x != y is always true for y out of range.  */
5445 	  if (val < mmin || val > mmax)
5446 	    return const_true_rtx;
5447 	  break;
5448 
5449 	default:
5450 	  break;
5451 	}
5452     }
5453 
5454   /* Optimize integer comparisons with zero.  */
5455   if (is_a <scalar_int_mode> (mode, &int_mode)
5456       && trueop1 == const0_rtx
5457       && !side_effects_p (trueop0))
5458     {
5459       /* Some addresses are known to be nonzero.  We don't know
5460 	 their sign, but equality comparisons are known.  */
5461       if (nonzero_address_p (trueop0))
5462 	{
5463 	  if (code == EQ || code == LEU)
5464 	    return const0_rtx;
5465 	  if (code == NE || code == GTU)
5466 	    return const_true_rtx;
5467 	}
5468 
5469       /* See if the first operand is an IOR with a constant.  If so, we
5470 	 may be able to determine the result of this comparison.  */
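      /* For instance, (eq (ior x (const_int 4)) (const_int 0)) is always
	 false, since bit 2 of the IOR result is always set.  */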
5471       if (GET_CODE (op0) == IOR)
5472 	{
5473 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5474 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5475 	    {
5476 	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5477 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5478 			      && (UINTVAL (inner_const)
5479 				  & (HOST_WIDE_INT_1U
5480 				     << sign_bitnum)));
5481 
5482 	      switch (code)
5483 		{
5484 		case EQ:
5485 		case LEU:
5486 		  return const0_rtx;
5487 		case NE:
5488 		case GTU:
5489 		  return const_true_rtx;
5490 		case LT:
5491 		case LE:
5492 		  if (has_sign)
5493 		    return const_true_rtx;
5494 		  break;
5495 		case GT:
5496 		case GE:
5497 		  if (has_sign)
5498 		    return const0_rtx;
5499 		  break;
5500 		default:
5501 		  break;
5502 		}
5503 	    }
5504 	}
5505     }
5506 
5507   /* Optimize comparison of ABS with zero.  */
5508   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5509       && (GET_CODE (trueop0) == ABS
5510 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5511 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5512     {
5513       switch (code)
5514 	{
5515 	case LT:
5516 	  /* Optimize abs(x) < 0.0.  */
5517 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5518 	    return const0_rtx;
5519 	  break;
5520 
5521 	case GE:
5522 	  /* Optimize abs(x) >= 0.0.  */
5523 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5524 	    return const_true_rtx;
5525 	  break;
5526 
5527 	case UNGE:
5528 	  /* Optimize ! (abs(x) < 0.0).  */
5529 	  return const_true_rtx;
5530 
5531 	default:
5532 	  break;
5533 	}
5534     }
5535 
5536   return 0;
5537 }
5538 
5539 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5540    where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
   or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the
   expression can be simplified to that, or NULL_RTX if not.
5543    Assume X is compared against zero with CMP_CODE and the true
5544    arm is TRUE_VAL and the false arm is FALSE_VAL.  */
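/* For example, assuming CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode,
   (if_then_else (eq:SI x (const_int 0)) (const_int 32) (clz:SI x))
   collapses to (clz:SI x).  */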
5545 
5546 static rtx
5547 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5548 {
5549   if (cmp_code != EQ && cmp_code != NE)
5550     return NULL_RTX;
5551 
  /* Result for X == 0 and X != 0 respectively.  */
5553   rtx on_zero, on_nonzero;
5554   if (cmp_code == EQ)
5555     {
5556       on_zero = true_val;
5557       on_nonzero = false_val;
5558     }
5559   else
5560     {
5561       on_zero = false_val;
5562       on_nonzero = true_val;
5563     }
5564 
5565   rtx_code op_code = GET_CODE (on_nonzero);
5566   if ((op_code != CLZ && op_code != CTZ)
5567       || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5568       || !CONST_INT_P (on_zero))
5569     return NULL_RTX;
5570 
5571   HOST_WIDE_INT op_val;
5572   scalar_int_mode mode ATTRIBUTE_UNUSED
5573     = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5574   if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5575        || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5576       && op_val == INTVAL (on_zero))
5577     return on_nonzero;
5578 
5579   return NULL_RTX;
5580 }
5581 
5582 
5583 /* Simplify CODE, an operation with result mode MODE and three operands,
5584    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
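/* For example, (if_then_else (const_int 1) A B) simplifies to A, and
   (fma (neg a) (neg b) c) simplifies to (fma a b c) by cancelling the
   two negations.  */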
5586 
5587 rtx
5588 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5589 			    machine_mode op0_mode, rtx op0, rtx op1,
5590 			    rtx op2)
5591 {
5592   bool any_change = false;
5593   rtx tem, trueop2;
5594   scalar_int_mode int_mode, int_op0_mode;
5595   unsigned int n_elts;
5596 
5597   switch (code)
5598     {
5599     case FMA:
5600       /* Simplify negations around the multiplication.  */
5601       /* -a * -b + c  =>  a * b + c.  */
5602       if (GET_CODE (op0) == NEG)
5603 	{
5604 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5605 	  if (tem)
5606 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5607 	}
5608       else if (GET_CODE (op1) == NEG)
5609 	{
5610 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5611 	  if (tem)
5612 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5613 	}
5614 
5615       /* Canonicalize the two multiplication operands.  */
5616       /* a * -b + c  =>  -b * a + c.  */
5617       if (swap_commutative_operands_p (op0, op1))
5618 	std::swap (op0, op1), any_change = true;
5619 
5620       if (any_change)
5621 	return gen_rtx_FMA (mode, op0, op1, op2);
5622       return NULL_RTX;
5623 
5624     case SIGN_EXTRACT:
5625     case ZERO_EXTRACT:
5626       if (CONST_INT_P (op0)
5627 	  && CONST_INT_P (op1)
5628 	  && CONST_INT_P (op2)
5629 	  && is_a <scalar_int_mode> (mode, &int_mode)
5630 	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5631 	  && HWI_COMPUTABLE_MODE_P (int_mode))
5632 	{
	  /* Extracting a bit-field from a constant.  */
5634 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5635 	  HOST_WIDE_INT op1val = INTVAL (op1);
5636 	  HOST_WIDE_INT op2val = INTVAL (op2);
5637 	  if (!BITS_BIG_ENDIAN)
5638 	    val >>= op2val;
5639 	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5640 	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5641 	  else
5642 	    /* Not enough information to calculate the bit position.  */
5643 	    break;
5644 
5645 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5646 	    {
5647 	      /* First zero-extend.  */
5648 	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
5649 	      /* If desired, propagate sign bit.  */
5650 	      if (code == SIGN_EXTRACT
5651 		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5652 		     != 0)
5653 		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5654 	    }
5655 
5656 	  return gen_int_mode (val, int_mode);
5657 	}
5658       break;
5659 
5660     case IF_THEN_ELSE:
5661       if (CONST_INT_P (op0))
5662 	return op0 != const0_rtx ? op1 : op2;
5663 
5664       /* Convert c ? a : a into "a".  */
5665       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5666 	return op1;
5667 
5668       /* Convert a != b ? a : b into "a".  */
5669       if (GET_CODE (op0) == NE
5670 	  && ! side_effects_p (op0)
5671 	  && ! HONOR_NANS (mode)
5672 	  && ! HONOR_SIGNED_ZEROS (mode)
5673 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5674 	       && rtx_equal_p (XEXP (op0, 1), op2))
5675 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5676 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5677 	return op1;
5678 
5679       /* Convert a == b ? a : b into "b".  */
5680       if (GET_CODE (op0) == EQ
5681 	  && ! side_effects_p (op0)
5682 	  && ! HONOR_NANS (mode)
5683 	  && ! HONOR_SIGNED_ZEROS (mode)
5684 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5685 	       && rtx_equal_p (XEXP (op0, 1), op2))
5686 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5687 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5688 	return op2;
5689 
5690       /* Convert (!c) != {0,...,0} ? a : b into
5691          c != {0,...,0} ? b : a for vector modes.  */
5692       if (VECTOR_MODE_P (GET_MODE (op1))
5693 	  && GET_CODE (op0) == NE
5694 	  && GET_CODE (XEXP (op0, 0)) == NOT
5695 	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5696 	{
5697 	  rtx cv = XEXP (op0, 1);
5698 	  int nunits;
5699 	  bool ok = true;
5700 	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5701 	    ok = false;
5702 	  else
5703 	    for (int i = 0; i < nunits; ++i)
5704 	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5705 		{
5706 		  ok = false;
5707 		  break;
5708 		}
5709 	  if (ok)
5710 	    {
5711 	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5712 					XEXP (XEXP (op0, 0), 0),
5713 					XEXP (op0, 1));
5714 	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5715 	      return retval;
5716 	    }
5717 	}
5718 
5719       /* Convert x == 0 ? N : clz (x) into clz (x) when
5720 	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5721 	 Similarly for ctz (x).  */
5722       if (COMPARISON_P (op0) && !side_effects_p (op0)
5723 	  && XEXP (op0, 1) == const0_rtx)
5724 	{
5725 	  rtx simplified
5726 	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5727 				     op1, op2);
5728 	  if (simplified)
5729 	    return simplified;
5730 	}
5731 
5732       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5733 	{
5734 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5735 					? GET_MODE (XEXP (op0, 1))
5736 					: GET_MODE (XEXP (op0, 0)));
5737 	  rtx temp;
5738 
	  /* Look for constants in op1 and op2 that let the IF_THEN_ELSE
	     collapse to a plain comparison.  */
5740 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5741 	    {
5742 	      HOST_WIDE_INT t = INTVAL (op1);
5743 	      HOST_WIDE_INT f = INTVAL (op2);
5744 
5745 	      if (t == STORE_FLAG_VALUE && f == 0)
5746 	        code = GET_CODE (op0);
5747 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5748 		{
5749 		  enum rtx_code tmp;
5750 		  tmp = reversed_comparison_code (op0, NULL);
5751 		  if (tmp == UNKNOWN)
5752 		    break;
5753 		  code = tmp;
5754 		}
5755 	      else
5756 		break;
5757 
5758 	      return simplify_gen_relational (code, mode, cmp_mode,
5759 					      XEXP (op0, 0), XEXP (op0, 1));
5760 	    }
5761 
5762 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5763 			  			cmp_mode, XEXP (op0, 0),
5764 						XEXP (op0, 1));
5765 
5766 	  /* See if any simplifications were possible.  */
5767 	  if (temp)
5768 	    {
5769 	      if (CONST_INT_P (temp))
5770 		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5773 	    }
5774 	}
5775       break;
5776 
5777     case VEC_MERGE:
5778       gcc_assert (GET_MODE (op0) == mode);
5779       gcc_assert (GET_MODE (op1) == mode);
5780       gcc_assert (VECTOR_MODE_P (mode));
5781       trueop2 = avoid_constant_pool_reference (op2);
5782       if (CONST_INT_P (trueop2)
5783 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5784 	{
5785 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5786 	  unsigned HOST_WIDE_INT mask;
5787 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
5788 	    mask = -1;
5789 	  else
5790 	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5791 
5792 	  if (!(sel & mask) && !side_effects_p (op0))
5793 	    return op1;
5794 	  if ((sel & mask) == mask && !side_effects_p (op1))
5795 	    return op0;
5796 
5797 	  rtx trueop0 = avoid_constant_pool_reference (op0);
5798 	  rtx trueop1 = avoid_constant_pool_reference (op1);
5799 	  if (GET_CODE (trueop0) == CONST_VECTOR
5800 	      && GET_CODE (trueop1) == CONST_VECTOR)
5801 	    {
5802 	      rtvec v = rtvec_alloc (n_elts);
5803 	      unsigned int i;
5804 
5805 	      for (i = 0; i < n_elts; i++)
5806 		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5807 				    ? CONST_VECTOR_ELT (trueop0, i)
5808 				    : CONST_VECTOR_ELT (trueop1, i));
5809 	      return gen_rtx_CONST_VECTOR (mode, v);
5810 	    }
5811 
5812 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5813 	     if no element from a appears in the result.  */
5814 	  if (GET_CODE (op0) == VEC_MERGE)
5815 	    {
5816 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
5817 	      if (CONST_INT_P (tem))
5818 		{
5819 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5820 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5821 		    return simplify_gen_ternary (code, mode, mode,
5822 						 XEXP (op0, 1), op1, op2);
5823 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5824 		    return simplify_gen_ternary (code, mode, mode,
5825 						 XEXP (op0, 0), op1, op2);
5826 		}
5827 	    }
5828 	  if (GET_CODE (op1) == VEC_MERGE)
5829 	    {
5830 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
5831 	      if (CONST_INT_P (tem))
5832 		{
5833 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5834 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5835 		    return simplify_gen_ternary (code, mode, mode,
5836 						 op0, XEXP (op1, 1), op2);
5837 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5838 		    return simplify_gen_ternary (code, mode, mode,
5839 						 op0, XEXP (op1, 0), op2);
5840 		}
5841 	    }
5842 
5843 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5844 	     with a.  */
5845 	  if (GET_CODE (op0) == VEC_DUPLICATE
5846 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5847 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5848 	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5849 	    {
5850 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5851 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
5852 		{
5853 		  if (XEXP (XEXP (op0, 0), 0) == op1
5854 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5855 		    return op1;
5856 		}
5857 	    }
5858 	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5859 	     (const_int N))
5860 	     with (vec_concat (X) (B)) if N == 1 or
5861 	     (vec_concat (A) (X)) if N == 2.  */
5862 	  if (GET_CODE (op0) == VEC_DUPLICATE
5863 	      && GET_CODE (op1) == CONST_VECTOR
5864 	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
5865 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5866 	      && IN_RANGE (sel, 1, 2))
5867 	    {
5868 	      rtx newop0 = XEXP (op0, 0);
5869 	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5870 	      if (sel == 2)
5871 		std::swap (newop0, newop1);
5872 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5873 	    }
5874 	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5875 	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5876 	     Only applies for vectors of two elements.  */
5877 	  if (GET_CODE (op0) == VEC_DUPLICATE
5878 	      && GET_CODE (op1) == VEC_CONCAT
5879 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5880 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5881 	      && IN_RANGE (sel, 1, 2))
5882 	    {
5883 	      rtx newop0 = XEXP (op0, 0);
5884 	      rtx newop1 = XEXP (op1, 2 - sel);
5885 	      rtx otherop = XEXP (op1, sel - 1);
5886 	      if (sel == 2)
5887 		std::swap (newop0, newop1);
5888 	      /* Don't want to throw away the other part of the vec_concat if
5889 		 it has side-effects.  */
5890 	      if (!side_effects_p (otherop))
5891 		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5892 	    }
5893 
5894 	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5895 				 (const_int n))
5896 	     with (vec_concat x y) or (vec_concat y x) depending on value
5897 	     of N.  */
5898 	  if (GET_CODE (op0) == VEC_DUPLICATE
5899 	      && GET_CODE (op1) == VEC_DUPLICATE
5900 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5901 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5902 	      && IN_RANGE (sel, 1, 2))
5903 	    {
5904 	      rtx newop0 = XEXP (op0, 0);
5905 	      rtx newop1 = XEXP (op1, 0);
5906 	      if (sel == 2)
5907 		std::swap (newop0, newop1);
5908 
5909 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5910 	    }
5911 	}
5912 
5913       if (rtx_equal_p (op0, op1)
5914 	  && !side_effects_p (op2) && !side_effects_p (op1))
5915 	return op0;
5916 
5917       break;
5918 
5919     default:
5920       gcc_unreachable ();
5921     }
5922 
5923   return 0;
5924 }
5925 
5926 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5927    or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5928    CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5929 
5930    Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
5931    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5932    and then repacking them again for OUTERMODE.  If OP is a CONST_VECTOR,
5933    FIRST_ELEM is the number of the first element to extract, otherwise
5934    FIRST_ELEM is ignored.  */
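/* For illustration: on a little-endian target, taking the QImode subword at
   BYTE 0 of (const_int 0x1234) held in HImode yields (const_int 0x34), while
   a big-endian target yields (const_int 0x12) for the same BYTE.  */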
5935 
5936 static rtx
5937 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5938 		       machine_mode innermode, unsigned int byte,
5939 		       unsigned int first_elem, unsigned int inner_bytes)
5940 {
5941   enum {
5942     value_bit = 8,
5943     value_mask = (1 << value_bit) - 1
5944   };
5945   unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5946   int value_start;
5947   int i;
5948   int elem;
5949 
5950   int num_elem;
5951   rtx * elems;
5952   int elem_bitsize;
5953   rtx result_s = NULL;
5954   rtvec result_v = NULL;
5955   enum mode_class outer_class;
5956   scalar_mode outer_submode;
5957   int max_bitsize;
5958 
5959   /* Some ports misuse CCmode.  */
5960   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5961     return op;
5962 
5963   /* We have no way to represent a complex constant at the rtl level.  */
5964   if (COMPLEX_MODE_P (outermode))
5965     return NULL_RTX;
5966 
5967   /* We support any size mode.  */
5968   max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5969 		     inner_bytes * BITS_PER_UNIT);
5970 
5971   /* Unpack the value.  */
5972 
5973   if (GET_CODE (op) == CONST_VECTOR)
5974     {
5975       num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
5976       elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5977     }
5978   else
5979     {
5980       num_elem = 1;
5981       elem_bitsize = max_bitsize;
5982     }
5983   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5984   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5985   /* I don't know how to handle endianness of sub-units.  */
5986   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5987 
5988   for (elem = 0; elem < num_elem; elem++)
5989     {
5990       unsigned char * vp;
5991       rtx el = (GET_CODE (op) == CONST_VECTOR
5992 		? CONST_VECTOR_ELT (op, first_elem + elem)
5993 		: op);
5994 
5995       /* Vectors are kept in target memory order.  (This is probably
5996 	 a mistake.)  */
5997       {
5998 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5999 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6000 			  / BITS_PER_UNIT);
6001 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6002 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6003 	unsigned bytele = (subword_byte % UNITS_PER_WORD
6004 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6005 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6006       }
6007 
6008       switch (GET_CODE (el))
6009 	{
6010 	case CONST_INT:
6011 	  for (i = 0;
6012 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6013 	       i += value_bit)
6014 	    *vp++ = INTVAL (el) >> i;
6015 	  /* CONST_INTs are always logically sign-extended.  */
6016 	  for (; i < elem_bitsize; i += value_bit)
6017 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
6018 	  break;
6019 
6020 	case CONST_WIDE_INT:
6021 	  {
6022 	    rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6023 	    unsigned char extend = wi::sign_mask (val);
6024 	    int prec = wi::get_precision (val);
6025 
6026 	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6027 	      *vp++ = wi::extract_uhwi (val, i, value_bit);
6028 	    for (; i < elem_bitsize; i += value_bit)
6029 	      *vp++ = extend;
6030 	  }
6031 	  break;
6032 
6033 	case CONST_DOUBLE:
6034 	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6035 	    {
6036 	      unsigned char extend = 0;
6037 	      /* If this triggers, someone should have generated a
6038 		 CONST_INT instead.  */
6039 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6040 
6041 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6042 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
6043 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6044 		{
6045 		  *vp++
6046 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6047 		  i += value_bit;
6048 		}
6049 
6050 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6051 		extend = -1;
6052 	      for (; i < elem_bitsize; i += value_bit)
6053 		*vp++ = extend;
6054 	    }
6055 	  else
6056 	    {
6057 	      /* This is big enough for anything on the platform.  */
6058 	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6059 	      scalar_float_mode el_mode;
6060 
6061 	      el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6062 	      int bitsize = GET_MODE_BITSIZE (el_mode);
6063 
6064 	      gcc_assert (bitsize <= elem_bitsize);
6065 	      gcc_assert (bitsize % value_bit == 0);
6066 
6067 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6068 			      GET_MODE (el));
6069 
6070 	      /* real_to_target produces its result in words affected by
6071 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
6072 		 and use WORDS_BIG_ENDIAN instead; see the documentation
6073 	         of SUBREG in rtl.texi.  */
6074 	      for (i = 0; i < bitsize; i += value_bit)
6075 		{
6076 		  int ibase;
6077 		  if (WORDS_BIG_ENDIAN)
6078 		    ibase = bitsize - 1 - i;
6079 		  else
6080 		    ibase = i;
6081 		  *vp++ = tmp[ibase / 32] >> i % 32;
6082 		}
6083 
6084 	      /* It shouldn't matter what's done here, so fill it with
6085 		 zero.  */
6086 	      for (; i < elem_bitsize; i += value_bit)
6087 		*vp++ = 0;
6088 	    }
6089 	  break;
6090 
6091         case CONST_FIXED:
6092 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6093 	    {
6094 	      for (i = 0; i < elem_bitsize; i += value_bit)
6095 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6096 	    }
6097 	  else
6098 	    {
6099 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6100 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6101               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6102 		   i += value_bit)
6103 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
6104 			>> (i - HOST_BITS_PER_WIDE_INT);
6105 	      for (; i < elem_bitsize; i += value_bit)
6106 		*vp++ = 0;
6107 	    }
6108           break;
6109 
6110 	default:
6111 	  gcc_unreachable ();
6112 	}
6113     }
6114 
6115   /* Now, pick the right byte to start with.  */
6116   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
6117      case is paradoxical SUBREGs, which shouldn't be adjusted since they
6118      will already have offset 0.  */
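  /* E.g. with an 8-byte inner value and a 2-byte outer mode, BYTE 6 on a
     target where WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN are both set is
     renumbered to 8 - 2 - 6 = 0 (the least significant bytes), while on a
     little-endian target BYTE is left unchanged.  */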
6119   if (inner_bytes >= GET_MODE_SIZE (outermode))
6120     {
6121       unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6122       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6123       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6124       byte = (subword_byte % UNITS_PER_WORD
6125 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6126     }
6127 
6128   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
6129      so if it's become negative it will instead be very large.)  */
6130   gcc_assert (byte < inner_bytes);
6131 
6132   /* Convert from bytes to chunks of size value_bit.  */
6133   value_start = byte * (BITS_PER_UNIT / value_bit);
6134 
6135   /* Re-pack the value.  */
6136   num_elem = GET_MODE_NUNITS (outermode);
6137 
6138   if (VECTOR_MODE_P (outermode))
6139     {
6140       result_v = rtvec_alloc (num_elem);
6141       elems = &RTVEC_ELT (result_v, 0);
6142     }
6143   else
6144     elems = &result_s;
6145 
6146   outer_submode = GET_MODE_INNER (outermode);
6147   outer_class = GET_MODE_CLASS (outer_submode);
6148   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6149 
6150   gcc_assert (elem_bitsize % value_bit == 0);
6151   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6152 
6153   for (elem = 0; elem < num_elem; elem++)
6154     {
6155       unsigned char *vp;
6156 
6157       /* Vectors are stored in target memory order.  (This is probably
6158 	 a mistake.)  */
6159       {
6160 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6161 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6162 			  / BITS_PER_UNIT);
6163 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6164 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6165 	unsigned bytele = (subword_byte % UNITS_PER_WORD
6166 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6167 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6168       }
6169 
6170       switch (outer_class)
6171 	{
6172 	case MODE_INT:
6173 	case MODE_PARTIAL_INT:
6174 	  {
6175 	    int u;
6176 	    int base = 0;
6177 	    int units
6178 	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6179 	      / HOST_BITS_PER_WIDE_INT;
6180 	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6181 	    wide_int r;
6182 
6183 	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6184 	      return NULL_RTX;
6185 	    for (u = 0; u < units; u++)
6186 	      {
6187 		unsigned HOST_WIDE_INT buf = 0;
6188 		for (i = 0;
6189 		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6190 		     i += value_bit)
6191 		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6192 
6193 		tmp[u] = buf;
6194 		base += HOST_BITS_PER_WIDE_INT;
6195 	      }
6196 	    r = wide_int::from_array (tmp, units,
6197 				      GET_MODE_PRECISION (outer_submode));
6198 #if TARGET_SUPPORTS_WIDE_INT == 0
6199 	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
6200 	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6201 	      return NULL_RTX;
6202 #endif
6203 	    elems[elem] = immed_wide_int_const (r, outer_submode);
6204 	  }
6205 	  break;
6206 
6207 	case MODE_FLOAT:
6208 	case MODE_DECIMAL_FLOAT:
6209 	  {
6210 	    REAL_VALUE_TYPE r;
6211 	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6212 
6213 	    /* real_from_target wants its input in words affected by
6214 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
6215 	       and use WORDS_BIG_ENDIAN instead; see the documentation
6216 	       of SUBREG in rtl.texi.  */
6217 	    for (i = 0; i < elem_bitsize; i += value_bit)
6218 	      {
6219 		int ibase;
6220 		if (WORDS_BIG_ENDIAN)
6221 		  ibase = elem_bitsize - 1 - i;
6222 		else
6223 		  ibase = i;
6224 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6225 	      }
6226 
6227 	    real_from_target (&r, tmp, outer_submode);
6228 	    elems[elem] = const_double_from_real_value (r, outer_submode);
6229 	  }
6230 	  break;
6231 
6232 	case MODE_FRACT:
6233 	case MODE_UFRACT:
6234 	case MODE_ACCUM:
6235 	case MODE_UACCUM:
6236 	  {
6237 	    FIXED_VALUE_TYPE f;
6238 	    f.data.low = 0;
6239 	    f.data.high = 0;
6240 	    f.mode = outer_submode;
6241 
6242 	    for (i = 0;
6243 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6244 		 i += value_bit)
6245 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6246 	    for (; i < elem_bitsize; i += value_bit)
6247 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6248 			     << (i - HOST_BITS_PER_WIDE_INT));
6249 
6250 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6251           }
6252           break;
6253 
6254 	default:
6255 	  gcc_unreachable ();
6256 	}
6257     }
6258   if (VECTOR_MODE_P (outermode))
6259     return gen_rtx_CONST_VECTOR (outermode, result_v);
6260   else
6261     return result_s;
6262 }
6263 
6264 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6265    Return 0 if no simplifications are possible.  */
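/* For example, on a little-endian target the lowpart HImode subreg (byte
   offset 0) of the SImode constant 0x12345 folds to (const_int 0x2345).  */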
6266 rtx
6267 simplify_subreg (machine_mode outermode, rtx op,
6268 		 machine_mode innermode, poly_uint64 byte)
6269 {
6270   /* Little bit of sanity checking.  */
6271   gcc_assert (innermode != VOIDmode);
6272   gcc_assert (outermode != VOIDmode);
6273   gcc_assert (innermode != BLKmode);
6274   gcc_assert (outermode != BLKmode);
6275 
6276   gcc_assert (GET_MODE (op) == innermode
6277 	      || GET_MODE (op) == VOIDmode);
6278 
6279   poly_uint64 outersize = GET_MODE_SIZE (outermode);
6280   if (!multiple_p (byte, outersize))
6281     return NULL_RTX;
6282 
6283   poly_uint64 innersize = GET_MODE_SIZE (innermode);
6284   if (maybe_ge (byte, innersize))
6285     return NULL_RTX;
6286 
6287   if (outermode == innermode && known_eq (byte, 0U))
6288     return op;
6289 
6290   if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6291     {
6292       rtx elt;
6293 
6294       if (VECTOR_MODE_P (outermode)
6295 	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6296 	  && vec_duplicate_p (op, &elt))
6297 	return gen_vec_duplicate (outermode, elt);
6298 
6299       if (outermode == GET_MODE_INNER (innermode)
6300 	  && vec_duplicate_p (op, &elt))
6301 	return elt;
6302     }
6303 
6304   if (CONST_SCALAR_INT_P (op)
6305       || CONST_DOUBLE_AS_FLOAT_P (op)
6306       || CONST_FIXED_P (op)
6307       || GET_CODE (op) == CONST_VECTOR)
6308     {
6309       /* simplify_immed_subreg deconstructs OP into bytes and constructs
6310 	 the result from bytes, so it only works if the sizes of the modes
6311 	 and the value of the offset are known at compile time.  Cases that
6312 	 that apply to general modes and offsets should be handled here
6313 	 before calling simplify_immed_subreg.  */
6314       fixed_size_mode fs_outermode, fs_innermode;
6315       unsigned HOST_WIDE_INT cbyte;
6316       if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6317 	  && is_a <fixed_size_mode> (innermode, &fs_innermode)
6318 	  && byte.is_constant (&cbyte))
6319 	return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6320 				      0, GET_MODE_SIZE (fs_innermode));
6321 
6322       /* Handle constant-sized outer modes and variable-sized inner modes.  */
6323       unsigned HOST_WIDE_INT first_elem;
6324       if (GET_CODE (op) == CONST_VECTOR
6325 	  && is_a <fixed_size_mode> (outermode, &fs_outermode)
6326 	  && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6327 				  &first_elem))
6328 	return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6329 				      first_elem,
6330 				      GET_MODE_SIZE (fs_outermode));
6331 
6332       return NULL_RTX;
6333     }
6334 
6335   /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
6337   if (GET_CODE (op) == SUBREG)
6338     {
6339       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6340       poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6341       rtx newx;
6342 
6343       if (outermode == innermostmode
6344 	  && known_eq (byte, 0U)
6345 	  && known_eq (SUBREG_BYTE (op), 0))
6346 	return SUBREG_REG (op);
6347 
6348       /* Work out the memory offset of the final OUTERMODE value relative
6349 	 to the inner value of OP.  */
6350       poly_int64 mem_offset = subreg_memory_offset (outermode,
6351 						    innermode, byte);
6352       poly_int64 op_mem_offset = subreg_memory_offset (op);
6353       poly_int64 final_offset = mem_offset + op_mem_offset;
6354 
6355       /* See whether resulting subreg will be paradoxical.  */
6356       if (!paradoxical_subreg_p (outermode, innermostmode))
6357 	{
6358 	  /* Bail out in case resulting subreg would be incorrect.  */
6359 	  if (maybe_lt (final_offset, 0)
6360 	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
6361 	      || !multiple_p (final_offset, outersize))
6362 	    return NULL_RTX;
6363 	}
6364       else
6365 	{
6366 	  poly_int64 required_offset = subreg_memory_offset (outermode,
6367 							     innermostmode, 0);
6368 	  if (maybe_ne (final_offset, required_offset))
6369 	    return NULL_RTX;
6370 	  /* Paradoxical subregs always have byte offset 0.  */
6371 	  final_offset = 0;
6372 	}
6373 
6374       /* Recurse for further possible simplifications.  */
6375       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6376 			      final_offset);
6377       if (newx)
6378 	return newx;
6379       if (validate_subreg (outermode, innermostmode,
6380 			   SUBREG_REG (op), final_offset))
6381 	{
6382 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6383 	  if (SUBREG_PROMOTED_VAR_P (op)
6384 	      && SUBREG_PROMOTED_SIGN (op) >= 0
6385 	      && GET_MODE_CLASS (outermode) == MODE_INT
6386 	      && known_ge (outersize, innersize)
6387 	      && known_le (outersize, innermostsize)
6388 	      && subreg_lowpart_p (newx))
6389 	    {
6390 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
6391 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6392 	    }
6393 	  return newx;
6394 	}
6395       return NULL_RTX;
6396     }
6397 
6398   /* SUBREG of a hard register => just change the register number
6399      and/or mode.  If the hard register is not valid in that mode,
6400      suppress this simplification.  If the hard register is the stack,
6401      frame, or argument pointer, leave this as a SUBREG.  */
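  /* For example, on a target where hard register 0 is valid in both
     DImode and SImode (a sketch; register numbering and the placement
     of the low part are target-dependent):

	 (subreg:SI (reg:DI 0) 0) -> (reg:SI 0)  */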
6402 
6403   if (REG_P (op) && HARD_REGISTER_P (op))
6404     {
6405       unsigned int regno, final_regno;
6406 
6407       regno = REGNO (op);
6408       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6409       if (HARD_REGISTER_NUM_P (final_regno))
6410 	{
6411 	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6412 				      subreg_memory_offset (outermode,
6413 							    innermode, byte));
6414 
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */
6419 
6420 	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6421 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6422 	  return x;
6423 	}
6424     }
6425 
  /* If we have a SUBREG of a register that we are replacing with a MEM,
     make a new MEM and try replacing the SUBREG with it.  Don't do this
     if the MEM has a mode-dependent address or if we would be widening
     it.  */
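  /* A rough sketch of the effect, assuming the address is not
     mode-dependent and the reference is not volatile:

	 (subreg:SI (mem:DI (reg:DI base)) 4)
	   -> (mem:SI (plus:DI (reg:DI base) (const_int 4)))

     where "base" stands for an arbitrary address register.  */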
6430 
6431   if (MEM_P (op)
6432       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
6435       && (! MEM_VOLATILE_P (op)
6436 	  || ! have_insn_for (SET, innermode))
6437       && known_le (outersize, innersize))
6438     return adjust_address_nv (op, outermode, byte);
6439 
6440   /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6441      of two parts.  */
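  /* For example, for a complex value built from two SFmode parts
     (so the part size is 4 bytes):

	 (subreg:SF (concat:SC (reg:SF a) (reg:SF b)) 4) -> (reg:SF b)

     i.e. BYTE selects the part and FINAL_OFFSET is the offset within
     that part.  */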
6442   if (GET_CODE (op) == CONCAT
6443       || GET_CODE (op) == VEC_CONCAT)
6444     {
6445       poly_uint64 final_offset;
6446       rtx part, res;
6447 
6448       machine_mode part_mode = GET_MODE (XEXP (op, 0));
6449       if (part_mode == VOIDmode)
6450 	part_mode = GET_MODE_INNER (GET_MODE (op));
6451       poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6452       if (known_lt (byte, part_size))
6453 	{
6454 	  part = XEXP (op, 0);
6455 	  final_offset = byte;
6456 	}
6457       else if (known_ge (byte, part_size))
6458 	{
6459 	  part = XEXP (op, 1);
6460 	  final_offset = byte - part_size;
6461 	}
6462       else
6463 	return NULL_RTX;
6464 
6465       if (maybe_gt (final_offset + outersize, part_size))
6466 	return NULL_RTX;
6467 
6468       part_mode = GET_MODE (part);
6469       if (part_mode == VOIDmode)
6470 	part_mode = GET_MODE_INNER (GET_MODE (op));
6471       res = simplify_subreg (outermode, part, part_mode, final_offset);
6472       if (res)
6473 	return res;
6474       if (validate_subreg (outermode, part_mode, part, final_offset))
6475 	return gen_rtx_SUBREG (outermode, part, final_offset);
6476       return NULL_RTX;
6477     }
6478 
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts bits above those provided by the ZERO_EXTEND's source.  */
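  /* For example, on a little-endian target:

	 (subreg:SI (zero_extend:DI (reg:SI x)) 4) -> (const_int 0)

     since the extracted bits lie entirely above the zero-extended
     source.  */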
6481   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6482     {
6483       poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6484       if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6485 	return CONST0_RTX (outermode);
6486     }
6487 
6488   scalar_int_mode int_outermode, int_innermode;
6489   if (is_a <scalar_int_mode> (outermode, &int_outermode)
6490       && is_a <scalar_int_mode> (innermode, &int_innermode)
6491       && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6492     {
6493       /* Handle polynomial integers.  The upper bits of a paradoxical
6494 	 subreg are undefined, so this is safe regardless of whether
6495 	 we're truncating or extending.  */
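      /* For example, the QImode lowpart subreg of the HImode constant
	 30 + 24x is the QImode constant 30 + 24x, re-encoded in the
	 narrower precision (cf. the CONST_POLY_INT selftests below).  */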
6496       if (CONST_POLY_INT_P (op))
6497 	{
6498 	  poly_wide_int val
6499 	    = poly_wide_int::from (const_poly_int_value (op),
6500 				   GET_MODE_PRECISION (int_outermode),
6501 				   SIGNED);
6502 	  return immed_wide_int_const (val, int_outermode);
6503 	}
6504 
6505       if (GET_MODE_PRECISION (int_outermode)
6506 	  < GET_MODE_PRECISION (int_innermode))
6507 	{
6508 	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6509 	  if (tem)
6510 	    return tem;
6511 	}
6512     }
6513 
6514   return NULL_RTX;
6515 }
6516 
6517 /* Make a SUBREG operation or equivalent if it folds.  */
6518 
6519 rtx
6520 simplify_gen_subreg (machine_mode outermode, rtx op,
6521 		     machine_mode innermode, poly_uint64 byte)
6522 {
6523   rtx newx;
6524 
6525   newx = simplify_subreg (outermode, op, innermode, byte);
6526   if (newx)
6527     return newx;
6528 
6529   if (GET_CODE (op) == SUBREG
6530       || GET_CODE (op) == CONCAT
6531       || GET_MODE (op) == VOIDmode)
6532     return NULL_RTX;
6533 
6534   if (validate_subreg (outermode, innermode, op, byte))
6535     return gen_rtx_SUBREG (outermode, op, byte);
6536 
6537   return NULL_RTX;
6538 }
6539 
/* Generate a subreg (or a simpler equivalent) that extracts the least
   significant part of EXPR, which has mode INNER_MODE, in mode
   OUTER_MODE.  */
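/* For example, on a little-endian target, lowpart_subreg (SImode, X, DImode)
   yields (subreg:SI X 0) (the byte offset would be 4 on a big-endian
   target), or a simpler equivalent when the subreg folds.  */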
6542 
6543 rtx
6544 lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
6546 {
6547   return simplify_gen_subreg (outer_mode, expr, inner_mode,
6548 			      subreg_lowpart_offset (outer_mode, inner_mode));
6549 }
6550 
6551 /* Simplify X, an rtx expression.
6552 
6553    Return the simplified expression or NULL if no simplifications
6554    were possible.
6555 
6556    This is the preferred entry point into the simplification routines;
6557    however, we still allow passes to call the more specific routines.
6558 
6559    Right now GCC has three (yes, three) major bodies of RTL simplification
6560    code that need to be unified.
6561 
	1. fold_rtx in cse.c.  This code uses various CSE-specific
	   information to aid in RTL simplification.
6564 
	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine-specific information to aid in RTL
	   simplification.
6568 
6569 	3. The routines in this file.
6570 
6571 
6572    Long term we want to only have one body of simplification code; to
6573    get to that state I recommend the following steps:
6574 
	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.
6577 
6578 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
6579 	   use this routine whenever possible.
6580 
	3. Allow pass-dependent state to be provided to these
	   routines and add simplifications based on that pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.
6585 
6586     It will take time, but ultimately the compiler will be easier to
6587     maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
6590 
6591 rtx
6592 simplify_rtx (const_rtx x)
6593 {
6594   const enum rtx_code code = GET_CODE (x);
6595   const machine_mode mode = GET_MODE (x);
6596 
6597   switch (GET_RTX_CLASS (code))
6598     {
6599     case RTX_UNARY:
6600       return simplify_unary_operation (code, mode,
6601 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6602     case RTX_COMM_ARITH:
6603       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6604 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6605 
6606       /* Fall through.  */
6607 
6608     case RTX_BIN_ARITH:
6609       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6610 
6611     case RTX_TERNARY:
6612     case RTX_BITFIELD_OPS:
6613       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6614 					 XEXP (x, 0), XEXP (x, 1),
6615 					 XEXP (x, 2));
6616 
6617     case RTX_COMPARE:
6618     case RTX_COMM_COMPARE:
6619       return simplify_relational_operation (code, mode,
6620                                             ((GET_MODE (XEXP (x, 0))
6621                                              != VOIDmode)
6622                                             ? GET_MODE (XEXP (x, 0))
6623                                             : GET_MODE (XEXP (x, 1))),
6624                                             XEXP (x, 0),
6625                                             XEXP (x, 1));
6626 
6627     case RTX_EXTRA:
6628       if (code == SUBREG)
6629 	return simplify_subreg (mode, SUBREG_REG (x),
6630 				GET_MODE (SUBREG_REG (x)),
6631 				SUBREG_BYTE (x));
6632       break;
6633 
6634     case RTX_OBJ:
6635       if (code == LO_SUM)
6636 	{
6637 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6638 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6639 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
6641 	}
6642       break;
6643 
6644     default:
6645       break;
6646     }
6647   return NULL;
6648 }
6649 
6650 #if CHECKING_P
6651 
6652 namespace selftest {
6653 
6654 /* Make a unique pseudo REG of mode MODE for use by selftests.  */
6655 
6656 static rtx
6657 make_test_reg (machine_mode mode)
6658 {
6659   static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6660 
6661   return gen_rtx_REG (mode, test_reg_num++);
6662 }
6663 
6664 /* Test vector simplifications involving VEC_DUPLICATE in which the
6665    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
6666    register that holds one element of MODE.  */
6667 
6668 static void
6669 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6670 {
6671   scalar_mode inner_mode = GET_MODE_INNER (mode);
6672   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6673   poly_uint64 nunits = GET_MODE_NUNITS (mode);
6674   if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6675     {
6676       /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
6677       rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6678       rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6679       ASSERT_RTX_EQ (duplicate,
6680 		     simplify_unary_operation (NOT, mode,
6681 					       duplicate_not, mode));
6682 
6683       rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6684       rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6685       ASSERT_RTX_EQ (duplicate,
6686 		     simplify_unary_operation (NEG, mode,
6687 					       duplicate_neg, mode));
6688 
6689       /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
6690       ASSERT_RTX_EQ (duplicate,
6691 		     simplify_binary_operation (PLUS, mode, duplicate,
6692 						CONST0_RTX (mode)));
6693 
6694       ASSERT_RTX_EQ (duplicate,
6695 		     simplify_binary_operation (MINUS, mode, duplicate,
6696 						CONST0_RTX (mode)));
6697 
6698       ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6699 			 simplify_binary_operation (MINUS, mode, duplicate,
6700 						    duplicate));
6701     }
6702 
6703   /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
6704   rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6705   ASSERT_RTX_PTR_EQ (scalar_reg,
6706 		     simplify_binary_operation (VEC_SELECT, inner_mode,
6707 						duplicate, zero_par));
6708 
6709   /* And again with the final element.  */
6710   unsigned HOST_WIDE_INT const_nunits;
6711   if (nunits.is_constant (&const_nunits))
6712     {
6713       rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6714       rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6715       ASSERT_RTX_PTR_EQ (scalar_reg,
6716 			 simplify_binary_operation (VEC_SELECT, inner_mode,
6717 						    duplicate, last_par));
6718     }
6719 
6720   /* Test a scalar subreg of a VEC_DUPLICATE.  */
6721   poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6722   ASSERT_RTX_EQ (scalar_reg,
6723 		 simplify_gen_subreg (inner_mode, duplicate,
6724 				      mode, offset));
6725 
6726   machine_mode narrower_mode;
6727   if (maybe_ne (nunits, 2U)
6728       && multiple_p (nunits, 2)
6729       && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6730       && VECTOR_MODE_P (narrower_mode))
6731     {
6732       /* Test VEC_SELECT of a vector.  */
6733       rtx vec_par
6734 	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6735       rtx narrower_duplicate
6736 	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6737       ASSERT_RTX_EQ (narrower_duplicate,
6738 		     simplify_binary_operation (VEC_SELECT, narrower_mode,
6739 						duplicate, vec_par));
6740 
6741       /* Test a vector subreg of a VEC_DUPLICATE.  */
6742       poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6743       ASSERT_RTX_EQ (narrower_duplicate,
6744 		     simplify_gen_subreg (narrower_mode, duplicate,
6745 					  mode, offset));
6746     }
6747 }
6748 
6749 /* Test vector simplifications involving VEC_SERIES in which the
6750    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
6751    register that holds one element of MODE.  */
6752 
6753 static void
6754 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6755 {
6756   /* Test unary cases with VEC_SERIES arguments.  */
6757   scalar_mode inner_mode = GET_MODE_INNER (mode);
6758   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6759   rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6760   rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6761   rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6762   rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6763   rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6764   rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6765   rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6766 					 neg_scalar_reg);
6767   ASSERT_RTX_EQ (series_0_r,
6768 		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6769   ASSERT_RTX_EQ (series_r_m1,
6770 		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6771   ASSERT_RTX_EQ (series_r_r,
6772 		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6773 
6774   /* Test that a VEC_SERIES with a zero step is simplified away.  */
6775   ASSERT_RTX_EQ (duplicate,
6776 		 simplify_binary_operation (VEC_SERIES, mode,
6777 					    scalar_reg, const0_rtx));
6778 
6779   /* Test PLUS and MINUS with VEC_SERIES.  */
6780   rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6781   rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6782   rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6783   ASSERT_RTX_EQ (series_r_r,
6784 		 simplify_binary_operation (PLUS, mode, series_0_r,
6785 					    duplicate));
6786   ASSERT_RTX_EQ (series_r_1,
6787 		 simplify_binary_operation (PLUS, mode, duplicate,
6788 					    series_0_1));
6789   ASSERT_RTX_EQ (series_r_m1,
6790 		 simplify_binary_operation (PLUS, mode, duplicate,
6791 					    series_0_m1));
6792   ASSERT_RTX_EQ (series_0_r,
6793 		 simplify_binary_operation (MINUS, mode, series_r_r,
6794 					    duplicate));
6795   ASSERT_RTX_EQ (series_r_m1,
6796 		 simplify_binary_operation (MINUS, mode, duplicate,
6797 					    series_0_1));
6798   ASSERT_RTX_EQ (series_r_1,
6799 		 simplify_binary_operation (MINUS, mode, duplicate,
6800 					    series_0_m1));
6801   ASSERT_RTX_EQ (series_0_m1,
6802 		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6803 					    constm1_rtx));
6804 }
6805 
6806 /* Verify some simplifications involving vectors.  */
6807 
6808 static void
6809 test_vector_ops ()
6810 {
6811   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6812     {
6813       machine_mode mode = (machine_mode) i;
6814       if (VECTOR_MODE_P (mode))
6815 	{
6816 	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6817 	  test_vector_ops_duplicate (mode, scalar_reg);
6818 	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6819 	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
6820 	    test_vector_ops_series (mode, scalar_reg);
6821 	}
6822     }
6823 }
6824 
6825 template<unsigned int N>
6826 struct simplify_const_poly_int_tests
6827 {
6828   static void run ();
6829 };
6830 
6831 template<>
6832 struct simplify_const_poly_int_tests<1>
6833 {
6834   static void run () {}
6835 };
6836 
6837 /* Test various CONST_POLY_INT properties.  */
6838 
6839 template<unsigned int N>
6840 void
6841 simplify_const_poly_int_tests<N>::run ()
6842 {
6843   rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6844   rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6845   rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6846   rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6847   rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6848   rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6849   rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6850   rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6851   rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6852   rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6853   rtx two = GEN_INT (2);
6854   rtx six = GEN_INT (6);
6855   poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
6856 
6857   /* These tests only try limited operation combinations.  Fuller arithmetic
6858      testing is done directly on poly_ints.  */
6859   ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6860   ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6861   ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6862   ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6863   ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6864   ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6865   ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6866   ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6867   ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6868   ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6869   ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6870 }
6871 
6872 /* Run all of the selftests within this file.  */
6873 
6874 void
6875 simplify_rtx_c_tests ()
6876 {
6877   test_vector_ops ();
6878   simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6879 }
6880 
6881 } // namespace selftest
6882 
6883 #endif /* CHECKING_P */
6884