1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39 
40 /* Simplification and canonicalization of RTL.  */
41 
42 /* Much code operates on (low, high) pairs; the low value is an
43    unsigned wide int, the high value a signed wide int.  We
44    occasionally need to sign extend from low to high as if low were a
45    signed wide int.  */
46 #define HWI_SIGN_EXTEND(low) \
47   ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
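
/* For example, a (low, high) pair representing the sign extension of a
   single HOST_WIDE_INT value L is (L, HWI_SIGN_EXTEND (L)): the high
   half is all ones when L is negative and zero otherwise.  */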
48 
49 static bool plus_minus_operand_p (const_rtx);
50 
51 /* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */
52 
53 static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
55 {
56   return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
57 }
58 
59 /* Test whether expression, X, is an immediate constant that represents
60    the most significant bit of machine mode MODE.  */
61 
62 bool
mode_signbit_p (machine_mode mode, const_rtx x)
64 {
65   unsigned HOST_WIDE_INT val;
66   unsigned int width;
67   scalar_int_mode int_mode;
68 
69   if (!is_int_mode (mode, &int_mode))
70     return false;
71 
72   width = GET_MODE_PRECISION (int_mode);
73   if (width == 0)
74     return false;
75 
76   if (width <= HOST_BITS_PER_WIDE_INT
77       && CONST_INT_P (x))
78     val = INTVAL (x);
79 #if TARGET_SUPPORTS_WIDE_INT
80   else if (CONST_WIDE_INT_P (x))
81     {
82       unsigned int i;
83       unsigned int elts = CONST_WIDE_INT_NUNITS (x);
84       if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
85 	return false;
86       for (i = 0; i < elts - 1; i++)
87 	if (CONST_WIDE_INT_ELT (x, i) != 0)
88 	  return false;
89       val = CONST_WIDE_INT_ELT (x, elts - 1);
90       width %= HOST_BITS_PER_WIDE_INT;
91       if (width == 0)
92 	width = HOST_BITS_PER_WIDE_INT;
93     }
94 #else
95   else if (width <= HOST_BITS_PER_DOUBLE_INT
96 	   && CONST_DOUBLE_AS_INT_P (x)
97 	   && CONST_DOUBLE_LOW (x) == 0)
98     {
99       val = CONST_DOUBLE_HIGH (x);
100       width -= HOST_BITS_PER_WIDE_INT;
101     }
102 #endif
103   else
104     /* X is not an integer constant.  */
105     return false;
106 
107   if (width < HOST_BITS_PER_WIDE_INT)
108     val &= (HOST_WIDE_INT_1U << width) - 1;
109   return val == (HOST_WIDE_INT_1U << (width - 1));
110 }
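
/* For example, mode_signbit_p above accepts, for 32-bit SImode, exactly
   those integer constants whose value masked to the mode is 0x80000000,
   i.e. only the most significant bit of the mode is set.  */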
111 
112 /* Test whether VAL is equal to the most significant bit of mode MODE
113    (after masking with the mode mask of MODE).  Returns false if the
114    precision of MODE is too large to handle.  */
115 
116 bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
118 {
119   unsigned int width;
120   scalar_int_mode int_mode;
121 
122   if (!is_int_mode (mode, &int_mode))
123     return false;
124 
125   width = GET_MODE_PRECISION (int_mode);
126   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
127     return false;
128 
129   val &= GET_MODE_MASK (int_mode);
130   return val == (HOST_WIDE_INT_1U << (width - 1));
131 }
132 
133 /* Test whether the most significant bit of mode MODE is set in VAL.
134    Returns false if the precision of MODE is too large to handle.  */
135 bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
137 {
138   unsigned int width;
139 
140   scalar_int_mode int_mode;
141   if (!is_int_mode (mode, &int_mode))
142     return false;
143 
144   width = GET_MODE_PRECISION (int_mode);
145   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
146     return false;
147 
148   val &= HOST_WIDE_INT_1U << (width - 1);
149   return val != 0;
150 }
151 
152 /* Test whether the most significant bit of mode MODE is clear in VAL.
153    Returns false if the precision of MODE is too large to handle.  */
154 bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
156 {
157   unsigned int width;
158 
159   scalar_int_mode int_mode;
160   if (!is_int_mode (mode, &int_mode))
161     return false;
162 
163   width = GET_MODE_PRECISION (int_mode);
164   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
165     return false;
166 
167   val &= HOST_WIDE_INT_1U << (width - 1);
168   return val == 0;
169 }
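
/* For example, val_signbit_known_set_p (QImode, 0x80) and
   val_signbit_known_clear_p (QImode, 0x7f) both return true, since
   bit 7 is the QImode sign bit.  */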
170 
171 /* Make a binary operation by properly ordering the operands and
172    seeing if the expression folds.  */
173 
174 rtx
simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
176 				       rtx op0, rtx op1)
177 {
178   rtx tem;
179 
180   /* If this simplifies, do it.  */
181   tem = simplify_binary_operation (code, mode, op0, op1);
182   if (tem)
183     return tem;
184 
185   /* Put complex operands first and constants second if commutative.  */
186   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
187       && swap_commutative_operands_p (op0, op1))
188     std::swap (op0, op1);
189 
190   return gen_rtx_fmt_ee (code, mode, op0, op1);
191 }
192 
193 /* If X is a MEM referencing the constant pool, return the real value.
194    Otherwise return X.  */
195 rtx
avoid_constant_pool_reference (rtx x)
197 {
198   rtx c, tmp, addr;
199   machine_mode cmode;
200   poly_int64 offset = 0;
201 
202   switch (GET_CODE (x))
203     {
204     case MEM:
205       break;
206 
207     case FLOAT_EXTEND:
208       /* Handle float extensions of constant pool references.  */
209       tmp = XEXP (x, 0);
210       c = avoid_constant_pool_reference (tmp);
211       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
212 	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
213 					     GET_MODE (x));
214       return x;
215 
216     default:
217       return x;
218     }
219 
220   if (GET_MODE (x) == BLKmode)
221     return x;
222 
223   addr = XEXP (x, 0);
224 
225   /* Call target hook to avoid the effects of -fpic etc....  */
226   addr = targetm.delegitimize_address (addr);
227 
228   /* Split the address into a base and integer offset.  */
229   addr = strip_offset (addr, &offset);
230 
231   if (GET_CODE (addr) == LO_SUM)
232     addr = XEXP (addr, 1);
233 
234   /* If this is a constant pool reference, we can turn it into its
235      constant and hope that simplifications happen.  */
236   if (GET_CODE (addr) == SYMBOL_REF
237       && CONSTANT_POOL_ADDRESS_P (addr))
238     {
239       c = get_pool_constant (addr);
240       cmode = get_pool_mode (addr);
241 
242       /* If we're accessing the constant in a different mode than it was
243          originally stored, attempt to fix that up via subreg simplifications.
244          If that fails we have no choice but to return the original memory.  */
245       if (known_eq (offset, 0) && cmode == GET_MODE (x))
246 	return c;
247       else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
248         {
249           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250           if (tem && CONSTANT_P (tem))
251             return tem;
252         }
253     }
254 
255   return x;
256 }
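
/* For example, given a MEM in DFmode whose address is a constant-pool
   SYMBOL_REF holding (const_double:DF 1.0), avoid_constant_pool_reference
   returns that const_double directly when the access mode and offset
   match the pool entry.  */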
257 
258 /* Simplify a MEM based on its attributes.  This is the default
259    delegitimize_address target hook, and it's recommended that every
260    overrider call it.  */
261 
262 rtx
delegitimize_mem_from_attrs (rtx x)
264 {
265   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
266      use their base addresses as equivalent.  */
267   if (MEM_P (x)
268       && MEM_EXPR (x)
269       && MEM_OFFSET_KNOWN_P (x))
270     {
271       tree decl = MEM_EXPR (x);
272       machine_mode mode = GET_MODE (x);
273       poly_int64 offset = 0;
274 
275       switch (TREE_CODE (decl))
276 	{
277 	default:
278 	  decl = NULL;
279 	  break;
280 
281 	case VAR_DECL:
282 	  break;
283 
284 	case ARRAY_REF:
285 	case ARRAY_RANGE_REF:
286 	case COMPONENT_REF:
287 	case BIT_FIELD_REF:
288 	case REALPART_EXPR:
289 	case IMAGPART_EXPR:
290 	case VIEW_CONVERT_EXPR:
291 	  {
292 	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
293 	    tree toffset;
294 	    int unsignedp, reversep, volatilep = 0;
295 
296 	    decl
297 	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
298 				     &unsignedp, &reversep, &volatilep);
299 	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
300 		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
301 		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
302 	      decl = NULL;
303 	    else
304 	      offset += bytepos + toffset_val;
305 	    break;
306 	  }
307 	}
308 
309       if (decl
310 	  && mode == GET_MODE (x)
311 	  && VAR_P (decl)
312 	  && (TREE_STATIC (decl)
313 	      || DECL_THREAD_LOCAL_P (decl))
314 	  && DECL_RTL_SET_P (decl)
315 	  && MEM_P (DECL_RTL (decl)))
316 	{
317 	  rtx newx;
318 
319 	  offset += MEM_OFFSET (x);
320 
321 	  newx = DECL_RTL (decl);
322 
323 	  if (MEM_P (newx))
324 	    {
325 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
326 	      poly_int64 n_offset, o_offset;
327 
328 	      /* Avoid creating a new MEM needlessly if we already had
329 		 the same address.  We do if there's no OFFSET and the
330 		 old address X is identical to NEWX, or if X is of the
331 		 form (plus NEWX OFFSET), or the NEWX is of the form
332 		 (plus Y (const_int Z)) and X is that with the offset
333 		 added: (plus Y (const_int Z+OFFSET)).  */
334 	      n = strip_offset (n, &n_offset);
335 	      o = strip_offset (o, &o_offset);
336 	      if (!(known_eq (o_offset, n_offset + offset)
337 		    && rtx_equal_p (o, n)))
338 		x = adjust_address_nv (newx, mode, offset);
339 	    }
340 	  else if (GET_MODE (x) == GET_MODE (newx)
341 		   && known_eq (offset, 0))
342 	    x = newx;
343 	}
344     }
345 
346   return x;
347 }
348 
349 /* Make a unary operation by first seeing if it folds and otherwise making
350    the specified operation.  */
351 
352 rtx
simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
354 				      machine_mode op_mode)
355 {
356   rtx tem;
357 
358   /* If this simplifies, use it.  */
359   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
360     return tem;
361 
362   return gen_rtx_fmt_e (code, mode, op);
363 }
364 
365 /* Likewise for ternary operations.  */
366 
367 rtx
simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
369 					machine_mode op0_mode,
370 					rtx op0, rtx op1, rtx op2)
371 {
372   rtx tem;
373 
374   /* If this simplifies, use it.  */
375   if ((tem = simplify_ternary_operation (code, mode, op0_mode,
376 					 op0, op1, op2)) != 0)
377     return tem;
378 
379   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
380 }
381 
382 /* Likewise, for relational operations.
383    CMP_MODE specifies mode comparison is done in.  */
384 
385 rtx
simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
387 					   machine_mode cmp_mode,
388 					   rtx op0, rtx op1)
389 {
390   rtx tem;
391 
392   if ((tem = simplify_relational_operation (code, mode, cmp_mode,
393 					    op0, op1)) != 0)
394     return tem;
395 
396   return gen_rtx_fmt_ee (code, mode, op0, op1);
397 }
398 
399 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
400    and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
402    result.  */
403 
404 rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
406 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
407 {
408   enum rtx_code code = GET_CODE (x);
409   machine_mode mode = GET_MODE (x);
410   machine_mode op_mode;
411   const char *fmt;
412   rtx op0, op1, op2, newx, op;
413   rtvec vec, newvec;
414   int i, j;
415 
416   if (__builtin_expect (fn != NULL, 0))
417     {
418       newx = fn (x, old_rtx, data);
419       if (newx)
420 	return newx;
421     }
422   else if (rtx_equal_p (x, old_rtx))
423     return copy_rtx ((rtx) data);
424 
425   switch (GET_RTX_CLASS (code))
426     {
427     case RTX_UNARY:
428       op0 = XEXP (x, 0);
429       op_mode = GET_MODE (op0);
430       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
431       if (op0 == XEXP (x, 0))
432 	return x;
433       return simplify_gen_unary (code, mode, op0, op_mode);
434 
435     case RTX_BIN_ARITH:
436     case RTX_COMM_ARITH:
437       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
438       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
439       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
440 	return x;
441       return simplify_gen_binary (code, mode, op0, op1);
442 
443     case RTX_COMPARE:
444     case RTX_COMM_COMPARE:
445       op0 = XEXP (x, 0);
446       op1 = XEXP (x, 1);
447       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
448       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
449       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
450       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 	return x;
452       return simplify_gen_relational (code, mode, op_mode, op0, op1);
453 
454     case RTX_TERNARY:
455     case RTX_BITFIELD_OPS:
456       op0 = XEXP (x, 0);
457       op_mode = GET_MODE (op0);
458       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
459       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
460       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
461       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
462 	return x;
463       if (op_mode == VOIDmode)
464 	op_mode = GET_MODE (op0);
465       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
466 
467     case RTX_EXTRA:
468       if (code == SUBREG)
469 	{
470 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
471 	  if (op0 == SUBREG_REG (x))
472 	    return x;
473 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
474 				     GET_MODE (SUBREG_REG (x)),
475 				     SUBREG_BYTE (x));
476 	  return op0 ? op0 : x;
477 	}
478       break;
479 
480     case RTX_OBJ:
481       if (code == MEM)
482 	{
483 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
484 	  if (op0 == XEXP (x, 0))
485 	    return x;
486 	  return replace_equiv_address_nv (x, op0);
487 	}
488       else if (code == LO_SUM)
489 	{
490 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
491 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
492 
493 	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
494 	  if (GET_CODE (op0) == HIGH)
495 	    {
496 	      rtx base0, base1, offset0, offset1;
497 	      split_const (XEXP (op0, 0), &base0, &offset0);
498 	      split_const (op1, &base1, &offset1);
499 	      if (rtx_equal_p (base0, base1))
500 		return op1;
501 	    }
502 
503 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
504 	    return x;
505 	  return gen_rtx_LO_SUM (mode, op0, op1);
506 	}
507       break;
508 
509     default:
510       break;
511     }
512 
513   newx = x;
514   fmt = GET_RTX_FORMAT (code);
515   for (i = 0; fmt[i]; i++)
516     switch (fmt[i])
517       {
518       case 'E':
519 	vec = XVEC (x, i);
520 	newvec = XVEC (newx, i);
521 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
522 	  {
523 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
524 					  old_rtx, fn, data);
525 	    if (op != RTVEC_ELT (vec, j))
526 	      {
527 		if (newvec == vec)
528 		  {
529 		    newvec = shallow_copy_rtvec (vec);
530 		    if (x == newx)
531 		      newx = shallow_copy_rtx (x);
532 		    XVEC (newx, i) = newvec;
533 		  }
534 		RTVEC_ELT (newvec, j) = op;
535 	      }
536 	  }
537 	break;
538 
539       case 'e':
540 	if (XEXP (x, i))
541 	  {
542 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
543 	    if (op != XEXP (x, i))
544 	      {
545 		if (x == newx)
546 		  newx = shallow_copy_rtx (x);
547 		XEXP (newx, i) = op;
548 	      }
549 	  }
550 	break;
551       }
552   return newx;
553 }
554 
555 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
556    resulting RTX.  Return a new RTX which is as simplified as possible.  */
557 
558 rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
560 {
561   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
562 }
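
/* A small usage sketch: simplify_replace_rtx applied to
   (plus:SI (reg:SI r) (const_int 4)) with OLD_RTX = (reg:SI r) and
   NEW_RTX = (const_int 8) substitutes the register and then folds the
   result to (const_int 12) via simplify_gen_binary.  */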
563 
564 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
565    Only handle cases where the truncated value is inherently an rvalue.
566 
567    RTL provides two ways of truncating a value:
568 
569    1. a lowpart subreg.  This form is only a truncation when both
570       the outer and inner modes (here MODE and OP_MODE respectively)
571       are scalar integers, and only then when the subreg is used as
572       an rvalue.
573 
574       It is only valid to form such truncating subregs if the
575       truncation requires no action by the target.  The onus for
576       proving this is on the creator of the subreg -- e.g. the
577       caller to simplify_subreg or simplify_gen_subreg -- and typically
578       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
579 
580    2. a TRUNCATE.  This form handles both scalar and compound integers.
581 
582    The first form is preferred where valid.  However, the TRUNCATE
583    handling in simplify_unary_operation turns the second form into the
584    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
585    so it is generally safe to form rvalue truncations using:
586 
587       simplify_gen_unary (TRUNCATE, ...)
588 
589    and leave simplify_unary_operation to work out which representation
590    should be used.
591 
592    Because of the proof requirements on (1), simplify_truncation must
593    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
594    regardless of whether the outer truncation came from a SUBREG or a
595    TRUNCATE.  For example, if the caller has proven that an SImode
596    truncation of:
597 
598       (and:DI X Y)
599 
600    is a no-op and can be represented as a subreg, it does not follow
601    that SImode truncations of X and Y are also no-ops.  On a target
602    like 64-bit MIPS that requires SImode values to be stored in
603    sign-extended form, an SImode truncation of:
604 
605       (and:DI (reg:DI X) (const_int 63))
606 
607    is trivially a no-op because only the lower 6 bits can be set.
608    However, X is still an arbitrary 64-bit number and so we cannot
609    assume that truncating it too is a no-op.  */
610 
611 rtx
simplify_context::simplify_truncation (machine_mode mode, rtx op,
613 				       machine_mode op_mode)
614 {
615   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
616   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
617   scalar_int_mode int_mode, int_op_mode, subreg_mode;
618 
619   gcc_assert (precision <= op_precision);
620 
621   /* Optimize truncations of zero and sign extended values.  */
622   if (GET_CODE (op) == ZERO_EXTEND
623       || GET_CODE (op) == SIGN_EXTEND)
624     {
625       /* There are three possibilities.  If MODE is the same as the
626 	 origmode, we can omit both the extension and the subreg.
627 	 If MODE is not larger than the origmode, we can apply the
628 	 truncation without the extension.  Finally, if the outermode
629 	 is larger than the origmode, we can just extend to the appropriate
630 	 mode.  */
631       machine_mode origmode = GET_MODE (XEXP (op, 0));
632       if (mode == origmode)
633 	return XEXP (op, 0);
634       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
635 	return simplify_gen_unary (TRUNCATE, mode,
636 				   XEXP (op, 0), origmode);
637       else
638 	return simplify_gen_unary (GET_CODE (op), mode,
639 				   XEXP (op, 0), origmode);
640     }
641 
642   /* If the machine can perform operations in the truncated mode, distribute
643      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
644      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
647       && (GET_CODE (op) == PLUS
648 	  || GET_CODE (op) == MINUS
649 	  || GET_CODE (op) == MULT))
650     {
651       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
652       if (op0)
653 	{
654 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
655 	  if (op1)
656 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
657 	}
658     }
659 
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
663   if ((GET_CODE (op) == LSHIFTRT
664        || GET_CODE (op) == ASHIFTRT)
665       /* Ensure that OP_MODE is at least twice as wide as MODE
666 	 to avoid the possibility that an outer LSHIFTRT shifts by more
667 	 than the sign extension's sign_bit_copies and introduces zeros
668 	 into the high bits of the result.  */
669       && 2 * precision <= op_precision
670       && CONST_INT_P (XEXP (op, 1))
671       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
672       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
673       && UINTVAL (XEXP (op, 1)) < precision)
674     return simplify_gen_binary (ASHIFTRT, mode,
675 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
676 
  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
680   if ((GET_CODE (op) == LSHIFTRT
681        || GET_CODE (op) == ASHIFTRT)
682       && CONST_INT_P (XEXP (op, 1))
683       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
684       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
685       && UINTVAL (XEXP (op, 1)) < precision)
686     return simplify_gen_binary (LSHIFTRT, mode,
687 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
688 
  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
692   if (GET_CODE (op) == ASHIFT
693       && CONST_INT_P (XEXP (op, 1))
694       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
695 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
696       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
697       && UINTVAL (XEXP (op, 1)) < precision)
698     return simplify_gen_binary (ASHIFT, mode,
699 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
700 
701   /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
702      (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
703      and C2.  */
704   if (GET_CODE (op) == AND
705       && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
706 	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
707       && CONST_INT_P (XEXP (XEXP (op, 0), 1))
708       && CONST_INT_P (XEXP (op, 1)))
709     {
710       rtx op0 = (XEXP (XEXP (op, 0), 0));
711       rtx shift_op = XEXP (XEXP (op, 0), 1);
712       rtx mask_op = XEXP (op, 1);
713       unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
714       unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
715 
716       if (shift < precision
717 	  /* If doing this transform works for an X with all bits set,
718 	     it works for any X.  */
719 	  && ((GET_MODE_MASK (mode) >> shift) & mask)
720 	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
721 	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
722 	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
723 	{
724 	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
725 	  return simplify_gen_binary (AND, mode, op0, mask_op);
726 	}
727     }
728 
729   /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
730      (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
731      changing len.  */
732   if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
733       && REG_P (XEXP (op, 0))
734       && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
735       && CONST_INT_P (XEXP (op, 1))
736       && CONST_INT_P (XEXP (op, 2)))
737     {
738       rtx op0 = XEXP (op, 0);
739       unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
740       unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
741       if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
742 	{
743 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
744 	  if (op0)
745 	    {
746 	      pos -= op_precision - precision;
747 	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
748 					   XEXP (op, 1), GEN_INT (pos));
749 	    }
750 	}
751       else if (!BITS_BIG_ENDIAN && precision >= len + pos)
752 	{
753 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
754 	  if (op0)
755 	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
756 					 XEXP (op, 1), XEXP (op, 2));
757 	}
758     }
759 
760   /* Recognize a word extraction from a multi-word subreg.  */
761   if ((GET_CODE (op) == LSHIFTRT
762        || GET_CODE (op) == ASHIFTRT)
763       && SCALAR_INT_MODE_P (mode)
764       && SCALAR_INT_MODE_P (op_mode)
765       && precision >= BITS_PER_WORD
766       && 2 * precision <= op_precision
767       && CONST_INT_P (XEXP (op, 1))
768       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
769       && UINTVAL (XEXP (op, 1)) < op_precision)
770     {
771       poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
772       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
773       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
774 				  (WORDS_BIG_ENDIAN
775 				   ? byte - shifted_bytes
776 				   : byte + shifted_bytes));
777     }
778 
779   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
780      and try replacing the TRUNCATE and shift with it.  Don't do this
781      if the MEM has a mode-dependent address.  */
782   if ((GET_CODE (op) == LSHIFTRT
783        || GET_CODE (op) == ASHIFTRT)
784       && is_a <scalar_int_mode> (mode, &int_mode)
785       && is_a <scalar_int_mode> (op_mode, &int_op_mode)
786       && MEM_P (XEXP (op, 0))
787       && CONST_INT_P (XEXP (op, 1))
788       && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
789       && INTVAL (XEXP (op, 1)) > 0
790       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
791       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
792 				     MEM_ADDR_SPACE (XEXP (op, 0)))
793       && ! MEM_VOLATILE_P (XEXP (op, 0))
794       && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
795 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
796     {
797       poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
798       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
799       return adjust_address_nv (XEXP (op, 0), int_mode,
800 				(WORDS_BIG_ENDIAN
801 				 ? byte - shifted_bytes
802 				 : byte + shifted_bytes));
803     }
804 
805   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
806      (OP:SI foo:SI) if OP is NEG or ABS.  */
807   if ((GET_CODE (op) == ABS
808        || GET_CODE (op) == NEG)
809       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
810 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
811       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
812     return simplify_gen_unary (GET_CODE (op), mode,
813 			       XEXP (XEXP (op, 0), 0), mode);
814 
815   /* (truncate:A (subreg:B (truncate:C X) 0)) is
816      (truncate:A X).  */
817   if (GET_CODE (op) == SUBREG
818       && is_a <scalar_int_mode> (mode, &int_mode)
819       && SCALAR_INT_MODE_P (op_mode)
820       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
821       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
822       && subreg_lowpart_p (op))
823     {
824       rtx inner = XEXP (SUBREG_REG (op), 0);
825       if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
826 	return simplify_gen_unary (TRUNCATE, int_mode, inner,
827 				   GET_MODE (inner));
828       else
829 	/* If subreg above is paradoxical and C is narrower
830 	   than A, return (subreg:A (truncate:C X) 0).  */
831 	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
832     }
833 
834   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
835   if (GET_CODE (op) == TRUNCATE)
836     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
837 			       GET_MODE (XEXP (op, 0)));
838 
  /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A,
     is already all ones.  */
841   if (GET_CODE (op) == IOR
842       && SCALAR_INT_MODE_P (mode)
843       && SCALAR_INT_MODE_P (op_mode)
844       && CONST_INT_P (XEXP (op, 1))
845       && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
846     return constm1_rtx;
847 
848   return NULL_RTX;
849 }
850 
851 /* Try to simplify a unary operation CODE whose output mode is to be
852    MODE with input operand OP whose mode was originally OP_MODE.
853    Return zero if no simplification can be made.  */
854 rtx
simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
856 					    rtx op, machine_mode op_mode)
857 {
858   rtx trueop, tem;
859 
860   trueop = avoid_constant_pool_reference (op);
861 
862   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
863   if (tem)
864     return tem;
865 
866   return simplify_unary_operation_1 (code, mode, op);
867 }
868 
869 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
870    to be exact.  */
871 
872 static bool
exact_int_to_float_conversion_p (const_rtx op)
874 {
875   int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
876   machine_mode op0_mode = GET_MODE (XEXP (op, 0));
877   /* Constants shouldn't reach here.  */
878   gcc_assert (op0_mode != VOIDmode);
879   int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
880   int in_bits = in_prec;
881   if (HWI_COMPUTABLE_MODE_P (op0_mode))
882     {
883       unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
884       if (GET_CODE (op) == FLOAT)
885 	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
886       else if (GET_CODE (op) == UNSIGNED_FLOAT)
887 	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
888       else
889 	gcc_unreachable ();
890       in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
891     }
892   return in_bits <= out_bits;
893 }
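
/* For instance, on targets where SFmode is IEEE single precision
   (24-bit significand), (float:SF (reg:SI x)) is known to be exact
   whenever the value of x needs at most 24 bits after discounting known
   trailing zero bits, which is what the wi::ctz adjustment above
   accounts for.  */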
894 
895 /* Perform some simplifications we can do even if the operands
896    aren't constant.  */
897 rtx
simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
899 					      rtx op)
900 {
901   enum rtx_code reversed;
902   rtx temp, elt, base, step;
903   scalar_int_mode inner, int_mode, op_mode, op0_mode;
904 
905   switch (code)
906     {
907     case NOT:
908       /* (not (not X)) == X.  */
909       if (GET_CODE (op) == NOT)
910 	return XEXP (op, 0);
911 
912       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
913 	 comparison is all ones.   */
914       if (COMPARISON_P (op)
915 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
916 	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
917 	return simplify_gen_relational (reversed, mode, VOIDmode,
918 					XEXP (op, 0), XEXP (op, 1));
919 
920       /* (not (plus X -1)) can become (neg X).  */
921       if (GET_CODE (op) == PLUS
922 	  && XEXP (op, 1) == constm1_rtx)
923 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
924 
925       /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
926 	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
927 	 and MODE_VECTOR_INT.  */
928       if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
929 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
930 				    CONSTM1_RTX (mode));
931 
932       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
933       if (GET_CODE (op) == XOR
934 	  && CONST_INT_P (XEXP (op, 1))
935 	  && (temp = simplify_unary_operation (NOT, mode,
936 					       XEXP (op, 1), mode)) != 0)
937 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
938 
939       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
940       if (GET_CODE (op) == PLUS
941 	  && CONST_INT_P (XEXP (op, 1))
942 	  && mode_signbit_p (mode, XEXP (op, 1))
943 	  && (temp = simplify_unary_operation (NOT, mode,
944 					       XEXP (op, 1), mode)) != 0)
945 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
946 
947 
948       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
949 	 operands other than 1, but that is not valid.  We could do a
950 	 similar simplification for (not (lshiftrt C X)) where C is
951 	 just the sign bit, but this doesn't seem common enough to
952 	 bother with.  */
953       if (GET_CODE (op) == ASHIFT
954 	  && XEXP (op, 0) == const1_rtx)
955 	{
956 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
957 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
958 	}
959 
960       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
961 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
962 	 so we can perform the above simplification.  */
963       if (STORE_FLAG_VALUE == -1
964 	  && is_a <scalar_int_mode> (mode, &int_mode)
965 	  && GET_CODE (op) == ASHIFTRT
966 	  && CONST_INT_P (XEXP (op, 1))
967 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
968 	return simplify_gen_relational (GE, int_mode, VOIDmode,
969 					XEXP (op, 0), const0_rtx);
970 
971 
972       if (partial_subreg_p (op)
973 	  && subreg_lowpart_p (op)
974 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
975 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
976 	{
977 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
978 	  rtx x;
979 
980 	  x = gen_rtx_ROTATE (inner_mode,
981 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
982 						  inner_mode),
983 			      XEXP (SUBREG_REG (op), 1));
984 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
985 	  if (temp)
986 	    return temp;
987 	}
988 
989       /* Apply De Morgan's laws to reduce number of patterns for machines
990 	 with negating logical insns (and-not, nand, etc.).  If result has
991 	 only one NOT, put it first, since that is how the patterns are
992 	 coded.  */
993       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
994 	{
995 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
996 	  machine_mode op_mode;
997 
998 	  op_mode = GET_MODE (in1);
999 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1000 
1001 	  op_mode = GET_MODE (in2);
1002 	  if (op_mode == VOIDmode)
1003 	    op_mode = mode;
1004 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1005 
1006 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1007 	    std::swap (in1, in2);
1008 
1009 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1010 				 mode, in1, in2);
1011 	}
1012 
1013       /* (not (bswap x)) -> (bswap (not x)).  */
1014       if (GET_CODE (op) == BSWAP)
1015 	{
1016 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1017 	  return simplify_gen_unary (BSWAP, mode, x, mode);
1018 	}
1019       break;
1020 
1021     case NEG:
1022       /* (neg (neg X)) == X.  */
1023       if (GET_CODE (op) == NEG)
1024 	return XEXP (op, 0);
1025 
1026       /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1027 	 If comparison is not reversible use
1028 	 x ? y : (neg y).  */
1029       if (GET_CODE (op) == IF_THEN_ELSE)
1030 	{
1031 	  rtx cond = XEXP (op, 0);
1032 	  rtx true_rtx = XEXP (op, 1);
1033 	  rtx false_rtx = XEXP (op, 2);
1034 
1035 	  if ((GET_CODE (true_rtx) == NEG
1036 	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1037 	       || (GET_CODE (false_rtx) == NEG
1038 		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1039 	    {
1040 	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1041 		temp = reversed_comparison (cond, mode);
1042 	      else
1043 		{
1044 		  temp = cond;
1045 		  std::swap (true_rtx, false_rtx);
1046 		}
1047 	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
1048 					    mode, temp, true_rtx, false_rtx);
1049 	    }
1050 	}
1051 
1052       /* (neg (plus X 1)) can become (not X).  */
1053       if (GET_CODE (op) == PLUS
1054 	  && XEXP (op, 1) == const1_rtx)
1055 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1056 
1057       /* Similarly, (neg (not X)) is (plus X 1).  */
1058       if (GET_CODE (op) == NOT)
1059 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1060 				    CONST1_RTX (mode));
1061 
1062       /* (neg (minus X Y)) can become (minus Y X).  This transformation
1063 	 isn't safe for modes with signed zeros, since if X and Y are
1064 	 both +0, (minus Y X) is the same as (minus X Y).  If the
1065 	 rounding mode is towards +infinity (or -infinity) then the two
1066 	 expressions will be rounded differently.  */
1067       if (GET_CODE (op) == MINUS
1068 	  && !HONOR_SIGNED_ZEROS (mode)
1069 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1070 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1071 
1072       if (GET_CODE (op) == PLUS
1073 	  && !HONOR_SIGNED_ZEROS (mode)
1074 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1075 	{
1076 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
1077 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
1078 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1079 	    {
1080 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1081 	      if (temp)
1082 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1083 	    }
1084 
1085 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
1086 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1087 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1088 	}
1089 
1090       /* (neg (mult A B)) becomes (mult A (neg B)).
1091 	 This works even for floating-point values.  */
1092       if (GET_CODE (op) == MULT
1093 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1094 	{
1095 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1096 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1097 	}
1098 
1099       /* NEG commutes with ASHIFT since it is multiplication.  Only do
1100 	 this if we can then eliminate the NEG (e.g., if the operand
1101 	 is a constant).  */
1102       if (GET_CODE (op) == ASHIFT)
1103 	{
1104 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1105 	  if (temp)
1106 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1107 	}
1108 
1109       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1110 	 C is equal to the width of MODE minus 1.  */
1111       if (GET_CODE (op) == ASHIFTRT
1112 	  && CONST_INT_P (XEXP (op, 1))
1113 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1114 	return simplify_gen_binary (LSHIFTRT, mode,
1115 				    XEXP (op, 0), XEXP (op, 1));
1116 
1117       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1118 	 C is equal to the width of MODE minus 1.  */
1119       if (GET_CODE (op) == LSHIFTRT
1120 	  && CONST_INT_P (XEXP (op, 1))
1121 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1122 	return simplify_gen_binary (ASHIFTRT, mode,
1123 				    XEXP (op, 0), XEXP (op, 1));
1124 
1125       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1126       if (GET_CODE (op) == XOR
1127 	  && XEXP (op, 1) == const1_rtx
1128 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1129 	return plus_constant (mode, XEXP (op, 0), -1);
1130 
1131       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1132       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1133       if (GET_CODE (op) == LT
1134 	  && XEXP (op, 1) == const0_rtx
1135 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1136 	{
1137 	  int_mode = as_a <scalar_int_mode> (mode);
1138 	  int isize = GET_MODE_PRECISION (inner);
1139 	  if (STORE_FLAG_VALUE == 1)
1140 	    {
1141 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1142 					  gen_int_shift_amount (inner,
1143 								isize - 1));
1144 	      if (int_mode == inner)
1145 		return temp;
1146 	      if (GET_MODE_PRECISION (int_mode) > isize)
1147 		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1148 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1149 	    }
1150 	  else if (STORE_FLAG_VALUE == -1)
1151 	    {
1152 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1153 					  gen_int_shift_amount (inner,
1154 								isize - 1));
1155 	      if (int_mode == inner)
1156 		return temp;
1157 	      if (GET_MODE_PRECISION (int_mode) > isize)
1158 		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1159 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1160 	    }
1161 	}
1162 
1163       if (vec_series_p (op, &base, &step))
1164 	{
1165 	  /* Only create a new series if we can simplify both parts.  In other
1166 	     cases this isn't really a simplification, and it's not necessarily
1167 	     a win to replace a vector operation with a scalar operation.  */
1168 	  scalar_mode inner_mode = GET_MODE_INNER (mode);
1169 	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1170 	  if (base)
1171 	    {
1172 	      step = simplify_unary_operation (NEG, inner_mode,
1173 					       step, inner_mode);
1174 	      if (step)
1175 		return gen_vec_series (mode, base, step);
1176 	    }
1177 	}
1178       break;
1179 
1180     case TRUNCATE:
1181       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1182 	 with the umulXi3_highpart patterns.  */
1183       if (GET_CODE (op) == LSHIFTRT
1184 	  && GET_CODE (XEXP (op, 0)) == MULT)
1185 	break;
1186 
1187       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1188 	{
1189 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1190 	    {
1191 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1192 	      if (temp)
1193 		return temp;
1194 	    }
1195 	  /* We can't handle truncation to a partial integer mode here
1196 	     because we don't know the real bitsize of the partial
1197 	     integer mode.  */
1198 	  break;
1199 	}
1200 
1201       if (GET_MODE (op) != VOIDmode)
1202 	{
1203 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1204 	  if (temp)
1205 	    return temp;
1206 	}
1207 
1208       /* If we know that the value is already truncated, we can
1209 	 replace the TRUNCATE with a SUBREG.  */
1210       if (known_eq (GET_MODE_NUNITS (mode), 1)
1211 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1212 	      || truncated_to_mode (mode, op)))
1213 	{
1214 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1215 	  if (temp)
1216 	    return temp;
1217 	}
1218 
1219       /* A truncate of a comparison can be replaced with a subreg if
1220          STORE_FLAG_VALUE permits.  This is like the previous test,
1221          but it works even if the comparison is done in a mode larger
1222          than HOST_BITS_PER_WIDE_INT.  */
1223       if (HWI_COMPUTABLE_MODE_P (mode)
1224 	  && COMPARISON_P (op)
1225 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1226 	{
1227 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1228 	  if (temp)
1229 	    return temp;
1230 	}
1231 
1232       /* A truncate of a memory is just loading the low part of the memory
1233 	 if we are not changing the meaning of the address. */
1234       if (GET_CODE (op) == MEM
1235 	  && !VECTOR_MODE_P (mode)
1236 	  && !MEM_VOLATILE_P (op)
1237 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1238 	{
1239 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1240 	  if (temp)
1241 	    return temp;
1242 	}
1243 
1244       break;
1245 
1246     case FLOAT_TRUNCATE:
1247       if (DECIMAL_FLOAT_MODE_P (mode))
1248 	break;
1249 
1250       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1251       if (GET_CODE (op) == FLOAT_EXTEND
1252 	  && GET_MODE (XEXP (op, 0)) == mode)
1253 	return XEXP (op, 0);
1254 
1255       /* (float_truncate:SF (float_truncate:DF foo:XF))
1256          = (float_truncate:SF foo:XF).
1257 	 This may eliminate double rounding, so it is unsafe.
1258 
1259          (float_truncate:SF (float_extend:XF foo:DF))
1260          = (float_truncate:SF foo:DF).
1261 
1262          (float_truncate:DF (float_extend:XF foo:SF))
1263          = (float_extend:DF foo:SF).  */
1264       if ((GET_CODE (op) == FLOAT_TRUNCATE
1265 	   && flag_unsafe_math_optimizations)
1266 	  || GET_CODE (op) == FLOAT_EXTEND)
1267 	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1268 	  			   > GET_MODE_UNIT_SIZE (mode)
1269 	  			   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1270 				   mode,
1271 				   XEXP (op, 0), mode);
1272 
1273       /*  (float_truncate (float x)) is (float x)  */
1274       if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1275 	  && (flag_unsafe_math_optimizations
1276 	      || exact_int_to_float_conversion_p (op)))
1277 	return simplify_gen_unary (GET_CODE (op), mode,
1278 				   XEXP (op, 0),
1279 				   GET_MODE (XEXP (op, 0)));
1280 
1281       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1282 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1283       if ((GET_CODE (op) == ABS
1284 	   || GET_CODE (op) == NEG)
1285 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1286 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1287 	return simplify_gen_unary (GET_CODE (op), mode,
1288 				   XEXP (XEXP (op, 0), 0), mode);
1289 
1290       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1291 	 is (float_truncate:SF x).  */
1292       if (GET_CODE (op) == SUBREG
1293 	  && subreg_lowpart_p (op)
1294 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1295 	return SUBREG_REG (op);
1296       break;
1297 
1298     case FLOAT_EXTEND:
1299       if (DECIMAL_FLOAT_MODE_P (mode))
1300 	break;
1301 
1302       /*  (float_extend (float_extend x)) is (float_extend x)
1303 
1304 	  (float_extend (float x)) is (float x) assuming that double
1305 	  rounding can't happen.
1306           */
1307       if (GET_CODE (op) == FLOAT_EXTEND
1308 	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1309 	      && exact_int_to_float_conversion_p (op)))
1310 	return simplify_gen_unary (GET_CODE (op), mode,
1311 				   XEXP (op, 0),
1312 				   GET_MODE (XEXP (op, 0)));
1313 
1314       break;
1315 
1316     case ABS:
1317       /* (abs (neg <foo>)) -> (abs <foo>) */
1318       if (GET_CODE (op) == NEG)
1319 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1320 				   GET_MODE (XEXP (op, 0)));
1321 
1322       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1323          do nothing.  */
1324       if (GET_MODE (op) == VOIDmode)
1325 	break;
1326 
1327       /* If operand is something known to be positive, ignore the ABS.  */
1328       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1329 	  || val_signbit_known_clear_p (GET_MODE (op),
1330 					nonzero_bits (op, GET_MODE (op))))
1331 	return op;
1332 
1333       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1334       if (is_a <scalar_int_mode> (mode, &int_mode)
1335 	  && (num_sign_bit_copies (op, int_mode)
1336 	      == GET_MODE_PRECISION (int_mode)))
1337 	return gen_rtx_NEG (int_mode, op);
1338 
1339       break;
1340 
1341     case FFS:
1342       /* (ffs (*_extend <X>)) = (ffs <X>) */
1343       if (GET_CODE (op) == SIGN_EXTEND
1344 	  || GET_CODE (op) == ZERO_EXTEND)
1345 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1346 				   GET_MODE (XEXP (op, 0)));
1347       break;
1348 
1349     case POPCOUNT:
1350       switch (GET_CODE (op))
1351 	{
1352 	case BSWAP:
1353 	case ZERO_EXTEND:
1354 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1355 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1356 				     GET_MODE (XEXP (op, 0)));
1357 
1358 	case ROTATE:
1359 	case ROTATERT:
1360 	  /* Rotations don't affect popcount.  */
1361 	  if (!side_effects_p (XEXP (op, 1)))
1362 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1363 				       GET_MODE (XEXP (op, 0)));
1364 	  break;
1365 
1366 	default:
1367 	  break;
1368 	}
1369       break;
1370 
1371     case PARITY:
1372       switch (GET_CODE (op))
1373 	{
1374 	case NOT:
1375 	case BSWAP:
1376 	case ZERO_EXTEND:
1377 	case SIGN_EXTEND:
1378 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1379 				     GET_MODE (XEXP (op, 0)));
1380 
1381 	case ROTATE:
1382 	case ROTATERT:
1383 	  /* Rotations don't affect parity.  */
1384 	  if (!side_effects_p (XEXP (op, 1)))
1385 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1386 				       GET_MODE (XEXP (op, 0)));
1387 	  break;
1388 
1389 	case PARITY:
1390 	  /* (parity (parity x)) -> parity (x).  */
1391 	  return op;
1392 
1393 	default:
1394 	  break;
1395 	}
1396       break;
1397 
1398     case BSWAP:
1399       /* (bswap (bswap x)) -> x.  */
1400       if (GET_CODE (op) == BSWAP)
1401 	return XEXP (op, 0);
1402       break;
1403 
1404     case FLOAT:
1405       /* (float (sign_extend <X>)) = (float <X>).  */
1406       if (GET_CODE (op) == SIGN_EXTEND)
1407 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1408 				   GET_MODE (XEXP (op, 0)));
1409       break;
1410 
1411     case SIGN_EXTEND:
1412       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1413 	 becomes just the MINUS if its mode is MODE.  This allows
1414 	 folding switch statements on machines using casesi (such as
1415 	 the VAX).  */
1416       if (GET_CODE (op) == TRUNCATE
1417 	  && GET_MODE (XEXP (op, 0)) == mode
1418 	  && GET_CODE (XEXP (op, 0)) == MINUS
1419 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1420 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1421 	return XEXP (op, 0);
1422 
1423       /* Extending a widening multiplication should be canonicalized to
1424 	 a wider widening multiplication.  */
1425       if (GET_CODE (op) == MULT)
1426 	{
1427 	  rtx lhs = XEXP (op, 0);
1428 	  rtx rhs = XEXP (op, 1);
1429 	  enum rtx_code lcode = GET_CODE (lhs);
1430 	  enum rtx_code rcode = GET_CODE (rhs);
1431 
1432 	  /* Widening multiplies usually extend both operands, but sometimes
1433 	     they use a shift to extract a portion of a register.  */
1434 	  if ((lcode == SIGN_EXTEND
1435 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1436 	      && (rcode == SIGN_EXTEND
1437 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1438 	    {
1439 	      machine_mode lmode = GET_MODE (lhs);
1440 	      machine_mode rmode = GET_MODE (rhs);
1441 	      int bits;
1442 
1443 	      if (lcode == ASHIFTRT)
1444 		/* Number of bits not shifted off the end.  */
1445 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1446 			- INTVAL (XEXP (lhs, 1)));
1447 	      else /* lcode == SIGN_EXTEND */
1448 		/* Size of inner mode.  */
1449 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1450 
1451 	      if (rcode == ASHIFTRT)
1452 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1453 			 - INTVAL (XEXP (rhs, 1)));
1454 	      else /* rcode == SIGN_EXTEND */
1455 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1456 
	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
1459 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1460 		return simplify_gen_binary
1461 			 (MULT, mode,
1462 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1463 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1464 	    }
1465 	}
1466 
1467       /* Check for a sign extension of a subreg of a promoted
1468 	 variable, where the promotion is sign-extended, and the
1469 	 target mode is the same as the variable's promotion.  */
1470       if (GET_CODE (op) == SUBREG
1471 	  && SUBREG_PROMOTED_VAR_P (op)
1472 	  && SUBREG_PROMOTED_SIGNED_P (op)
1473 	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1474 	{
1475 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1476 	  if (temp)
1477 	    return temp;
1478 	}
1479 
1480       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1481 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1482       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1483 	{
1484 	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1485 		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1486 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1487 				     GET_MODE (XEXP (op, 0)));
1488 	}
1489 
1490       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1491 	 is (sign_extend:M (subreg:O <X>)) if there is mode with
1492 	 GET_MODE_BITSIZE (N) - I bits.
1493 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1494 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1495       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1496 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1497 	  && is_a <scalar_int_mode> (mode, &int_mode)
1498 	  && CONST_INT_P (XEXP (op, 1))
1499 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1500 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1501 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1502 	{
1503 	  scalar_int_mode tmode;
1504 	  gcc_assert (GET_MODE_PRECISION (int_mode)
1505 		      > GET_MODE_PRECISION (op_mode));
1506 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1507 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1508 	    {
1509 	      rtx inner =
1510 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1511 	      if (inner)
1512 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1513 					   ? SIGN_EXTEND : ZERO_EXTEND,
1514 					   int_mode, inner, tmode);
1515 	    }
1516 	}
1517 
1518       /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1519          (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
1520       if (GET_CODE (op) == LSHIFTRT
1521 	  && CONST_INT_P (XEXP (op, 1))
1522 	  && XEXP (op, 1) != const0_rtx)
1523 	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1524 
1525       /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1526 	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1527 	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1528 	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1529 	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1530 	 wider than O.  */
1531       if (GET_CODE (op) == TRUNCATE
1532 	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1533 	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1534 	{
1535 	  scalar_int_mode m_mode, n_mode, o_mode;
1536 	  rtx old_shift = XEXP (op, 0);
1537 	  if (is_a <scalar_int_mode> (mode, &m_mode)
1538 	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1539 	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1540 	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1541 		 == INTVAL (XEXP (old_shift, 1)))
1542 	    {
1543 	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
1544 						   GET_MODE (old_shift),
1545 						   XEXP (old_shift, 0),
1546 						   XEXP (old_shift, 1));
1547 	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1548 		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1549 					   GET_MODE (new_shift));
1550 	      if (mode != GET_MODE (new_shift))
1551 		return simplify_gen_unary (TRUNCATE, mode, new_shift,
1552 					   GET_MODE (new_shift));
1553 	      return new_shift;
1554 	    }
1555 	}
1556 
1557 #if defined(POINTERS_EXTEND_UNSIGNED)
1558       /* As we do not know which address space the pointer is referring to,
1559 	 we can do this only if the target does not support different pointer
1560 	 or address modes depending on the address space.  */
1561       if (target_default_pointer_address_modes_p ()
1562 	  && ! POINTERS_EXTEND_UNSIGNED
1563 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1564 	  && (CONSTANT_P (op)
1565 	      || (GET_CODE (op) == SUBREG
1566 		  && REG_P (SUBREG_REG (op))
1567 		  && REG_POINTER (SUBREG_REG (op))
1568 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1569 	  && !targetm.have_ptr_extend ())
1570 	{
1571 	  temp
1572 	    = convert_memory_address_addr_space_1 (Pmode, op,
1573 						   ADDR_SPACE_GENERIC, false,
1574 						   true);
1575 	  if (temp)
1576 	    return temp;
1577 	}
1578 #endif
1579       break;
1580 
1581     case ZERO_EXTEND:
1582       /* Check for a zero extension of a subreg of a promoted
1583 	 variable, where the promotion is zero-extended, and the
1584 	 target mode is the same as the variable's promotion.  */
1585       if (GET_CODE (op) == SUBREG
1586 	  && SUBREG_PROMOTED_VAR_P (op)
1587 	  && SUBREG_PROMOTED_UNSIGNED_P (op)
1588 	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1589 	{
1590 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1591 	  if (temp)
1592 	    return temp;
1593 	}
1594 
1595       /* Extending a widening multiplication should be canonicalized to
1596 	 a wider widening multiplication.  */
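      /* For example (an illustrative case):
	 (zero_extend:DI (mult:SI (zero_extend:SI (reg:HI a))
				  (zero_extend:SI (reg:HI b))))
	 needs at most 16 + 16 = 32 bits for the product, which fits in
	 SImode, so it can be rewritten as
	 (mult:DI (zero_extend:DI (reg:HI a)) (zero_extend:DI (reg:HI b))).  */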
1597       if (GET_CODE (op) == MULT)
1598 	{
1599 	  rtx lhs = XEXP (op, 0);
1600 	  rtx rhs = XEXP (op, 1);
1601 	  enum rtx_code lcode = GET_CODE (lhs);
1602 	  enum rtx_code rcode = GET_CODE (rhs);
1603 
1604 	  /* Widening multiplies usually extend both operands, but sometimes
1605 	     they use a shift to extract a portion of a register.  */
1606 	  if ((lcode == ZERO_EXTEND
1607 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1608 	      && (rcode == ZERO_EXTEND
1609 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1610 	    {
1611 	      machine_mode lmode = GET_MODE (lhs);
1612 	      machine_mode rmode = GET_MODE (rhs);
1613 	      int bits;
1614 
1615 	      if (lcode == LSHIFTRT)
1616 		/* Number of bits not shifted off the end.  */
1617 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1618 			- INTVAL (XEXP (lhs, 1)));
1619 	      else /* lcode == ZERO_EXTEND */
1620 		/* Size of inner mode.  */
1621 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1622 
1623 	      if (rcode == LSHIFTRT)
1624 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1625 			 - INTVAL (XEXP (rhs, 1)));
1626 	      else /* rcode == ZERO_EXTEND */
1627 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1628 
1629 	      /* We can only widen multiplies if the result is mathematically
1630 		 equivalent, i.e. if overflow was impossible.  */
1631 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1632 		return simplify_gen_binary
1633 			 (MULT, mode,
1634 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1635 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1636 	    }
1637 	}
1638 
1639       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1640       if (GET_CODE (op) == ZERO_EXTEND)
1641 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1642 				   GET_MODE (XEXP (op, 0)));
1643 
1644       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1645 	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1646 	 GET_MODE_PRECISION (N) - I bits.  */
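      /* E.g. with N = HImode and I = 8 (an illustrative case),
	 (zero_extend:SI (lshiftrt:HI (ashift:HI <X> (const_int 8))
				      (const_int 8)))
	 becomes a zero extension of the low QImode part of <X>.  */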
1647       if (GET_CODE (op) == LSHIFTRT
1648 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1649 	  && is_a <scalar_int_mode> (mode, &int_mode)
1650 	  && CONST_INT_P (XEXP (op, 1))
1651 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1652 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1653 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1654 	{
1655 	  scalar_int_mode tmode;
1656 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1657 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1658 	    {
1659 	      rtx inner =
1660 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1661 	      if (inner)
1662 		return simplify_gen_unary (ZERO_EXTEND, int_mode,
1663 					   inner, tmode);
1664 	    }
1665 	}
1666 
1667       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1668 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1669 	 of mode N.  E.g.
1670 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1671 	 (and:SI (reg:SI) (const_int 63)).  */
1672       if (partial_subreg_p (op)
1673 	  && is_a <scalar_int_mode> (mode, &int_mode)
1674 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1675 	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1676 	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1677 	  && subreg_lowpart_p (op)
1678 	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
1679 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1680 	{
1681 	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1682 	    return SUBREG_REG (op);
1683 	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1684 				     op0_mode);
1685 	}
1686 
1687 #if defined(POINTERS_EXTEND_UNSIGNED)
1688       /* As we do not know which address space the pointer is referring to,
1689 	 we can do this only if the target does not support different pointer
1690 	 or address modes depending on the address space.  */
1691       if (target_default_pointer_address_modes_p ()
1692 	  && POINTERS_EXTEND_UNSIGNED > 0
1693 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1694 	  && (CONSTANT_P (op)
1695 	      || (GET_CODE (op) == SUBREG
1696 		  && REG_P (SUBREG_REG (op))
1697 		  && REG_POINTER (SUBREG_REG (op))
1698 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1699 	  && !targetm.have_ptr_extend ())
1700 	{
1701 	  temp
1702 	    = convert_memory_address_addr_space_1 (Pmode, op,
1703 						   ADDR_SPACE_GENERIC, false,
1704 						   true);
1705 	  if (temp)
1706 	    return temp;
1707 	}
1708 #endif
1709       break;
1710 
1711     default:
1712       break;
1713     }
1714 
1715   if (VECTOR_MODE_P (mode)
1716       && vec_duplicate_p (op, &elt)
1717       && code != VEC_DUPLICATE)
1718     {
1719       /* Try applying the operator to ELT and see if that simplifies.
1720 	 We can duplicate the result if so.
1721 
1722 	 The reason we don't use simplify_gen_unary is that it isn't
1723 	 necessarily a win to convert things like:
1724 
1725 	   (neg:V (vec_duplicate:V (reg:S R)))
1726 
1727 	 to:
1728 
1729 	   (vec_duplicate:V (neg:S (reg:S R)))
1730 
1731 	 The first might be done entirely in vector registers while the
1732 	 second might need a move between register files.  */
1733       temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1734 				       elt, GET_MODE_INNER (GET_MODE (op)));
1735       if (temp)
1736 	return gen_vec_duplicate (mode, temp);
1737     }
1738 
1739   return 0;
1740 }
1741 
1742 /* Try to compute the value of a unary operation CODE whose output mode is
1743    to be MODE, with input operand OP whose mode was originally OP_MODE.
1744    Return zero if the value cannot be computed.  */
1745 rtx
1746 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1747 				rtx op, machine_mode op_mode)
1748 {
1749   scalar_int_mode result_mode;
1750 
1751   if (code == VEC_DUPLICATE)
1752     {
1753       gcc_assert (VECTOR_MODE_P (mode));
1754       if (GET_MODE (op) != VOIDmode)
1755       {
1756 	if (!VECTOR_MODE_P (GET_MODE (op)))
1757 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1758 	else
1759 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1760 						(GET_MODE (op)));
1761       }
1762       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1763 	return gen_const_vec_duplicate (mode, op);
1764       if (GET_CODE (op) == CONST_VECTOR
1765 	  && (CONST_VECTOR_DUPLICATE_P (op)
1766 	      || CONST_VECTOR_NUNITS (op).is_constant ()))
1767 	{
1768 	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1769 				    ? CONST_VECTOR_NPATTERNS (op)
1770 				    : CONST_VECTOR_NUNITS (op).to_constant ());
1771 	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1772 	  rtx_vector_builder builder (mode, npatterns, 1);
1773 	  for (unsigned i = 0; i < npatterns; i++)
1774 	    builder.quick_push (CONST_VECTOR_ELT (op, i));
1775 	  return builder.build ();
1776 	}
1777     }
1778 
1779   if (VECTOR_MODE_P (mode)
1780       && GET_CODE (op) == CONST_VECTOR
1781       && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1782     {
1783       gcc_assert (GET_MODE (op) == op_mode);
1784 
1785       rtx_vector_builder builder;
1786       if (!builder.new_unary_operation (mode, op, false))
1787 	return 0;
1788 
1789       unsigned int count = builder.encoded_nelts ();
1790       for (unsigned int i = 0; i < count; i++)
1791 	{
1792 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1793 					    CONST_VECTOR_ELT (op, i),
1794 					    GET_MODE_INNER (op_mode));
1795 	  if (!x || !valid_for_const_vector_p (mode, x))
1796 	    return 0;
1797 	  builder.quick_push (x);
1798 	}
1799       return builder.build ();
1800     }
1801 
1802   /* The order of these tests is critical so that, for example, we don't
1803      check the wrong mode (input vs. output) for a conversion operation,
1804      such as FIX.  At some point, this should be simplified.  */
1805 
1806   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1807     {
1808       REAL_VALUE_TYPE d;
1809 
1810       if (op_mode == VOIDmode)
1811 	{
1812 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1813 	     the bits of the constant are significant, though this is
1814 	     a dangerous assumption: CONST_INTs are often created and
1815 	     used with garbage in the bits outside of the precision of
1816 	     the implied mode of the const_int.  */
1817 	  op_mode = MAX_MODE_INT;
1818 	}
1819 
1820       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1821 
1822       /* Avoid the folding if flag_signaling_nans is on and
1823          the operand is a signaling NaN.  */
1824       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1825         return 0;
1826 
1827       d = real_value_truncate (mode, d);
1828       return const_double_from_real_value (d, mode);
1829     }
1830   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1831     {
1832       REAL_VALUE_TYPE d;
1833 
1834       if (op_mode == VOIDmode)
1835 	{
1836 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1837 	     the bits of the constant are significant, though this is
1838 	     a dangerous assumption: CONST_INTs are often created and
1839 	     used with garbage in the bits outside of the precision of
1840 	     the implied mode of the const_int.  */
1841 	  op_mode = MAX_MODE_INT;
1842 	}
1843 
1844       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1845 
1846       /* Avoid the folding if flag_signaling_nans is on and
1847          the operand is a signaling NaN.  */
1848       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1849         return 0;
1850 
1851       d = real_value_truncate (mode, d);
1852       return const_double_from_real_value (d, mode);
1853     }
1854 
1855   if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1856     {
1857       unsigned int width = GET_MODE_PRECISION (result_mode);
1858       if (width > MAX_BITSIZE_MODE_ANY_INT)
1859 	return 0;
1860 
1861       wide_int result;
1862       scalar_int_mode imode = (op_mode == VOIDmode
1863 			       ? result_mode
1864 			       : as_a <scalar_int_mode> (op_mode));
1865       rtx_mode_t op0 = rtx_mode_t (op, imode);
1866       int int_value;
1867 
1868 #if TARGET_SUPPORTS_WIDE_INT == 0
1869       /* This assert keeps the simplification from producing a result
1870 	 that cannot be represented in a CONST_DOUBLE, but a lot of
1871 	 upstream callers expect that this function never fails to
1872 	 simplify something, so if you added this to the test above
1873 	 the code would die later anyway.  If this assert fires, you
1874 	 just need to make the port support wide int.  */
1875       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1876 #endif
1877 
1878       switch (code)
1879 	{
1880 	case NOT:
1881 	  result = wi::bit_not (op0);
1882 	  break;
1883 
1884 	case NEG:
1885 	  result = wi::neg (op0);
1886 	  break;
1887 
1888 	case ABS:
1889 	  result = wi::abs (op0);
1890 	  break;
1891 
1892 	case FFS:
1893 	  result = wi::shwi (wi::ffs (op0), result_mode);
1894 	  break;
1895 
1896 	case CLZ:
1897 	  if (wi::ne_p (op0, 0))
1898 	    int_value = wi::clz (op0);
1899 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1900 	    return NULL_RTX;
1901 	  result = wi::shwi (int_value, result_mode);
1902 	  break;
1903 
1904 	case CLRSB:
1905 	  result = wi::shwi (wi::clrsb (op0), result_mode);
1906 	  break;
1907 
1908 	case CTZ:
1909 	  if (wi::ne_p (op0, 0))
1910 	    int_value = wi::ctz (op0);
1911 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1912 	    return NULL_RTX;
1913 	  result = wi::shwi (int_value, result_mode);
1914 	  break;
1915 
1916 	case POPCOUNT:
1917 	  result = wi::shwi (wi::popcount (op0), result_mode);
1918 	  break;
1919 
1920 	case PARITY:
1921 	  result = wi::shwi (wi::parity (op0), result_mode);
1922 	  break;
1923 
1924 	case BSWAP:
1925 	  result = wide_int (op0).bswap ();
1926 	  break;
1927 
1928 	case TRUNCATE:
1929 	case ZERO_EXTEND:
1930 	  result = wide_int::from (op0, width, UNSIGNED);
1931 	  break;
1932 
1933 	case SIGN_EXTEND:
1934 	  result = wide_int::from (op0, width, SIGNED);
1935 	  break;
1936 
1937 	case SQRT:
1938 	default:
1939 	  return 0;
1940 	}
1941 
1942       return immed_wide_int_const (result, result_mode);
1943     }
1944 
1945   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1946 	   && SCALAR_FLOAT_MODE_P (mode)
1947 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1948     {
1949       REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1950       switch (code)
1951 	{
1952 	case SQRT:
1953 	  return 0;
1954 	case ABS:
1955 	  d = real_value_abs (&d);
1956 	  break;
1957 	case NEG:
1958 	  d = real_value_negate (&d);
1959 	  break;
1960 	case FLOAT_TRUNCATE:
1961 	  /* Don't perform the operation if flag_signaling_nans is on
1962 	     and the operand is a signaling NaN.  */
1963 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1964 	    return NULL_RTX;
1965 	  d = real_value_truncate (mode, d);
1966 	  break;
1967 	case FLOAT_EXTEND:
1968 	  /* Don't perform the operation if flag_signaling_nans is on
1969 	     and the operand is a signaling NaN.  */
1970 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1971 	    return NULL_RTX;
1972 	  /* All this does is change the mode, unless changing
1973 	     mode class.  */
1974 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1975 	    real_convert (&d, mode, &d);
1976 	  break;
1977 	case FIX:
1978 	  /* Don't perform the operation if flag_signaling_nans is on
1979 	     and the operand is a signaling NaN.  */
1980 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1981 	    return NULL_RTX;
1982 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1983 	  break;
1984 	case NOT:
1985 	  {
1986 	    long tmp[4];
1987 	    int i;
1988 
1989 	    real_to_target (tmp, &d, GET_MODE (op));
1990 	    for (i = 0; i < 4; i++)
1991 	      tmp[i] = ~tmp[i];
1992 	    real_from_target (&d, tmp, mode);
1993 	    break;
1994 	  }
1995 	default:
1996 	  gcc_unreachable ();
1997 	}
1998       return const_double_from_real_value (d, mode);
1999     }
2000   else if (CONST_DOUBLE_AS_FLOAT_P (op)
2001 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2002 	   && is_int_mode (mode, &result_mode))
2003     {
2004       unsigned int width = GET_MODE_PRECISION (result_mode);
2005       if (width > MAX_BITSIZE_MODE_ANY_INT)
2006 	return 0;
2007 
2008       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2009 	 operators are intentionally left unspecified (to ease implementation
2010 	 by target backends), for consistency, this routine implements the
2011 	 same semantics for constant folding as used by the middle-end.  */
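      /* Concretely, the folding below saturates: a NaN operand folds to
	 const0_rtx, a value above the largest representable integer folds
	 to that maximum (e.g. fixing a DFmode 1e30 to SImode yields
	 0x7fffffff), and for FIX a value below the minimum folds to the
	 minimum.  */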
2012 
2013       /* This was formerly used only for non-IEEE float.
2014 	 eggert@twinsun.com says it is safe for IEEE also.  */
2015       REAL_VALUE_TYPE t;
2016       const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2017       wide_int wmax, wmin;
2018       /* This is part of the ABI of real_to_integer, but we check
2019 	 things before making this call.  */
2020       bool fail;
2021 
2022       switch (code)
2023 	{
2024 	case FIX:
2025 	  if (REAL_VALUE_ISNAN (*x))
2026 	    return const0_rtx;
2027 
2028 	  /* Test against the signed upper bound.  */
2029 	  wmax = wi::max_value (width, SIGNED);
2030 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
2031 	  if (real_less (&t, x))
2032 	    return immed_wide_int_const (wmax, mode);
2033 
2034 	  /* Test against the signed lower bound.  */
2035 	  wmin = wi::min_value (width, SIGNED);
2036 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
2037 	  if (real_less (x, &t))
2038 	    return immed_wide_int_const (wmin, mode);
2039 
2040 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2041 				       mode);
2042 
2043 	case UNSIGNED_FIX:
2044 	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2045 	    return const0_rtx;
2046 
2047 	  /* Test against the unsigned upper bound.  */
2048 	  wmax = wi::max_value (width, UNSIGNED);
2049 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2050 	  if (real_less (&t, x))
2051 	    return immed_wide_int_const (wmax, mode);
2052 
2053 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2054 				       mode);
2055 
2056 	default:
2057 	  gcc_unreachable ();
2058 	}
2059     }
2060 
2061   /* Handle polynomial integers.  */
2062   else if (CONST_POLY_INT_P (op))
2063     {
2064       poly_wide_int result;
2065       switch (code)
2066 	{
2067 	case NEG:
2068 	  result = -const_poly_int_value (op);
2069 	  break;
2070 
2071 	case NOT:
2072 	  result = ~const_poly_int_value (op);
2073 	  break;
2074 
2075 	default:
2076 	  return NULL_RTX;
2077 	}
2078       return immed_wide_int_const (result, mode);
2079     }
2080 
2081   return NULL_RTX;
2082 }
2083 
2084 /* Subroutine of simplify_binary_operation to simplify a binary operation
2085    CODE that can commute with byte swapping, with result mode MODE and
2086    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
2087    Return zero if no simplification or canonicalization is possible.  */
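/* For instance (illustrative, in SImode): (and (bswap:SI x) (const_int 0xff))
   can be rewritten as (bswap:SI (and:SI x (const_int 0xff000000))), i.e. the
   constant is byte-swapped along with the operation.  */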
2088 
2089 rtx
2090 simplify_context::simplify_byte_swapping_operation (rtx_code code,
2091 						    machine_mode mode,
2092 						    rtx op0, rtx op1)
2093 {
2094   rtx tem;
2095 
2096   /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
2097   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2098     {
2099       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2100 				 simplify_gen_unary (BSWAP, mode, op1, mode));
2101       return simplify_gen_unary (BSWAP, mode, tem, mode);
2102     }
2103 
2104   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
2105   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2106     {
2107       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2108       return simplify_gen_unary (BSWAP, mode, tem, mode);
2109     }
2110 
2111   return NULL_RTX;
2112 }
2113 
2114 /* Subroutine of simplify_binary_operation to simplify a commutative,
2115    associative binary operation CODE with result mode MODE, operating
2116    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2117    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2118    canonicalization is possible.  */
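/* For example (illustrative): (ior (ior x (const_int 1)) (const_int 2))
   is reassociated so that the constants combine, giving
   (ior x (const_int 3)).  */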
2119 
2120 rtx
2121 simplify_context::simplify_associative_operation (rtx_code code,
2122 						  machine_mode mode,
2123 						  rtx op0, rtx op1)
2124 {
2125   rtx tem;
2126 
2127   /* Linearize the operator to the left.  */
2128   if (GET_CODE (op1) == code)
2129     {
2130       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
2131       if (GET_CODE (op0) == code)
2132 	{
2133 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2134 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2135 	}
2136 
2137       /* "a op (b op c)" becomes "(b op c) op a".  */
2138       if (! swap_commutative_operands_p (op1, op0))
2139 	return simplify_gen_binary (code, mode, op1, op0);
2140 
2141       std::swap (op0, op1);
2142     }
2143 
2144   if (GET_CODE (op0) == code)
2145     {
2146       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2147       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2148 	{
2149 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2150 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2151 	}
2152 
2153       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2154       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2155       if (tem != 0)
2156         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2157 
2158       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2159       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2160       if (tem != 0)
2161         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2162     }
2163 
2164   return 0;
2165 }
2166 
2167 /* Return a mask describing the COMPARISON.  */
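/* The mask has one bit per primitive outcome of a floating-point style
   comparison: bit 3 for LT, bit 2 for GT, bit 1 for EQ and bit 0 for
   UNORDERED.  Compound codes are the union of their outcomes, e.g.
   LE = LT | EQ = 10 and NE = LT | GT | UNORDERED = 13.  */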
2168 static int
2169 comparison_to_mask (enum rtx_code comparison)
2170 {
2171   switch (comparison)
2172     {
2173     case LT:
2174       return 8;
2175     case GT:
2176       return 4;
2177     case EQ:
2178       return 2;
2179     case UNORDERED:
2180       return 1;
2181 
2182     case LTGT:
2183       return 12;
2184     case LE:
2185       return 10;
2186     case GE:
2187       return 6;
2188     case UNLT:
2189       return 9;
2190     case UNGT:
2191       return 5;
2192     case UNEQ:
2193       return 3;
2194 
2195     case ORDERED:
2196       return 14;
2197     case NE:
2198       return 13;
2199     case UNLE:
2200       return 11;
2201     case UNGE:
2202       return 7;
2203 
2204     default:
2205       gcc_unreachable ();
2206     }
2207 }
2208 
2209 /* Return a comparison corresponding to the MASK.  */
2210 static enum rtx_code
2211 mask_to_comparison (int mask)
2212 {
2213   switch (mask)
2214     {
2215     case 8:
2216       return LT;
2217     case 4:
2218       return GT;
2219     case 2:
2220       return EQ;
2221     case 1:
2222       return UNORDERED;
2223 
2224     case 12:
2225       return LTGT;
2226     case 10:
2227       return LE;
2228     case 6:
2229       return GE;
2230     case 9:
2231       return UNLT;
2232     case 5:
2233       return UNGT;
2234     case 3:
2235       return UNEQ;
2236 
2237     case 14:
2238       return ORDERED;
2239     case 13:
2240       return NE;
2241     case 11:
2242       return UNLE;
2243     case 7:
2244       return UNGE;
2245 
2246     default:
2247       gcc_unreachable ();
2248     }
2249 }
2250 
2251 /* Return true if CODE is valid for comparisons of mode MODE, false
2252    otherwise.
2253 
2254    It is always safe to return false, even if the code was valid for the
2255    given mode, as that will merely suppress optimizations.  */
2256 
2257 static bool
2258 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2259 {
2260   switch (code)
2261     {
2262       /* These are valid for integral, floating and vector modes.  */
2263       case NE:
2264       case EQ:
2265       case GE:
2266       case GT:
2267       case LE:
2268       case LT:
2269 	return (INTEGRAL_MODE_P (mode)
2270 		|| FLOAT_MODE_P (mode)
2271 		|| VECTOR_MODE_P (mode));
2272 
2273       /* These are valid for floating point modes.  */
2274       case LTGT:
2275       case UNORDERED:
2276       case ORDERED:
2277       case UNEQ:
2278       case UNGE:
2279       case UNGT:
2280       case UNLE:
2281       case UNLT:
2282 	return FLOAT_MODE_P (mode);
2283 
2284       /* These are filtered out in simplify_logical_operation, but
2285 	 we check for them too as a matter of safety.  They are valid
2286 	 for integral and vector modes.  */
2287       case GEU:
2288       case GTU:
2289       case LEU:
2290       case LTU:
2291 	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2292 
2293       default:
2294 	gcc_unreachable ();
2295     }
2296 }
2297 
2298 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx, to the right
2299    false/true value of a comparison with result mode MODE whose
2300    operands have mode CMP_MODE.  */
2301 
2302 static rtx
2303 relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2304 {
2305   if (SCALAR_FLOAT_MODE_P (mode))
2306     {
2307       if (res == const0_rtx)
2308         return CONST0_RTX (mode);
2309 #ifdef FLOAT_STORE_FLAG_VALUE
2310       REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2311       return const_double_from_real_value (val, mode);
2312 #else
2313       return NULL_RTX;
2314 #endif
2315     }
2316   if (VECTOR_MODE_P (mode))
2317     {
2318       if (res == const0_rtx)
2319 	return CONST0_RTX (mode);
2320 #ifdef VECTOR_STORE_FLAG_VALUE
2321       rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2322       if (val == NULL_RTX)
2323 	return NULL_RTX;
2324       if (val == const1_rtx)
2325 	return CONST1_RTX (mode);
2326 
2327       return gen_const_vec_duplicate (mode, val);
2328 #else
2329       return NULL_RTX;
2330 #endif
2331     }
2332   /* For a vector comparison with a scalar int result, it is unknown
2333      whether the target means a comparison into an integral bitmask,
2334      a comparison where all element comparisons being true makes the
2335      whole result const_true_rtx, or one where any element comparison
2336      being true does.  For const0_rtx all the cases are the same.  */
2337   if (VECTOR_MODE_P (cmp_mode)
2338       && SCALAR_INT_MODE_P (mode)
2339       && res == const_true_rtx)
2340     return NULL_RTX;
2341 
2342   return res;
2343 }
2344 
2345 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2346    and OP1, which should be both relational operations.  Return 0 if no such
2347    simplification is possible.  */
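/* For example (illustrative): (ior (lt x y) (eq x y)) gives masks 8 and 2,
   whose union 10 corresponds to LE, so the result is (le x y).  */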
2348 rtx
2349 simplify_context::simplify_logical_relational_operation (rtx_code code,
2350 							 machine_mode mode,
2351 							 rtx op0, rtx op1)
2352 {
2353   /* We only handle IOR of two relational operations.  */
2354   if (code != IOR)
2355     return 0;
2356 
2357   if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2358     return 0;
2359 
2360   if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2361 	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2362     return 0;
2363 
2364   enum rtx_code code0 = GET_CODE (op0);
2365   enum rtx_code code1 = GET_CODE (op1);
2366 
2367   /* We don't handle unsigned comparisons currently.  */
2368   if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2369     return 0;
2370   if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2371     return 0;
2372 
2373   int mask0 = comparison_to_mask (code0);
2374   int mask1 = comparison_to_mask (code1);
2375 
2376   int mask = mask0 | mask1;
2377 
2378   if (mask == 15)
2379     return relational_result (mode, GET_MODE (op0), const_true_rtx);
2380 
2381   code = mask_to_comparison (mask);
2382 
2383   /* Many comparison codes are only valid for certain mode classes.  */
2384   if (!comparison_code_valid_for_mode (code, mode))
2385     return 0;
2386 
2387   op0 = XEXP (op1, 0);
2388   op1 = XEXP (op1, 1);
2389 
2390   return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2391 }
2392 
2393 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2394    and OP1.  Return 0 if no simplification is possible.
2395 
2396    Don't use this for relational operations such as EQ or LT.
2397    Use simplify_relational_operation instead.  */
2398 rtx
2399 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2400 					     rtx op0, rtx op1)
2401 {
2402   rtx trueop0, trueop1;
2403   rtx tem;
2404 
2405   /* Relational operations don't work here.  We must know the mode
2406      of the operands in order to do the comparison correctly.
2407      Assuming a full word can give incorrect results.
2408      Consider comparing 128 with -128 in QImode.  */
2409   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2410   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2411 
2412   /* Make sure the constant is second.  */
2413   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2414       && swap_commutative_operands_p (op0, op1))
2415     std::swap (op0, op1);
2416 
2417   trueop0 = avoid_constant_pool_reference (op0);
2418   trueop1 = avoid_constant_pool_reference (op1);
2419 
2420   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2421   if (tem)
2422     return tem;
2423   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2424 
2425   if (tem)
2426     return tem;
2427 
2428   /* If the above steps did not result in a simplification and op0 or op1
2429      were constant pool references, use the referenced constants directly.  */
2430   if (trueop0 != op0 || trueop1 != op1)
2431     return simplify_gen_binary (code, mode, trueop0, trueop1);
2432 
2433   return NULL_RTX;
2434 }
2435 
2436 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2437    which OP0 and OP1 are both vector series or vector duplicates
2438    (which are really just series with a step of 0).  If so, try to
2439    form a new series by applying CODE to the bases and to the steps.
2440    Return null if no simplification is possible.
2441 
2442    MODE is the mode of the operation and is known to be a vector
2443    integer mode.  */
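/* For example (illustrative): (plus (vec_series A S) (vec_duplicate B))
   can become (vec_series (plus A B) S), a duplicate being a series with
   step 0, provided the scalar additions simplify (e.g. when A and B are
   constants).  */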
2444 
2445 rtx
2446 simplify_context::simplify_binary_operation_series (rtx_code code,
2447 						    machine_mode mode,
2448 						    rtx op0, rtx op1)
2449 {
2450   rtx base0, step0;
2451   if (vec_duplicate_p (op0, &base0))
2452     step0 = const0_rtx;
2453   else if (!vec_series_p (op0, &base0, &step0))
2454     return NULL_RTX;
2455 
2456   rtx base1, step1;
2457   if (vec_duplicate_p (op1, &base1))
2458     step1 = const0_rtx;
2459   else if (!vec_series_p (op1, &base1, &step1))
2460     return NULL_RTX;
2461 
2462   /* Only create a new series if we can simplify both parts.  In other
2463      cases this isn't really a simplification, and it's not necessarily
2464      a win to replace a vector operation with a scalar operation.  */
2465   scalar_mode inner_mode = GET_MODE_INNER (mode);
2466   rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2467   if (!new_base)
2468     return NULL_RTX;
2469 
2470   rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2471   if (!new_step)
2472     return NULL_RTX;
2473 
2474   return gen_vec_series (mode, new_base, new_step);
2475 }
2476 
2477 /* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
2478    operation CODE with result mode MODE, operating on OP0 and OP1.
2479    e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2480    Returns NULL_RTX if no simplification is possible.  */
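/* Another illustrative instance: (ior (and A C) (and B C)) becomes
   (and (ior A B) C) when C has no side effects.  */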
2481 
2482 rtx
2483 simplify_context::simplify_distributive_operation (rtx_code code,
2484 						   machine_mode mode,
2485 						   rtx op0, rtx op1)
2486 {
2487   enum rtx_code op = GET_CODE (op0);
2488   gcc_assert (GET_CODE (op1) == op);
2489 
2490   if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2491       && ! side_effects_p (XEXP (op0, 1)))
2492     return simplify_gen_binary (op, mode,
2493 				simplify_gen_binary (code, mode,
2494 						     XEXP (op0, 0),
2495 						     XEXP (op1, 0)),
2496 				XEXP (op0, 1));
2497 
2498   if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2499     {
2500       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2501 	  && ! side_effects_p (XEXP (op0, 0)))
2502 	return simplify_gen_binary (op, mode,
2503 				    simplify_gen_binary (code, mode,
2504 							 XEXP (op0, 1),
2505 							 XEXP (op1, 1)),
2506 				    XEXP (op0, 0));
2507       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2508 	  && ! side_effects_p (XEXP (op0, 0)))
2509 	return simplify_gen_binary (op, mode,
2510 				    simplify_gen_binary (code, mode,
2511 							 XEXP (op0, 1),
2512 							 XEXP (op1, 0)),
2513 				    XEXP (op0, 0));
2514       if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2515 	  && ! side_effects_p (XEXP (op0, 1)))
2516 	return simplify_gen_binary (op, mode,
2517 				    simplify_gen_binary (code, mode,
2518 							 XEXP (op0, 0),
2519 							 XEXP (op1, 1)),
2520 				    XEXP (op0, 1));
2521     }
2522 
2523   return NULL_RTX;
2524 }
2525 
2526 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2527    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2528    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2529    actual constants.  */
2530 
2531 rtx
2532 simplify_context::simplify_binary_operation_1 (rtx_code code,
2533 					       machine_mode mode,
2534 					       rtx op0, rtx op1,
2535 					       rtx trueop0, rtx trueop1)
2536 {
2537   rtx tem, reversed, opleft, opright, elt0, elt1;
2538   HOST_WIDE_INT val;
2539   scalar_int_mode int_mode, inner_mode;
2540   poly_int64 offset;
2541 
2542   /* Even if we can't compute a constant result,
2543      there are some cases worth simplifying.  */
2544 
2545   switch (code)
2546     {
2547     case PLUS:
2548       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2549 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2550 	 when x is -0 and the rounding mode is not towards -infinity,
2551 	 since (-0) + 0 is then 0.  */
2552       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2553 	return op0;
2554 
2555       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2556 	 transformations are safe even for IEEE.  */
2557       if (GET_CODE (op0) == NEG)
2558 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2559       else if (GET_CODE (op1) == NEG)
2560 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2561 
2562       /* (~a) + 1 -> -a */
2563       if (INTEGRAL_MODE_P (mode)
2564 	  && GET_CODE (op0) == NOT
2565 	  && trueop1 == const1_rtx)
2566 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2567 
2568       /* Handle both-operands-constant cases.  We can only add
2569 	 CONST_INTs to constants since the sum of relocatable symbols
2570 	 can't be handled by most assemblers.  Don't add CONST_INT
2571 	 to CONST_INT since overflow won't be computed properly if wider
2572 	 than HOST_BITS_PER_WIDE_INT.  */
2573 
2574       if ((GET_CODE (op0) == CONST
2575 	   || GET_CODE (op0) == SYMBOL_REF
2576 	   || GET_CODE (op0) == LABEL_REF)
2577 	  && poly_int_rtx_p (op1, &offset))
2578 	return plus_constant (mode, op0, offset);
2579       else if ((GET_CODE (op1) == CONST
2580 		|| GET_CODE (op1) == SYMBOL_REF
2581 		|| GET_CODE (op1) == LABEL_REF)
2582 	       && poly_int_rtx_p (op0, &offset))
2583 	return plus_constant (mode, op1, offset);
2584 
2585       /* See if this is something like X * C - X or vice versa or
2586 	 if the multiplication is written as a shift.  If so, we can
2587 	 distribute and make a new multiply, shift, or maybe just
2588 	 have X (if C is 2 in the example above).  But don't make
2589 	 something more expensive than we had before.  */
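      /* For instance (illustrative): (plus (mult x (const_int 5)) x) is
	 recognized as 5*x + 1*x and becomes (mult x (const_int 6)),
	 provided the new form is not judged more expensive.  */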
2590 
2591       if (is_a <scalar_int_mode> (mode, &int_mode))
2592 	{
2593 	  rtx lhs = op0, rhs = op1;
2594 
2595 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2596 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2597 
2598 	  if (GET_CODE (lhs) == NEG)
2599 	    {
2600 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2601 	      lhs = XEXP (lhs, 0);
2602 	    }
2603 	  else if (GET_CODE (lhs) == MULT
2604 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2605 	    {
2606 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2607 	      lhs = XEXP (lhs, 0);
2608 	    }
2609 	  else if (GET_CODE (lhs) == ASHIFT
2610 		   && CONST_INT_P (XEXP (lhs, 1))
2611                    && INTVAL (XEXP (lhs, 1)) >= 0
2612 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2613 	    {
2614 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2615 					    GET_MODE_PRECISION (int_mode));
2616 	      lhs = XEXP (lhs, 0);
2617 	    }
2618 
2619 	  if (GET_CODE (rhs) == NEG)
2620 	    {
2621 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2622 	      rhs = XEXP (rhs, 0);
2623 	    }
2624 	  else if (GET_CODE (rhs) == MULT
2625 		   && CONST_INT_P (XEXP (rhs, 1)))
2626 	    {
2627 	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2628 	      rhs = XEXP (rhs, 0);
2629 	    }
2630 	  else if (GET_CODE (rhs) == ASHIFT
2631 		   && CONST_INT_P (XEXP (rhs, 1))
2632 		   && INTVAL (XEXP (rhs, 1)) >= 0
2633 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2634 	    {
2635 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2636 					    GET_MODE_PRECISION (int_mode));
2637 	      rhs = XEXP (rhs, 0);
2638 	    }
2639 
2640 	  if (rtx_equal_p (lhs, rhs))
2641 	    {
2642 	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2643 	      rtx coeff;
2644 	      bool speed = optimize_function_for_speed_p (cfun);
2645 
2646 	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2647 
2648 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2649 	      return (set_src_cost (tem, int_mode, speed)
2650 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2651 	    }
2652 
2653 	  /* Optimize (X - 1) * Y + Y to X * Y.  */
2654 	  lhs = op0;
2655 	  rhs = op1;
2656 	  if (GET_CODE (op0) == MULT)
2657 	    {
2658 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
2659 		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
2660 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
2661 		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
2662 		  && rtx_equal_p (XEXP (op0, 1), op1))
2663 		lhs = XEXP (XEXP (op0, 0), 0);
2664 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2665 			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
2666 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
2667 			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
2668 		       && rtx_equal_p (XEXP (op0, 0), op1))
2669 		lhs = XEXP (XEXP (op0, 1), 0);
2670 	    }
2671 	  else if (GET_CODE (op1) == MULT)
2672 	    {
2673 	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
2674 		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
2675 		   || (GET_CODE (XEXP (op1, 0)) == MINUS
2676 		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
2677 		  && rtx_equal_p (XEXP (op1, 1), op0))
2678 		rhs = XEXP (XEXP (op1, 0), 0);
2679 	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
2680 			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
2681 			|| (GET_CODE (XEXP (op1, 1)) == MINUS
2682 			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
2683 		       && rtx_equal_p (XEXP (op1, 0), op0))
2684 		rhs = XEXP (XEXP (op1, 1), 0);
2685 	    }
2686 	  if (lhs != op0 || rhs != op1)
2687 	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
2688 	}
2689 
2690       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2691       if (CONST_SCALAR_INT_P (op1)
2692 	  && GET_CODE (op0) == XOR
2693 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2694 	  && mode_signbit_p (mode, op1))
2695 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2696 				    simplify_gen_binary (XOR, mode, op1,
2697 							 XEXP (op0, 1)));
2698 
2699       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2700       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2701 	  && GET_CODE (op0) == MULT
2702 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2703 	{
2704 	  rtx in1, in2;
2705 
2706 	  in1 = XEXP (XEXP (op0, 0), 0);
2707 	  in2 = XEXP (op0, 1);
2708 	  return simplify_gen_binary (MINUS, mode, op1,
2709 				      simplify_gen_binary (MULT, mode,
2710 							   in1, in2));
2711 	}
2712 
2713       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2714 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2715 	 is 1.  */
2716       if (COMPARISON_P (op0)
2717 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2718 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2719 	  && (reversed = reversed_comparison (op0, mode)))
2720 	return
2721 	  simplify_gen_unary (NEG, mode, reversed, mode);
2722 
2723       /* If one of the operands is a PLUS or a MINUS, see if we can
2724 	 simplify this by the associative law.
2725 	 Don't use the associative law for floating point.
2726 	 The inaccuracy makes it nonassociative,
2727 	 and subtle programs can break if operations are associated.  */
2728 
2729       if (INTEGRAL_MODE_P (mode)
2730 	  && (plus_minus_operand_p (op0)
2731 	      || plus_minus_operand_p (op1))
2732 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2733 	return tem;
2734 
2735       /* Reassociate floating point addition only when the user
2736 	 specifies associative math operations.  */
2737       if (FLOAT_MODE_P (mode)
2738 	  && flag_associative_math)
2739 	{
2740 	  tem = simplify_associative_operation (code, mode, op0, op1);
2741 	  if (tem)
2742 	    return tem;
2743 	}
2744 
2745       /* Handle vector series.  */
2746       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2747 	{
2748 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2749 	  if (tem)
2750 	    return tem;
2751 	}
2752       break;
2753 
2754     case COMPARE:
2755       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2756       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2757 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2758 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2759 	{
2760 	  rtx xop00 = XEXP (op0, 0);
2761 	  rtx xop10 = XEXP (op1, 0);
2762 
2763 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2764 	    return xop00;
2765 
2766 	  if (REG_P (xop00) && REG_P (xop10)
2767 	      && REGNO (xop00) == REGNO (xop10)
2768 	      && GET_MODE (xop00) == mode
2769 	      && GET_MODE (xop10) == mode
2770 	      && GET_MODE_CLASS (mode) == MODE_CC)
2771 	    return xop00;
2772 	}
2773       break;
2774 
2775     case MINUS:
2776       /* We can't assume x-x is 0 even with non-IEEE floating point,
2777 	 but since it is zero except in very strange circumstances, we
2778 	 will treat it as zero with -ffinite-math-only.  */
2779       if (rtx_equal_p (trueop0, trueop1)
2780 	  && ! side_effects_p (op0)
2781 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2782 	return CONST0_RTX (mode);
2783 
2784       /* Change subtraction from zero into negation.  (0 - x) is the
2785 	 same as -x when x is NaN, infinite, or finite and nonzero.
2786 	 But if the mode has signed zeros, and does not round towards
2787 	 -infinity, then 0 - 0 is 0, not -0.  */
2788       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2789 	return simplify_gen_unary (NEG, mode, op1, mode);
2790 
2791       /* (-1 - a) is ~a, unless the expression contains symbolic
2792 	 constants, in which case not retaining additions and
2793 	 subtractions could cause invalid assembly to be produced.  */
2794       if (trueop0 == constm1_rtx
2795 	  && !contains_symbolic_reference_p (op1))
2796 	return simplify_gen_unary (NOT, mode, op1, mode);
2797 
2798       /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2799 	 or has signed zeros and supports rounding towards -infinity.
2800 	 In such a case, 0 - 0 is -0.  */
2801       if (!(HONOR_SIGNED_ZEROS (mode)
2802 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2803 	  && !HONOR_SNANS (mode)
2804 	  && trueop1 == CONST0_RTX (mode))
2805 	return op0;
2806 
2807       /* See if this is something like X * C - X or vice versa or
2808 	 if the multiplication is written as a shift.  If so, we can
2809 	 distribute and make a new multiply, shift, or maybe just
2810 	 have X (if C is 2 in the example above).  But don't make
2811 	 something more expensive than we had before.  */
2812 
2813       if (is_a <scalar_int_mode> (mode, &int_mode))
2814 	{
2815 	  rtx lhs = op0, rhs = op1;
2816 
2817 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2818 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2819 
2820 	  if (GET_CODE (lhs) == NEG)
2821 	    {
2822 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2823 	      lhs = XEXP (lhs, 0);
2824 	    }
2825 	  else if (GET_CODE (lhs) == MULT
2826 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2827 	    {
2828 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2829 	      lhs = XEXP (lhs, 0);
2830 	    }
2831 	  else if (GET_CODE (lhs) == ASHIFT
2832 		   && CONST_INT_P (XEXP (lhs, 1))
2833 		   && INTVAL (XEXP (lhs, 1)) >= 0
2834 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2835 	    {
2836 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2837 					    GET_MODE_PRECISION (int_mode));
2838 	      lhs = XEXP (lhs, 0);
2839 	    }
2840 
2841 	  if (GET_CODE (rhs) == NEG)
2842 	    {
2843 	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2844 	      rhs = XEXP (rhs, 0);
2845 	    }
2846 	  else if (GET_CODE (rhs) == MULT
2847 		   && CONST_INT_P (XEXP (rhs, 1)))
2848 	    {
2849 	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2850 	      rhs = XEXP (rhs, 0);
2851 	    }
2852 	  else if (GET_CODE (rhs) == ASHIFT
2853 		   && CONST_INT_P (XEXP (rhs, 1))
2854 		   && INTVAL (XEXP (rhs, 1)) >= 0
2855 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2856 	    {
2857 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2858 					       GET_MODE_PRECISION (int_mode));
2859 	      negcoeff1 = -negcoeff1;
2860 	      rhs = XEXP (rhs, 0);
2861 	    }
2862 
2863 	  if (rtx_equal_p (lhs, rhs))
2864 	    {
2865 	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2866 	      rtx coeff;
2867 	      bool speed = optimize_function_for_speed_p (cfun);
2868 
2869 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2870 
2871 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2872 	      return (set_src_cost (tem, int_mode, speed)
2873 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2874 	    }
2875 
2876 	  /* Optimize (X + 1) * Y - Y to X * Y.  */
2877 	  lhs = op0;
2878 	  if (GET_CODE (op0) == MULT)
2879 	    {
2880 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
2881 		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
2882 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
2883 		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
2884 		  && rtx_equal_p (XEXP (op0, 1), op1))
2885 		lhs = XEXP (XEXP (op0, 0), 0);
2886 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2887 			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
2888 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
2889 			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
2890 		       && rtx_equal_p (XEXP (op0, 0), op1))
2891 		lhs = XEXP (XEXP (op0, 1), 0);
2892 	    }
2893 	  if (lhs != op0)
2894 	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
2895 	}
2896 
2897       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2898       if (GET_CODE (op1) == NEG)
2899 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2900 
2901       /* (-x - c) may be simplified as (-c - x).  */
2902       if (GET_CODE (op0) == NEG
2903 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2904 	{
2905 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2906 	  if (tem)
2907 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2908 	}
2909 
2910       if ((GET_CODE (op0) == CONST
2911 	   || GET_CODE (op0) == SYMBOL_REF
2912 	   || GET_CODE (op0) == LABEL_REF)
2913 	  && poly_int_rtx_p (op1, &offset))
2914 	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2915 
2916       /* Don't let a relocatable value get a negative coeff.  */
2917       if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
2918 	return simplify_gen_binary (PLUS, mode,
2919 				    op0,
2920 				    neg_poly_int_rtx (mode, op1));
2921 
2922       /* (x - (x & y)) -> (x & ~y) */
2923       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2924 	{
2925 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2926 	    {
2927 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2928 					GET_MODE (XEXP (op1, 1)));
2929 	      return simplify_gen_binary (AND, mode, op0, tem);
2930 	    }
2931 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2932 	    {
2933 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2934 					GET_MODE (XEXP (op1, 0)));
2935 	      return simplify_gen_binary (AND, mode, op0, tem);
2936 	    }
2937 	}
2938 
2939       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2940 	 by reversing the comparison code if valid.  */
2941       if (STORE_FLAG_VALUE == 1
2942 	  && trueop0 == const1_rtx
2943 	  && COMPARISON_P (op1)
2944 	  && (reversed = reversed_comparison (op1, mode)))
2945 	return reversed;
2946 
2947       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2948       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2949 	  && GET_CODE (op1) == MULT
2950 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2951 	{
2952 	  rtx in1, in2;
2953 
2954 	  in1 = XEXP (XEXP (op1, 0), 0);
2955 	  in2 = XEXP (op1, 1);
2956 	  return simplify_gen_binary (PLUS, mode,
2957 				      simplify_gen_binary (MULT, mode,
2958 							   in1, in2),
2959 				      op0);
2960 	}
2961 
2962       /* Canonicalize (minus (neg A) (mult B C)) to
2963 	 (minus (mult (neg B) C) A).  */
2964       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2965 	  && GET_CODE (op1) == MULT
2966 	  && GET_CODE (op0) == NEG)
2967 	{
2968 	  rtx in1, in2;
2969 
2970 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2971 	  in2 = XEXP (op1, 1);
2972 	  return simplify_gen_binary (MINUS, mode,
2973 				      simplify_gen_binary (MULT, mode,
2974 							   in1, in2),
2975 				      XEXP (op0, 0));
2976 	}
2977 
2978       /* If one of the operands is a PLUS or a MINUS, see if we can
2979 	 simplify this by the associative law.  This will, for example,
2980          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2981 	 Don't use the associative law for floating point.
2982 	 The inaccuracy makes it nonassociative,
2983 	 and subtle programs can break if operations are associated.  */
2984 
2985       if (INTEGRAL_MODE_P (mode)
2986 	  && (plus_minus_operand_p (op0)
2987 	      || plus_minus_operand_p (op1))
2988 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2989 	return tem;
2990 
2991       /* Handle vector series.  */
2992       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2993 	{
2994 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2995 	  if (tem)
2996 	    return tem;
2997 	}
2998       break;
2999 
3000     case MULT:
3001       if (trueop1 == constm1_rtx)
3002 	return simplify_gen_unary (NEG, mode, op0, mode);
3003 
3004       if (GET_CODE (op0) == NEG)
3005 	{
3006 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3007 	  /* If op1 is a MULT as well and simplify_unary_operation
3008 	     just moved the NEG to the second operand, simplify_gen_binary
3009 	     below could, through simplify_associative_operation, move
3010 	     the NEG around again and recurse endlessly.  */
3011 	  if (temp
3012 	      && GET_CODE (op1) == MULT
3013 	      && GET_CODE (temp) == MULT
3014 	      && XEXP (op1, 0) == XEXP (temp, 0)
3015 	      && GET_CODE (XEXP (temp, 1)) == NEG
3016 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3017 	    temp = NULL_RTX;
3018 	  if (temp)
3019 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3020 	}
3021       if (GET_CODE (op1) == NEG)
3022 	{
3023 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3024 	  /* If op0 is a MULT as well and simplify_unary_operation
3025 	     just moved the NEG to the second operand, simplify_gen_binary
3026 	     below could, through simplify_associative_operation, move
3027 	     the NEG around again and recurse endlessly.  */
3028 	  if (temp
3029 	      && GET_CODE (op0) == MULT
3030 	      && GET_CODE (temp) == MULT
3031 	      && XEXP (op0, 0) == XEXP (temp, 0)
3032 	      && GET_CODE (XEXP (temp, 1)) == NEG
3033 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3034 	    temp = NULL_RTX;
3035 	  if (temp)
3036 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3037 	}
3038 
3039       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
3040 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
3041 	 when the mode has signed zeros, since multiplying a negative
3042 	 number by 0 will give -0, not 0.  */
3043       if (!HONOR_NANS (mode)
3044 	  && !HONOR_SIGNED_ZEROS (mode)
3045 	  && trueop1 == CONST0_RTX (mode)
3046 	  && ! side_effects_p (op0))
3047 	return op1;
3048 
3049       /* In IEEE floating point, x*1 is not equivalent to x for
3050 	 signalling NaNs.  */
3051       if (!HONOR_SNANS (mode)
3052 	  && trueop1 == CONST1_RTX (mode))
3053 	return op0;
3054 
3055       /* Convert multiply by constant power of two into shift.  */
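      /* E.g. outside of a memory address (mem_depth == 0),
	 (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */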
3056       if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3057 	{
3058 	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3059 	  if (val >= 0)
3060 	    return simplify_gen_binary (ASHIFT, mode, op0,
3061 					gen_int_shift_amount (mode, val));
3062 	}
3063 
3064       /* x*2 is x+x and x*(-1) is -x */
3065       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3066 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3067 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3068 	  && GET_MODE (op0) == mode)
3069 	{
3070 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3071 
3072 	  if (real_equal (d1, &dconst2))
3073 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3074 
3075 	  if (!HONOR_SNANS (mode)
3076 	      && real_equal (d1, &dconstm1))
3077 	    return simplify_gen_unary (NEG, mode, op0, mode);
3078 	}
3079 
3080       /* Optimize -x * -x as x * x.  */
3081       if (FLOAT_MODE_P (mode)
3082 	  && GET_CODE (op0) == NEG
3083 	  && GET_CODE (op1) == NEG
3084 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3085 	  && !side_effects_p (XEXP (op0, 0)))
3086 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3087 
3088       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
3089       if (SCALAR_FLOAT_MODE_P (mode)
3090 	  && GET_CODE (op0) == ABS
3091 	  && GET_CODE (op1) == ABS
3092 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3093 	  && !side_effects_p (XEXP (op0, 0)))
3094 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3095 
3096       /* Reassociate multiplication, but for floating point MULTs
3097 	 only when the user specifies unsafe math optimizations.  */
3098       if (! FLOAT_MODE_P (mode)
3099 	  || flag_unsafe_math_optimizations)
3100 	{
3101 	  tem = simplify_associative_operation (code, mode, op0, op1);
3102 	  if (tem)
3103 	    return tem;
3104 	}
3105       break;
3106 
3107     case IOR:
3108       if (trueop1 == CONST0_RTX (mode))
3109 	return op0;
3110       if (INTEGRAL_MODE_P (mode)
3111 	  && trueop1 == CONSTM1_RTX (mode)
3112 	  && !side_effects_p (op0))
3113 	return op1;
3114       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3115 	return op0;
3116       /* A | (~A) -> -1 */
3117       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3118 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3119 	  && ! side_effects_p (op0)
3120 	  && SCALAR_INT_MODE_P (mode))
3121 	return constm1_rtx;
3122 
3123       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
3124       if (CONST_INT_P (op1)
3125 	  && HWI_COMPUTABLE_MODE_P (mode)
3126 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3127 	  && !side_effects_p (op0))
3128 	return op1;
3129 
3130       /* Canonicalize (X & C1) | C2.  */
3131       if (GET_CODE (op0) == AND
3132 	  && CONST_INT_P (trueop1)
3133 	  && CONST_INT_P (XEXP (op0, 1)))
3134 	{
3135 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3136 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3137 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
3138 
3139 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
3140 	  if ((c1 & c2) == c1
3141 	      && !side_effects_p (XEXP (op0, 0)))
3142 	    return trueop1;
3143 
3144 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
3145 	  if (((c1|c2) & mask) == mask)
3146 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3147 	}
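      /* Illustrative SImode example: (ior:SI (and:SI x (const_int -256))
	 (const_int 255)) has (C1|C2) == ~0, so it becomes
	 (ior:SI x (const_int 255)).  */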
3148 
3149       /* Convert (A & B) | A to A.  */
3150       if (GET_CODE (op0) == AND
3151 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3152 	      || rtx_equal_p (XEXP (op0, 1), op1))
3153 	  && ! side_effects_p (XEXP (op0, 0))
3154 	  && ! side_effects_p (XEXP (op0, 1)))
3155 	return op1;
3156 
3157       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3158          mode precision to (rotate A CX).  */
3159 
3160       if (GET_CODE (op1) == ASHIFT
3161           || GET_CODE (op1) == SUBREG)
3162         {
3163 	  opleft = op1;
3164 	  opright = op0;
3165 	}
3166       else
3167         {
3168 	  opright = op1;
3169 	  opleft = op0;
3170 	}
3171 
3172       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3173           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3174           && CONST_INT_P (XEXP (opleft, 1))
3175           && CONST_INT_P (XEXP (opright, 1))
3176           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3177 	      == GET_MODE_UNIT_PRECISION (mode)))
3178         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
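      /* Illustrative SImode example:
	 (ior:SI (ashift:SI x (const_int 24)) (lshiftrt:SI x (const_int 8)))
	 becomes (rotate:SI x (const_int 24)), since 24 + 8 equals the
	 32-bit precision.  */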
3179 
3180       /* Same, but for ashift that has been "simplified" to a wider mode
3181         by simplify_shift_const.  */
3182 
3183       if (GET_CODE (opleft) == SUBREG
3184 	  && is_a <scalar_int_mode> (mode, &int_mode)
3185 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3186 				     &inner_mode)
3187           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3188           && GET_CODE (opright) == LSHIFTRT
3189           && GET_CODE (XEXP (opright, 0)) == SUBREG
3190 	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3191 	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3192           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3193                           SUBREG_REG (XEXP (opright, 0)))
3194           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3195           && CONST_INT_P (XEXP (opright, 1))
3196 	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3197 	      + INTVAL (XEXP (opright, 1))
3198 	      == GET_MODE_PRECISION (int_mode)))
3199 	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3200 			       XEXP (SUBREG_REG (opleft), 1));
3201 
3202       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3203          a (sign_extend (plus ...)).  Then, if OP1 is a CONST_INT and
3204 	 the PLUS does not affect any of the bits in OP1, we can do
3205 	 the IOR as a PLUS and we can associate.  This is valid if OP1
3206          can be safely shifted left C bits.  */
3207       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3208           && GET_CODE (XEXP (op0, 0)) == PLUS
3209           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3210           && CONST_INT_P (XEXP (op0, 1))
3211           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3212         {
3213 	  int count = INTVAL (XEXP (op0, 1));
3214 	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3215 
3216           if (mask >> count == INTVAL (trueop1)
3217 	      && trunc_int_for_mode (mask, mode) == mask
3218               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3219 	    return simplify_gen_binary (ASHIFTRT, mode,
3220 					plus_constant (mode, XEXP (op0, 0),
3221 						       mask),
3222 					XEXP (op0, 1));
3223         }
3224 
3225       /* The following happens with bitfield merging.
3226          (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3227       if (GET_CODE (op0) == AND
3228 	  && GET_CODE (op1) == AND
3229 	  && CONST_INT_P (XEXP (op0, 1))
3230 	  && CONST_INT_P (XEXP (op1, 1))
3231 	  && (INTVAL (XEXP (op0, 1))
3232 	      == ~INTVAL (XEXP (op1, 1))))
3233 	{
3234 	  /* The IOR may be on both sides.  */
3235 	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
3236 	  if (GET_CODE (XEXP (op1, 0)) == IOR)
3237 	    top0 = op0, top1 = op1;
3238 	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
3239 	    top0 = op1, top1 = op0;
3240 	  if (top0 && top1)
3241 	    {
3242 	      /* X may be on either side of the inner IOR.  */
3243 	      rtx tem = NULL_RTX;
3244 	      if (rtx_equal_p (XEXP (top0, 0),
3245 			       XEXP (XEXP (top1, 0), 0)))
3246 		tem = XEXP (XEXP (top1, 0), 1);
3247 	      else if (rtx_equal_p (XEXP (top0, 0),
3248 				    XEXP (XEXP (top1, 0), 1)))
3249 		tem = XEXP (XEXP (top1, 0), 0);
3250 	      if (tem)
3251 		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3252 					    simplify_gen_binary
3253 					      (AND, mode, tem, XEXP (top1, 1)));
3254 	    }
3255 	}
3256 
3257       /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
3258       if (GET_CODE (op0) == GET_CODE (op1)
3259 	  && (GET_CODE (op0) == AND
3260 	      || GET_CODE (op0) == IOR
3261 	      || GET_CODE (op0) == LSHIFTRT
3262 	      || GET_CODE (op0) == ASHIFTRT
3263 	      || GET_CODE (op0) == ASHIFT
3264 	      || GET_CODE (op0) == ROTATE
3265 	      || GET_CODE (op0) == ROTATERT))
3266 	{
3267 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3268 	  if (tem)
3269 	    return tem;
3270 	}
3271 
3272       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3273       if (tem)
3274 	return tem;
3275 
3276       tem = simplify_associative_operation (code, mode, op0, op1);
3277       if (tem)
3278 	return tem;
3279 
3280       tem = simplify_logical_relational_operation (code, mode, op0, op1);
3281       if (tem)
3282 	return tem;
3283       break;
3284 
3285     case XOR:
3286       if (trueop1 == CONST0_RTX (mode))
3287 	return op0;
3288       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3289 	return simplify_gen_unary (NOT, mode, op0, mode);
3290       if (rtx_equal_p (trueop0, trueop1)
3291 	  && ! side_effects_p (op0)
3292 	  && GET_MODE_CLASS (mode) != MODE_CC)
3293 	 return CONST0_RTX (mode);
3294 
3295       /* Canonicalize XOR of the most significant bit to PLUS.  */
3296       if (CONST_SCALAR_INT_P (op1)
3297 	  && mode_signbit_p (mode, op1))
3298 	return simplify_gen_binary (PLUS, mode, op0, op1);
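      /* E.g., in SImode, (xor:SI x (const_int -2147483648)) becomes
	 (plus:SI x (const_int -2147483648)); the two are equivalent
	 because adding the sign bit cannot carry into other bits of
	 the mode.  */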
3299       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
3300       if (CONST_SCALAR_INT_P (op1)
3301 	  && GET_CODE (op0) == PLUS
3302 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
3303 	  && mode_signbit_p (mode, XEXP (op0, 1)))
3304 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3305 				    simplify_gen_binary (XOR, mode, op1,
3306 							 XEXP (op0, 1)));
3307 
3308       /* If we are XORing two things that have no bits in common,
3309 	 convert them into an IOR.  This helps to detect rotation encoded
3310 	 using those methods and possibly other simplifications.  */
3311 
3312       if (HWI_COMPUTABLE_MODE_P (mode)
3313 	  && (nonzero_bits (op0, mode)
3314 	      & nonzero_bits (op1, mode)) == 0)
3315 	return (simplify_gen_binary (IOR, mode, op0, op1));
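      /* E.g., (xor:SI (and:SI x (const_int 255)) (const_int 256)) has no
	 nonzero bits in common between its operands and is rewritten as
	 (ior:SI (and:SI x (const_int 255)) (const_int 256)).  */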
3316 
3317       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3318 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3319 	 (NOT y).  */
3320       {
3321 	int num_negated = 0;
3322 
3323 	if (GET_CODE (op0) == NOT)
3324 	  num_negated++, op0 = XEXP (op0, 0);
3325 	if (GET_CODE (op1) == NOT)
3326 	  num_negated++, op1 = XEXP (op1, 0);
3327 
3328 	if (num_negated == 2)
3329 	  return simplify_gen_binary (XOR, mode, op0, op1);
3330 	else if (num_negated == 1)
3331 	  return simplify_gen_unary (NOT, mode,
3332 				     simplify_gen_binary (XOR, mode, op0, op1),
3333 				     mode);
3334       }
3335 
3336       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
3337 	 correspond to a machine insn or result in further simplifications
3338 	 if B is a constant.  */
3339 
3340       if (GET_CODE (op0) == AND
3341 	  && rtx_equal_p (XEXP (op0, 1), op1)
3342 	  && ! side_effects_p (op1))
3343 	return simplify_gen_binary (AND, mode,
3344 				    simplify_gen_unary (NOT, mode,
3345 							XEXP (op0, 0), mode),
3346 				    op1);
3347 
3348       else if (GET_CODE (op0) == AND
3349 	       && rtx_equal_p (XEXP (op0, 0), op1)
3350 	       && ! side_effects_p (op1))
3351 	return simplify_gen_binary (AND, mode,
3352 				    simplify_gen_unary (NOT, mode,
3353 							XEXP (op0, 1), mode),
3354 				    op1);
3355 
3356       /* Given (xor (ior (xor A B) C) D), where B, C and D are
3357 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3358 	 out bits inverted twice and not set by C.  Similarly, given
3359 	 (xor (and (xor A B) C) D), simplify without inverting C in
3360 	 the xor operand: (xor (and A C) (B&C)^D).
3361       */
3362       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3363 	       && GET_CODE (XEXP (op0, 0)) == XOR
3364 	       && CONST_INT_P (op1)
3365 	       && CONST_INT_P (XEXP (op0, 1))
3366 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3367 	{
3368 	  enum rtx_code op = GET_CODE (op0);
3369 	  rtx a = XEXP (XEXP (op0, 0), 0);
3370 	  rtx b = XEXP (XEXP (op0, 0), 1);
3371 	  rtx c = XEXP (op0, 1);
3372 	  rtx d = op1;
3373 	  HOST_WIDE_INT bval = INTVAL (b);
3374 	  HOST_WIDE_INT cval = INTVAL (c);
3375 	  HOST_WIDE_INT dval = INTVAL (d);
3376 	  HOST_WIDE_INT xcval;
3377 
3378 	  if (op == IOR)
3379 	    xcval = ~cval;
3380 	  else
3381 	    xcval = cval;
3382 
3383 	  return simplify_gen_binary (XOR, mode,
3384 				      simplify_gen_binary (op, mode, a, c),
3385 				      gen_int_mode ((bval & xcval) ^ dval,
3386 						    mode));
3387 	}
3388 
3389       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3390 	 we can transform like this:
3391             (A&B)^C == ~(A&B)&C | ~C&(A&B)
3392                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
3393                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
3394 	 Attempt a few simplifications when B and C are both constants.  */
3395       if (GET_CODE (op0) == AND
3396 	  && CONST_INT_P (op1)
3397 	  && CONST_INT_P (XEXP (op0, 1)))
3398 	{
3399 	  rtx a = XEXP (op0, 0);
3400 	  rtx b = XEXP (op0, 1);
3401 	  rtx c = op1;
3402 	  HOST_WIDE_INT bval = INTVAL (b);
3403 	  HOST_WIDE_INT cval = INTVAL (c);
3404 
3405 	  /* Instead of computing ~A&C, we compute its negated value,
3406 	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
3407 	     optimize for sure.  If it does not simplify, we still try
3408 	     to compute ~A&C below, but since that always allocates
3409 	     RTL, we don't try that before committing to returning a
3410 	     simplified expression.  */
3411 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3412 						  GEN_INT (~cval));
3413 
3414 	  if ((~cval & bval) == 0)
3415 	    {
3416 	      rtx na_c = NULL_RTX;
3417 	      if (n_na_c)
3418 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3419 	      else
3420 		{
3421 		  /* If ~A does not simplify, don't bother: we don't
3422 		     want to simplify 2 operations into 3, and if na_c
3423 		     were to simplify with na, n_na_c would have
3424 		     simplified as well.  */
3425 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
3426 		  if (na)
3427 		    na_c = simplify_gen_binary (AND, mode, na, c);
3428 		}
3429 
3430 	      /* Try to simplify ~A&C | ~B&C.  */
3431 	      if (na_c != NULL_RTX)
3432 		return simplify_gen_binary (IOR, mode, na_c,
3433 					    gen_int_mode (~bval & cval, mode));
3434 	    }
3435 	  else
3436 	    {
3437 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
3438 	      if (n_na_c == CONSTM1_RTX (mode))
3439 		{
3440 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3441 						    gen_int_mode (~cval & bval,
3442 								  mode));
3443 		  return simplify_gen_binary (IOR, mode, a_nc_b,
3444 					      gen_int_mode (~bval & cval,
3445 							    mode));
3446 		}
3447 	    }
3448 	}
3449 
3450       /* If we have (xor (and (xor A B) C) A) with C a constant, we can
3451 	 instead do (ior (and A ~C) (and B C)), which is a machine instruction
3452 	 on some machines and also has a shorter instruction path length.  */
3453       if (GET_CODE (op0) == AND
3454 	  && GET_CODE (XEXP (op0, 0)) == XOR
3455 	  && CONST_INT_P (XEXP (op0, 1))
3456 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3457 	{
3458 	  rtx a = trueop1;
3459 	  rtx b = XEXP (XEXP (op0, 0), 1);
3460 	  rtx c = XEXP (op0, 1);
3461 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3462 	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3463 	  rtx bc = simplify_gen_binary (AND, mode, b, c);
3464 	  return simplify_gen_binary (IOR, mode, a_nc, bc);
3465 	}
3466       /* Similarly, (xor (and (xor A B) C) B) becomes (ior (and A C) (and B ~C)).  */
3467       else if (GET_CODE (op0) == AND
3468 	  && GET_CODE (XEXP (op0, 0)) == XOR
3469 	  && CONST_INT_P (XEXP (op0, 1))
3470 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3471 	{
3472 	  rtx a = XEXP (XEXP (op0, 0), 0);
3473 	  rtx b = trueop1;
3474 	  rtx c = XEXP (op0, 1);
3475 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3476 	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3477 	  rtx ac = simplify_gen_binary (AND, mode, a, c);
3478 	  return simplify_gen_binary (IOR, mode, ac, b_nc);
3479 	}
3480 
3481       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3482 	 comparison if STORE_FLAG_VALUE is 1.  */
3483       if (STORE_FLAG_VALUE == 1
3484 	  && trueop1 == const1_rtx
3485 	  && COMPARISON_P (op0)
3486 	  && (reversed = reversed_comparison (op0, mode)))
3487 	return reversed;
3488 
3489       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3490 	 is (lt foo (const_int 0)), so we can perform the above
3491 	 simplification if STORE_FLAG_VALUE is 1.  */
3492 
3493       if (is_a <scalar_int_mode> (mode, &int_mode)
3494 	  && STORE_FLAG_VALUE == 1
3495 	  && trueop1 == const1_rtx
3496 	  && GET_CODE (op0) == LSHIFTRT
3497 	  && CONST_INT_P (XEXP (op0, 1))
3498 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3499 	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
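      /* E.g., for SImode with STORE_FLAG_VALUE == 1,
	 (xor:SI (lshiftrt:SI x (const_int 31)) (const_int 1))
	 becomes (ge:SI x (const_int 0)).  */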
3500 
3501       /* (xor (comparison foo bar) (const_int sign-bit))
3502 	 when STORE_FLAG_VALUE is the sign bit.  */
3503       if (is_a <scalar_int_mode> (mode, &int_mode)
3504 	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3505 	  && trueop1 == const_true_rtx
3506 	  && COMPARISON_P (op0)
3507 	  && (reversed = reversed_comparison (op0, int_mode)))
3508 	return reversed;
3509 
3510       /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
3511       if (GET_CODE (op0) == GET_CODE (op1)
3512 	  && (GET_CODE (op0) == AND
3513 	      || GET_CODE (op0) == LSHIFTRT
3514 	      || GET_CODE (op0) == ASHIFTRT
3515 	      || GET_CODE (op0) == ASHIFT
3516 	      || GET_CODE (op0) == ROTATE
3517 	      || GET_CODE (op0) == ROTATERT))
3518 	{
3519 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3520 	  if (tem)
3521 	    return tem;
3522 	}
3523 
3524       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3525       if (tem)
3526 	return tem;
3527 
3528       tem = simplify_associative_operation (code, mode, op0, op1);
3529       if (tem)
3530 	return tem;
3531       break;
3532 
3533     case AND:
3534       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3535 	return trueop1;
3536       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3537 	return op0;
3538       if (HWI_COMPUTABLE_MODE_P (mode))
3539 	{
3540 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3541 	  HOST_WIDE_INT nzop1;
3542 	  if (CONST_INT_P (trueop1))
3543 	    {
3544 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
3545 	      /* If we are turning off bits already known off in OP0, we need
3546 		 not do an AND.  */
3547 	      if ((nzop0 & ~val1) == 0)
3548 		return op0;
3549 	    }
3550 	  nzop1 = nonzero_bits (trueop1, mode);
3551 	  /* If we are clearing all the nonzero bits, the result is zero.  */
3552 	  if ((nzop1 & nzop0) == 0
3553 	      && !side_effects_p (op0) && !side_effects_p (op1))
3554 	    return CONST0_RTX (mode);
3555 	}
3556       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3557 	  && GET_MODE_CLASS (mode) != MODE_CC)
3558 	return op0;
3559       /* A & (~A) -> 0 */
3560       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3561 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3562 	  && ! side_effects_p (op0)
3563 	  && GET_MODE_CLASS (mode) != MODE_CC)
3564 	return CONST0_RTX (mode);
3565 
3566       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3567 	 there are no nonzero bits of C outside of X's mode.  */
3568       if ((GET_CODE (op0) == SIGN_EXTEND
3569 	   || GET_CODE (op0) == ZERO_EXTEND)
3570 	  && CONST_INT_P (trueop1)
3571 	  && HWI_COMPUTABLE_MODE_P (mode)
3572 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3573 	      & UINTVAL (trueop1)) == 0)
3574 	{
3575 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3576 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3577 				     gen_int_mode (INTVAL (trueop1),
3578 						   imode));
3579 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3580 	}
3581 
3582       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
3583 	 we might be able to further simplify the AND with X and potentially
3584 	 remove the truncation altogether.  */
3585       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3586 	{
3587 	  rtx x = XEXP (op0, 0);
3588 	  machine_mode xmode = GET_MODE (x);
3589 	  tem = simplify_gen_binary (AND, xmode, x,
3590 				     gen_int_mode (INTVAL (trueop1), xmode));
3591 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3592 	}
3593 
3594       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3595       if (GET_CODE (op0) == IOR
3596 	  && CONST_INT_P (trueop1)
3597 	  && CONST_INT_P (XEXP (op0, 1)))
3598 	{
3599 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3600 	  return simplify_gen_binary (IOR, mode,
3601 				      simplify_gen_binary (AND, mode,
3602 							   XEXP (op0, 0), op1),
3603 				      gen_int_mode (tmp, mode));
3604 	}
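      /* E.g., (and:SI (ior:SI x (const_int 12)) (const_int 10)) becomes
	 (ior:SI (and:SI x (const_int 10)) (const_int 8)).  */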
3605 
3606       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3607 	 insn (and may simplify more).  */
3608       if (GET_CODE (op0) == XOR
3609 	  && rtx_equal_p (XEXP (op0, 0), op1)
3610 	  && ! side_effects_p (op1))
3611 	return simplify_gen_binary (AND, mode,
3612 				    simplify_gen_unary (NOT, mode,
3613 							XEXP (op0, 1), mode),
3614 				    op1);
3615 
3616       if (GET_CODE (op0) == XOR
3617 	  && rtx_equal_p (XEXP (op0, 1), op1)
3618 	  && ! side_effects_p (op1))
3619 	return simplify_gen_binary (AND, mode,
3620 				    simplify_gen_unary (NOT, mode,
3621 							XEXP (op0, 0), mode),
3622 				    op1);
3623 
3624       /* Similarly for (~(A ^ B)) & A.  */
3625       if (GET_CODE (op0) == NOT
3626 	  && GET_CODE (XEXP (op0, 0)) == XOR
3627 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3628 	  && ! side_effects_p (op1))
3629 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3630 
3631       if (GET_CODE (op0) == NOT
3632 	  && GET_CODE (XEXP (op0, 0)) == XOR
3633 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3634 	  && ! side_effects_p (op1))
3635 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3636 
3637       /* Convert (A | B) & A to A.  */
3638       if (GET_CODE (op0) == IOR
3639 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3640 	      || rtx_equal_p (XEXP (op0, 1), op1))
3641 	  && ! side_effects_p (XEXP (op0, 0))
3642 	  && ! side_effects_p (XEXP (op0, 1)))
3643 	return op1;
3644 
3645       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3646 	 ((A & N) + B) & M -> (A + B) & M
3647 	 Similarly if (N & M) == 0,
3648 	 ((A | N) + B) & M -> (A + B) & M
3649 	 and for - instead of + and/or ^ instead of |.
3650          Also, if (N & M) == 0, then
3651 	 (A +- N) & M -> A & M.  */
3652       if (CONST_INT_P (trueop1)
3653 	  && HWI_COMPUTABLE_MODE_P (mode)
3654 	  && ~UINTVAL (trueop1)
3655 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3656 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3657 	{
3658 	  rtx pmop[2];
3659 	  int which;
3660 
3661 	  pmop[0] = XEXP (op0, 0);
3662 	  pmop[1] = XEXP (op0, 1);
3663 
3664 	  if (CONST_INT_P (pmop[1])
3665 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3666 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3667 
3668 	  for (which = 0; which < 2; which++)
3669 	    {
3670 	      tem = pmop[which];
3671 	      switch (GET_CODE (tem))
3672 		{
3673 		case AND:
3674 		  if (CONST_INT_P (XEXP (tem, 1))
3675 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3676 		      == UINTVAL (trueop1))
3677 		    pmop[which] = XEXP (tem, 0);
3678 		  break;
3679 		case IOR:
3680 		case XOR:
3681 		  if (CONST_INT_P (XEXP (tem, 1))
3682 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3683 		    pmop[which] = XEXP (tem, 0);
3684 		  break;
3685 		default:
3686 		  break;
3687 		}
3688 	    }
3689 
3690 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3691 	    {
3692 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3693 					 pmop[0], pmop[1]);
3694 	      return simplify_gen_binary (code, mode, tem, op1);
3695 	    }
3696 	}
3697 
3698       /* (and X (ior (not X) Y)) -> (and X Y).  */
3699       if (GET_CODE (op1) == IOR
3700 	  && GET_CODE (XEXP (op1, 0)) == NOT
3701 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3702        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3703 
3704       /* (and (ior (not X) Y) X) -> (and X Y) */
3705       if (GET_CODE (op0) == IOR
3706 	  && GET_CODE (XEXP (op0, 0)) == NOT
3707 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3708 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3709 
3710       /* (and X (ior Y (not X))) -> (and X Y).  */
3711       if (GET_CODE (op1) == IOR
3712 	  && GET_CODE (XEXP (op1, 1)) == NOT
3713 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3714        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3715 
3716       /* (and (ior Y (not X)) X) -> (and X Y) */
3717       if (GET_CODE (op0) == IOR
3718 	  && GET_CODE (XEXP (op0, 1)) == NOT
3719 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3720 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3721 
3722       /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
3723       if (GET_CODE (op0) == GET_CODE (op1)
3724 	  && (GET_CODE (op0) == AND
3725 	      || GET_CODE (op0) == IOR
3726 	      || GET_CODE (op0) == LSHIFTRT
3727 	      || GET_CODE (op0) == ASHIFTRT
3728 	      || GET_CODE (op0) == ASHIFT
3729 	      || GET_CODE (op0) == ROTATE
3730 	      || GET_CODE (op0) == ROTATERT))
3731 	{
3732 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3733 	  if (tem)
3734 	    return tem;
3735 	}
3736 
3737       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3738       if (tem)
3739 	return tem;
3740 
3741       tem = simplify_associative_operation (code, mode, op0, op1);
3742       if (tem)
3743 	return tem;
3744       break;
3745 
3746     case UDIV:
3747       /* 0/x is 0 (or x&0 if x has side-effects).  */
3748       if (trueop0 == CONST0_RTX (mode)
3749 	  && !cfun->can_throw_non_call_exceptions)
3750 	{
3751 	  if (side_effects_p (op1))
3752 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3753 	  return trueop0;
3754 	}
3755       /* x/1 is x.  */
3756       if (trueop1 == CONST1_RTX (mode))
3757 	{
3758 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3759 	  if (tem)
3760 	    return tem;
3761 	}
3762       /* Convert divide by power of two into shift.  */
3763       if (CONST_INT_P (trueop1)
3764 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3765 	return simplify_gen_binary (LSHIFTRT, mode, op0,
3766 				    gen_int_shift_amount (mode, val));
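      /* E.g., (udiv:SI x (const_int 8)) becomes
	 (lshiftrt:SI x (const_int 3)).  */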
3767       break;
3768 
3769     case DIV:
3770       /* Handle floating point and integers separately.  */
3771       if (SCALAR_FLOAT_MODE_P (mode))
3772 	{
3773 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3774 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3775 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3776 	     zeros, since dividing 0 by a negative number gives -0.0.  */
3777 	  if (trueop0 == CONST0_RTX (mode)
3778 	      && !HONOR_NANS (mode)
3779 	      && !HONOR_SIGNED_ZEROS (mode)
3780 	      && ! side_effects_p (op1))
3781 	    return op0;
3782 	  /* x/1.0 is x.  */
3783 	  if (trueop1 == CONST1_RTX (mode)
3784 	      && !HONOR_SNANS (mode))
3785 	    return op0;
3786 
3787 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3788 	      && trueop1 != CONST0_RTX (mode))
3789 	    {
3790 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3791 
3792 	      /* x/-1.0 is -x.  */
3793 	      if (real_equal (d1, &dconstm1)
3794 		  && !HONOR_SNANS (mode))
3795 		return simplify_gen_unary (NEG, mode, op0, mode);
3796 
3797 	      /* Change FP division by a constant into multiplication.
3798 		 Only do this with -freciprocal-math.  */
3799 	      if (flag_reciprocal_math
3800 		  && !real_equal (d1, &dconst0))
3801 		{
3802 		  REAL_VALUE_TYPE d;
3803 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3804 		  tem = const_double_from_real_value (d, mode);
3805 		  return simplify_gen_binary (MULT, mode, op0, tem);
3806 		}
3807 	    }
3808 	}
3809       else if (SCALAR_INT_MODE_P (mode))
3810 	{
3811 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3812 	  if (trueop0 == CONST0_RTX (mode)
3813 	      && !cfun->can_throw_non_call_exceptions)
3814 	    {
3815 	      if (side_effects_p (op1))
3816 		return simplify_gen_binary (AND, mode, op1, trueop0);
3817 	      return trueop0;
3818 	    }
3819 	  /* x/1 is x.  */
3820 	  if (trueop1 == CONST1_RTX (mode))
3821 	    {
3822 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3823 	      if (tem)
3824 		return tem;
3825 	    }
3826 	  /* x/-1 is -x.  */
3827 	  if (trueop1 == constm1_rtx)
3828 	    {
3829 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3830 	      if (x)
3831 		return simplify_gen_unary (NEG, mode, x, mode);
3832 	    }
3833 	}
3834       break;
3835 
3836     case UMOD:
3837       /* 0%x is 0 (or x&0 if x has side-effects).  */
3838       if (trueop0 == CONST0_RTX (mode))
3839 	{
3840 	  if (side_effects_p (op1))
3841 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3842 	  return trueop0;
3843 	}
3844       /* x%1 is 0 (or x&0 if x has side-effects).  */
3845       if (trueop1 == CONST1_RTX (mode))
3846 	{
3847 	  if (side_effects_p (op0))
3848 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3849 	  return CONST0_RTX (mode);
3850 	}
3851       /* Implement modulus by power of two as AND.  */
3852       if (CONST_INT_P (trueop1)
3853 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3854 	return simplify_gen_binary (AND, mode, op0,
3855 				    gen_int_mode (UINTVAL (trueop1) - 1,
3856 						  mode));
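      /* E.g., (umod:SI x (const_int 8)) becomes (and:SI x (const_int 7)).  */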
3857       break;
3858 
3859     case MOD:
3860       /* 0%x is 0 (or x&0 if x has side-effects).  */
3861       if (trueop0 == CONST0_RTX (mode))
3862 	{
3863 	  if (side_effects_p (op1))
3864 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3865 	  return trueop0;
3866 	}
3867       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3868       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3869 	{
3870 	  if (side_effects_p (op0))
3871 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3872 	  return CONST0_RTX (mode);
3873 	}
3874       break;
3875 
3876     case ROTATERT:
3877     case ROTATE:
3878       if (trueop1 == CONST0_RTX (mode))
3879 	return op0;
3880       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3881 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3882 	 bitsize - 1, use the other direction of rotate with an amount of
3883 	 1 .. bitsize / 2 - 1 instead.  */
3884 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3885       if (CONST_INT_P (trueop1)
3886 	  && IN_RANGE (INTVAL (trueop1),
3887 		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3888 		       GET_MODE_UNIT_PRECISION (mode) - 1))
3889 	{
3890 	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3891 	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3892 	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3893 				      mode, op0, new_amount_rtx);
3894 	}
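      /* E.g., when the target provides both rotate and rotatert patterns,
	 (rotate:SI x (const_int 25)) is canonicalized to
	 (rotatert:SI x (const_int 7)), and (rotatert:SI x (const_int 20))
	 to (rotate:SI x (const_int 12)).  */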
3895 #endif
3896       /* FALLTHRU */
3897     case ASHIFTRT:
3898       if (trueop1 == CONST0_RTX (mode))
3899 	return op0;
3900       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3901 	return op0;
3902       /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
3903       if (CONST_INT_P (trueop0)
3904 	  && HWI_COMPUTABLE_MODE_P (mode)
3905 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3906 	  && ! side_effects_p (op1))
3907 	return op0;
3908 
3909     canonicalize_shift:
3910       /* Given:
3911 	 scalar modes M1, M2
3912 	 scalar constants c1, c2
3913 	 size (M2) > size (M1)
3914 	 c1 == size (M2) - size (M1)
3915 	 optimize:
3916 	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3917 				 <low_part>)
3918 		      (const_int <c2>))
3919 	 to:
3920 	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3921 		    <low_part>).  */
3922       if ((code == ASHIFTRT || code == LSHIFTRT)
3923 	  && is_a <scalar_int_mode> (mode, &int_mode)
3924 	  && SUBREG_P (op0)
3925 	  && CONST_INT_P (op1)
3926 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3927 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3928 				     &inner_mode)
3929 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3930 	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3931 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3932 	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3933 	  && subreg_lowpart_p (op0))
3934 	{
3935 	  rtx tmp = gen_int_shift_amount
3936 	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3937 
3938 	  /* Combine would usually zero out the value when combining two
3939 	     local shifts and the range becomes larger than or equal to the
3940 	     mode's precision.  However, since we fold away one of the shifts
3941 	     here, combine won't see it, so we should immediately zero the
3942 	     result if it's out of range.  */
3943 	  if (code == LSHIFTRT
3944 	      && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
3945 	    tmp = const0_rtx;
3946 	  else
3947 	    tmp = simplify_gen_binary (code,
3948 				       inner_mode,
3949 				       XEXP (SUBREG_REG (op0), 0),
3950 				       tmp);
3951 
3952 	  return lowpart_subreg (int_mode, tmp, inner_mode);
3953 	}
3954 
3955       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3956 	{
3957 	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3958 	  if (val != INTVAL (op1))
3959 	    return simplify_gen_binary (code, mode, op0,
3960 					gen_int_shift_amount (mode, val));
3961 	}
3962       break;
3963 
3964     case ASHIFT:
3965     case SS_ASHIFT:
3966     case US_ASHIFT:
3967       if (trueop1 == CONST0_RTX (mode))
3968 	return op0;
3969       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3970 	return op0;
3971       if (mem_depth
3972 	  && code == ASHIFT
3973 	  && CONST_INT_P (trueop1)
3974 	  && is_a <scalar_int_mode> (mode, &int_mode)
3975 	  && IN_RANGE (UINTVAL (trueop1),
3976 		       1, GET_MODE_PRECISION (int_mode) - 1))
3977 	{
3978 	  auto c = (wi::one (GET_MODE_PRECISION (int_mode))
3979 		    << UINTVAL (trueop1));
3980 	  rtx new_op1 = immed_wide_int_const (c, int_mode);
3981 	  return simplify_gen_binary (MULT, int_mode, op0, new_op1);
3982 	}
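      /* E.g., inside a memory address (mem_depth > 0),
	 (ashift:SI x (const_int 2)) is rewritten as
	 (mult:SI x (const_int 4)), the form addresses are canonically
	 expected to use.  */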
3983       goto canonicalize_shift;
3984 
3985     case LSHIFTRT:
3986       if (trueop1 == CONST0_RTX (mode))
3987 	return op0;
3988       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3989 	return op0;
3990       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
3991       if (GET_CODE (op0) == CLZ
3992 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3993 	  && CONST_INT_P (trueop1)
3994 	  && STORE_FLAG_VALUE == 1
3995 	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3996 	{
3997 	  unsigned HOST_WIDE_INT zero_val = 0;
3998 
3999 	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4000 	      && zero_val == GET_MODE_PRECISION (inner_mode)
4001 	      && INTVAL (trueop1) == exact_log2 (zero_val))
4002 	    return simplify_gen_relational (EQ, mode, inner_mode,
4003 					    XEXP (op0, 0), const0_rtx);
4004 	}
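      /* E.g., for SImode on a target where CLZ_DEFINED_VALUE_AT_ZERO gives 32,
	 (lshiftrt:SI (clz:SI x) (const_int 5)) becomes
	 (eq:SI x (const_int 0)): only a zero input can set bit 5 of the
	 CLZ result.  */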
4005       goto canonicalize_shift;
4006 
4007     case SMIN:
4008       if (HWI_COMPUTABLE_MODE_P (mode)
4009 	  && mode_signbit_p (mode, trueop1)
4010 	  && ! side_effects_p (op0))
4011 	return op1;
4012       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4013 	return op0;
4014       tem = simplify_associative_operation (code, mode, op0, op1);
4015       if (tem)
4016 	return tem;
4017       break;
4018 
4019     case SMAX:
4020       if (HWI_COMPUTABLE_MODE_P (mode)
4021 	  && CONST_INT_P (trueop1)
4022 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4023 	  && ! side_effects_p (op0))
4024 	return op1;
4025       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4026 	return op0;
4027       tem = simplify_associative_operation (code, mode, op0, op1);
4028       if (tem)
4029 	return tem;
4030       break;
4031 
4032     case UMIN:
4033       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4034 	return op1;
4035       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4036 	return op0;
4037       tem = simplify_associative_operation (code, mode, op0, op1);
4038       if (tem)
4039 	return tem;
4040       break;
4041 
4042     case UMAX:
4043       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4044 	return op1;
4045       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4046 	return op0;
4047       tem = simplify_associative_operation (code, mode, op0, op1);
4048       if (tem)
4049 	return tem;
4050       break;
4051 
4052     case SS_PLUS:
4053     case US_PLUS:
4054     case SS_MINUS:
4055     case US_MINUS:
4056     case SS_MULT:
4057     case US_MULT:
4058     case SS_DIV:
4059     case US_DIV:
4060       /* ??? There are simplifications that can be done.  */
4061       return 0;
4062 
4063     case VEC_SERIES:
4064       if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4065 	return gen_vec_duplicate (mode, op0);
4066       if (valid_for_const_vector_p (mode, op0)
4067 	  && valid_for_const_vector_p (mode, op1))
4068 	return gen_const_vec_series (mode, op0, op1);
4069       return 0;
4070 
4071     case VEC_SELECT:
4072       if (!VECTOR_MODE_P (mode))
4073 	{
4074 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4075 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
4076 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
4077 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
4078 
4079 	  /* We can't reason about selections made at runtime.  */
4080 	  if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4081 	    return 0;
4082 
4083 	  if (vec_duplicate_p (trueop0, &elt0))
4084 	    return elt0;
4085 
4086 	  if (GET_CODE (trueop0) == CONST_VECTOR)
4087 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
4088 						      (trueop1, 0, 0)));
4089 
4090 	  /* Extract a scalar element from a nested VEC_SELECT expression
4091 	     (with optional nested VEC_CONCAT expression).  Some targets
4092 	     (i386) extract a scalar element from a vector using a chain of
4093 	     nested VEC_SELECT expressions.  When the input operand is a memory
4094 	     operand, this operation can be simplified to a simple scalar
4095 	     load from an offset memory address.  */
4096 	  int n_elts;
4097 	  if (GET_CODE (trueop0) == VEC_SELECT
4098 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4099 		  .is_constant (&n_elts)))
4100 	    {
4101 	      rtx op0 = XEXP (trueop0, 0);
4102 	      rtx op1 = XEXP (trueop0, 1);
4103 
4104 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
4105 	      int elem;
4106 
4107 	      rtvec vec;
4108 	      rtx tmp_op, tmp;
4109 
4110 	      gcc_assert (GET_CODE (op1) == PARALLEL);
4111 	      gcc_assert (i < n_elts);
4112 
4113 	      /* Select element, pointed by nested selector.  */
4114 	      elem = INTVAL (XVECEXP (op1, 0, i));
4115 
4116 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
4117 	      if (GET_CODE (op0) == VEC_CONCAT)
4118 		{
4119 		  rtx op00 = XEXP (op0, 0);
4120 		  rtx op01 = XEXP (op0, 1);
4121 
4122 		  machine_mode mode00, mode01;
4123 		  int n_elts00, n_elts01;
4124 
4125 		  mode00 = GET_MODE (op00);
4126 		  mode01 = GET_MODE (op01);
4127 
4128 		  /* Find out the number of elements of each operand.
4129 		     Since the concatenated result has a constant number
4130 		     of elements, the operands must too.  */
4131 		  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4132 		  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4133 
4134 		  gcc_assert (n_elts == n_elts00 + n_elts01);
4135 
4136 		  /* Select correct operand of VEC_CONCAT
4137 		     and adjust selector. */
4138 		  if (elem < n_elts01)
4139 		    tmp_op = op00;
4140 		  else
4141 		    {
4142 		      tmp_op = op01;
4143 		      elem -= n_elts00;
4144 		    }
4145 		}
4146 	      else
4147 		tmp_op = op0;
4148 
4149 	      vec = rtvec_alloc (1);
4150 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
4151 
4152 	      tmp = gen_rtx_fmt_ee (code, mode,
4153 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4154 	      return tmp;
4155 	    }
4156 	}
4157       else
4158 	{
4159 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4160 	  gcc_assert (GET_MODE_INNER (mode)
4161 		      == GET_MODE_INNER (GET_MODE (trueop0)));
4162 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
4163 
4164 	  if (vec_duplicate_p (trueop0, &elt0))
4165 	    /* It doesn't matter which elements are selected by trueop1,
4166 	       because they are all the same.  */
4167 	    return gen_vec_duplicate (mode, elt0);
4168 
4169 	  if (GET_CODE (trueop0) == CONST_VECTOR)
4170 	    {
4171 	      unsigned n_elts = XVECLEN (trueop1, 0);
4172 	      rtvec v = rtvec_alloc (n_elts);
4173 	      unsigned int i;
4174 
4175 	      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4176 	      for (i = 0; i < n_elts; i++)
4177 		{
4178 		  rtx x = XVECEXP (trueop1, 0, i);
4179 
4180 		  if (!CONST_INT_P (x))
4181 		    return 0;
4182 
4183 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4184 						       INTVAL (x));
4185 		}
4186 
4187 	      return gen_rtx_CONST_VECTOR (mode, v);
4188 	    }
4189 
4190 	  /* Recognize the identity.  */
4191 	  if (GET_MODE (trueop0) == mode)
4192 	    {
4193 	      bool maybe_ident = true;
4194 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4195 		{
4196 		  rtx j = XVECEXP (trueop1, 0, i);
4197 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
4198 		    {
4199 		      maybe_ident = false;
4200 		      break;
4201 		    }
4202 		}
4203 	      if (maybe_ident)
4204 		return trueop0;
4205 	    }
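	  /* E.g., (vec_select:V4SI (reg:V4SI x)
		     (parallel [(const_int 0) (const_int 1)
				(const_int 2) (const_int 3)]))
	     is just (reg:V4SI x).  */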
4206 
4207 	  /* If we build {a,b} then permute it, build the result directly.  */
4208 	  if (XVECLEN (trueop1, 0) == 2
4209 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4210 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4211 	      && GET_CODE (trueop0) == VEC_CONCAT
4212 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4213 	      && GET_MODE (XEXP (trueop0, 0)) == mode
4214 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4215 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
4216 	    {
4217 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4218 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4219 	      rtx subop0, subop1;
4220 
4221 	      gcc_assert (i0 < 4 && i1 < 4);
4222 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4223 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4224 
4225 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4226 	    }
4227 
4228 	  if (XVECLEN (trueop1, 0) == 2
4229 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4230 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4231 	      && GET_CODE (trueop0) == VEC_CONCAT
4232 	      && GET_MODE (trueop0) == mode)
4233 	    {
4234 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4235 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4236 	      rtx subop0, subop1;
4237 
4238 	      gcc_assert (i0 < 2 && i1 < 2);
4239 	      subop0 = XEXP (trueop0, i0);
4240 	      subop1 = XEXP (trueop0, i1);
4241 
4242 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4243 	    }
4244 
4245 	  /* If we select one half of a vec_concat, return that.  */
4246 	  int l0, l1;
4247 	  if (GET_CODE (trueop0) == VEC_CONCAT
4248 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4249 		  .is_constant (&l0))
4250 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4251 		  .is_constant (&l1))
4252 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4253 	    {
4254 	      rtx subop0 = XEXP (trueop0, 0);
4255 	      rtx subop1 = XEXP (trueop0, 1);
4256 	      machine_mode mode0 = GET_MODE (subop0);
4257 	      machine_mode mode1 = GET_MODE (subop1);
4258 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4259 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4260 		{
4261 		  bool success = true;
4262 		  for (int i = 1; i < l0; ++i)
4263 		    {
4264 		      rtx j = XVECEXP (trueop1, 0, i);
4265 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
4266 			{
4267 			  success = false;
4268 			  break;
4269 			}
4270 		    }
4271 		  if (success)
4272 		    return subop0;
4273 		}
4274 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4275 		{
4276 		  bool success = true;
4277 		  for (int i = 1; i < l1; ++i)
4278 		    {
4279 		      rtx j = XVECEXP (trueop1, 0, i);
4280 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4281 			{
4282 			  success = false;
4283 			  break;
4284 			}
4285 		    }
4286 		  if (success)
4287 		    return subop1;
4288 		}
4289 	    }
4290 
4291 	  /* Simplify vec_select of a subreg of X to just a vec_select of X
4292 	     when X has same component mode as vec_select.  */
4293 	  unsigned HOST_WIDE_INT subreg_offset = 0;
4294 	  if (GET_CODE (trueop0) == SUBREG
4295 	      && GET_MODE_INNER (mode)
4296 		 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
4297 	      && GET_MODE_NUNITS (mode).is_constant (&l1)
4298 	      && constant_multiple_p (subreg_memory_offset (trueop0),
4299 				      GET_MODE_UNIT_BITSIZE (mode),
4300 				      &subreg_offset))
4301 	    {
4302 	      poly_uint64 nunits
4303 		= GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
4304 	      bool success = true;
4305 	      for (int i = 0; i != l1; i++)
4306 		{
4307 		  rtx idx = XVECEXP (trueop1, 0, i);
4308 		  if (!CONST_INT_P (idx)
4309 		      || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
4310 		    {
4311 		      success = false;
4312 		      break;
4313 		    }
4314 		}
4315 
4316 	      if (success)
4317 		{
4318 		  rtx par = trueop1;
4319 		  if (subreg_offset)
4320 		    {
4321 		      rtvec vec = rtvec_alloc (l1);
4322 		      for (int i = 0; i < l1; i++)
4323 			RTVEC_ELT (vec, i)
4324 			  = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
4325 				     + subreg_offset);
4326 		      par = gen_rtx_PARALLEL (VOIDmode, vec);
4327 		    }
4328 		  return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
4329 		}
4330 	    }
4331 	}
4332 
4333       if (XVECLEN (trueop1, 0) == 1
4334 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4335 	  && GET_CODE (trueop0) == VEC_CONCAT)
4336 	{
4337 	  rtx vec = trueop0;
4338 	  offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4339 
4340 	  /* Try to find the element in the VEC_CONCAT.  */
4341 	  while (GET_MODE (vec) != mode
4342 		 && GET_CODE (vec) == VEC_CONCAT)
4343 	    {
4344 	      poly_int64 vec_size;
4345 
4346 	      if (CONST_INT_P (XEXP (vec, 0)))
4347 	        {
4348 	          /* vec_concat of two const_ints doesn't make sense with
4349 	             respect to modes.  */
4350 	          if (CONST_INT_P (XEXP (vec, 1)))
4351 	            return 0;
4352 
4353 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4354 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4355 	        }
4356 	      else
4357 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4358 
4359 	      if (known_lt (offset, vec_size))
4360 		vec = XEXP (vec, 0);
4361 	      else if (known_ge (offset, vec_size))
4362 		{
4363 		  offset -= vec_size;
4364 		  vec = XEXP (vec, 1);
4365 		}
4366 	      else
4367 		break;
4368 	      vec = avoid_constant_pool_reference (vec);
4369 	    }
4370 
4371 	  if (GET_MODE (vec) == mode)
4372 	    return vec;
4373 	}
4374 
4375       /* If we select elements in a vec_merge that all come from the same
4376 	 operand, select from that operand directly.  */
4377       if (GET_CODE (op0) == VEC_MERGE)
4378 	{
4379 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4380 	  if (CONST_INT_P (trueop02))
4381 	    {
4382 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4383 	      bool all_operand0 = true;
4384 	      bool all_operand1 = true;
4385 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4386 		{
4387 		  rtx j = XVECEXP (trueop1, 0, i);
4388 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4389 		    all_operand1 = false;
4390 		  else
4391 		    all_operand0 = false;
4392 		}
4393 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4394 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4395 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4396 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4397 	    }
4398 	}
4399 
4400       /* If we have two nested selects that are inverses of each
4401 	 other, replace them with the source operand.  */
4402       if (GET_CODE (trueop0) == VEC_SELECT
4403 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
4404 	{
4405 	  rtx op0_subop1 = XEXP (trueop0, 1);
4406 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4407 	  gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4408 
4409 	  /* Apply the outer ordering vector to the inner one.  (The inner
4410 	     ordering vector is expressly permitted to be of a different
4411 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
4412 	     then the two VEC_SELECTs cancel.  */
4413 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4414 	    {
4415 	      rtx x = XVECEXP (trueop1, 0, i);
4416 	      if (!CONST_INT_P (x))
4417 		return 0;
4418 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4419 	      if (!CONST_INT_P (y) || i != INTVAL (y))
4420 		return 0;
4421 	    }
4422 	  return XEXP (trueop0, 0);
4423 	}
4424 
4425       return 0;
4426     case VEC_CONCAT:
4427       {
4428 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4429 				      ? GET_MODE (trueop0)
4430 				      : GET_MODE_INNER (mode));
4431 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4432 				      ? GET_MODE (trueop1)
4433 				      : GET_MODE_INNER (mode));
4434 
4435 	gcc_assert (VECTOR_MODE_P (mode));
4436 	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4437 			      + GET_MODE_SIZE (op1_mode),
4438 			      GET_MODE_SIZE (mode)));
4439 
4440 	if (VECTOR_MODE_P (op0_mode))
4441 	  gcc_assert (GET_MODE_INNER (mode)
4442 		      == GET_MODE_INNER (op0_mode));
4443 	else
4444 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4445 
4446 	if (VECTOR_MODE_P (op1_mode))
4447 	  gcc_assert (GET_MODE_INNER (mode)
4448 		      == GET_MODE_INNER (op1_mode));
4449 	else
4450 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4451 
4452 	unsigned int n_elts, in_n_elts;
4453 	if ((GET_CODE (trueop0) == CONST_VECTOR
4454 	     || CONST_SCALAR_INT_P (trueop0)
4455 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4456 	    && (GET_CODE (trueop1) == CONST_VECTOR
4457 		|| CONST_SCALAR_INT_P (trueop1)
4458 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
4459 	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4460 	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4461 	  {
4462 	    rtvec v = rtvec_alloc (n_elts);
4463 	    unsigned int i;
4464 	    for (i = 0; i < n_elts; i++)
4465 	      {
4466 		if (i < in_n_elts)
4467 		  {
4468 		    if (!VECTOR_MODE_P (op0_mode))
4469 		      RTVEC_ELT (v, i) = trueop0;
4470 		    else
4471 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4472 		  }
4473 		else
4474 		  {
4475 		    if (!VECTOR_MODE_P (op1_mode))
4476 		      RTVEC_ELT (v, i) = trueop1;
4477 		    else
4478 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4479 							   i - in_n_elts);
4480 		  }
4481 	      }
4482 
4483 	    return gen_rtx_CONST_VECTOR (mode, v);
4484 	  }
4485 
4486 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
4487 	   Restrict the transformation to avoid generating a VEC_SELECT with a
4488 	   mode unrelated to its operand.  */
4489 	if (GET_CODE (trueop0) == VEC_SELECT
4490 	    && GET_CODE (trueop1) == VEC_SELECT
4491 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4492 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
4493 	  {
4494 	    rtx par0 = XEXP (trueop0, 1);
4495 	    rtx par1 = XEXP (trueop1, 1);
4496 	    int len0 = XVECLEN (par0, 0);
4497 	    int len1 = XVECLEN (par1, 0);
4498 	    rtvec vec = rtvec_alloc (len0 + len1);
4499 	    for (int i = 0; i < len0; i++)
4500 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4501 	    for (int i = 0; i < len1; i++)
4502 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4503 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4504 					gen_rtx_PARALLEL (VOIDmode, vec));
4505 	  }
4506       }
4507       return 0;
4508 
4509     default:
4510       gcc_unreachable ();
4511     }
4512 
4513   if (mode == GET_MODE (op0)
4514       && mode == GET_MODE (op1)
4515       && vec_duplicate_p (op0, &elt0)
4516       && vec_duplicate_p (op1, &elt1))
4517     {
4518       /* Try applying the operator to ELT and see if that simplifies.
4519 	 We can duplicate the result if so.
4520 
4521 	 The reason we don't use simplify_gen_binary is that it isn't
4522 	 necessarily a win to convert things like:
4523 
4524 	   (plus:V (vec_duplicate:V (reg:S R1))
4525 		   (vec_duplicate:V (reg:S R2)))
4526 
4527 	 to:
4528 
4529 	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4530 
4531 	 The first might be done entirely in vector registers while the
4532 	 second might need a move between register files.  */
4533       tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4534 				       elt0, elt1);
4535       if (tem)
4536 	return gen_vec_duplicate (mode, tem);
4537     }
4538 
4539   return 0;
4540 }
4541 
4542 /* Return true if binary operation OP distributes over addition in operand
4543    OPNO, with the other operand being held constant.  OPNO counts from 1.  */
4544 
4545 static bool
4546 distributes_over_addition_p (rtx_code op, int opno)
4547 {
4548   switch (op)
4549     {
4550     case PLUS:
4551     case MINUS:
4552     case MULT:
4553       return true;
4554 
4555     case ASHIFT:
4556       return opno == 1;
4557 
4558     default:
4559       return false;
4560     }
4561 }
4562 
4563 rtx
4564 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4565 				 rtx op0, rtx op1)
4566 {
4567   if (VECTOR_MODE_P (mode)
4568       && code != VEC_CONCAT
4569       && GET_CODE (op0) == CONST_VECTOR
4570       && GET_CODE (op1) == CONST_VECTOR)
4571     {
4572       bool step_ok_p;
4573       if (CONST_VECTOR_STEPPED_P (op0)
4574 	  && CONST_VECTOR_STEPPED_P (op1))
4575 	/* We can operate directly on the encoding if:
4576 
4577 	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4578 	    implies
4579 	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4580 
4581 	   Addition and subtraction are the supported operators
4582 	   for which this is true.  */
4583 	step_ok_p = (code == PLUS || code == MINUS);
4584       else if (CONST_VECTOR_STEPPED_P (op0))
4585 	/* We can operate directly on stepped encodings if:
4586 
4587 	     a3 - a2 == a2 - a1
4588 	   implies:
4589 	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4590 
4591 	   which is true if (x -> x op c) distributes over addition.  */
4592 	step_ok_p = distributes_over_addition_p (code, 1);
4593       else
4594 	/* Similarly in reverse.  */
4595 	step_ok_p = distributes_over_addition_p (code, 2);
4596       rtx_vector_builder builder;
4597       if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4598 	return 0;
4599 
4600       unsigned int count = builder.encoded_nelts ();
4601       for (unsigned int i = 0; i < count; i++)
4602 	{
4603 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4604 					     CONST_VECTOR_ELT (op0, i),
4605 					     CONST_VECTOR_ELT (op1, i));
4606 	  if (!x || !valid_for_const_vector_p (mode, x))
4607 	    return 0;
4608 	  builder.quick_push (x);
4609 	}
4610       return builder.build ();
4611     }
4612 
4613   if (VECTOR_MODE_P (mode)
4614       && code == VEC_CONCAT
4615       && (CONST_SCALAR_INT_P (op0)
4616 	  || CONST_FIXED_P (op0)
4617 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
4618       && (CONST_SCALAR_INT_P (op1)
4619 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
4620 	  || CONST_FIXED_P (op1)))
4621     {
4622       /* Both inputs have a constant number of elements, so the result
4623 	 must too.  */
4624       unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4625       rtvec v = rtvec_alloc (n_elts);
4626 
4627       gcc_assert (n_elts >= 2);
4628       if (n_elts == 2)
4629 	{
4630 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4631 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4632 
4633 	  RTVEC_ELT (v, 0) = op0;
4634 	  RTVEC_ELT (v, 1) = op1;
4635 	}
4636       else
4637 	{
4638 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4639 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4640 	  unsigned i;
4641 
4642 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4643 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4644 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4645 
4646 	  for (i = 0; i < op0_n_elts; ++i)
4647 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4648 	  for (i = 0; i < op1_n_elts; ++i)
4649 	    RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4650 	}
4651 
4652       return gen_rtx_CONST_VECTOR (mode, v);
4653     }
4654 
4655   if (SCALAR_FLOAT_MODE_P (mode)
4656       && CONST_DOUBLE_AS_FLOAT_P (op0)
4657       && CONST_DOUBLE_AS_FLOAT_P (op1)
4658       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4659     {
4660       if (code == AND
4661 	  || code == IOR
4662 	  || code == XOR)
4663 	{
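	  /* Fold the bitwise operation on the target bit image of each
	     value, then convert the result back to a REAL_VALUE_TYPE.  */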
4664 	  long tmp0[4];
4665 	  long tmp1[4];
4666 	  REAL_VALUE_TYPE r;
4667 	  int i;
4668 
4669 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4670 			  GET_MODE (op0));
4671 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4672 			  GET_MODE (op1));
4673 	  for (i = 0; i < 4; i++)
4674 	    {
4675 	      switch (code)
4676 	      {
4677 	      case AND:
4678 		tmp0[i] &= tmp1[i];
4679 		break;
4680 	      case IOR:
4681 		tmp0[i] |= tmp1[i];
4682 		break;
4683 	      case XOR:
4684 		tmp0[i] ^= tmp1[i];
4685 		break;
4686 	      default:
4687 		gcc_unreachable ();
4688 	      }
4689 	    }
4690 	   real_from_target (&r, tmp0, mode);
4691 	   return const_double_from_real_value (r, mode);
4692 	}
4693       else
4694 	{
4695 	  REAL_VALUE_TYPE f0, f1, value, result;
4696 	  const REAL_VALUE_TYPE *opr0, *opr1;
4697 	  bool inexact;
4698 
4699 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4700 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4701 
4702 	  if (HONOR_SNANS (mode)
4703 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4704 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4705 	    return 0;
4706 
4707 	  real_convert (&f0, mode, opr0);
4708 	  real_convert (&f1, mode, opr1);
4709 
4710 	  if (code == DIV
4711 	      && real_equal (&f1, &dconst0)
4712 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4713 	    return 0;
4714 
4715 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4716 	      && flag_trapping_math
4717 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4718 	    {
4719 	      int s0 = REAL_VALUE_NEGATIVE (f0);
4720 	      int s1 = REAL_VALUE_NEGATIVE (f1);
4721 
4722 	      switch (code)
4723 		{
4724 		case PLUS:
4725 		  /* Inf + -Inf = NaN plus exception.  */
4726 		  if (s0 != s1)
4727 		    return 0;
4728 		  break;
4729 		case MINUS:
4730 		  /* Inf - Inf = NaN plus exception.  */
4731 		  if (s0 == s1)
4732 		    return 0;
4733 		  break;
4734 		case DIV:
4735 		  /* Inf / Inf = NaN plus exception.  */
4736 		  return 0;
4737 		default:
4738 		  break;
4739 		}
4740 	    }
4741 
4742 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4743 	      && flag_trapping_math
4744 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4745 		  || (REAL_VALUE_ISINF (f1)
4746 		      && real_equal (&f0, &dconst0))))
4747 	    /* Inf * 0 = NaN plus exception.  */
4748 	    return 0;
4749 
4750 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4751 				     &f0, &f1);
4752 	  real_convert (&result, mode, &value);
4753 
4754 	  /* Don't constant fold this floating point operation if
4755 	     the result has overflowed and flag_trapping_math.  */
4756 
4757 	  if (flag_trapping_math
4758 	      && MODE_HAS_INFINITIES (mode)
4759 	      && REAL_VALUE_ISINF (result)
4760 	      && !REAL_VALUE_ISINF (f0)
4761 	      && !REAL_VALUE_ISINF (f1))
4762 	    /* Overflow plus exception.  */
4763 	    return 0;
4764 
4765 	  /* Don't constant fold this floating point operation if the
4766 	     result may depend upon the run-time rounding mode and
4767 	     flag_rounding_math is set, or if GCC's software emulation
4768 	     is unable to accurately represent the result.  */
4769 
4770 	  if ((flag_rounding_math
4771 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4772 	      && (inexact || !real_identical (&result, &value)))
4773 	    return NULL_RTX;
4774 
4775 	  return const_double_from_real_value (result, mode);
4776 	}
4777     }
4778 
4779   /* We can fold some multi-word operations.  */
4780   scalar_int_mode int_mode;
4781   if (is_a <scalar_int_mode> (mode, &int_mode)
4782       && CONST_SCALAR_INT_P (op0)
4783       && CONST_SCALAR_INT_P (op1)
4784       && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4785     {
4786       wide_int result;
4787       wi::overflow_type overflow;
4788       rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4789       rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4790 
4791 #if TARGET_SUPPORTS_WIDE_INT == 0
4792       /* This assert keeps the simplification from producing a result
4793 	 that cannot be represented in a CONST_DOUBLE but a lot of
4794 	 upstream callers expect that this function never fails to
4795 	 simplify something, so if you added this to the test
4796 	 above, the code would die later anyway.  If this assert
4797 	 happens, you just need to make the port support wide int.  */
4798       gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4799 #endif
4800       switch (code)
4801 	{
4802 	case MINUS:
4803 	  result = wi::sub (pop0, pop1);
4804 	  break;
4805 
4806 	case PLUS:
4807 	  result = wi::add (pop0, pop1);
4808 	  break;
4809 
4810 	case MULT:
4811 	  result = wi::mul (pop0, pop1);
4812 	  break;
4813 
4814 	case DIV:
4815 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4816 	  if (overflow)
4817 	    return NULL_RTX;
4818 	  break;
4819 
4820 	case MOD:
4821 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4822 	  if (overflow)
4823 	    return NULL_RTX;
4824 	  break;
4825 
4826 	case UDIV:
4827 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4828 	  if (overflow)
4829 	    return NULL_RTX;
4830 	  break;
4831 
4832 	case UMOD:
4833 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4834 	  if (overflow)
4835 	    return NULL_RTX;
4836 	  break;
4837 
4838 	case AND:
4839 	  result = wi::bit_and (pop0, pop1);
4840 	  break;
4841 
4842 	case IOR:
4843 	  result = wi::bit_or (pop0, pop1);
4844 	  break;
4845 
4846 	case XOR:
4847 	  result = wi::bit_xor (pop0, pop1);
4848 	  break;
4849 
4850 	case SMIN:
4851 	  result = wi::smin (pop0, pop1);
4852 	  break;
4853 
4854 	case SMAX:
4855 	  result = wi::smax (pop0, pop1);
4856 	  break;
4857 
4858 	case UMIN:
4859 	  result = wi::umin (pop0, pop1);
4860 	  break;
4861 
4862 	case UMAX:
4863 	  result = wi::umax (pop0, pop1);
4864 	  break;
4865 
4866 	case LSHIFTRT:
4867 	case ASHIFTRT:
4868 	case ASHIFT:
4869 	  {
4870 	    wide_int wop1 = pop1;
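	    /* If the target truncates shift counts, reduce the count modulo
	       the precision; otherwise refuse to fold out-of-range shifts.  */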
4871 	    if (SHIFT_COUNT_TRUNCATED)
4872 	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4873 	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4874 	      return NULL_RTX;
4875 
4876 	    switch (code)
4877 	      {
4878 	      case LSHIFTRT:
4879 		result = wi::lrshift (pop0, wop1);
4880 		break;
4881 
4882 	      case ASHIFTRT:
4883 		result = wi::arshift (pop0, wop1);
4884 		break;
4885 
4886 	      case ASHIFT:
4887 		result = wi::lshift (pop0, wop1);
4888 		break;
4889 
4890 	      default:
4891 		gcc_unreachable ();
4892 	      }
4893 	    break;
4894 	  }
4895 	case ROTATE:
4896 	case ROTATERT:
4897 	  {
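	    /* Don't fold negative rotate counts.  */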
4898 	    if (wi::neg_p (pop1))
4899 	      return NULL_RTX;
4900 
4901 	    switch (code)
4902 	      {
4903 	      case ROTATE:
4904 		result = wi::lrotate (pop0, pop1);
4905 		break;
4906 
4907 	      case ROTATERT:
4908 		result = wi::rrotate (pop0, pop1);
4909 		break;
4910 
4911 	      default:
4912 		gcc_unreachable ();
4913 	      }
4914 	    break;
4915 	  }
4916 	default:
4917 	  return NULL_RTX;
4918 	}
4919       return immed_wide_int_const (result, int_mode);
4920     }
4921 
4922   /* Handle polynomial integers.  */
4923   if (NUM_POLY_INT_COEFFS > 1
4924       && is_a <scalar_int_mode> (mode, &int_mode)
4925       && poly_int_rtx_p (op0)
4926       && poly_int_rtx_p (op1))
4927     {
4928       poly_wide_int result;
4929       switch (code)
4930 	{
4931 	case PLUS:
4932 	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4933 	  break;
4934 
4935 	case MINUS:
4936 	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4937 	  break;
4938 
4939 	case MULT:
4940 	  if (CONST_SCALAR_INT_P (op1))
4941 	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4942 	  else
4943 	    return NULL_RTX;
4944 	  break;
4945 
4946 	case ASHIFT:
4947 	  if (CONST_SCALAR_INT_P (op1))
4948 	    {
4949 	      wide_int shift = rtx_mode_t (op1, mode);
4950 	      if (SHIFT_COUNT_TRUNCATED)
4951 		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4952 	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4953 		return NULL_RTX;
4954 	      result = wi::to_poly_wide (op0, mode) << shift;
4955 	    }
4956 	  else
4957 	    return NULL_RTX;
4958 	  break;
4959 
4960 	case IOR:
4961 	  if (!CONST_SCALAR_INT_P (op1)
4962 	      || !can_ior_p (wi::to_poly_wide (op0, mode),
4963 			     rtx_mode_t (op1, mode), &result))
4964 	    return NULL_RTX;
4965 	  break;
4966 
4967 	default:
4968 	  return NULL_RTX;
4969 	}
4970       return immed_wide_int_const (result, int_mode);
4971     }
4972 
4973   return NULL_RTX;
4974 }
4975 
4976 
4977 
4978 /* Return a positive integer if X should sort after Y.  The value
4979    returned is 1 if and only if X and Y are both regs.  */
4980 
4981 static int
4982 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4983 {
4984   int result;
4985 
4986   result = (commutative_operand_precedence (y)
4987 	    - commutative_operand_precedence (x));
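  /* Doubling the precedence difference keeps a nonzero result away
     from 1, which is reserved below for ordering two REGs.  */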
4988   if (result)
4989     return result + result;
4990 
4991   /* Group together equal REGs to do more simplification.  */
4992   if (REG_P (x) && REG_P (y))
4993     return REGNO (x) > REGNO (y);
4994 
4995   return 0;
4996 }
4997 
4998 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4999    operands may be another PLUS or MINUS.
5000 
5001    Rather than testing for specific cases, we do this by a brute-force method
5002    and do all possible simplifications until no more changes occur.  Then
5003    we rebuild the operation.
5004 
5005    May return NULL_RTX when no changes were made.  */
5006 
5007 rtx
5008 simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
5009 				       rtx op0, rtx op1)
5010 {
5011   struct simplify_plus_minus_op_data
5012   {
5013     rtx op;
5014     short neg;
5015   } ops[16];
5016   rtx result, tem;
5017   int n_ops = 2;
5018   int changed, n_constants, canonicalized = 0;
5019   int i, j;
5020 
5021   memset (ops, 0, sizeof ops);
5022 
5023   /* Set up the two operands and then expand them until nothing has been
5024      changed.  If we run out of room in our array, give up; this should
5025      almost never happen.  */
5026 
5027   ops[0].op = op0;
5028   ops[0].neg = 0;
5029   ops[1].op = op1;
5030   ops[1].neg = (code == MINUS);
5031 
5032   do
5033     {
5034       changed = 0;
5035       n_constants = 0;
5036 
5037       for (i = 0; i < n_ops; i++)
5038 	{
5039 	  rtx this_op = ops[i].op;
5040 	  int this_neg = ops[i].neg;
5041 	  enum rtx_code this_code = GET_CODE (this_op);
5042 
5043 	  switch (this_code)
5044 	    {
5045 	    case PLUS:
5046 	    case MINUS:
5047 	      if (n_ops == ARRAY_SIZE (ops))
5048 		return NULL_RTX;
5049 
5050 	      ops[n_ops].op = XEXP (this_op, 1);
5051 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
5052 	      n_ops++;
5053 
5054 	      ops[i].op = XEXP (this_op, 0);
5055 	      changed = 1;
5056 	      /* If this operand was negated then we will potentially
5057 		 canonicalize the expression.  Similarly if we don't
5058 		 place the operands adjacent we're re-ordering the
5059 		 expression and thus might be performing a
5060 		 canonicalization.  Ignore register re-ordering.
5061 		 ??? It might be better to shuffle the ops array here,
5062 		 but then (plus (plus (A, B), plus (C, D))) wouldn't
5063 		 be seen as non-canonical.  */
5064 	      if (this_neg
5065 		  || (i != n_ops - 2
5066 		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
5067 		canonicalized = 1;
5068 	      break;
5069 
5070 	    case NEG:
5071 	      ops[i].op = XEXP (this_op, 0);
5072 	      ops[i].neg = ! this_neg;
5073 	      changed = 1;
5074 	      canonicalized = 1;
5075 	      break;
5076 
5077 	    case CONST:
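	      /* Split (const (plus X Y)) into its two constant operands.  */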
5078 	      if (n_ops != ARRAY_SIZE (ops)
5079 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
5080 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
5081 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
5082 		{
5083 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
5084 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
5085 		  ops[n_ops].neg = this_neg;
5086 		  n_ops++;
5087 		  changed = 1;
5088 		  canonicalized = 1;
5089 		}
5090 	      break;
5091 
5092 	    case NOT:
5093 	      /* ~a -> (-a - 1) */
5094 	      if (n_ops != ARRAY_SIZE (ops))
5095 		{
5096 		  ops[n_ops].op = CONSTM1_RTX (mode);
5097 		  ops[n_ops++].neg = this_neg;
5098 		  ops[i].op = XEXP (this_op, 0);
5099 		  ops[i].neg = !this_neg;
5100 		  changed = 1;
5101 		  canonicalized = 1;
5102 		}
5103 	      break;
5104 
5105 	    CASE_CONST_SCALAR_INT:
5106 	    case CONST_POLY_INT:
5107 	      n_constants++;
5108 	      if (this_neg)
5109 		{
5110 		  ops[i].op = neg_poly_int_rtx (mode, this_op);
5111 		  ops[i].neg = 0;
5112 		  changed = 1;
5113 		  canonicalized = 1;
5114 		}
5115 	      break;
5116 
5117 	    default:
5118 	      break;
5119 	    }
5120 	}
5121     }
5122   while (changed);
5123 
5124   if (n_constants > 1)
5125     canonicalized = 1;
5126 
5127   gcc_assert (n_ops >= 2);
5128 
5129   /* If we only have two operands, we can avoid the loops.  */
5130   if (n_ops == 2)
5131     {
5132       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
5133       rtx lhs, rhs;
5134 
5135       /* Get the two operands.  Be careful with the order, especially for
5136 	 the cases where code == MINUS.  */
5137       if (ops[0].neg && ops[1].neg)
5138 	{
5139 	  lhs = gen_rtx_NEG (mode, ops[0].op);
5140 	  rhs = ops[1].op;
5141 	}
5142       else if (ops[0].neg)
5143 	{
5144 	  lhs = ops[1].op;
5145 	  rhs = ops[0].op;
5146 	}
5147       else
5148 	{
5149 	  lhs = ops[0].op;
5150 	  rhs = ops[1].op;
5151 	}
5152 
5153       return simplify_const_binary_operation (code, mode, lhs, rhs);
5154     }
5155 
5156   /* Now simplify each pair of operands until nothing changes.  */
5157   while (1)
5158     {
5159       /* Insertion sort is good enough for a small array.  */
5160       for (i = 1; i < n_ops; i++)
5161 	{
5162 	  struct simplify_plus_minus_op_data save;
5163 	  int cmp;
5164 
5165 	  j = i - 1;
5166 	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5167 	  if (cmp <= 0)
5168 	    continue;
5169 	  /* Just swapping registers doesn't count as canonicalization.  */
5170 	  if (cmp != 1)
5171 	    canonicalized = 1;
5172 
5173 	  save = ops[i];
5174 	  do
5175 	    ops[j + 1] = ops[j];
5176 	  while (j--
5177 		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5178 	  ops[j + 1] = save;
5179 	}
5180 
5181       changed = 0;
5182       for (i = n_ops - 1; i > 0; i--)
5183 	for (j = i - 1; j >= 0; j--)
5184 	  {
5185 	    rtx lhs = ops[j].op, rhs = ops[i].op;
5186 	    int lneg = ops[j].neg, rneg = ops[i].neg;
5187 
5188 	    if (lhs != 0 && rhs != 0)
5189 	      {
5190 		enum rtx_code ncode = PLUS;
5191 
5192 		if (lneg != rneg)
5193 		  {
5194 		    ncode = MINUS;
5195 		    if (lneg)
5196 		      std::swap (lhs, rhs);
5197 		  }
5198 		else if (swap_commutative_operands_p (lhs, rhs))
5199 		  std::swap (lhs, rhs);
5200 
5201 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5202 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5203 		  {
5204 		    rtx tem_lhs, tem_rhs;
5205 
5206 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5207 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5208 		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
5209 						     tem_rhs);
5210 
5211 		    if (tem && !CONSTANT_P (tem))
5212 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
5213 		  }
5214 		else
5215 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5216 
5217 		if (tem)
5218 		  {
5219 		    /* Reject "simplifications" that just wrap the two
5220 		       arguments in a CONST.  Failure to do so can result
5221 		       in infinite recursion with simplify_binary_operation
5222 		       when it calls us to simplify CONST operations.
5223 		       Also, if we find such a simplification, don't try
5224 		       any more combinations with this rhs:  We must have
5225 		       something like symbol+offset, ie. one of the
5226 		       trivial CONST expressions we handle later.  */
5227 		    if (GET_CODE (tem) == CONST
5228 			&& GET_CODE (XEXP (tem, 0)) == ncode
5229 			&& XEXP (XEXP (tem, 0), 0) == lhs
5230 			&& XEXP (XEXP (tem, 0), 1) == rhs)
5231 		      break;
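		    /* The combined term stays negated only if both inputs
		       were negated; with mixed signs the swap above already
		       folded the negation into the MINUS.  */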
5232 		    lneg &= rneg;
5233 		    if (GET_CODE (tem) == NEG)
5234 		      tem = XEXP (tem, 0), lneg = !lneg;
5235 		    if (poly_int_rtx_p (tem) && lneg)
5236 		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5237 
5238 		    ops[i].op = tem;
5239 		    ops[i].neg = lneg;
5240 		    ops[j].op = NULL_RTX;
5241 		    changed = 1;
5242 		    canonicalized = 1;
5243 		  }
5244 	      }
5245 	  }
5246 
5247       if (!changed)
5248 	break;
5249 
5250       /* Pack all the operands to the lower-numbered entries.  */
5251       for (i = 0, j = 0; j < n_ops; j++)
5252 	if (ops[j].op)
5253 	  {
5254 	    ops[i] = ops[j];
5255 	    i++;
5256 	  }
5257       n_ops = i;
5258     }
5259 
5260   /* If nothing changed, check that rematerialization of rtl instructions
5261      is still required.  */
5262   if (!canonicalized)
5263     {
5264       /* Perform rematerialization only if all operands are registers and
5265 	 all operations are PLUS.  */
5266       /* ??? Also disallow (non-global, non-frame) fixed registers to work
5267 	 around rs6000 and how it uses the CA register.  See PR67145.  */
5268       for (i = 0; i < n_ops; i++)
5269 	if (ops[i].neg
5270 	    || !REG_P (ops[i].op)
5271 	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5272 		&& fixed_regs[REGNO (ops[i].op)]
5273 		&& !global_regs[REGNO (ops[i].op)]
5274 		&& ops[i].op != frame_pointer_rtx
5275 		&& ops[i].op != arg_pointer_rtx
5276 		&& ops[i].op != stack_pointer_rtx))
5277 	  return NULL_RTX;
5278       goto gen_result;
5279     }
5280 
5281   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
5282   if (n_ops == 2
5283       && CONST_INT_P (ops[1].op)
5284       && CONSTANT_P (ops[0].op)
5285       && ops[0].neg)
5286     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5287 
5288   /* We suppressed creation of trivial CONST expressions in the
5289      combination loop to avoid recursion.  Create one manually now.
5290      The combination loop should have ensured that there is exactly
5291      one CONST_INT, and the sort will have ensured that it is last
5292      in the array and that any other constant will be next-to-last.  */
5293 
5294   if (n_ops > 1
5295       && poly_int_rtx_p (ops[n_ops - 1].op)
5296       && CONSTANT_P (ops[n_ops - 2].op))
5297     {
5298       rtx value = ops[n_ops - 1].op;
5299       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5300 	value = neg_poly_int_rtx (mode, value);
5301       if (CONST_INT_P (value))
5302 	{
5303 	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5304 					     INTVAL (value));
5305 	  n_ops--;
5306 	}
5307     }
5308 
5309   /* Put a non-negated operand first, if possible.  */
5310 
5311   for (i = 0; i < n_ops && ops[i].neg; i++)
5312     continue;
5313   if (i == n_ops)
5314     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5315   else if (i != 0)
5316     {
5317       tem = ops[0].op;
5318       ops[0] = ops[i];
5319       ops[i].op = tem;
5320       ops[i].neg = 1;
5321     }
5322 
5323   /* Now make the result by performing the requested operations.  */
5324  gen_result:
5325   result = ops[0].op;
5326   for (i = 1; i < n_ops; i++)
5327     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5328 			     mode, result, ops[i].op);
5329 
5330   return result;
5331 }
5332 
5333 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
5334 static bool
5335 plus_minus_operand_p (const_rtx x)
5336 {
5337   return GET_CODE (x) == PLUS
5338          || GET_CODE (x) == MINUS
5339 	 || (GET_CODE (x) == CONST
5340 	     && GET_CODE (XEXP (x, 0)) == PLUS
5341 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5342 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5343 }
5344 
5345 /* Like simplify_binary_operation except used for relational operators.
5346    MODE is the mode of the result.  If MODE is VOIDmode, the operands must
5347    not both be VOIDmode.
5348 
5349    CMP_MODE specifies the mode in which the comparison is done, so it is
5350    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
5351    the operands or, if both are VOIDmode, the operands are compared in
5352    "infinite precision".  */
5353 rtx
5354 simplify_context::simplify_relational_operation (rtx_code code,
5355 						 machine_mode mode,
5356 						 machine_mode cmp_mode,
5357 						 rtx op0, rtx op1)
5358 {
5359   rtx tem, trueop0, trueop1;
5360 
5361   if (cmp_mode == VOIDmode)
5362     cmp_mode = GET_MODE (op0);
5363   if (cmp_mode == VOIDmode)
5364     cmp_mode = GET_MODE (op1);
5365 
5366   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5367   if (tem)
5368     return relational_result (mode, cmp_mode, tem);
5369 
5370   /* For the following tests, ensure const0_rtx is op1.  */
5371   if (swap_commutative_operands_p (op0, op1)
5372       || (op0 == const0_rtx && op1 != const0_rtx))
5373     std::swap (op0, op1), code = swap_condition (code);
5374 
5375   /* If op0 is a compare, extract the comparison arguments from it.  */
5376   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5377     return simplify_gen_relational (code, mode, VOIDmode,
5378 				    XEXP (op0, 0), XEXP (op0, 1));
5379 
5380   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
5381       || CC0_P (op0))
5382     return NULL_RTX;
5383 
5384   trueop0 = avoid_constant_pool_reference (op0);
5385   trueop1 = avoid_constant_pool_reference (op1);
5386   return simplify_relational_operation_1 (code, mode, cmp_mode,
5387 		  			  trueop0, trueop1);
5388 }
5389 
5390 /* This part of simplify_relational_operation is only used when CMP_MODE
5391    is not in class MODE_CC (i.e. it is a real comparison).
5392 
5393    MODE is the mode of the result, while CMP_MODE specifies the mode
5394    in which the comparison is done, so it is the mode of the operands.  */
5395 
5396 rtx
5397 simplify_context::simplify_relational_operation_1 (rtx_code code,
5398 						   machine_mode mode,
5399 						   machine_mode cmp_mode,
5400 						   rtx op0, rtx op1)
5401 {
5402   enum rtx_code op0code = GET_CODE (op0);
5403 
5404   if (op1 == const0_rtx && COMPARISON_P (op0))
5405     {
5406       /* If op0 is a comparison, extract the comparison arguments
5407          from it.  */
5408       if (code == NE)
5409 	{
5410 	  if (GET_MODE (op0) == mode)
5411 	    return simplify_rtx (op0);
5412 	  else
5413 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5414 					    XEXP (op0, 0), XEXP (op0, 1));
5415 	}
5416       else if (code == EQ)
5417 	{
5418 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5419 	  if (new_code != UNKNOWN)
5420 	    return simplify_gen_relational (new_code, mode, VOIDmode,
5421 					    XEXP (op0, 0), XEXP (op0, 1));
5422 	}
5423     }
5424 
5425   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5426      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
5427   if ((code == LTU || code == GEU)
5428       && GET_CODE (op0) == PLUS
5429       && CONST_INT_P (XEXP (op0, 1))
5430       && (rtx_equal_p (op1, XEXP (op0, 0))
5431 	  || rtx_equal_p (op1, XEXP (op0, 1)))
5432       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5433       && XEXP (op0, 1) != const0_rtx)
5434     {
5435       rtx new_cmp
5436 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5437       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5438 				      cmp_mode, XEXP (op0, 0), new_cmp);
5439     }
5440 
5441   /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5442      transformed into (LTU a -C).  */
5443   if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5444       && CONST_INT_P (XEXP (op0, 1))
5445       && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5446       && XEXP (op0, 1) != const0_rtx)
5447     {
5448       rtx new_cmp
5449 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5450       return simplify_gen_relational (LTU, mode, cmp_mode,
5451 				       XEXP (op0, 0), new_cmp);
5452     }
5453 
5454   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
5455   if ((code == LTU || code == GEU)
5456       && GET_CODE (op0) == PLUS
5457       && rtx_equal_p (op1, XEXP (op0, 1))
5458       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
5459       && !rtx_equal_p (op1, XEXP (op0, 0)))
5460     return simplify_gen_relational (code, mode, cmp_mode, op0,
5461 				    copy_rtx (XEXP (op0, 0)));
5462 
5463   if (op1 == const0_rtx)
5464     {
5465       /* Canonicalize (GTU x 0) as (NE x 0).  */
5466       if (code == GTU)
5467         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5468       /* Canonicalize (LEU x 0) as (EQ x 0).  */
5469       if (code == LEU)
5470         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5471     }
5472   else if (op1 == const1_rtx)
5473     {
5474       switch (code)
5475         {
5476         case GE:
5477 	  /* Canonicalize (GE x 1) as (GT x 0).  */
5478 	  return simplify_gen_relational (GT, mode, cmp_mode,
5479 					  op0, const0_rtx);
5480 	case GEU:
5481 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
5482 	  return simplify_gen_relational (NE, mode, cmp_mode,
5483 					  op0, const0_rtx);
5484 	case LT:
5485 	  /* Canonicalize (LT x 1) as (LE x 0).  */
5486 	  return simplify_gen_relational (LE, mode, cmp_mode,
5487 					  op0, const0_rtx);
5488 	case LTU:
5489 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
5490 	  return simplify_gen_relational (EQ, mode, cmp_mode,
5491 					  op0, const0_rtx);
5492 	default:
5493 	  break;
5494 	}
5495     }
5496   else if (op1 == constm1_rtx)
5497     {
5498       /* Canonicalize (LE x -1) as (LT x 0).  */
5499       if (code == LE)
5500         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5501       /* Canonicalize (GT x -1) as (GE x 0).  */
5502       if (code == GT)
5503         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5504     }
5505 
5506   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
5507   if ((code == EQ || code == NE)
5508       && (op0code == PLUS || op0code == MINUS)
5509       && CONSTANT_P (op1)
5510       && CONSTANT_P (XEXP (op0, 1))
5511       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5512     {
5513       rtx x = XEXP (op0, 0);
5514       rtx c = XEXP (op0, 1);
5515       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5516       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5517 
5518       /* Detect an infinite recursion, where we oscillate in this
5519 	 simplification case between:
5520 	    A + B == C  <--->  C - B == A,
5521 	 where A, B, and C are all non-simplifiable constant expressions,
5522 	 usually SYMBOL_REFs.  */
5523       if (GET_CODE (tem) == invcode
5524 	  && CONSTANT_P (x)
5525 	  && rtx_equal_p (c, XEXP (tem, 1)))
5526 	return NULL_RTX;
5527 
5528       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5529     }
5530 
5531   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5532      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
5533   scalar_int_mode int_mode, int_cmp_mode;
5534   if (code == NE
5535       && op1 == const0_rtx
5536       && is_int_mode (mode, &int_mode)
5537       && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5538       /* ??? Work-around BImode bugs in the ia64 backend.  */
5539       && int_mode != BImode
5540       && int_cmp_mode != BImode
5541       && nonzero_bits (op0, int_cmp_mode) == 1
5542       && STORE_FLAG_VALUE == 1)
5543     return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5544 	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5545 	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
5546 
5547   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
5548   if ((code == EQ || code == NE)
5549       && op1 == const0_rtx
5550       && op0code == XOR)
5551     return simplify_gen_relational (code, mode, cmp_mode,
5552 				    XEXP (op0, 0), XEXP (op0, 1));
5553 
5554   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
5555   if ((code == EQ || code == NE)
5556       && op0code == XOR
5557       && rtx_equal_p (XEXP (op0, 0), op1)
5558       && !side_effects_p (XEXP (op0, 0)))
5559     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5560 				    CONST0_RTX (mode));
5561 
5562   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
5563   if ((code == EQ || code == NE)
5564       && op0code == XOR
5565       && rtx_equal_p (XEXP (op0, 1), op1)
5566       && !side_effects_p (XEXP (op0, 1)))
5567     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5568 				    CONST0_RTX (mode));
5569 
5570   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
5571   if ((code == EQ || code == NE)
5572       && op0code == XOR
5573       && CONST_SCALAR_INT_P (op1)
5574       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5575     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5576 				    simplify_gen_binary (XOR, cmp_mode,
5577 							 XEXP (op0, 1), op1));
5578 
5579   /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5580      constant folding if x/y is a constant.  */
5581   if ((code == EQ || code == NE)
5582       && (op0code == AND || op0code == IOR)
5583       && !side_effects_p (op1)
5584       && op1 != CONST0_RTX (cmp_mode))
5585     {
5586       /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5587 	 (eq/ne (and (not y) x) 0).  */
5588       if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5589 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5590 	{
5591 	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5592 					  cmp_mode);
5593 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5594 
5595 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5596 					  CONST0_RTX (cmp_mode));
5597 	}
5598 
5599       /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5600 	 (eq/ne (and (not x) y) 0).  */
5601       if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5602 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5603 	{
5604 	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5605 					  cmp_mode);
5606 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5607 
5608 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5609 					  CONST0_RTX (cmp_mode));
5610 	}
5611     }
5612 
5613   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
5614   if ((code == EQ || code == NE)
5615       && GET_CODE (op0) == BSWAP
5616       && CONST_SCALAR_INT_P (op1))
5617     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5618 				    simplify_gen_unary (BSWAP, cmp_mode,
5619 							op1, cmp_mode));
5620 
5621   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
5622   if ((code == EQ || code == NE)
5623       && GET_CODE (op0) == BSWAP
5624       && GET_CODE (op1) == BSWAP)
5625     return simplify_gen_relational (code, mode, cmp_mode,
5626 				    XEXP (op0, 0), XEXP (op1, 0));
5627 
5628   if (op0code == POPCOUNT && op1 == const0_rtx)
5629     switch (code)
5630       {
5631       case EQ:
5632       case LE:
5633       case LEU:
5634 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
5635 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5636 					XEXP (op0, 0), const0_rtx);
5637 
5638       case NE:
5639       case GT:
5640       case GTU:
5641 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
5642 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5643 					XEXP (op0, 0), const0_rtx);
5644 
5645       default:
5646 	break;
5647       }
5648 
5649   return NULL_RTX;
5650 }
5651 
5652 enum
5653 {
5654   CMP_EQ = 1,
5655   CMP_LT = 2,
5656   CMP_GT = 4,
5657   CMP_LTU = 8,
5658   CMP_GTU = 16
5659 };
5660 
5661 
5662 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5663    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
5664    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5665    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5666    For floating-point comparisons, assume that the operands were ordered.  */
5667 
5668 static rtx
5669 comparison_result (enum rtx_code code, int known_results)
5670 {
5671   switch (code)
5672     {
5673     case EQ:
5674     case UNEQ:
5675       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5676     case NE:
5677     case LTGT:
5678       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5679 
5680     case LT:
5681     case UNLT:
5682       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5683     case GE:
5684     case UNGE:
5685       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5686 
5687     case GT:
5688     case UNGT:
5689       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5690     case LE:
5691     case UNLE:
5692       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5693 
5694     case LTU:
5695       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5696     case GEU:
5697       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5698 
5699     case GTU:
5700       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5701     case LEU:
5702       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5703 
5704     case ORDERED:
5705       return const_true_rtx;
5706     case UNORDERED:
5707       return const0_rtx;
5708     default:
5709       gcc_unreachable ();
5710     }
5711 }
5712 
5713 /* Check if the given comparison (done in the given MODE) is actually
5714    a tautology or a contradiction.  If the mode is VOIDmode, the
5715    comparison is done in "infinite precision".  If no simplification
5716    is possible, this function returns zero.  Otherwise, it returns
5717    either const_true_rtx or const0_rtx.  */
5718 
5719 rtx
5720 simplify_const_relational_operation (enum rtx_code code,
5721 				     machine_mode mode,
5722 				     rtx op0, rtx op1)
5723 {
5724   rtx tem;
5725   rtx trueop0;
5726   rtx trueop1;
5727 
5728   gcc_assert (mode != VOIDmode
5729 	      || (GET_MODE (op0) == VOIDmode
5730 		  && GET_MODE (op1) == VOIDmode));
5731 
5732   /* If op0 is a compare, extract the comparison arguments from it.  */
5733   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5734     {
5735       op1 = XEXP (op0, 1);
5736       op0 = XEXP (op0, 0);
5737 
5738       if (GET_MODE (op0) != VOIDmode)
5739 	mode = GET_MODE (op0);
5740       else if (GET_MODE (op1) != VOIDmode)
5741 	mode = GET_MODE (op1);
5742       else
5743 	return 0;
5744     }
5745 
5746   /* We can't simplify MODE_CC values since we don't know what the
5747      actual comparison is.  */
5748   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5749     return 0;
5750 
5751   /* Make sure the constant is second.  */
5752   if (swap_commutative_operands_p (op0, op1))
5753     {
5754       std::swap (op0, op1);
5755       code = swap_condition (code);
5756     }
5757 
5758   trueop0 = avoid_constant_pool_reference (op0);
5759   trueop1 = avoid_constant_pool_reference (op1);
5760 
5761   /* For integer comparisons of A and B maybe we can simplify A - B and can
5762      then simplify a comparison of that with zero.  If A and B are both either
5763      a register or a CONST_INT, this can't help; testing for these cases will
5764      prevent infinite recursion here and speed things up.
5765 
5766      We can only do this for EQ and NE comparisons as otherwise we may
5767      lose or introduce overflow, which we cannot disregard as undefined because
5768      we do not know the signedness of the operation on either the left or
5769      the right hand side of the comparison.  */
5770 
5771   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5772       && (code == EQ || code == NE)
5773       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5774 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
5775       && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5776       /* We cannot do this if tem is a nonzero address.  */
5777       && ! nonzero_address_p (tem))
5778     return simplify_const_relational_operation (signed_condition (code),
5779 						mode, tem, const0_rtx);
5780 
5781   if (! HONOR_NANS (mode) && code == ORDERED)
5782     return const_true_rtx;
5783 
5784   if (! HONOR_NANS (mode) && code == UNORDERED)
5785     return const0_rtx;
5786 
5787   /* For modes without NaNs, if the two operands are equal, we know the
5788      result except if they have side-effects.  Even with NaNs we know
5789      the result of unordered comparisons and, if signaling NaNs are
5790      irrelevant, also the result of LT/GT/LTGT.  */
5791   if ((! HONOR_NANS (trueop0)
5792        || code == UNEQ || code == UNLE || code == UNGE
5793        || ((code == LT || code == GT || code == LTGT)
5794 	   && ! HONOR_SNANS (trueop0)))
5795       && rtx_equal_p (trueop0, trueop1)
5796       && ! side_effects_p (trueop0))
5797     return comparison_result (code, CMP_EQ);
5798 
5799   /* If the operands are floating-point constants, see if we can fold
5800      the result.  */
5801   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5802       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5803       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5804     {
5805       const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5806       const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5807 
5808       /* Comparisons are unordered iff at least one of the values is NaN.  */
5809       if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5810 	switch (code)
5811 	  {
5812 	  case UNEQ:
5813 	  case UNLT:
5814 	  case UNGT:
5815 	  case UNLE:
5816 	  case UNGE:
5817 	  case NE:
5818 	  case UNORDERED:
5819 	    return const_true_rtx;
5820 	  case EQ:
5821 	  case LT:
5822 	  case GT:
5823 	  case LE:
5824 	  case GE:
5825 	  case LTGT:
5826 	  case ORDERED:
5827 	    return const0_rtx;
5828 	  default:
5829 	    return 0;
5830 	  }
5831 
5832       return comparison_result (code,
5833 				(real_equal (d0, d1) ? CMP_EQ :
5834 				 real_less (d0, d1) ? CMP_LT : CMP_GT));
5835     }
5836 
5837   /* Otherwise, see if the operands are both integers.  */
5838   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5839       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5840     {
5841       /* It would be nice if we really had a mode here.  However, the
5842 	 largest int representable on the target is as good as
5843 	 infinite.  */
5844       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5845       rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5846       rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5847 
5848       if (wi::eq_p (ptrueop0, ptrueop1))
5849 	return comparison_result (code, CMP_EQ);
5850       else
5851 	{
5852 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5853 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5854 	  return comparison_result (code, cr);
5855 	}
5856     }
5857 
5858   /* Optimize comparisons with upper and lower bounds.  */
5859   scalar_int_mode int_mode;
5860   if (CONST_INT_P (trueop1)
5861       && is_a <scalar_int_mode> (mode, &int_mode)
5862       && HWI_COMPUTABLE_MODE_P (int_mode)
5863       && !side_effects_p (trueop0))
5864     {
5865       int sign;
5866       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5867       HOST_WIDE_INT val = INTVAL (trueop1);
5868       HOST_WIDE_INT mmin, mmax;
5869 
5870       if (code == GEU
5871 	  || code == LEU
5872 	  || code == GTU
5873 	  || code == LTU)
5874 	sign = 0;
5875       else
5876 	sign = 1;
5877 
5878       /* Get a reduced range if the sign bit is zero.  */
5879       if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5880 	{
5881 	  mmin = 0;
5882 	  mmax = nonzero;
5883 	}
5884       else
5885 	{
5886 	  rtx mmin_rtx, mmax_rtx;
5887 	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5888 
5889 	  mmin = INTVAL (mmin_rtx);
5890 	  mmax = INTVAL (mmax_rtx);
5891 	  if (sign)
5892 	    {
5893 	      unsigned int sign_copies
5894 		= num_sign_bit_copies (trueop0, int_mode);
5895 
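	      /* The top SIGN_COPIES bits are all copies of the sign bit,
		 so the value needs that many fewer bits; narrow the
		 bounds accordingly.  */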
5896 	      mmin >>= (sign_copies - 1);
5897 	      mmax >>= (sign_copies - 1);
5898 	    }
5899 	}
5900 
5901       switch (code)
5902 	{
5903 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5904 	case GEU:
5905 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5906 	    return const_true_rtx;
5907 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5908 	    return const0_rtx;
5909 	  break;
5910 	case GE:
5911 	  if (val <= mmin)
5912 	    return const_true_rtx;
5913 	  if (val > mmax)
5914 	    return const0_rtx;
5915 	  break;
5916 
5917 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5918 	case LEU:
5919 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5920 	    return const_true_rtx;
5921 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5922 	    return const0_rtx;
5923 	  break;
5924 	case LE:
5925 	  if (val >= mmax)
5926 	    return const_true_rtx;
5927 	  if (val < mmin)
5928 	    return const0_rtx;
5929 	  break;
5930 
5931 	case EQ:
5932 	  /* x == y is always false for y out of range.  */
5933 	  if (val < mmin || val > mmax)
5934 	    return const0_rtx;
5935 	  break;
5936 
5937 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5938 	case GTU:
5939 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5940 	    return const0_rtx;
5941 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5942 	    return const_true_rtx;
5943 	  break;
5944 	case GT:
5945 	  if (val >= mmax)
5946 	    return const0_rtx;
5947 	  if (val < mmin)
5948 	    return const_true_rtx;
5949 	  break;
5950 
5951 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5952 	case LTU:
5953 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5954 	    return const0_rtx;
5955 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5956 	    return const_true_rtx;
5957 	  break;
5958 	case LT:
5959 	  if (val <= mmin)
5960 	    return const0_rtx;
5961 	  if (val > mmax)
5962 	    return const_true_rtx;
5963 	  break;
5964 
5965 	case NE:
5966 	  /* x != y is always true for y out of range.  */
5967 	  if (val < mmin || val > mmax)
5968 	    return const_true_rtx;
5969 	  break;
5970 
5971 	default:
5972 	  break;
5973 	}
5974     }
5975 
5976   /* Optimize integer comparisons with zero.  */
5977   if (is_a <scalar_int_mode> (mode, &int_mode)
5978       && trueop1 == const0_rtx
5979       && !side_effects_p (trueop0))
5980     {
5981       /* Some addresses are known to be nonzero.  We don't know
5982 	 their sign, but equality comparisons are known.  */
5983       if (nonzero_address_p (trueop0))
5984 	{
5985 	  if (code == EQ || code == LEU)
5986 	    return const0_rtx;
5987 	  if (code == NE || code == GTU)
5988 	    return const_true_rtx;
5989 	}
5990 
5991       /* See if the first operand is an IOR with a constant.  If so, we
5992 	 may be able to determine the result of this comparison.  */
5993       if (GET_CODE (op0) == IOR)
5994 	{
5995 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5996 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5997 	    {
5998 	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
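	      /* Whether the IOR constant forces the sign bit of the
		 result to be set.  */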
5999 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
6000 			      && (UINTVAL (inner_const)
6001 				  & (HOST_WIDE_INT_1U
6002 				     << sign_bitnum)));
6003 
6004 	      switch (code)
6005 		{
6006 		case EQ:
6007 		case LEU:
6008 		  return const0_rtx;
6009 		case NE:
6010 		case GTU:
6011 		  return const_true_rtx;
6012 		case LT:
6013 		case LE:
6014 		  if (has_sign)
6015 		    return const_true_rtx;
6016 		  break;
6017 		case GT:
6018 		case GE:
6019 		  if (has_sign)
6020 		    return const0_rtx;
6021 		  break;
6022 		default:
6023 		  break;
6024 		}
6025 	    }
6026 	}
6027     }
6028 
6029   /* Optimize comparison of ABS with zero.  */
6030   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
6031       && (GET_CODE (trueop0) == ABS
6032 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
6033 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
6034     {
6035       switch (code)
6036 	{
6037 	case LT:
6038 	  /* Optimize abs(x) < 0.0.  */
6039 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
6040 	    return const0_rtx;
6041 	  break;
6042 
6043 	case GE:
6044 	  /* Optimize abs(x) >= 0.0.  */
6045 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
6046 	    return const_true_rtx;
6047 	  break;
6048 
6049 	case UNGE:
6050 	  /* Optimize ! (abs(x) < 0.0).  */
6051 	  return const_true_rtx;
6052 
6053 	default:
6054 	  break;
6055 	}
6056     }
6057 
6058   return 0;
6059 }
6060 
6061 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6062    where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6063    or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the expression
6064    can be simplified to that, or NULL_RTX if not.
6065    Assume X is compared against zero with CMP_CODE and the true
6066    arm is TRUE_VAL and the false arm is FALSE_VAL.  */
6067 
6068 rtx
6069 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
6070 					 rtx true_val, rtx false_val)
6071 {
6072   if (cmp_code != EQ && cmp_code != NE)
6073     return NULL_RTX;
6074 
6075   /* Result on X == 0 and X != 0 respectively.  */
6076   rtx on_zero, on_nonzero;
6077   if (cmp_code == EQ)
6078     {
6079       on_zero = true_val;
6080       on_nonzero = false_val;
6081     }
6082   else
6083     {
6084       on_zero = false_val;
6085       on_nonzero = true_val;
6086     }
6087 
6088   rtx_code op_code = GET_CODE (on_nonzero);
6089   if ((op_code != CLZ && op_code != CTZ)
6090       || !rtx_equal_p (XEXP (on_nonzero, 0), x)
6091       || !CONST_INT_P (on_zero))
6092     return NULL_RTX;
6093 
6094   HOST_WIDE_INT op_val;
6095   scalar_int_mode mode ATTRIBUTE_UNUSED
6096     = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
6097   if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
6098        || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
6099       && op_val == INTVAL (on_zero))
6100     return on_nonzero;
6101 
6102   return NULL_RTX;
6103 }
6104 
6105 /* Try to simplify X given that it appears within operand OP of a
6106    VEC_MERGE operation whose mask is MASK.  X need not use the same
6107    vector mode as the VEC_MERGE, but it must have the same number of
6108    elements.
6109 
6110    Return the simplified X on success, otherwise return NULL_RTX.  */
6111 
6112 rtx
6113 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
6114 {
6115   gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
6116   poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
6117   if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
6118     {
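      /* A nested VEC_MERGE with the same mask contributes only operand OP
	 to the lanes selected by MASK, so the other operand is dead unless
	 it has side effects.  */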
6119       if (side_effects_p (XEXP (x, 1 - op)))
6120 	return NULL_RTX;
6121 
6122       return XEXP (x, op);
6123     }
6124   if (UNARY_P (x)
6125       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6126       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6127     {
6128       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6129       if (top0)
6130 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6131 				   GET_MODE (XEXP (x, 0)));
6132     }
6133   if (BINARY_P (x)
6134       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6135       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6136       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6137       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6138     {
6139       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6140       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6141       if (top0 || top1)
6142 	{
6143 	  if (COMPARISON_P (x))
6144 	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6145 					    GET_MODE (XEXP (x, 0)) != VOIDmode
6146 					    ? GET_MODE (XEXP (x, 0))
6147 					    : GET_MODE (XEXP (x, 1)),
6148 					    top0 ? top0 : XEXP (x, 0),
6149 					    top1 ? top1 : XEXP (x, 1));
6150 	  else
6151 	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6152 					top0 ? top0 : XEXP (x, 0),
6153 					top1 ? top1 : XEXP (x, 1));
6154 	}
6155     }
6156   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6157       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6158       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6159       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6160       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6161       && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6162       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6163     {
6164       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6165       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6166       rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6167       if (top0 || top1 || top2)
6168 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6169 				     GET_MODE (XEXP (x, 0)),
6170 				     top0 ? top0 : XEXP (x, 0),
6171 				     top1 ? top1 : XEXP (x, 1),
6172 				     top2 ? top2 : XEXP (x, 2));
6173     }
6174   return NULL_RTX;
6175 }
6176 
6177 
6178 /* Simplify CODE, an operation with result mode MODE and three operands,
6179    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
6180    a constant.  Return 0 if no simplification is possible.  */
6181 
6182 rtx
6183 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
6184 					      machine_mode op0_mode,
6185 					      rtx op0, rtx op1, rtx op2)
6186 {
6187   bool any_change = false;
6188   rtx tem, trueop2;
6189   scalar_int_mode int_mode, int_op0_mode;
6190   unsigned int n_elts;
6191 
6192   switch (code)
6193     {
6194     case FMA:
6195       /* Simplify negations around the multiplication.  */
6196       /* -a * -b + c  =>  a * b + c.  */
6197       if (GET_CODE (op0) == NEG)
6198 	{
6199 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
6200 	  if (tem)
6201 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6202 	}
6203       else if (GET_CODE (op1) == NEG)
6204 	{
6205 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
6206 	  if (tem)
6207 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6208 	}
6209 
6210       /* Canonicalize the two multiplication operands.  */
6211       /* a * -b + c  =>  -b * a + c.  */
6212       if (swap_commutative_operands_p (op0, op1))
6213 	std::swap (op0, op1), any_change = true;
6214 
6215       if (any_change)
6216 	return gen_rtx_FMA (mode, op0, op1, op2);
6217       return NULL_RTX;
6218 
6219     case SIGN_EXTRACT:
6220     case ZERO_EXTRACT:
6221       if (CONST_INT_P (op0)
6222 	  && CONST_INT_P (op1)
6223 	  && CONST_INT_P (op2)
6224 	  && is_a <scalar_int_mode> (mode, &int_mode)
6225 	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6226 	  && HWI_COMPUTABLE_MODE_P (int_mode))
6227 	{
6228 	  /* Extracting a bit-field from a constant.  */
6229 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
6230 	  HOST_WIDE_INT op1val = INTVAL (op1);
6231 	  HOST_WIDE_INT op2val = INTVAL (op2);
6232 	  if (!BITS_BIG_ENDIAN)
6233 	    val >>= op2val;
6234 	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6235 	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6236 	  else
6237 	    /* Not enough information to calculate the bit position.  */
6238 	    break;
6239 
6240 	  if (HOST_BITS_PER_WIDE_INT != op1val)
6241 	    {
6242 	      /* First zero-extend.  */
6243 	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
6244 	      /* If desired, propagate sign bit.  */
6245 	      if (code == SIGN_EXTRACT
6246 		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6247 		     != 0)
6248 		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6249 	    }
6250 
6251 	  return gen_int_mode (val, int_mode);
6252 	}
6253       break;
6254 
6255     case IF_THEN_ELSE:
6256       if (CONST_INT_P (op0))
6257 	return op0 != const0_rtx ? op1 : op2;
6258 
6259       /* Convert c ? a : a into "a".  */
6260       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6261 	return op1;
6262 
6263       /* Convert a != b ? a : b into "a".  */
6264       if (GET_CODE (op0) == NE
6265 	  && ! side_effects_p (op0)
6266 	  && ! HONOR_NANS (mode)
6267 	  && ! HONOR_SIGNED_ZEROS (mode)
6268 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6269 	       && rtx_equal_p (XEXP (op0, 1), op2))
6270 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6271 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6272 	return op1;
6273 
6274       /* Convert a == b ? a : b into "b".  */
6275       if (GET_CODE (op0) == EQ
6276 	  && ! side_effects_p (op0)
6277 	  && ! HONOR_NANS (mode)
6278 	  && ! HONOR_SIGNED_ZEROS (mode)
6279 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6280 	       && rtx_equal_p (XEXP (op0, 1), op2))
6281 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6282 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6283 	return op2;
6284 
6285       /* Convert (!c) != {0,...,0} ? a : b into
6286          c != {0,...,0} ? b : a for vector modes.  */
6287       if (VECTOR_MODE_P (GET_MODE (op1))
6288 	  && GET_CODE (op0) == NE
6289 	  && GET_CODE (XEXP (op0, 0)) == NOT
6290 	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6291 	{
6292 	  rtx cv = XEXP (op0, 1);
6293 	  int nunits;
6294 	  bool ok = true;
6295 	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6296 	    ok = false;
6297 	  else
6298 	    for (int i = 0; i < nunits; ++i)
6299 	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6300 		{
6301 		  ok = false;
6302 		  break;
6303 		}
6304 	  if (ok)
6305 	    {
6306 	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6307 					XEXP (XEXP (op0, 0), 0),
6308 					XEXP (op0, 1));
6309 	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6310 	      return retval;
6311 	    }
6312 	}
6313 
6314       /* Convert x == 0 ? N : clz (x) into clz (x) when
6315 	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6316 	 Similarly for ctz (x).  */
6317       if (COMPARISON_P (op0) && !side_effects_p (op0)
6318 	  && XEXP (op0, 1) == const0_rtx)
6319 	{
6320 	  rtx simplified
6321 	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6322 				     op1, op2);
6323 	  if (simplified)
6324 	    return simplified;
6325 	}
6326 
6327       if (COMPARISON_P (op0) && ! side_effects_p (op0))
6328 	{
6329 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6330 					? GET_MODE (XEXP (op0, 1))
6331 					: GET_MODE (XEXP (op0, 0)));
6332 	  rtx temp;
6333 
6334 	  /* Look for happy constants in op1 and op2.  */
6335 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
6336 	    {
6337 	      HOST_WIDE_INT t = INTVAL (op1);
6338 	      HOST_WIDE_INT f = INTVAL (op2);
6339 
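	      /* If the arms are the store-flag values themselves, the
		 IF_THEN_ELSE collapses to the comparison or its inverse.  */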
6340 	      if (t == STORE_FLAG_VALUE && f == 0)
6341 	        code = GET_CODE (op0);
6342 	      else if (t == 0 && f == STORE_FLAG_VALUE)
6343 		{
6344 		  enum rtx_code tmp;
6345 		  tmp = reversed_comparison_code (op0, NULL);
6346 		  if (tmp == UNKNOWN)
6347 		    break;
6348 		  code = tmp;
6349 		}
6350 	      else
6351 		break;
6352 
6353 	      return simplify_gen_relational (code, mode, cmp_mode,
6354 					      XEXP (op0, 0), XEXP (op0, 1));
6355 	    }
6356 
6357 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6358 			  			cmp_mode, XEXP (op0, 0),
6359 						XEXP (op0, 1));
6360 
6361 	  /* See if any simplifications were possible.  */
6362 	  if (temp)
6363 	    {
6364 	      if (CONST_INT_P (temp))
6365 		return temp == const0_rtx ? op2 : op1;
6366 	      else
6367 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6368 	    }
6369 	}
6370       break;
6371 
6372     case VEC_MERGE:
6373       gcc_assert (GET_MODE (op0) == mode);
6374       gcc_assert (GET_MODE (op1) == mode);
6375       gcc_assert (VECTOR_MODE_P (mode));
6376       trueop2 = avoid_constant_pool_reference (op2);
6377       if (CONST_INT_P (trueop2)
6378 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6379 	{
6380 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6381 	  unsigned HOST_WIDE_INT mask;
6382 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
6383 	    mask = -1;
6384 	  else
6385 	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6386 
6387 	  if (!(sel & mask) && !side_effects_p (op0))
6388 	    return op1;
6389 	  if ((sel & mask) == mask && !side_effects_p (op1))
6390 	    return op0;
6391 
6392 	  rtx trueop0 = avoid_constant_pool_reference (op0);
6393 	  rtx trueop1 = avoid_constant_pool_reference (op1);
6394 	  if (GET_CODE (trueop0) == CONST_VECTOR
6395 	      && GET_CODE (trueop1) == CONST_VECTOR)
6396 	    {
6397 	      rtvec v = rtvec_alloc (n_elts);
6398 	      unsigned int i;
6399 
6400 	      for (i = 0; i < n_elts; i++)
6401 		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6402 				    ? CONST_VECTOR_ELT (trueop0, i)
6403 				    : CONST_VECTOR_ELT (trueop1, i));
6404 	      return gen_rtx_CONST_VECTOR (mode, v);
6405 	    }
6406 
6407 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6408 	     if no element from a appears in the result.  */
6409 	  if (GET_CODE (op0) == VEC_MERGE)
6410 	    {
6411 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
6412 	      if (CONST_INT_P (tem))
6413 		{
6414 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6415 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6416 		    return simplify_gen_ternary (code, mode, mode,
6417 						 XEXP (op0, 1), op1, op2);
6418 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6419 		    return simplify_gen_ternary (code, mode, mode,
6420 						 XEXP (op0, 0), op1, op2);
6421 		}
6422 	    }
6423 	  if (GET_CODE (op1) == VEC_MERGE)
6424 	    {
6425 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
6426 	      if (CONST_INT_P (tem))
6427 		{
6428 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6429 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6430 		    return simplify_gen_ternary (code, mode, mode,
6431 						 op0, XEXP (op1, 1), op2);
6432 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6433 		    return simplify_gen_ternary (code, mode, mode,
6434 						 op0, XEXP (op1, 0), op2);
6435 		}
6436 	    }
6437 
6438 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6439 	     with a.  */
6440 	  if (GET_CODE (op0) == VEC_DUPLICATE
6441 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6442 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6443 	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6444 	    {
6445 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6446 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
6447 		{
6448 		  if (XEXP (XEXP (op0, 0), 0) == op1
6449 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6450 		    return op1;
6451 		}
6452 	    }
6453 	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6454 	     (const_int N))
6455 	     with (vec_concat (X) (B)) if N == 1 or
6456 	     (vec_concat (A) (X)) if N == 2.  */
6457 	  if (GET_CODE (op0) == VEC_DUPLICATE
6458 	      && GET_CODE (op1) == CONST_VECTOR
6459 	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6460 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6461 	      && IN_RANGE (sel, 1, 2))
6462 	    {
6463 	      rtx newop0 = XEXP (op0, 0);
6464 	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6465 	      if (sel == 2)
6466 		std::swap (newop0, newop1);
6467 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6468 	    }
6469 	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6470 	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6471 	     Only applies for vectors of two elements.  */
6472 	  if (GET_CODE (op0) == VEC_DUPLICATE
6473 	      && GET_CODE (op1) == VEC_CONCAT
6474 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6475 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6476 	      && IN_RANGE (sel, 1, 2))
6477 	    {
6478 	      rtx newop0 = XEXP (op0, 0);
6479 	      rtx newop1 = XEXP (op1, 2 - sel);
6480 	      rtx otherop = XEXP (op1, sel - 1);
6481 	      if (sel == 2)
6482 		std::swap (newop0, newop1);
6483 	      /* Don't want to throw away the other part of the vec_concat if
6484 		 it has side-effects.  */
6485 	      if (!side_effects_p (otherop))
6486 		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6487 	    }
6488 
6489 	  /* Replace:
6490 
6491 	      (vec_merge:outer (vec_duplicate:outer x:inner)
6492 			       (subreg:outer y:inner 0)
6493 			       (const_int N))
6494 
6495 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6496 	     or (vec_concat:outer y:inner x:inner) if N == 2.
6497 
6498 	     Implicitly, this means we have a paradoxical subreg, but such
6499 	     a check is cheap, so make it anyway.
6500 
6501 	     Only applies for vectors of two elements.  */
6502 	  if (GET_CODE (op0) == VEC_DUPLICATE
6503 	      && GET_CODE (op1) == SUBREG
6504 	      && GET_MODE (op1) == GET_MODE (op0)
6505 	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6506 	      && paradoxical_subreg_p (op1)
6507 	      && subreg_lowpart_p (op1)
6508 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6509 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6510 	      && IN_RANGE (sel, 1, 2))
6511 	    {
6512 	      rtx newop0 = XEXP (op0, 0);
6513 	      rtx newop1 = SUBREG_REG (op1);
6514 	      if (sel == 2)
6515 		std::swap (newop0, newop1);
6516 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6517 	    }
6518 
6519 	  /* Same as above but with switched operands:
6520 		Replace (vec_merge:outer (subreg:outer x:inner 0)
6521 					 (vec_duplicate:outer y:inner)
6522 			       (const_int N))
6523 
6524 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6525 	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
6526 	  if (GET_CODE (op1) == VEC_DUPLICATE
6527 	      && GET_CODE (op0) == SUBREG
6528 	      && GET_MODE (op0) == GET_MODE (op1)
6529 	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6530 	      && paradoxical_subreg_p (op0)
6531 	      && subreg_lowpart_p (op0)
6532 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6533 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6534 	      && IN_RANGE (sel, 1, 2))
6535 	    {
6536 	      rtx newop0 = SUBREG_REG (op0);
6537 	      rtx newop1 = XEXP (op1, 0);
6538 	      if (sel == 2)
6539 		std::swap (newop0, newop1);
6540 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6541 	    }
6542 
6543 	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6544 				 (const_int n))
6545 	     with (vec_concat x y) or (vec_concat y x) depending on value
6546 	     of N.  */
6547 	  if (GET_CODE (op0) == VEC_DUPLICATE
6548 	      && GET_CODE (op1) == VEC_DUPLICATE
6549 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6550 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6551 	      && IN_RANGE (sel, 1, 2))
6552 	    {
6553 	      rtx newop0 = XEXP (op0, 0);
6554 	      rtx newop1 = XEXP (op1, 0);
6555 	      if (sel == 2)
6556 		std::swap (newop0, newop1);
6557 
6558 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6559 	    }
6560 	}
6561 
6562       if (rtx_equal_p (op0, op1)
6563 	  && !side_effects_p (op2) && !side_effects_p (op1))
6564 	return op0;
6565 
6566       if (!side_effects_p (op2))
6567 	{
6568 	  rtx top0
6569 	    = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6570 	  rtx top1
6571 	    = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6572 	  if (top0 || top1)
6573 	    return simplify_gen_ternary (code, mode, mode,
6574 					 top0 ? top0 : op0,
6575 					 top1 ? top1 : op1, op2);
6576 	}
6577 
6578       break;
6579 
6580     default:
6581       gcc_unreachable ();
6582     }
6583 
6584   return 0;
6585 }
6586 
6587 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6588    starting at byte FIRST_BYTE.  Return true on success and add the
6589    bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6590    that the bytes follow target memory order.  Leave BYTES unmodified
6591    on failure.
6592 
6593    MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
6594    BYTES before calling this function.  */
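
/* For illustration only: a minimal sketch of a typical call, modelled on
   simplify_immed_subreg further down.  The caller reserves NUM_BYTES in
   the vector up front, because the bytes are added with quick_push:

     auto_vec<target_unit, 128> buffer (num_bytes);
     if (native_encode_rtx (mode, x, buffer, 0, num_bytes))
       use (buffer);   // buffer now holds the target-order image of X

   Here "use" is only a placeholder, not a real function.  */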
6595 
6596 bool
6597 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6598 		   unsigned int first_byte, unsigned int num_bytes)
6599 {
6600   /* Check the mode is sensible.  */
6601   gcc_assert (GET_MODE (x) == VOIDmode
6602 	      ? is_a <scalar_int_mode> (mode)
6603 	      : mode == GET_MODE (x));
6604 
6605   if (GET_CODE (x) == CONST_VECTOR)
6606     {
6607       /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6608 	 is necessary.  The only complication is that MODE_VECTOR_BOOL
6609 	 vectors can have several elements per byte.  */
6610       unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6611 						   GET_MODE_NUNITS (mode));
6612       unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6613       if (elt_bits < BITS_PER_UNIT)
6614 	{
6615 	  /* This is the only case in which elements can be smaller than
6616 	     a byte.  */
6617 	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6618 	  for (unsigned int i = 0; i < num_bytes; ++i)
6619 	    {
6620 	      target_unit value = 0;
6621 	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6622 		{
6623 		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6624 		  elt += 1;
6625 		}
6626 	      bytes.quick_push (value);
6627 	    }
6628 	  return true;
6629 	}
6630 
6631       unsigned int start = bytes.length ();
6632       unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6633       /* Make FIRST_BYTE relative to ELT.  */
6634       first_byte %= elt_bytes;
6635       while (num_bytes > 0)
6636 	{
6637 	  /* Work out how many bytes we want from element ELT.  */
6638 	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6639 	  if (!native_encode_rtx (GET_MODE_INNER (mode),
6640 				  CONST_VECTOR_ELT (x, elt), bytes,
6641 				  first_byte, chunk_bytes))
6642 	    {
6643 	      bytes.truncate (start);
6644 	      return false;
6645 	    }
6646 	  elt += 1;
6647 	  first_byte = 0;
6648 	  num_bytes -= chunk_bytes;
6649 	}
6650       return true;
6651     }
6652 
6653   /* All subsequent cases are limited to scalars.  */
6654   scalar_mode smode;
6655   if (!is_a <scalar_mode> (mode, &smode))
6656     return false;
6657 
6658   /* Make sure that the region is in range.  */
6659   unsigned int end_byte = first_byte + num_bytes;
6660   unsigned int mode_bytes = GET_MODE_SIZE (smode);
6661   gcc_assert (end_byte <= mode_bytes);
6662 
6663   if (CONST_SCALAR_INT_P (x))
6664     {
6665       /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6666 	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
6667 	 position of each byte.  */
6668       rtx_mode_t value (x, smode);
6669       wide_int_ref value_wi (value);
6670       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6671 	{
6672 	  /* Always constant because the inputs are.  */
6673 	  unsigned int lsb
6674 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6675 	  /* Operate directly on the encoding rather than using
6676 	     wi::extract_uhwi, so that we preserve the sign or zero
6677 	     extension for modes that are not a whole number of bits in
6678 	     size.  (Zero extension is only used for the combination of
6679 	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
6680 	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6681 	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6682 	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6683 	  bytes.quick_push (uhwi >> shift);
6684 	}
6685       return true;
6686     }
6687 
6688   if (CONST_DOUBLE_P (x))
6689     {
6690       /* real_to_target produces an array of integers in target memory order.
6691 	 All integers before the last one have 32 bits; the last one may
6692 	 have 32 bits or fewer, depending on whether the mode bitsize
6693 	 is divisible by 32.  Each of these integers is then laid out
6694 	 in target memory as any other integer would be.  */
6695       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6696       real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6697 
6698       /* The (maximum) number of target bytes per element of el32.  */
6699       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6700       gcc_assert (bytes_per_el32 != 0);
6701 
6702       /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6703 	 handling above.  */
6704       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6705 	{
6706 	  unsigned int index = byte / bytes_per_el32;
6707 	  unsigned int subbyte = byte % bytes_per_el32;
6708 	  unsigned int int_bytes = MIN (bytes_per_el32,
6709 					mode_bytes - index * bytes_per_el32);
6710 	  /* Always constant because the inputs are.  */
6711 	  unsigned int lsb
6712 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6713 	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
6714 	}
6715       return true;
6716     }
6717 
6718   if (GET_CODE (x) == CONST_FIXED)
6719     {
6720       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6721 	{
6722 	  /* Always constant because the inputs are.  */
6723 	  unsigned int lsb
6724 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6725 	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6726 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
6727 	    {
6728 	      lsb -= HOST_BITS_PER_WIDE_INT;
6729 	      piece = CONST_FIXED_VALUE_HIGH (x);
6730 	    }
6731 	  bytes.quick_push (piece >> lsb);
6732 	}
6733       return true;
6734     }
6735 
6736   return false;
6737 }
6738 
6739 /* Read a vector of mode MODE from the target memory image given by BYTES,
6740    starting at byte FIRST_BYTE.  The vector is known to be encodable using
6741    NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6742    and BYTES is known to have enough bytes to supply NPATTERNS *
6743    NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
6744    BITS_PER_UNIT bits and the bytes are in target memory order.
6745 
6746    Return the vector on success, otherwise return NULL_RTX.  */
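
/* For example, the full-vector case in native_decode_rtx below calls this
   routine with NPATTERNS equal to the (constant) number of elements and
   NELTS_PER_PATTERN == 1, so every element is read directly from BYTES:

     native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);

   Smaller NPATTERNS/NELTS_PER_PATTERN values describe the compressed
   encodings used by rtx_vector_builder (duplicated and stepped
   sequences).  */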
6747 
6748 rtx
6749 native_decode_vector_rtx (machine_mode mode, vec<target_unit> bytes,
6750 			  unsigned int first_byte, unsigned int npatterns,
6751 			  unsigned int nelts_per_pattern)
6752 {
6753   rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
6754 
6755   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6756 					       GET_MODE_NUNITS (mode));
6757   if (elt_bits < BITS_PER_UNIT)
6758     {
6759       /* This is the only case in which elements can be smaller than a byte.
6760 	 Element 0 is always in the lsb of the containing byte.  */
6761       gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6762       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6763 	{
6764 	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
6765 	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
6766 	  unsigned int lsb = bit_index % BITS_PER_UNIT;
6767 	  builder.quick_push (bytes[byte_index] & (1 << lsb)
6768 			      ? CONST1_RTX (BImode)
6769 			      : CONST0_RTX (BImode));
6770 	}
6771     }
6772   else
6773     {
6774       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6775 	{
6776 	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
6777 	  if (!x)
6778 	    return NULL_RTX;
6779 	  builder.quick_push (x);
6780 	  first_byte += elt_bits / BITS_PER_UNIT;
6781 	}
6782     }
6783   return builder.build ();
6784 }
6785 
6786 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6787    starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
6788    bits and the bytes are in target memory order.  The image has enough
6789    values to specify all bytes of MODE.
6790 
6791    Return the rtx on success, otherwise return NULL_RTX.  */
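
/* Together with native_encode_rtx this gives a byte-level round trip;
   simplify_immed_subreg below relies on exactly that pairing.  A sketch
   only, assuming OUTERMODE is a fixed-size mode:

     unsigned int size = GET_MODE_SIZE (outermode);
     auto_vec<target_unit, 128> buffer (size);
     rtx result = NULL_RTX;
     if (native_encode_rtx (innermode, x, buffer, first_byte, size))
       result = native_decode_rtx (outermode, buffer, 0);  */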
6792 
6793 rtx
6794 native_decode_rtx (machine_mode mode, vec<target_unit> bytes,
6795 		   unsigned int first_byte)
6796 {
6797   if (VECTOR_MODE_P (mode))
6798     {
6799       /* If we know at compile time how many elements there are,
6800 	 pull each element directly from BYTES.  */
6801       unsigned int nelts;
6802       if (GET_MODE_NUNITS (mode).is_constant (&nelts))
6803 	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
6804       return NULL_RTX;
6805     }
6806 
6807   scalar_int_mode imode;
6808   if (is_a <scalar_int_mode> (mode, &imode)
6809       && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
6810     {
6811       /* Pull the bytes msb first, so that we can use simple
6812 	 shift-and-insert wide_int operations.  */
6813       unsigned int size = GET_MODE_SIZE (imode);
6814       wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
6815       for (unsigned int i = 0; i < size; ++i)
6816 	{
6817 	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
6818 	  /* Always constant because the inputs are.  */
6819 	  unsigned int subbyte
6820 	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
6821 	  result <<= BITS_PER_UNIT;
6822 	  result |= bytes[first_byte + subbyte];
6823 	}
6824       return immed_wide_int_const (result, imode);
6825     }
6826 
6827   scalar_float_mode fmode;
6828   if (is_a <scalar_float_mode> (mode, &fmode))
6829     {
6830       /* We need to build an array of integers in target memory order.
6831 	 All integers before the last one have 32 bits; the last one may
6832 	 have 32 bits or fewer, depending on whether the mode bitsize
6833 	 is divisible by 32.  */
6834       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6835       unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
6836       memset (el32, 0, num_el32 * sizeof (long));
6837 
6838       /* The (maximum) number of target bytes per element of el32.  */
6839       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6840       gcc_assert (bytes_per_el32 != 0);
6841 
6842       unsigned int mode_bytes = GET_MODE_SIZE (fmode);
6843       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6844 	{
6845 	  unsigned int index = byte / bytes_per_el32;
6846 	  unsigned int subbyte = byte % bytes_per_el32;
6847 	  unsigned int int_bytes = MIN (bytes_per_el32,
6848 					mode_bytes - index * bytes_per_el32);
6849 	  /* Always constant because the inputs are.  */
6850 	  unsigned int lsb
6851 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6852 	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
6853 	}
6854       REAL_VALUE_TYPE r;
6855       real_from_target (&r, el32, fmode);
6856       return const_double_from_real_value (r, fmode);
6857     }
6858 
6859   if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
6860     {
6861       scalar_mode smode = as_a <scalar_mode> (mode);
6862       FIXED_VALUE_TYPE f;
6863       f.data.low = 0;
6864       f.data.high = 0;
6865       f.mode = smode;
6866 
6867       unsigned int mode_bytes = GET_MODE_SIZE (smode);
6868       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6869 	{
6870 	  /* Always constant because the inputs are.  */
6871 	  unsigned int lsb
6872 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6873 	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
6874 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
6875 	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
6876 	  else
6877 	    f.data.low |= unit << lsb;
6878 	}
6879       return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
6880     }
6881 
6882   return NULL_RTX;
6883 }
6884 
6885 /* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
6886    is to convert a runtime BYTE value into a constant one.  */
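
/* For example, if X duplicates a single 4-byte element, a byte offset
   that is a known multiple of 4 can typically be reduced to 0, since
   every element repeats the same value.  */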
6887 
6888 static poly_uint64
6889 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
6890 {
6891   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
6892   machine_mode mode = GET_MODE (x);
6893   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6894 					       GET_MODE_NUNITS (mode));
6895   /* The number of bits needed to encode one element from each pattern.  */
6896   unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
6897 
6898   /* Identify the start point in terms of a sequence number and a byte offset
6899      within that sequence.  */
6900   poly_uint64 first_sequence;
6901   unsigned HOST_WIDE_INT subbit;
6902   if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
6903 		       &first_sequence, &subbit))
6904     {
6905       unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6906       if (nelts_per_pattern == 1)
6907 	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
6908 	   doesn't matter.  */
6909 	byte = subbit / BITS_PER_UNIT;
6910       else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
6911 	{
6912 	  /* The subreg drops the first element from each pattern and
6913 	     only uses the second element.  Find the first sequence
6914 	     that starts on a byte boundary.  */
6915 	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
6916 	  byte = subbit / BITS_PER_UNIT;
6917 	}
6918     }
6919   return byte;
6920 }
6921 
6922 /* Subroutine of simplify_subreg in which:
6923 
6924    - X is known to be a CONST_VECTOR
6925    - OUTERMODE is known to be a vector mode
6926 
6927    Try to handle the subreg by operating on the CONST_VECTOR encoding
6928    rather than on each individual element of the CONST_VECTOR.
6929 
6930    Return the simplified subreg on success, otherwise return NULL_RTX.  */
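
/* A concrete instance (a sketch, not an exhaustive description): taking a
   V2SImode subreg at byte 8 of a V4SImode constant in which every element
   is A only needs to encode a single element of the source; the bytes are
   then decoded back as a V2SImode duplicate of A.  */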
6931 
6932 static rtx
6933 simplify_const_vector_subreg (machine_mode outermode, rtx x,
6934 			      machine_mode innermode, unsigned int first_byte)
6935 {
6936   /* Paradoxical subregs of vectors have dubious semantics.  */
6937   if (paradoxical_subreg_p (outermode, innermode))
6938     return NULL_RTX;
6939 
6940   /* We can only preserve the semantics of a stepped pattern if the new
6941      vector element is the same as the original one.  */
6942   if (CONST_VECTOR_STEPPED_P (x)
6943       && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
6944     return NULL_RTX;
6945 
6946   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
6947   unsigned int x_elt_bits
6948     = vector_element_size (GET_MODE_BITSIZE (innermode),
6949 			   GET_MODE_NUNITS (innermode));
6950   unsigned int out_elt_bits
6951     = vector_element_size (GET_MODE_BITSIZE (outermode),
6952 			   GET_MODE_NUNITS (outermode));
6953 
6954   /* The number of bits needed to encode one element from every pattern
6955      of the original vector.  */
6956   unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
6957 
6958   /* The number of bits needed to encode one element from every pattern
6959      of the result.  */
6960   unsigned int out_sequence_bits
6961     = least_common_multiple (x_sequence_bits, out_elt_bits);
6962 
6963   /* Work out the number of interleaved patterns in the output vector
6964      and the number of encoded elements per pattern.  */
6965   unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
6966   unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6967 
6968   /* The encoding scheme requires the number of elements to be a multiple
6969      of the number of patterns, so that each pattern appears at least once
6970      and so that the same number of elements appear from each pattern.  */
6971   bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
6972   unsigned int const_nunits;
6973   if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
6974       && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
6975     {
6976       /* Either the encoding is invalid, or applying it would give us
6977 	 more elements than we need.  Just encode each element directly.  */
6978       out_npatterns = const_nunits;
6979       nelts_per_pattern = 1;
6980     }
6981   else if (!ok_p)
6982     return NULL_RTX;
6983 
6984   /* Get enough bytes of X to form the new encoding.  */
6985   unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
6986   unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
6987   auto_vec<target_unit, 128> buffer (buffer_bytes);
6988   if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
6989     return NULL_RTX;
6990 
6991   /* Reencode the bytes as OUTERMODE.  */
6992   return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
6993 				   nelts_per_pattern);
6994 }
6995 
6996 /* Try to simplify a subreg of a constant by encoding the subreg region
6997    as a sequence of target bytes and reading them back in the new mode.
6998    Return the new value on success, otherwise return null.
6999 
7000    The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7001    and byte offset FIRST_BYTE.  */
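
/* For example, on a typical little-endian target with 8-bit units,
   simplifying the HImode subreg of (const_int 0x12345678) with inner
   mode SImode at byte 0 encodes the bytes 78 56 34 12, reads the first
   two back in HImode, and returns (const_int 0x5678).  */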
7002 
7003 static rtx
7004 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
7005 		       machine_mode innermode, unsigned int first_byte)
7006 {
7007   unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
7008   auto_vec<target_unit, 128> buffer (buffer_bytes);
7009 
7010   /* Some ports misuse CCmode.  */
7011   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
7012     return x;
7013 
7014   /* Paradoxical subregs read undefined values for bytes outside of the
7015      inner value.  However, we have traditionally always sign-extended
7016      integer constants and zero-extended others.  */
7017   unsigned int inner_bytes = buffer_bytes;
7018   if (paradoxical_subreg_p (outermode, innermode))
7019     {
7020       if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
7021 	return NULL_RTX;
7022 
7023       target_unit filler = 0;
7024       if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
7025 	filler = -1;
7026 
7027       /* Add any leading bytes due to big-endian layout.  The number of
7028 	 bytes must be constant because both modes have constant size.  */
7029       unsigned int leading_bytes
7030 	= -byte_lowpart_offset (outermode, innermode).to_constant ();
7031       for (unsigned int i = 0; i < leading_bytes; ++i)
7032 	buffer.quick_push (filler);
7033 
7034       if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7035 	return NULL_RTX;
7036 
7037       /* Add any trailing bytes due to little-endian layout.  */
7038       while (buffer.length () < buffer_bytes)
7039 	buffer.quick_push (filler);
7040     }
7041   else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7042     return NULL_RTX;
7043   rtx ret = native_decode_rtx (outermode, buffer, 0);
7044   if (ret && MODE_COMPOSITE_P (outermode))
7045     {
7046       auto_vec<target_unit, 128> buffer2 (buffer_bytes);
7047       if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
7048 	return NULL_RTX;
7049       for (unsigned int i = 0; i < buffer_bytes; ++i)
7050 	if (buffer[i] != buffer2[i])
7051 	  return NULL_RTX;
7052     }
7053   return ret;
7054 }
7055 
7056 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7057    Return 0 if no simplifications are possible.  */
7058 rtx
7059 simplify_context::simplify_subreg (machine_mode outermode, rtx op,
7060 				   machine_mode innermode, poly_uint64 byte)
7061 {
7062   /* Little bit of sanity checking.  */
7063   gcc_assert (innermode != VOIDmode);
7064   gcc_assert (outermode != VOIDmode);
7065   gcc_assert (innermode != BLKmode);
7066   gcc_assert (outermode != BLKmode);
7067 
7068   gcc_assert (GET_MODE (op) == innermode
7069 	      || GET_MODE (op) == VOIDmode);
7070 
7071   poly_uint64 outersize = GET_MODE_SIZE (outermode);
7072   if (!multiple_p (byte, outersize))
7073     return NULL_RTX;
7074 
7075   poly_uint64 innersize = GET_MODE_SIZE (innermode);
7076   if (maybe_ge (byte, innersize))
7077     return NULL_RTX;
7078 
7079   if (outermode == innermode && known_eq (byte, 0U))
7080     return op;
7081 
7082   if (GET_CODE (op) == CONST_VECTOR)
7083     byte = simplify_const_vector_byte_offset (op, byte);
7084 
7085   if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
7086     {
7087       rtx elt;
7088 
7089       if (VECTOR_MODE_P (outermode)
7090 	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
7091 	  && vec_duplicate_p (op, &elt))
7092 	return gen_vec_duplicate (outermode, elt);
7093 
7094       if (outermode == GET_MODE_INNER (innermode)
7095 	  && vec_duplicate_p (op, &elt))
7096 	return elt;
7097     }
7098 
7099   if (CONST_SCALAR_INT_P (op)
7100       || CONST_DOUBLE_AS_FLOAT_P (op)
7101       || CONST_FIXED_P (op)
7102       || GET_CODE (op) == CONST_VECTOR)
7103     {
7104       unsigned HOST_WIDE_INT cbyte;
7105       if (byte.is_constant (&cbyte))
7106 	{
7107 	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
7108 	    {
7109 	      rtx tmp = simplify_const_vector_subreg (outermode, op,
7110 						      innermode, cbyte);
7111 	      if (tmp)
7112 		return tmp;
7113 	    }
7114 
7115 	  fixed_size_mode fs_outermode;
7116 	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
7117 	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
7118 	}
7119     }
7120 
7121   /* Changing mode twice with SUBREG => just change it once,
7122      or not at all if changing back to OP's starting mode.  */
7123   if (GET_CODE (op) == SUBREG)
7124     {
7125       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7126       poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7127       rtx newx;
7128 
7129       if (outermode == innermostmode
7130 	  && known_eq (byte, 0U)
7131 	  && known_eq (SUBREG_BYTE (op), 0))
7132 	return SUBREG_REG (op);
7133 
7134       /* Work out the memory offset of the final OUTERMODE value relative
7135 	 to the inner value of OP.  */
7136       poly_int64 mem_offset = subreg_memory_offset (outermode,
7137 						    innermode, byte);
7138       poly_int64 op_mem_offset = subreg_memory_offset (op);
7139       poly_int64 final_offset = mem_offset + op_mem_offset;
7140 
7141       /* See whether resulting subreg will be paradoxical.  */
7142       if (!paradoxical_subreg_p (outermode, innermostmode))
7143 	{
7144 	  /* Bail out in case resulting subreg would be incorrect.  */
7145 	  if (maybe_lt (final_offset, 0)
7146 	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
7147 	      || !multiple_p (final_offset, outersize))
7148 	    return NULL_RTX;
7149 	}
7150       else
7151 	{
7152 	  poly_int64 required_offset = subreg_memory_offset (outermode,
7153 							     innermostmode, 0);
7154 	  if (maybe_ne (final_offset, required_offset))
7155 	    return NULL_RTX;
7156 	  /* Paradoxical subregs always have byte offset 0.  */
7157 	  final_offset = 0;
7158 	}
7159 
7160       /* Recurse for further possible simplifications.  */
7161       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7162 			      final_offset);
7163       if (newx)
7164 	return newx;
7165       if (validate_subreg (outermode, innermostmode,
7166 			   SUBREG_REG (op), final_offset))
7167 	{
7168 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7169 	  if (SUBREG_PROMOTED_VAR_P (op)
7170 	      && SUBREG_PROMOTED_SIGN (op) >= 0
7171 	      && GET_MODE_CLASS (outermode) == MODE_INT
7172 	      && known_ge (outersize, innersize)
7173 	      && known_le (outersize, innermostsize)
7174 	      && subreg_lowpart_p (newx))
7175 	    {
7176 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
7177 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7178 	    }
7179 	  return newx;
7180 	}
7181       return NULL_RTX;
7182     }
7183 
7184   /* SUBREG of a hard register => just change the register number
7185      and/or mode.  If the hard register is not valid in that mode,
7186      suppress this simplification.  If the hard register is the stack,
7187      frame, or argument pointer, leave this as a SUBREG.  */
7188 
7189   if (REG_P (op) && HARD_REGISTER_P (op))
7190     {
7191       unsigned int regno, final_regno;
7192 
7193       regno = REGNO (op);
7194       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7195       if (HARD_REGISTER_NUM_P (final_regno))
7196 	{
7197 	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7198 				      subreg_memory_offset (outermode,
7199 							    innermode, byte));
7200 
7201 	  /* Propagate original regno.  We don't have any way to specify
7202 	     the offset inside original regno, so do so only for lowpart.
7203 	     The information is used only by alias analysis that cannot
7204 	     grok partial registers anyway.  */
7205 
7206 	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7207 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7208 	  return x;
7209 	}
7210     }
7211 
7212   /* If we have a SUBREG of a register that we are replacing and we are
7213      replacing it with a MEM, make a new MEM and try replacing the
7214      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
7215      or if we would be widening it.  */
7216 
7217   if (MEM_P (op)
7218       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7219       /* Allow splitting of volatile memory references in case we don't
7220          have instruction to move the whole thing.  */
7221       && (! MEM_VOLATILE_P (op)
7222 	  || ! have_insn_for (SET, innermode))
7223       && known_le (outersize, innersize))
7224     return adjust_address_nv (op, outermode, byte);
7225 
7226   /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7227      of two parts.  */
7228   if (GET_CODE (op) == CONCAT
7229       || GET_CODE (op) == VEC_CONCAT)
7230     {
7231       poly_uint64 final_offset;
7232       rtx part, res;
7233 
7234       machine_mode part_mode = GET_MODE (XEXP (op, 0));
7235       if (part_mode == VOIDmode)
7236 	part_mode = GET_MODE_INNER (GET_MODE (op));
7237       poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7238       if (known_lt (byte, part_size))
7239 	{
7240 	  part = XEXP (op, 0);
7241 	  final_offset = byte;
7242 	}
7243       else if (known_ge (byte, part_size))
7244 	{
7245 	  part = XEXP (op, 1);
7246 	  final_offset = byte - part_size;
7247 	}
7248       else
7249 	return NULL_RTX;
7250 
7251       if (maybe_gt (final_offset + outersize, part_size))
7252 	return NULL_RTX;
7253 
7254       part_mode = GET_MODE (part);
7255       if (part_mode == VOIDmode)
7256 	part_mode = GET_MODE_INNER (GET_MODE (op));
7257       res = simplify_subreg (outermode, part, part_mode, final_offset);
7258       if (res)
7259 	return res;
7260       if (validate_subreg (outermode, part_mode, part, final_offset))
7261 	return gen_rtx_SUBREG (outermode, part, final_offset);
7262       return NULL_RTX;
7263     }
7264 
7265   /* Simplify
7266 	(subreg (vec_merge (X)
7267 			   (vector)
7268 			   (const_int ((1 << N) | M)))
7269 		(N * sizeof (outermode)))
7270      to
7271 	(subreg (X) (N * sizeof (outermode)))
7272    */
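  /* For instance (a hypothetical example), with INNERMODE V4SImode and
     OUTERMODE SImode, a subreg at byte 8 selects element 2; if OP is
     (vec_merge X Y (const_int 4)) then bit 2 of the mask is set, so the
     subreg reads that element from X and we can recurse into
     (subreg:SI X 8).  */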
7273   unsigned int idx;
7274   if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7275       && idx < HOST_BITS_PER_WIDE_INT
7276       && GET_CODE (op) == VEC_MERGE
7277       && GET_MODE_INNER (innermode) == outermode
7278       && CONST_INT_P (XEXP (op, 2))
7279       && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7280     return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7281 
7282   /* A SUBREG resulting from a zero extension may fold to zero if
7283      it extracts higher bits than the ZERO_EXTEND's source bits.  */
7284   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7285     {
7286       poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7287       if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7288 	return CONST0_RTX (outermode);
7289     }
7290 
7291   scalar_int_mode int_outermode, int_innermode;
7292   if (is_a <scalar_int_mode> (outermode, &int_outermode)
7293       && is_a <scalar_int_mode> (innermode, &int_innermode)
7294       && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7295     {
7296       /* Handle polynomial integers.  The upper bits of a paradoxical
7297 	 subreg are undefined, so this is safe regardless of whether
7298 	 we're truncating or extending.  */
7299       if (CONST_POLY_INT_P (op))
7300 	{
7301 	  poly_wide_int val
7302 	    = poly_wide_int::from (const_poly_int_value (op),
7303 				   GET_MODE_PRECISION (int_outermode),
7304 				   SIGNED);
7305 	  return immed_wide_int_const (val, int_outermode);
7306 	}
7307 
7308       if (GET_MODE_PRECISION (int_outermode)
7309 	  < GET_MODE_PRECISION (int_innermode))
7310 	{
7311 	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7312 	  if (tem)
7313 	    return tem;
7314 	}
7315     }
7316 
7317   /* If OP is a vector comparison and the subreg is not changing the
7318      number of elements or the size of the elements, change the result
7319      of the comparison to the new mode.  */
7320   if (COMPARISON_P (op)
7321       && VECTOR_MODE_P (outermode)
7322       && VECTOR_MODE_P (innermode)
7323       && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7324       && known_eq (GET_MODE_UNIT_SIZE (outermode),
7325 		    GET_MODE_UNIT_SIZE (innermode)))
7326     return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7327 				    XEXP (op, 0), XEXP (op, 1));
7328   return NULL_RTX;
7329 }
7330 
7331 /* Make a SUBREG operation or equivalent if it folds.  */
7332 
7333 rtx
7334 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
7335 				       machine_mode innermode,
7336 				       poly_uint64 byte)
7337 {
7338   rtx newx;
7339 
7340   newx = simplify_subreg (outermode, op, innermode, byte);
7341   if (newx)
7342     return newx;
7343 
7344   if (GET_CODE (op) == SUBREG
7345       || GET_CODE (op) == CONCAT
7346       || GET_MODE (op) == VOIDmode)
7347     return NULL_RTX;
7348 
7349   if (MODE_COMPOSITE_P (outermode)
7350       && (CONST_SCALAR_INT_P (op)
7351 	  || CONST_DOUBLE_AS_FLOAT_P (op)
7352 	  || CONST_FIXED_P (op)
7353 	  || GET_CODE (op) == CONST_VECTOR))
7354     return NULL_RTX;
7355 
7356   if (validate_subreg (outermode, innermode, op, byte))
7357     return gen_rtx_SUBREG (outermode, op, byte);
7358 
7359   return NULL_RTX;
7360 }
7361 
7362 /* Generates a subreg to get the least significant part of EXPR (in mode
7363    INNER_MODE) to OUTER_MODE.  */
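
/* For example (assuming 8-bit units), lowpart_subreg (SImode, x, DImode)
   asks for the least significant four bytes of X: subreg_lowpart_offset
   typically gives byte 0 on little-endian targets and byte 4 on
   big-endian ones, and the result is either a folded constant or
   (subreg:SI x <byte>).  */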
7364 
7365 rtx
7366 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
7367 				  machine_mode inner_mode)
7368 {
7369   return simplify_gen_subreg (outer_mode, expr, inner_mode,
7370 			      subreg_lowpart_offset (outer_mode, inner_mode));
7371 }
7372 
7373 /* Simplify X, an rtx expression.
7374 
7375    Return the simplified expression or NULL if no simplifications
7376    were possible.
7377 
7378    This is the preferred entry point into the simplification routines;
7379    however, we still allow passes to call the more specific routines.
7380 
7381    Right now GCC has three (yes, three) major bodies of RTL simplification
7382    code that need to be unified.
7383 
7384 	1. fold_rtx in cse.c.  This code uses various CSE specific
7385 	   information to aid in RTL simplification.
7386 
7387 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
7388 	   it uses combine specific information to aid in RTL
7389 	   simplification.
7390 
7391 	3. The routines in this file.
7392 
7393 
7394    Long term we want to only have one body of simplification code; to
7395    get to that state I recommend the following steps:
7396 
7397 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
7398 	   which are not pass dependent state into these routines.
7399 
7400 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
7401 	   use this routine whenever possible.
7402 
7403 	3. Allow for pass dependent state to be provided to these
7404 	   routines and add simplifications based on the pass dependent
7405 	   state.  Remove code from cse.c & combine.c that becomes
7406 	   redundant/dead.
7407 
7408     It will take time, but ultimately the compiler will be easier to
7409     maintain and improve.  It's totally silly that when we add a
7410     simplification it needs to be added to 4 places (3 for RTL
7411     simplification and 1 for tree simplification).  */
7412 
7413 rtx
7414 simplify_rtx (const_rtx x)
7415 {
7416   const enum rtx_code code = GET_CODE (x);
7417   const machine_mode mode = GET_MODE (x);
7418 
7419   switch (GET_RTX_CLASS (code))
7420     {
7421     case RTX_UNARY:
7422       return simplify_unary_operation (code, mode,
7423 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7424     case RTX_COMM_ARITH:
7425       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7426 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7427 
7428       /* Fall through.  */
7429 
7430     case RTX_BIN_ARITH:
7431       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7432 
7433     case RTX_TERNARY:
7434     case RTX_BITFIELD_OPS:
7435       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7436 					 XEXP (x, 0), XEXP (x, 1),
7437 					 XEXP (x, 2));
7438 
7439     case RTX_COMPARE:
7440     case RTX_COMM_COMPARE:
7441       return simplify_relational_operation (code, mode,
7442                                             ((GET_MODE (XEXP (x, 0))
7443                                              != VOIDmode)
7444                                             ? GET_MODE (XEXP (x, 0))
7445                                             : GET_MODE (XEXP (x, 1))),
7446                                             XEXP (x, 0),
7447                                             XEXP (x, 1));
7448 
7449     case RTX_EXTRA:
7450       if (code == SUBREG)
7451 	return simplify_subreg (mode, SUBREG_REG (x),
7452 				GET_MODE (SUBREG_REG (x)),
7453 				SUBREG_BYTE (x));
7454       break;
7455 
7456     case RTX_OBJ:
7457       if (code == LO_SUM)
7458 	{
7459 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
7460 	  if (GET_CODE (XEXP (x, 0)) == HIGH
7461 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7462 	  return XEXP (x, 1);
7463 	}
7464       break;
7465 
7466     default:
7467       break;
7468     }
7469   return NULL;
7470 }
7471 
7472 #if CHECKING_P
7473 
7474 namespace selftest {
7475 
7476 /* Make a unique pseudo REG of mode MODE for use by selftests.  */
7477 
7478 static rtx
7479 make_test_reg (machine_mode mode)
7480 {
7481   static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7482 
7483   return gen_rtx_REG (mode, test_reg_num++);
7484 }
7485 
7486 static void
7487 test_scalar_int_ops (machine_mode mode)
7488 {
7489   rtx op0 = make_test_reg (mode);
7490   rtx op1 = make_test_reg (mode);
7491   rtx six = GEN_INT (6);
7492 
7493   rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7494   rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7495   rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7496 
7497   rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7498   rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7499   rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7500 
7501   rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7502   rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7503 
7504   /* Test some binary identities.  */
7505   ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7506   ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7507   ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7508   ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7509   ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7510   ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7511   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7512   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7513   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7514   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7515   ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7516   ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7517   ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7518   ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7519   ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7520   ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7521   ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7522 
7523   /* Test some self-inverse operations.  */
7524   ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7525   ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7526   ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7527 
7528   /* Test some reflexive operations.  */
7529   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7530   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7531   ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7532   ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7533   ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7534   ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7535 
7536   ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7537   ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7538 
7539   /* Test simplify_distributive_operation.  */
7540   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7541 		 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7542   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7543 		 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7544   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7545 		 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7546 }
7547 
7548 /* Verify some simplifications involving scalar expressions.  */
7549 
7550 static void
7551 test_scalar_ops ()
7552 {
7553   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7554     {
7555       machine_mode mode = (machine_mode) i;
7556       if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7557 	test_scalar_int_ops (mode);
7558     }
7559 }
7560 
7561 /* Test vector simplifications involving VEC_DUPLICATE in which the
7562    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
7563    register that holds one element of MODE.  */
7564 
7565 static void
7566 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7567 {
7568   scalar_mode inner_mode = GET_MODE_INNER (mode);
7569   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7570   poly_uint64 nunits = GET_MODE_NUNITS (mode);
7571   if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7572     {
7573       /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
7574       rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7575       rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7576       ASSERT_RTX_EQ (duplicate,
7577 		     simplify_unary_operation (NOT, mode,
7578 					       duplicate_not, mode));
7579 
7580       rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7581       rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7582       ASSERT_RTX_EQ (duplicate,
7583 		     simplify_unary_operation (NEG, mode,
7584 					       duplicate_neg, mode));
7585 
7586       /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
7587       ASSERT_RTX_EQ (duplicate,
7588 		     simplify_binary_operation (PLUS, mode, duplicate,
7589 						CONST0_RTX (mode)));
7590 
7591       ASSERT_RTX_EQ (duplicate,
7592 		     simplify_binary_operation (MINUS, mode, duplicate,
7593 						CONST0_RTX (mode)));
7594 
7595       ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7596 			 simplify_binary_operation (MINUS, mode, duplicate,
7597 						    duplicate));
7598     }
7599 
7600   /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
7601   rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7602   ASSERT_RTX_PTR_EQ (scalar_reg,
7603 		     simplify_binary_operation (VEC_SELECT, inner_mode,
7604 						duplicate, zero_par));
7605 
7606   unsigned HOST_WIDE_INT const_nunits;
7607   if (nunits.is_constant (&const_nunits))
7608     {
7609       /* And again with the final element.  */
7610       rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7611       rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7612       ASSERT_RTX_PTR_EQ (scalar_reg,
7613 			 simplify_binary_operation (VEC_SELECT, inner_mode,
7614 						    duplicate, last_par));
7615 
7616       /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
7617       rtx vector_reg = make_test_reg (mode);
7618       for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7619 	{
7620 	  if (i >= HOST_BITS_PER_WIDE_INT)
7621 	    break;
7622 	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7623 	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7624 	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7625 	  ASSERT_RTX_EQ (scalar_reg,
7626 			 simplify_gen_subreg (inner_mode, vm,
7627 					      mode, offset));
7628 	}
7629     }
7630 
7631   /* Test a scalar subreg of a VEC_DUPLICATE.  */
7632   poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
7633   ASSERT_RTX_EQ (scalar_reg,
7634 		 simplify_gen_subreg (inner_mode, duplicate,
7635 				      mode, offset));
7636 
7637   machine_mode narrower_mode;
7638   if (maybe_ne (nunits, 2U)
7639       && multiple_p (nunits, 2)
7640       && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
7641       && VECTOR_MODE_P (narrower_mode))
7642     {
7643       /* Test VEC_DUPLICATE of a vector.  */
7644       rtx_vector_builder nbuilder (narrower_mode, 2, 1);
7645       nbuilder.quick_push (const0_rtx);
7646       nbuilder.quick_push (const1_rtx);
7647       rtx_vector_builder builder (mode, 2, 1);
7648       builder.quick_push (const0_rtx);
7649       builder.quick_push (const1_rtx);
7650       ASSERT_RTX_EQ (builder.build (),
7651 		     simplify_unary_operation (VEC_DUPLICATE, mode,
7652 					       nbuilder.build (),
7653 					       narrower_mode));
7654 
7655       /* Test VEC_SELECT of a vector.  */
7656       rtx vec_par
7657 	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
7658       rtx narrower_duplicate
7659 	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
7660       ASSERT_RTX_EQ (narrower_duplicate,
7661 		     simplify_binary_operation (VEC_SELECT, narrower_mode,
7662 						duplicate, vec_par));
7663 
7664       /* Test a vector subreg of a VEC_DUPLICATE.  */
7665       poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
7666       ASSERT_RTX_EQ (narrower_duplicate,
7667 		     simplify_gen_subreg (narrower_mode, duplicate,
7668 					  mode, offset));
7669     }
7670 }
7671 
7672 /* Test vector simplifications involving VEC_SERIES in which the
7673    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
7674    register that holds one element of MODE.  */
7675 
7676 static void
7677 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7678 {
7679   /* Test unary cases with VEC_SERIES arguments.  */
7680   scalar_mode inner_mode = GET_MODE_INNER (mode);
7681   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7682   rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7683   rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7684   rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
7685   rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
7686   rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
7687   rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
7688   rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
7689 					 neg_scalar_reg);
7690   ASSERT_RTX_EQ (series_0_r,
7691 		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
7692   ASSERT_RTX_EQ (series_r_m1,
7693 		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
7694   ASSERT_RTX_EQ (series_r_r,
7695 		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
7696 
7697   /* Test that a VEC_SERIES with a zero step is simplified away.  */
7698   ASSERT_RTX_EQ (duplicate,
7699 		 simplify_binary_operation (VEC_SERIES, mode,
7700 					    scalar_reg, const0_rtx));
7701 
7702   /* Test PLUS and MINUS with VEC_SERIES.  */
7703   rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
7704   rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
7705   rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
7706   ASSERT_RTX_EQ (series_r_r,
7707 		 simplify_binary_operation (PLUS, mode, series_0_r,
7708 					    duplicate));
7709   ASSERT_RTX_EQ (series_r_1,
7710 		 simplify_binary_operation (PLUS, mode, duplicate,
7711 					    series_0_1));
7712   ASSERT_RTX_EQ (series_r_m1,
7713 		 simplify_binary_operation (PLUS, mode, duplicate,
7714 					    series_0_m1));
7715   ASSERT_RTX_EQ (series_0_r,
7716 		 simplify_binary_operation (MINUS, mode, series_r_r,
7717 					    duplicate));
7718   ASSERT_RTX_EQ (series_r_m1,
7719 		 simplify_binary_operation (MINUS, mode, duplicate,
7720 					    series_0_1));
7721   ASSERT_RTX_EQ (series_r_1,
7722 		 simplify_binary_operation (MINUS, mode, duplicate,
7723 					    series_0_m1));
7724   ASSERT_RTX_EQ (series_0_m1,
7725 		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
7726 					    constm1_rtx));
7727 
7728   /* Test NEG on constant vector series.  */
7729   ASSERT_RTX_EQ (series_0_m1,
7730 		 simplify_unary_operation (NEG, mode, series_0_1, mode));
7731   ASSERT_RTX_EQ (series_0_1,
7732 		 simplify_unary_operation (NEG, mode, series_0_m1, mode));
7733 
7734   /* Test PLUS and MINUS on constant vector series.  */
7735   rtx scalar2 = gen_int_mode (2, inner_mode);
7736   rtx scalar3 = gen_int_mode (3, inner_mode);
7737   rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
7738   rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
7739   rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
7740   ASSERT_RTX_EQ (series_1_1,
7741 		 simplify_binary_operation (PLUS, mode, series_0_1,
7742 					    CONST1_RTX (mode)));
7743   ASSERT_RTX_EQ (series_0_m1,
7744 		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
7745 					    series_0_m1));
7746   ASSERT_RTX_EQ (series_1_3,
7747 		 simplify_binary_operation (PLUS, mode, series_1_1,
7748 					    series_0_2));
7749   ASSERT_RTX_EQ (series_0_1,
7750 		 simplify_binary_operation (MINUS, mode, series_1_1,
7751 					    CONST1_RTX (mode)));
7752   ASSERT_RTX_EQ (series_1_1,
7753 		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
7754 					    series_0_m1));
7755   ASSERT_RTX_EQ (series_1_1,
7756 		 simplify_binary_operation (MINUS, mode, series_1_3,
7757 					    series_0_2));
7758 
7759   /* Test MULT between constant vectors.  */
7760   rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
7761   rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
7762   rtx scalar9 = gen_int_mode (9, inner_mode);
7763   rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
7764   ASSERT_RTX_EQ (series_0_2,
7765 		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
7766   ASSERT_RTX_EQ (series_3_9,
7767 		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
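  /* {0, 1, 2, ...} * {0, 1, 2, ...} is {0, 1, 4, ...}, which is not an
     arithmetic series, so the variable-length case cannot be encoded as
     a constant vector and should not simplify.  */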
7768   if (!GET_MODE_NUNITS (mode).is_constant ())
7769     ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
7770 					     series_0_1));
7771 
7772   /* Test ASHIFT between constant vectors.  */
7773   ASSERT_RTX_EQ (series_0_2,
7774 		 simplify_binary_operation (ASHIFT, mode, series_0_1,
7775 					    CONST1_RTX (mode)));
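  /* Similarly 1 << {0, 1, 2, ...} is {1, 2, 4, ...}, so the
     variable-length shift should not simplify either.  */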
7776   if (!GET_MODE_NUNITS (mode).is_constant ())
7777     ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
7778 					     series_0_1));
7779 }
7780 
7781 static rtx
7782 simplify_merge_mask (rtx x, rtx mask, int op)
7783 {
7784   return simplify_context ().simplify_merge_mask (x, mask, op);
7785 }
7786 
7787 /* Verify simplify_merge_mask works correctly.  */
7788 
7789 static void
7790 test_vec_merge (machine_mode mode)
7791 {
7792   rtx op0 = make_test_reg (mode);
7793   rtx op1 = make_test_reg (mode);
7794   rtx op2 = make_test_reg (mode);
7795   rtx op3 = make_test_reg (mode);
7796   rtx op4 = make_test_reg (mode);
7797   rtx op5 = make_test_reg (mode);
7798   rtx mask1 = make_test_reg (SImode);
7799   rtx mask2 = make_test_reg (SImode);
7800   rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
7801   rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
7802   rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
7803 
7804   /* Simple vec_merge.  */
7805   ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
7806   ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
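  /* A mismatched mask tells us nothing, so expect no simplification.  */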
7807   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
7808   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
7809 
7810   /* Nested vec_merge.
7811      It's tempting to make this simplify right down to opN, but we don't
7812      because all the simplify_* functions assume that the operands have
7813      already been simplified.  */
7814   rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
7815   ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
7816   ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
7817 
7818   /* Intermediate unary op.  */
7819   rtx unop = gen_rtx_NOT (mode, vm1);
7820   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
7821 		 simplify_merge_mask (unop, mask1, 0));
7822   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
7823 		 simplify_merge_mask (unop, mask1, 1));
7824 
7825   /* Intermediate binary op.  */
7826   rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7827   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7828 		 simplify_merge_mask (binop, mask1, 0));
7829   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7830 		 simplify_merge_mask (binop, mask1, 1));
7831 
7832   /* Intermediate ternary op.  */
7833   rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7834   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7835 		 simplify_merge_mask (tenop, mask1, 0));
7836   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7837 		 simplify_merge_mask (tenop, mask1, 1));
7838 
7839   /* Side effects.  */
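  /* Selecting the PRE_INC arm preserves its side effect, but selecting
     the other arm would discard it, so that case must fail.  */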
7840   rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7841   rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7842   ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7843   ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
7844 
7845   /* Called indirectly.  */
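  /* simplify_rtx should reduce the nested merge to a single VEC_MERGE
     that takes operand 0 from VM1 and operand 1 from VM2.  */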
7846   ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7847 		 simplify_rtx (nvm));
7848 }
7849 
7850 /* Test subregs of integer vector constant X, trying elements in
7851    the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
7852    where NELTS is the number of elements in X.  Subregs involving
7853    elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */
7854 
7855 static void
7856 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
7857 			   unsigned int first_valid = 0)
7858 {
7859   machine_mode inner_mode = GET_MODE (x);
7860   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7861 
7862   for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
7863     {
7864       machine_mode outer_mode = (machine_mode) modei;
7865       if (!VECTOR_MODE_P (outer_mode))
7866 	continue;
7867 
7868       unsigned int outer_nunits;
7869       if (GET_MODE_INNER (outer_mode) == int_mode
7870 	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
7871 	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
7872 	{
7873 	  /* Test subregs in which the outer mode is a smaller,
7874 	     constant-sized vector of the same element type.  */
7875 	  unsigned int limit
7876 	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
7877 	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
7878 	    {
7879 	      rtx expected = NULL_RTX;
7880 	      if (elt >= first_valid)
7881 		{
7882 		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
7883 		  for (unsigned int i = 0; i < outer_nunits; ++i)
7884 		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
7885 		  expected = builder.build ();
7886 		}
7887 	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
7888 	      ASSERT_RTX_EQ (expected,
7889 			     simplify_subreg (outer_mode, x,
7890 					      inner_mode, byte));
7891 	    }
7892 	}
7893       else if (known_eq (GET_MODE_SIZE (outer_mode),
7894 			 GET_MODE_SIZE (inner_mode))
7895 	       && known_eq (elt_bias, 0U)
7896 	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
7897 		   || known_eq (GET_MODE_BITSIZE (outer_mode),
7898 				GET_MODE_NUNITS (outer_mode)))
7899 	       && (!FLOAT_MODE_P (outer_mode)
7900 		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
7901 		       == GET_MODE_UNIT_PRECISION (outer_mode)))
7902 	       && (GET_MODE_SIZE (inner_mode).is_constant ()
7903 		   || !CONST_VECTOR_STEPPED_P (x)))
7904 	{
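	  /* The guard above limits this to cases where the round trip is
	     expected to be exact: equal byte sizes, no element bias,
	     boolean vectors with one bit per element, float formats whose
	     IEEE encoding fills the whole element, and no variable-length
	     stepped constants.  */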
7905 	  /* Try converting to OUTER_MODE and back.  */
7906 	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
7907 	  ASSERT_TRUE (outer_x != NULL_RTX);
7908 	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
7909 					     outer_mode, 0));
7910 	}
7911     }
7912 
7913   if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
7914     {
7915       /* Test each byte in the element range.  */
7916       unsigned int limit
7917 	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
7918       for (unsigned int i = 0; i < limit; ++i)
7919 	{
7920 	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
7921 	  rtx expected = NULL_RTX;
7922 	  if (elt >= first_valid)
7923 	    {
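	      /* Work out which byte of the element the subreg refers to;
		 on big-endian targets byte 0 is the element's most
		 significant byte.  */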
7924 	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
7925 	      if (BYTES_BIG_ENDIAN)
7926 		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
7927 	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
7928 	      wide_int shifted_elt
7929 		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
7930 	      expected = immed_wide_int_const (shifted_elt, QImode);
7931 	    }
7932 	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
7933 	  ASSERT_RTX_EQ (expected,
7934 			 simplify_subreg (QImode, x, inner_mode, byte));
7935 	}
7936     }
7937 }
7938 
7939 /* Test constant subregs of integer vector mode INNER_MODE, using 1
7940    element per pattern.  */
7941 
7942 static void
7943 test_vector_subregs_repeating (machine_mode inner_mode)
7944 {
7945   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7946   unsigned int min_nunits = constant_lower_bound (nunits);
7947   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7948   unsigned int count = gcd (min_nunits, 8);
7949 
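  /* Build a vector that repeats the COUNT values 8, 7, ..., 9 - COUNT.  */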
7950   rtx_vector_builder builder (inner_mode, count, 1);
7951   for (unsigned int i = 0; i < count; ++i)
7952     builder.quick_push (gen_int_mode (8 - i, int_mode));
7953   rtx x = builder.build ();
7954 
7955   test_vector_subregs_modes (x);
7956   if (!nunits.is_constant ())
7957     test_vector_subregs_modes (x, nunits - min_nunits);
7958 }
7959 
7960 /* Test constant subregs of integer vector mode INNER_MODE, using 2
7961    elements per pattern.  */
7962 
7963 static void
7964 test_vector_subregs_fore_back (machine_mode inner_mode)
7965 {
7966   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7967   unsigned int min_nunits = constant_lower_bound (nunits);
7968   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7969   unsigned int count = gcd (min_nunits, 4);
7970 
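  /* With two elements per pattern, the first COUNT elements are
     0, 1, ..., COUNT - 1 and the remaining elements repeat the block
     0, -1, ..., 1 - COUNT.  */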
7971   rtx_vector_builder builder (inner_mode, count, 2);
7972   for (unsigned int i = 0; i < count; ++i)
7973     builder.quick_push (gen_int_mode (i, int_mode));
7974   for (unsigned int i = 0; i < count; ++i)
7975     builder.quick_push (gen_int_mode (-(int) i, int_mode));
7976   rtx x = builder.build ();
7977 
7978   test_vector_subregs_modes (x);
7979   if (!nunits.is_constant ())
7980     test_vector_subregs_modes (x, nunits - min_nunits, count);
7981 }
7982 
7983 /* Test constant subregs of integer vector mode INNER_MODE, using 3
7984    elements per pattern.  */
7985 
7986 static void
7987 test_vector_subregs_stepped (machine_mode inner_mode)
7988 {
7989   /* Build { 0, 1, 2, 3, ... }.  */
7990   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7991   rtx_vector_builder builder (inner_mode, 1, 3);
7992   for (unsigned int i = 0; i < 3; ++i)
7993     builder.quick_push (gen_int_mode (i, int_mode));
7994   rtx x = builder.build ();
7995 
7996   test_vector_subregs_modes (x);
7997 }
7998 
7999 /* Test constant subregs of integer vector mode INNER_MODE.  */
8000 
8001 static void
8002 test_vector_subregs (machine_mode inner_mode)
8003 {
8004   test_vector_subregs_repeating (inner_mode);
8005   test_vector_subregs_fore_back (inner_mode);
8006   test_vector_subregs_stepped (inner_mode);
8007 }
8008 
8009 /* Verify some simplifications involving vectors.  */
8010 
8011 static void
8012 test_vector_ops ()
8013 {
8014   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
8015     {
8016       machine_mode mode = (machine_mode) i;
8017       if (VECTOR_MODE_P (mode))
8018 	{
8019 	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
8020 	  test_vector_ops_duplicate (mode, scalar_reg);
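	  /* The series and subreg tests require integer elements and a
	     mode that can have more than two of them.  */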
8021 	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
8022 	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
8023 	    {
8024 	      test_vector_ops_series (mode, scalar_reg);
8025 	      test_vector_subregs (mode);
8026 	    }
8027 	  test_vec_merge (mode);
8028 	}
8029     }
8030 }
8031 
8032 template<unsigned int N>
8033 struct simplify_const_poly_int_tests
8034 {
8035   static void run ();
8036 };
8037 
8038 template<>
8039 struct simplify_const_poly_int_tests<1>
8040 {
8041   static void run () {}
8042 };
8043 
8044 /* Test various CONST_POLY_INT properties.  */
8045 
8046 template<unsigned int N>
8047 void
8048 simplify_const_poly_int_tests<N>::run ()
8049 {
8050   rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
8051   rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
8052   rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
8053   rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
8054   rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
8055   rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
8056   rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
8057   rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
8058   rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
8059   rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
8060   rtx two = GEN_INT (2);
8061   rtx six = GEN_INT (6);
8062   poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
8063 
8064   /* These tests only try limited operation combinations.  Fuller arithmetic
8065      testing is done directly on poly_ints.  */
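  /* For example, x1 + x2 is (1 + x) + (-80 + 127x) = -79 + 128x, whose
     runtime coefficient 128 wraps to -128 in QImode, giving x3.  */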
8066   ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
8067   ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
8068   ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
8069   ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
8070   ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
8071   ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
8072   ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
8073   ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
8074   ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
8075   ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
8076   ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
8077 }
8078 
8079 /* Run all of the selftests within this file.  */
8080 
8081 void
8082 simplify_rtx_c_tests ()
8083 {
8084   test_scalar_ops ();
8085   test_vector_ops ();
8086   simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
8087 }
8088 
8089 } // namespace selftest
8090 
8091 #endif /* CHECKING_P */
8092