/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"
#include "rtx-vector-builder.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)

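/* Example (illustrative): given the low half LOW of a double-width
   value,

     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

   sets HIGH to HOST_WIDE_INT_M1 when LOW has its most significant bit
   set and to HOST_WIDE_INT_0 otherwise, which is exactly what sign
   extending LOW into the high half would produce.  */
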
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */

static rtx
neg_poly_int_rtx (machine_mode mode, const_rtx i)
{
  return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= (HOST_WIDE_INT_1U << width) - 1;
  return val == (HOST_WIDE_INT_1U << (width - 1));
}
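
/* Example (illustrative, assuming SImode has 32-bit precision):
   mode_signbit_p (SImode, GEN_INT (0x80000000)) is true, because after
   masking to 32 bits only the most significant bit is set, whereas
   mode_signbit_p (SImode, GEN_INT (0x40000000)) is false.  */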

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;
  scalar_int_mode int_mode;

  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (int_mode);
  return val == (HOST_WIDE_INT_1U << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  scalar_int_mode int_mode;
  if (!is_int_mode (mode, &int_mode))
    return false;

  width = GET_MODE_PRECISION (int_mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= HOST_WIDE_INT_1U << (width - 1);
  return val == 0;
}
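
/* Example (illustrative, again assuming 32-bit SImode):
   val_signbit_known_set_p (SImode, 0xffffffff) is true and
   val_signbit_known_clear_p (SImode, 0x7fffffff) is true, while for a
   mode wider than HOST_BITS_PER_WIDE_INT these predicates all
   conservatively return false.  */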

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
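
/* A minimal usage sketch (illustrative): a caller that wants to add a
   constant to an operand and fold where possible can write

     rtx sum = simplify_gen_binary (PLUS, SImode, x, GEN_INT (4));

   If X is itself a CONST_INT the addition folds to a new constant;
   otherwise a canonically ordered (plus:SI x (const_int 4)) is
   generated.  */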

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  poly_int64 offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc.  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  addr = strip_offset (addr, &offset);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (known_eq (offset, 0) && cmode == GET_MODE (x))
	return c;
      else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
    }

  return x;
}
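
/* Example (illustrative): if X is (mem/u/c:SF (symbol_ref ...)) whose
   symbol addresses a constant-pool entry holding the value 1.0, the
   function returns the CONST_DOUBLE for 1.0 instead of the MEM, giving
   later simplifications an actual constant to work with.  */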

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      poly_int64 offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep);
	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
	      decl = NULL;
	    else
	      offset += bytepos + toffset_val;
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && VAR_P (decl)
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
	      poly_int64 n_offset, o_offset;

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      n = strip_offset (n, &n_offset);
	      o = strip_offset (o, &o_offset);
	      if (!(known_eq (o_offset, n_offset + offset)
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && known_eq (offset, 0))
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_ternary_operation (code, mode, op0_mode,
					 op0, op1, op2)) != 0)
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, mode, cmp_mode,
					    op0, op1)) != 0)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
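
/* A minimal usage sketch (illustrative) for the generators above:

     rtx t = simplify_gen_relational (EQ, SImode, SImode, x, x);

   folds to a constant when the comparison is trivially known (here
   X == X for an integer X with no side effects), and otherwise
   generates (eq:SI x x).  */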

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call it on each expression X;
   if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
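
/* A minimal usage sketch (illustrative): substituting a constant for a
   register and letting the result fold:

     rtx x = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx y = simplify_replace_rtx (x, reg, GEN_INT (3));

   Y is then (const_int 4), because the PLUS is re-simplified after the
   substitution.  */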

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  scalar_int_mode int_mode, int_op_mode, subreg_mode;

  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if MODE is larger
	 than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
     (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
     changing len.  */
  if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
      && REG_P (XEXP (op, 0))
      && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
      && CONST_INT_P (XEXP (op, 1))
      && CONST_INT_P (XEXP (op, 2)))
    {
      rtx op0 = XEXP (op, 0);
      unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
      unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
      if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    {
	      pos -= op_precision - precision;
	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					   XEXP (op, 1), GEN_INT (pos));
	    }
	}
      else if (!BITS_BIG_ENDIAN && precision >= len + pos)
	{
	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
	  if (op0)
	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
					 XEXP (op, 1), XEXP (op, 2));
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (op_mode, &int_op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), int_mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && is_a <scalar_int_mode> (mode, &int_mode)
      && SCALAR_INT_MODE_P (op_mode)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
	return simplify_gen_unary (TRUNCATE, int_mode, inner,
				   GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
     in mode A.  */
  if (GET_CODE (op) == IOR
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && CONST_INT_P (XEXP (op, 1))
      && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
    return constm1_rtx;

  return NULL_RTX;
}
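
/* For example (an illustrative instance of the rules above),
   simplify_truncation can rewrite

     (truncate:QI (lshiftrt:SI (zero_extend:SI (reg:QI x)) (const_int 2)))

   into

     (lshiftrt:QI (reg:QI x) (const_int 2))

   since the low 8 bits of the shifted value depend only on x.  */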

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
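
/* A minimal usage sketch (illustrative):

     rtx t = simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode);

   yields (const_int -5); with a non-constant operand the call falls
   through to simplify_unary_operation_1 and returns zero if no rule
   applies.  */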

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
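
/* Example (illustrative): an SImode-to-SFmode conversion is exact only
   if the value needs no more bits than SFmode's 24-bit significand, so
   (unsigned_float:SF (zero_extend:SI (reg:QI x))) is known exact: at
   most 8 of the 32 input bits can be nonzero.  */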

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp, elt, base, step;
  scalar_int_mode inner, int_mode, op_mode, op0_mode;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
	return simplify_gen_relational (GE, int_mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      /* Similarly, (not (subreg:M (ashift:N 1 X) 0)) where the lowpart
	 subreg truncates N to M is (subreg:M (rotate:N ~1 X) 0).  */
      if (partial_subreg_p (op)
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == (!x ? (neg y) : y).  If the
	 comparison is not reversible, use x ? y : (neg y) instead.  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					   mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
	{
	  int_mode = as_a <scalar_int_mode> (mode);
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  gen_int_shift_amount (inner,
								isize - 1));
	      if (int_mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (int_mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
	    }
	}

      if (vec_series_p (op, &base, &step))
	{
	  /* Only create a new series if we can simplify both parts.  In other
	     cases this isn't really a simplification, and it's not necessarily
	     a win to replace a vector operation with a scalar operation.  */
	  scalar_mode inner_mode = GET_MODE_INNER (mode);
	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
	  if (base)
	    {
	      step = simplify_unary_operation (NEG, inner_mode,
					       step, inner_mode);
	      if (step)
		return gen_vec_series (mode, base, step);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (known_eq (GET_MODE_NUNITS (mode), 1)
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate the intermediate rounding step and so change
	 the result, which is why it is only done for unsafe math.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_UNIT_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x), assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && (num_sign_bit_copies (op, int_mode)
	      == GET_MODE_PRECISION (int_mode)))
	return gen_rtx_NEG (int_mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>).  */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = (GET_MODE_UNIT_PRECISION (lmode)
			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += (GET_MODE_UNIT_PRECISION (rmode)
			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is a mode O with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
1496       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1497 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1498 	  && is_a <scalar_int_mode> (mode, &int_mode)
1499 	  && CONST_INT_P (XEXP (op, 1))
1500 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1501 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1502 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1503 	{
1504 	  scalar_int_mode tmode;
1505 	  gcc_assert (GET_MODE_PRECISION (int_mode)
1506 		      > GET_MODE_PRECISION (op_mode));
1507 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1508 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1509 	    {
1510 	      rtx inner =
1511 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1512 	      if (inner)
1513 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1514 					   ? SIGN_EXTEND : ZERO_EXTEND,
1515 					   int_mode, inner, tmode);
1516 	    }
1517 	}
1518 
1519       /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1520          (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
1521       if (GET_CODE (op) == LSHIFTRT
1522 	  && CONST_INT_P (XEXP (op, 1))
1523 	  && XEXP (op, 1) != const0_rtx)
1524 	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1525 
1526 #if defined(POINTERS_EXTEND_UNSIGNED)
1527       /* As we do not know which address space the pointer is referring to,
1528 	 we can do this only if the target does not support different pointer
1529 	 or address modes depending on the address space.  */
1530       if (target_default_pointer_address_modes_p ()
1531 	  && ! POINTERS_EXTEND_UNSIGNED
1532 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1533 	  && (CONSTANT_P (op)
1534 	      || (GET_CODE (op) == SUBREG
1535 		  && REG_P (SUBREG_REG (op))
1536 		  && REG_POINTER (SUBREG_REG (op))
1537 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1538 	  && !targetm.have_ptr_extend ())
1539 	{
1540 	  temp
1541 	    = convert_memory_address_addr_space_1 (Pmode, op,
1542 						   ADDR_SPACE_GENERIC, false,
1543 						   true);
1544 	  if (temp)
1545 	    return temp;
1546 	}
1547 #endif
1548       break;
1549 
1550     case ZERO_EXTEND:
1551       /* Check for a zero extension of a subreg of a promoted
1552 	 variable, where the promotion is zero-extended, and the
1553 	 target mode is the same as the variable's promotion.  */
1554       if (GET_CODE (op) == SUBREG
1555 	  && SUBREG_PROMOTED_VAR_P (op)
1556 	  && SUBREG_PROMOTED_UNSIGNED_P (op)
1557 	  && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1558 	{
1559 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1560 	  if (temp)
1561 	    return temp;
1562 	}
1563 
1564       /* Extending a widening multiplication should be canonicalized to
1565 	 a wider widening multiplication.  */
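      /* E.g. (zero_extend:DI (mult:SI (zero_extend:SI (reg:HI A))
				       (zero_extend:SI (reg:HI B))))
	 is canonicalized below to
	 (mult:DI (zero_extend:DI (reg:HI A)) (zero_extend:DI (reg:HI B))),
	 which is safe because a 16-bit by 16-bit unsigned multiply
	 cannot overflow 32 bits.  */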
1566       if (GET_CODE (op) == MULT)
1567 	{
1568 	  rtx lhs = XEXP (op, 0);
1569 	  rtx rhs = XEXP (op, 1);
1570 	  enum rtx_code lcode = GET_CODE (lhs);
1571 	  enum rtx_code rcode = GET_CODE (rhs);
1572 
1573 	  /* Widening multiplies usually extend both operands, but sometimes
1574 	     they use a shift to extract a portion of a register.  */
1575 	  if ((lcode == ZERO_EXTEND
1576 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1577 	      && (rcode == ZERO_EXTEND
1578 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1579 	    {
1580 	      machine_mode lmode = GET_MODE (lhs);
1581 	      machine_mode rmode = GET_MODE (rhs);
1582 	      int bits;
1583 
1584 	      if (lcode == LSHIFTRT)
1585 		/* Number of bits not shifted off the end.  */
1586 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1587 			- INTVAL (XEXP (lhs, 1)));
1588 	      else /* lcode == ZERO_EXTEND */
1589 		/* Size of inner mode.  */
1590 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1591 
1592 	      if (rcode == LSHIFTRT)
1593 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1594 			 - INTVAL (XEXP (rhs, 1)));
1595 	      else /* rcode == ZERO_EXTEND */
1596 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1597 
1598 	      /* We can only widen multiplies if the result is mathematically
1599 		 equivalent, i.e. if overflow was impossible.  */
1600 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1601 		return simplify_gen_binary
1602 			 (MULT, mode,
1603 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1604 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1605 	    }
1606 	}
1607 
1608       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1609       if (GET_CODE (op) == ZERO_EXTEND)
1610 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1611 				   GET_MODE (XEXP (op, 0)));
1612 
1613       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1614 	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1615 	 GET_MODE_PRECISION (N) - I bits.  */
1616       if (GET_CODE (op) == LSHIFTRT
1617 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1618 	  && is_a <scalar_int_mode> (mode, &int_mode)
1619 	  && CONST_INT_P (XEXP (op, 1))
1620 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1621 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1622 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1623 	{
1624 	  scalar_int_mode tmode;
1625 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1626 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1627 	    {
1628 	      rtx inner =
1629 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1630 	      if (inner)
1631 		return simplify_gen_unary (ZERO_EXTEND, int_mode,
1632 					   inner, tmode);
1633 	    }
1634 	}
1635 
1636       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1637 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1638 	 of mode N.  E.g.
1639 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1640 	 (and:SI (reg:SI) (const_int 63)).  */
1641       if (partial_subreg_p (op)
1642 	  && is_a <scalar_int_mode> (mode, &int_mode)
1643 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1644 	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1645 	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1646 	  && subreg_lowpart_p (op)
1647 	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
1648 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1649 	{
1650 	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1651 	    return SUBREG_REG (op);
1652 	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1653 				     op0_mode);
1654 	}
1655 
1656 #if defined(POINTERS_EXTEND_UNSIGNED)
1657       /* As we do not know which address space the pointer is referring to,
1658 	 we can do this only if the target does not support different pointer
1659 	 or address modes depending on the address space.  */
1660       if (target_default_pointer_address_modes_p ()
1661 	  && POINTERS_EXTEND_UNSIGNED > 0
1662 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1663 	  && (CONSTANT_P (op)
1664 	      || (GET_CODE (op) == SUBREG
1665 		  && REG_P (SUBREG_REG (op))
1666 		  && REG_POINTER (SUBREG_REG (op))
1667 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1668 	  && !targetm.have_ptr_extend ())
1669 	{
1670 	  temp
1671 	    = convert_memory_address_addr_space_1 (Pmode, op,
1672 						   ADDR_SPACE_GENERIC, false,
1673 						   true);
1674 	  if (temp)
1675 	    return temp;
1676 	}
1677 #endif
1678       break;
1679 
1680     default:
1681       break;
1682     }
1683 
1684   if (VECTOR_MODE_P (mode)
1685       && vec_duplicate_p (op, &elt)
1686       && code != VEC_DUPLICATE)
1687     {
1688       /* Try applying the operator to ELT and see if that simplifies.
1689 	 We can duplicate the result if so.
1690 
1691 	 The reason we don't use simplify_gen_unary is that it isn't
1692 	 necessarily a win to convert things like:
1693 
1694 	   (neg:V (vec_duplicate:V (reg:S R)))
1695 
1696 	 to:
1697 
1698 	   (vec_duplicate:V (neg:S (reg:S R)))
1699 
1700 	 The first might be done entirely in vector registers while the
1701 	 second might need a move between register files.  */
1702       temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1703 				       elt, GET_MODE_INNER (GET_MODE (op)));
1704       if (temp)
1705 	return gen_vec_duplicate (mode, temp);
1706     }
1707 
1708   return 0;
1709 }
1710 
1711 /* Try to compute the value of a unary operation CODE whose output mode is to
1712    be MODE with input operand OP whose mode was originally OP_MODE.
1713    Return zero if the value cannot be computed.  */
1714 rtx
1715 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1716 				rtx op, machine_mode op_mode)
1717 {
1718   scalar_int_mode result_mode;
1719 
1720   if (code == VEC_DUPLICATE)
1721     {
1722       gcc_assert (VECTOR_MODE_P (mode));
1723       if (GET_MODE (op) != VOIDmode)
1724       {
1725 	if (!VECTOR_MODE_P (GET_MODE (op)))
1726 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1727 	else
1728 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1729 						(GET_MODE (op)));
1730       }
1731       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1732 	return gen_const_vec_duplicate (mode, op);
1733       if (GET_CODE (op) == CONST_VECTOR
1734 	  && (CONST_VECTOR_DUPLICATE_P (op)
1735 	      || CONST_VECTOR_NUNITS (op).is_constant ()))
1736 	{
1737 	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1738 				    ? CONST_VECTOR_NPATTERNS (op)
1739 				    : CONST_VECTOR_NUNITS (op).to_constant ());
1740 	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1741 	  rtx_vector_builder builder (mode, npatterns, 1);
1742 	  for (unsigned i = 0; i < npatterns; i++)
1743 	    builder.quick_push (CONST_VECTOR_ELT (op, i));
1744 	  return builder.build ();
1745 	}
1746     }
1747 
1748   if (VECTOR_MODE_P (mode)
1749       && GET_CODE (op) == CONST_VECTOR
1750       && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1751     {
1752       gcc_assert (GET_MODE (op) == op_mode);
1753 
1754       rtx_vector_builder builder;
1755       if (!builder.new_unary_operation (mode, op, false))
1756 	return 0;
1757 
1758       unsigned int count = builder.encoded_nelts ();
1759       for (unsigned int i = 0; i < count; i++)
1760 	{
1761 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1762 					    CONST_VECTOR_ELT (op, i),
1763 					    GET_MODE_INNER (op_mode));
1764 	  if (!x || !valid_for_const_vector_p (mode, x))
1765 	    return 0;
1766 	  builder.quick_push (x);
1767 	}
1768       return builder.build ();
1769     }
1770 
1771   /* The order of these tests is critical so that, for example, we don't
1772      check the wrong mode (input vs. output) for a conversion operation,
1773      such as FIX.  At some point, this should be simplified.  */
1774 
1775   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1776     {
1777       REAL_VALUE_TYPE d;
1778 
1779       if (op_mode == VOIDmode)
1780 	{
1781 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1782 	     the bits of the constant are significant, though this is a
1783 	     dangerous assumption: CONST_INTs are often created and used
1784 	     with garbage in the bits outside of the precision of the
1785 	     implied mode of the const_int.  */
1786 	  op_mode = MAX_MODE_INT;
1787 	}
1788 
1789       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1790 
1791       /* Avoid the folding if flag_signaling_nans is on and
1792          the operand is a signaling NaN.  */
1793       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1794         return 0;
1795 
1796       d = real_value_truncate (mode, d);
1797       return const_double_from_real_value (d, mode);
1798     }
1799   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1800     {
1801       REAL_VALUE_TYPE d;
1802 
1803       if (op_mode == VOIDmode)
1804 	{
1805 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1806 	     the bits of the constant are significant, though this is a
1807 	     dangerous assumption: CONST_INTs are often created and used
1808 	     with garbage in the bits outside of the precision of the
1809 	     implied mode of the const_int.  */
1810 	  op_mode = MAX_MODE_INT;
1811 	}
1812 
1813       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1814 
1815       /* Avoid the folding if flag_signaling_nans is on and
1816          the operand is a signaling NaN.  */
1817       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1818         return 0;
1819 
1820       d = real_value_truncate (mode, d);
1821       return const_double_from_real_value (d, mode);
1822     }
1823 
1824   if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1825     {
1826       unsigned int width = GET_MODE_PRECISION (result_mode);
1827       if (width > MAX_BITSIZE_MODE_ANY_INT)
1828 	return 0;
1829 
1830       wide_int result;
1831       scalar_int_mode imode = (op_mode == VOIDmode
1832 			       ? result_mode
1833 			       : as_a <scalar_int_mode> (op_mode));
1834       rtx_mode_t op0 = rtx_mode_t (op, imode);
1835       int int_value;
1836 
1837 #if TARGET_SUPPORTS_WIDE_INT == 0
1838       /* This assert keeps the simplification from producing a result
1839 	 that cannot be represented in a CONST_DOUBLE, but a lot of
1840 	 upstream callers expect that this function never fails to
1841 	 simplify something, so if this check were added to the test
1842 	 above, the code would die later anyway.  If this assert
1843 	 fires, you just need to make the port support wide int.  */
1844       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1845 #endif
1846 
1847       switch (code)
1848 	{
1849 	case NOT:
1850 	  result = wi::bit_not (op0);
1851 	  break;
1852 
1853 	case NEG:
1854 	  result = wi::neg (op0);
1855 	  break;
1856 
1857 	case ABS:
1858 	  result = wi::abs (op0);
1859 	  break;
1860 
1861 	case FFS:
1862 	  result = wi::shwi (wi::ffs (op0), result_mode);
1863 	  break;
1864 
1865 	case CLZ:
1866 	  if (wi::ne_p (op0, 0))
1867 	    int_value = wi::clz (op0);
1868 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1869 	    return NULL_RTX;
1870 	  result = wi::shwi (int_value, result_mode);
1871 	  break;
1872 
1873 	case CLRSB:
1874 	  result = wi::shwi (wi::clrsb (op0), result_mode);
1875 	  break;
1876 
1877 	case CTZ:
1878 	  if (wi::ne_p (op0, 0))
1879 	    int_value = wi::ctz (op0);
1880 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1881 	    return NULL_RTX;
1882 	  result = wi::shwi (int_value, result_mode);
1883 	  break;
1884 
1885 	case POPCOUNT:
1886 	  result = wi::shwi (wi::popcount (op0), result_mode);
1887 	  break;
1888 
1889 	case PARITY:
1890 	  result = wi::shwi (wi::parity (op0), result_mode);
1891 	  break;
1892 
1893 	case BSWAP:
1894 	  result = wide_int (op0).bswap ();
1895 	  break;
1896 
1897 	case TRUNCATE:
1898 	case ZERO_EXTEND:
1899 	  result = wide_int::from (op0, width, UNSIGNED);
1900 	  break;
1901 
1902 	case SIGN_EXTEND:
1903 	  result = wide_int::from (op0, width, SIGNED);
1904 	  break;
1905 
1906 	case SQRT:
1907 	default:
1908 	  return 0;
1909 	}
1910 
1911       return immed_wide_int_const (result, result_mode);
1912     }
1913 
1914   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1915 	   && SCALAR_FLOAT_MODE_P (mode)
1916 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1917     {
1918       REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1919       switch (code)
1920 	{
1921 	case SQRT:
1922 	  return 0;
1923 	case ABS:
1924 	  d = real_value_abs (&d);
1925 	  break;
1926 	case NEG:
1927 	  d = real_value_negate (&d);
1928 	  break;
1929 	case FLOAT_TRUNCATE:
1930 	  /* Don't perform the operation if flag_signaling_nans is on
1931 	     and the operand is a signaling NaN.  */
1932 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1933 	    return NULL_RTX;
1934 	  d = real_value_truncate (mode, d);
1935 	  break;
1936 	case FLOAT_EXTEND:
1937 	  /* Don't perform the operation if flag_signaling_nans is on
1938 	     and the operand is a signaling NaN.  */
1939 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1940 	    return NULL_RTX;
1941 	  /* All this does is change the mode, unless the conversion
1942 	     also changes the mode class.  */
1943 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1944 	    real_convert (&d, mode, &d);
1945 	  break;
1946 	case FIX:
1947 	  /* Don't perform the operation if flag_signaling_nans is on
1948 	     and the operand is a signaling NaN.  */
1949 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1950 	    return NULL_RTX;
1951 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1952 	  break;
1953 	case NOT:
1954 	  {
1955 	    long tmp[4];
1956 	    int i;
1957 
1958 	    real_to_target (tmp, &d, GET_MODE (op));
1959 	    for (i = 0; i < 4; i++)
1960 	      tmp[i] = ~tmp[i];
1961 	    real_from_target (&d, tmp, mode);
1962 	    break;
1963 	  }
1964 	default:
1965 	  gcc_unreachable ();
1966 	}
1967       return const_double_from_real_value (d, mode);
1968     }
1969   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1970 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1971 	   && is_int_mode (mode, &result_mode))
1972     {
1973       unsigned int width = GET_MODE_PRECISION (result_mode);
1974       if (width > MAX_BITSIZE_MODE_ANY_INT)
1975 	return 0;
1976 
1977       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1978 	 operators are intentionally left unspecified (to ease implementation
1979 	 by target backends), for consistency, this routine implements the
1980 	 same semantics for constant folding as used by the middle-end.  */
1981 
1982       /* This was formerly used only for non-IEEE float.
1983 	 eggert@twinsun.com says it is safe for IEEE also.  */
1984       REAL_VALUE_TYPE t;
1985       const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1986       wide_int wmax, wmin;
1987       /* The fail flag is part of the interface to real_to_integer,
1988 	 but we check the bounds before making this call.  */
1989       bool fail;
1990 
1991       switch (code)
1992 	{
1993 	case FIX:
1994 	  if (REAL_VALUE_ISNAN (*x))
1995 	    return const0_rtx;
1996 
1997 	  /* Test against the signed upper bound.  */
1998 	  wmax = wi::max_value (width, SIGNED);
1999 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
2000 	  if (real_less (&t, x))
2001 	    return immed_wide_int_const (wmax, mode);
2002 
2003 	  /* Test against the signed lower bound.  */
2004 	  wmin = wi::min_value (width, SIGNED);
2005 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
2006 	  if (real_less (x, &t))
2007 	    return immed_wide_int_const (wmin, mode);
2008 
2009 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2010 				       mode);
2011 
2012 	case UNSIGNED_FIX:
2013 	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2014 	    return const0_rtx;
2015 
2016 	  /* Test against the unsigned upper bound.  */
2017 	  wmax = wi::max_value (width, UNSIGNED);
2018 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2019 	  if (real_less (&t, x))
2020 	    return immed_wide_int_const (wmax, mode);
2021 
2022 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2023 				       mode);
2024 
2025 	default:
2026 	  gcc_unreachable ();
2027 	}
2028     }
2029 
2030   /* Handle polynomial integers.  */
2031   else if (CONST_POLY_INT_P (op))
2032     {
2033       poly_wide_int result;
2034       switch (code)
2035 	{
2036 	case NEG:
2037 	  result = -const_poly_int_value (op);
2038 	  break;
2039 
2040 	case NOT:
2041 	  result = ~const_poly_int_value (op);
2042 	  break;
2043 
2044 	default:
2045 	  return NULL_RTX;
2046 	}
2047       return immed_wide_int_const (result, mode);
2048     }
2049 
2050   return NULL_RTX;
2051 }
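/* A minimal usage sketch of the routine above (hypothetical operands):

     rtx x = simplify_const_unary_operation (NEG, SImode,
					     GEN_INT (5), SImode);

   yields (const_int -5); inputs that cannot be folded return NULL_RTX.  */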
2052 
2053 /* Subroutine of simplify_binary_operation to simplify a binary operation
2054    CODE that can commute with byte swapping, with result mode MODE and
2055    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
2056    Return zero if no simplification or canonicalization is possible.  */
2057 
2058 static rtx
2059 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2060 				  rtx op0, rtx op1)
2061 {
2062   rtx tem;
2063 
2064   /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped.  */
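  /* E.g. in SImode, (and (bswap:SI X) (const_int 0xff)) becomes
     (bswap:SI (and:SI X (const_int 0xff000000))), exposing the AND
     to any further simplification.  */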
2065   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2066     {
2067       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2068 				 simplify_gen_unary (BSWAP, mode, op1, mode));
2069       return simplify_gen_unary (BSWAP, mode, tem, mode);
2070     }
2071 
2072   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
2073   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2074     {
2075       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2076       return simplify_gen_unary (BSWAP, mode, tem, mode);
2077     }
2078 
2079   return NULL_RTX;
2080 }
2081 
2082 /* Subroutine of simplify_binary_operation to simplify a commutative,
2083    associative binary operation CODE with result mode MODE, operating
2084    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2085    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2086    canonicalization is possible.  */
2087 
2088 static rtx
2089 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2090 				rtx op0, rtx op1)
2091 {
2092   rtx tem;
2093 
2094   /* Linearize the operator to the left.  */
2095   if (GET_CODE (op1) == code)
2096     {
2097       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
2098       if (GET_CODE (op0) == code)
2099 	{
2100 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2101 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2102 	}
2103 
2104       /* "a op (b op c)" becomes "(b op c) op a".  */
2105       if (! swap_commutative_operands_p (op1, op0))
2106 	return simplify_gen_binary (code, mode, op1, op0);
2107 
2108       std::swap (op0, op1);
2109     }
2110 
2111   if (GET_CODE (op0) == code)
2112     {
2113       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2114       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2115 	{
2116 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2117 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2118 	}
2119 
2120       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2121       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2122       if (tem != 0)
2123         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2124 
2125       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2126       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2127       if (tem != 0)
2128         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2129     }
2130 
2131   return 0;
2132 }
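/* For example, the "(a op b) op c" -> "a op (b op c)" attempt above lets
   (plus (plus (reg:SI X) (const_int 3)) (const_int 4)) fold to
   (plus (reg:SI X) (const_int 7)).  */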
2133 
2134 /* Return a mask describing the COMPARISON.  */
2135 static int
2136 comparison_to_mask (enum rtx_code comparison)
2137 {
2138   switch (comparison)
2139     {
2140     case LT:
2141       return 8;
2142     case GT:
2143       return 4;
2144     case EQ:
2145       return 2;
2146     case UNORDERED:
2147       return 1;
2148 
2149     case LTGT:
2150       return 12;
2151     case LE:
2152       return 10;
2153     case GE:
2154       return 6;
2155     case UNLT:
2156       return 9;
2157     case UNGT:
2158       return 5;
2159     case UNEQ:
2160       return 3;
2161 
2162     case ORDERED:
2163       return 14;
2164     case NE:
2165       return 13;
2166     case UNLE:
2167       return 11;
2168     case UNGE:
2169       return 7;
2170 
2171     default:
2172       gcc_unreachable ();
2173     }
2174 }
2175 
2176 /* Return a comparison corresponding to the MASK.  */
2177 static enum rtx_code
2178 mask_to_comparison (int mask)
2179 {
2180   switch (mask)
2181     {
2182     case 8:
2183       return LT;
2184     case 4:
2185       return GT;
2186     case 2:
2187       return EQ;
2188     case 1:
2189       return UNORDERED;
2190 
2191     case 12:
2192       return LTGT;
2193     case 10:
2194       return LE;
2195     case 6:
2196       return GE;
2197     case 9:
2198       return UNLT;
2199     case 5:
2200       return UNGT;
2201     case 3:
2202       return UNEQ;
2203 
2204     case 14:
2205       return ORDERED;
2206     case 13:
2207       return NE;
2208     case 11:
2209       return UNLE;
2210     case 7:
2211       return UNGE;
2212 
2213     default:
2214       gcc_unreachable ();
2215     }
2216 }
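/* The encoding used by comparison_to_mask assigns LT, GT, EQ and UNORDERED
   the bits 8, 4, 2 and 1 respectively, so the mask of a compound comparison
   is the inclusive OR of its constituents: e.g. LT | EQ == 8 | 2 == 10,
   which mask_to_comparison maps back to LE.  */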
2217 
2218 /* Return true if CODE is valid for comparisons of mode MODE, false
2219    otherwise.
2220 
2221    It is always safe to return false, even if the code was valid for the
2222    given mode, as that will merely suppress optimizations.  */
2223 
2224 static bool
2225 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2226 {
2227   switch (code)
2228     {
2229       /* These are valid for integral, floating and vector modes.  */
2230       case NE:
2231       case EQ:
2232       case GE:
2233       case GT:
2234       case LE:
2235       case LT:
2236 	return (INTEGRAL_MODE_P (mode)
2237 		|| FLOAT_MODE_P (mode)
2238 		|| VECTOR_MODE_P (mode));
2239 
2240       /* These are valid for floating point modes.  */
2241       case LTGT:
2242       case UNORDERED:
2243       case ORDERED:
2244       case UNEQ:
2245       case UNGE:
2246       case UNGT:
2247       case UNLE:
2248       case UNLT:
2249 	return FLOAT_MODE_P (mode);
2250 
2251       /* These are filtered out in simplify_logical_operation, but
2252 	 we check for them too as a matter of safety.  They are valid
2253 	 for integral and vector modes.  */
2254       case GEU:
2255       case GTU:
2256       case LEU:
2257       case LTU:
2258 	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2259 
2260       default:
2261 	gcc_unreachable ();
2262     }
2263 }
2264 
2265 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2266    and OP1, which should be both relational operations.  Return 0 if no such
2267    simplification is possible.  */
2268 rtx
2269 simplify_logical_relational_operation (enum rtx_code code, machine_mode mode,
2270 				       rtx op0, rtx op1)
2271 {
2272   /* We only handle IOR of two relational operations.  */
2273   if (code != IOR)
2274     return 0;
2275 
2276   if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2277     return 0;
2278 
2279   if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2280 	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2281     return 0;
2282 
2283   enum rtx_code code0 = GET_CODE (op0);
2284   enum rtx_code code1 = GET_CODE (op1);
2285 
2286   /* We don't handle unsigned comparisons currently.  */
2287   if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2288     return 0;
2289   if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2290     return 0;
2291 
2292   int mask0 = comparison_to_mask (code0);
2293   int mask1 = comparison_to_mask (code1);
2294 
2295   int mask = mask0 | mask1;
2296 
2297   if (mask == 15)
2298     return const_true_rtx;
2299 
2300   code = mask_to_comparison (mask);
2301 
2302   /* Many comparison codes are only valid for certain mode classes.  */
2303   if (!comparison_code_valid_for_mode (code, mode))
2304     return 0;
2305 
2306   op0 = XEXP (op1, 0);
2307   op1 = XEXP (op1, 1);
2308 
2309   return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2310 }
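/* For instance (a hypothetical flags register),
   (ior:SI (lt:SI (reg:CC FLAGS) (const_int 0))
	   (eq:SI (reg:CC FLAGS) (const_int 0)))
   has mask 8 | 2 == 10 and therefore simplifies to
   (le:SI (reg:CC FLAGS) (const_int 0)).  */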
2311 
2312 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2313    and OP1.  Return 0 if no simplification is possible.
2314 
2315    Don't use this for relational operations such as EQ or LT.
2316    Use simplify_relational_operation instead.  */
2317 rtx
2318 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2319 			   rtx op0, rtx op1)
2320 {
2321   rtx trueop0, trueop1;
2322   rtx tem;
2323 
2324   /* Relational operations don't work here.  We must know the mode
2325      of the operands in order to do the comparison correctly.
2326      Assuming a full word can give incorrect results.
2327      Consider comparing 128 with -128 in QImode.  */
2328   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2329   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2330 
2331   /* Make sure the constant is second.  */
2332   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2333       && swap_commutative_operands_p (op0, op1))
2334     std::swap (op0, op1);
2335 
2336   trueop0 = avoid_constant_pool_reference (op0);
2337   trueop1 = avoid_constant_pool_reference (op1);
2338 
2339   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2340   if (tem)
2341     return tem;
2342   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2343 
2344   if (tem)
2345     return tem;
2346 
2347   /* If the above steps did not result in a simplification and op0 or op1
2348      were constant pool references, use the referenced constants directly.  */
2349   if (trueop0 != op0 || trueop1 != op1)
2350     return simplify_gen_binary (code, mode, trueop0, trueop1);
2351 
2352   return NULL_RTX;
2353 }
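/* A minimal usage sketch (hypothetical operands):

     rtx x = simplify_binary_operation (PLUS, SImode,
					gen_rtx_REG (SImode, 0), const0_rtx);

   returns the REG itself, since x + 0 is x in an integer mode; when no
   simplification applies the function returns NULL_RTX.  */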
2354 
2355 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2356    which OP0 and OP1 are both vector series or vector duplicates
2357    (which are really just series with a step of 0).  If so, try to
2358    form a new series by applying CODE to the bases and to the steps.
2359    Return null if no simplification is possible.
2360 
2361    MODE is the mode of the operation and is known to be a vector
2362    integer mode.  */
2363 
2364 static rtx
2365 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2366 				  rtx op0, rtx op1)
2367 {
2368   rtx base0, step0;
2369   if (vec_duplicate_p (op0, &base0))
2370     step0 = const0_rtx;
2371   else if (!vec_series_p (op0, &base0, &step0))
2372     return NULL_RTX;
2373 
2374   rtx base1, step1;
2375   if (vec_duplicate_p (op1, &base1))
2376     step1 = const0_rtx;
2377   else if (!vec_series_p (op1, &base1, &step1))
2378     return NULL_RTX;
2379 
2380   /* Only create a new series if we can simplify both parts.  In other
2381      cases this isn't really a simplification, and it's not necessarily
2382      a win to replace a vector operation with a scalar operation.  */
2383   scalar_mode inner_mode = GET_MODE_INNER (mode);
2384   rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2385   if (!new_base)
2386     return NULL_RTX;
2387 
2388   rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2389   if (!new_step)
2390     return NULL_RTX;
2391 
2392   return gen_vec_series (mode, new_base, new_step);
2393 }
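/* For example (a hypothetical V4SI case), adding
   (vec_series:V4SI (const_int 1) (const_int 2)) to
   (vec_duplicate:V4SI (const_int 3)) gives elements {4, 6, 8, 10},
   i.e. (vec_series:V4SI (const_int 4) (const_int 2)).  */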
2394 
2395 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2396    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2397    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2398    actual constants.  */
2399 
2400 static rtx
2401 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2402 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2403 {
2404   rtx tem, reversed, opleft, opright, elt0, elt1;
2405   HOST_WIDE_INT val;
2406   scalar_int_mode int_mode, inner_mode;
2407   poly_int64 offset;
2408 
2409   /* Even if we can't compute a constant result,
2410      there are some cases worth simplifying.  */
2411 
2412   switch (code)
2413     {
2414     case PLUS:
2415       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2416 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2417 	 when x is -0 and the rounding mode is not towards -infinity,
2418 	 since (-0) + 0 is then 0.  */
2419       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2420 	return op0;
2421 
2422       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2423 	 transformations are safe even for IEEE.  */
2424       if (GET_CODE (op0) == NEG)
2425 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2426       else if (GET_CODE (op1) == NEG)
2427 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2428 
2429       /* (~a) + 1 -> -a */
2430       if (INTEGRAL_MODE_P (mode)
2431 	  && GET_CODE (op0) == NOT
2432 	  && trueop1 == const1_rtx)
2433 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2434 
2435       /* Handle both-operands-constant cases.  We can only add
2436 	 CONST_INTs to constants since the sum of relocatable symbols
2437 	 can't be handled by most assemblers.  Don't add CONST_INT
2438 	 to CONST_INT since overflow won't be computed properly if wider
2439 	 than HOST_BITS_PER_WIDE_INT.  */
2440 
2441       if ((GET_CODE (op0) == CONST
2442 	   || GET_CODE (op0) == SYMBOL_REF
2443 	   || GET_CODE (op0) == LABEL_REF)
2444 	  && poly_int_rtx_p (op1, &offset))
2445 	return plus_constant (mode, op0, offset);
2446       else if ((GET_CODE (op1) == CONST
2447 		|| GET_CODE (op1) == SYMBOL_REF
2448 		|| GET_CODE (op1) == LABEL_REF)
2449 	       && poly_int_rtx_p (op0, &offset))
2450 	return plus_constant (mode, op1, offset);
2451 
2452       /* See if this is something like X * C - X or vice versa or
2453 	 if the multiplication is written as a shift.  If so, we can
2454 	 distribute and make a new multiply, shift, or maybe just
2455 	 have X (if C is 2 in the example above).  But don't make
2456 	 something more expensive than we had before.  */
2457 
2458       if (is_a <scalar_int_mode> (mode, &int_mode))
2459 	{
2460 	  rtx lhs = op0, rhs = op1;
2461 
2462 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2463 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2464 
2465 	  if (GET_CODE (lhs) == NEG)
2466 	    {
2467 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2468 	      lhs = XEXP (lhs, 0);
2469 	    }
2470 	  else if (GET_CODE (lhs) == MULT
2471 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2472 	    {
2473 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2474 	      lhs = XEXP (lhs, 0);
2475 	    }
2476 	  else if (GET_CODE (lhs) == ASHIFT
2477 		   && CONST_INT_P (XEXP (lhs, 1))
2478                    && INTVAL (XEXP (lhs, 1)) >= 0
2479 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2480 	    {
2481 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2482 					    GET_MODE_PRECISION (int_mode));
2483 	      lhs = XEXP (lhs, 0);
2484 	    }
2485 
2486 	  if (GET_CODE (rhs) == NEG)
2487 	    {
2488 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2489 	      rhs = XEXP (rhs, 0);
2490 	    }
2491 	  else if (GET_CODE (rhs) == MULT
2492 		   && CONST_INT_P (XEXP (rhs, 1)))
2493 	    {
2494 	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2495 	      rhs = XEXP (rhs, 0);
2496 	    }
2497 	  else if (GET_CODE (rhs) == ASHIFT
2498 		   && CONST_INT_P (XEXP (rhs, 1))
2499 		   && INTVAL (XEXP (rhs, 1)) >= 0
2500 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2501 	    {
2502 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2503 					    GET_MODE_PRECISION (int_mode));
2504 	      rhs = XEXP (rhs, 0);
2505 	    }
2506 
2507 	  if (rtx_equal_p (lhs, rhs))
2508 	    {
2509 	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2510 	      rtx coeff;
2511 	      bool speed = optimize_function_for_speed_p (cfun);
2512 
2513 	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2514 
2515 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2516 	      return (set_src_cost (tem, int_mode, speed)
2517 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2518 	    }
2519 	}
2520 
2521       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2522       if (CONST_SCALAR_INT_P (op1)
2523 	  && GET_CODE (op0) == XOR
2524 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2525 	  && mode_signbit_p (mode, op1))
2526 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2527 				    simplify_gen_binary (XOR, mode, op1,
2528 							 XEXP (op0, 1)));
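      /* E.g. in QImode, (plus (xor X (const_int 3)) (const_int -128))
	 becomes (xor X (const_int -125)): adding the sign bit modulo
	 2**8 is the same as flipping it.  */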
2529 
2530       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2531       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2532 	  && GET_CODE (op0) == MULT
2533 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2534 	{
2535 	  rtx in1, in2;
2536 
2537 	  in1 = XEXP (XEXP (op0, 0), 0);
2538 	  in2 = XEXP (op0, 1);
2539 	  return simplify_gen_binary (MINUS, mode, op1,
2540 				      simplify_gen_binary (MULT, mode,
2541 							   in1, in2));
2542 	}
2543 
2544       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2545 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2546 	 is 1.  */
2547       if (COMPARISON_P (op0)
2548 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2549 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2550 	  && (reversed = reversed_comparison (op0, mode)))
2551 	return
2552 	  simplify_gen_unary (NEG, mode, reversed, mode);
2553 
2554       /* If one of the operands is a PLUS or a MINUS, see if we can
2555 	 simplify this by the associative law.
2556 	 Don't use the associative law for floating point.
2557 	 The inaccuracy makes it nonassociative,
2558 	 and subtle programs can break if operations are associated.  */
2559 
2560       if (INTEGRAL_MODE_P (mode)
2561 	  && (plus_minus_operand_p (op0)
2562 	      || plus_minus_operand_p (op1))
2563 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2564 	return tem;
2565 
2566       /* Reassociate floating point addition only when the user
2567 	 specifies associative math operations.  */
2568       if (FLOAT_MODE_P (mode)
2569 	  && flag_associative_math)
2570 	{
2571 	  tem = simplify_associative_operation (code, mode, op0, op1);
2572 	  if (tem)
2573 	    return tem;
2574 	}
2575 
2576       /* Handle vector series.  */
2577       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2578 	{
2579 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2580 	  if (tem)
2581 	    return tem;
2582 	}
2583       break;
2584 
2585     case COMPARE:
2586       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2587       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2588 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2589 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2590 	{
2591 	  rtx xop00 = XEXP (op0, 0);
2592 	  rtx xop10 = XEXP (op1, 0);
2593 
2594 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2595 	      return xop00;
2596 
2597 	    if (REG_P (xop00) && REG_P (xop10)
2598 		&& REGNO (xop00) == REGNO (xop10)
2599 		&& GET_MODE (xop00) == mode
2600 		&& GET_MODE (xop10) == mode
2601 		&& GET_MODE_CLASS (mode) == MODE_CC)
2602 	      return xop00;
2603 	}
2604       break;
2605 
2606     case MINUS:
2607       /* We can't assume x-x is 0 even with non-IEEE floating point,
2608 	 but since it is zero except in very strange circumstances, we
2609 	 will treat it as zero with -ffinite-math-only.  */
2610       if (rtx_equal_p (trueop0, trueop1)
2611 	  && ! side_effects_p (op0)
2612 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2613 	return CONST0_RTX (mode);
2614 
2615       /* Change subtraction from zero into negation.  (0 - x) is the
2616 	 same as -x when x is NaN, infinite, or finite and nonzero.
2617 	 But if the mode has signed zeros, and does not round towards
2618 	 -infinity, then 0 - 0 is 0, not -0.  */
2619       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2620 	return simplify_gen_unary (NEG, mode, op1, mode);
2621 
2622       /* (-1 - a) is ~a, unless the expression contains symbolic
2623 	 constants, in which case not retaining additions and
2624 	 subtractions could cause invalid assembly to be produced.  */
2625       if (trueop0 == constm1_rtx
2626 	  && !contains_symbolic_reference_p (op1))
2627 	return simplify_gen_unary (NOT, mode, op1, mode);
2628 
2629       /* Subtracting 0 has no effect unless the mode has signed zeros
2630 	 and supports rounding towards -infinity.  In such a case,
2631 	 0 - 0 is -0.  */
2632       if (!(HONOR_SIGNED_ZEROS (mode)
2633 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2634 	  && trueop1 == CONST0_RTX (mode))
2635 	return op0;
2636 
2637       /* See if this is something like X * C - X or vice versa or
2638 	 if the multiplication is written as a shift.  If so, we can
2639 	 distribute and make a new multiply, shift, or maybe just
2640 	 have X (if C is 2 in the example above).  But don't make
2641 	 something more expensive than we had before.  */
2642 
2643       if (is_a <scalar_int_mode> (mode, &int_mode))
2644 	{
2645 	  rtx lhs = op0, rhs = op1;
2646 
2647 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2648 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2649 
2650 	  if (GET_CODE (lhs) == NEG)
2651 	    {
2652 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2653 	      lhs = XEXP (lhs, 0);
2654 	    }
2655 	  else if (GET_CODE (lhs) == MULT
2656 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2657 	    {
2658 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2659 	      lhs = XEXP (lhs, 0);
2660 	    }
2661 	  else if (GET_CODE (lhs) == ASHIFT
2662 		   && CONST_INT_P (XEXP (lhs, 1))
2663 		   && INTVAL (XEXP (lhs, 1)) >= 0
2664 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2665 	    {
2666 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2667 					    GET_MODE_PRECISION (int_mode));
2668 	      lhs = XEXP (lhs, 0);
2669 	    }
2670 
2671 	  if (GET_CODE (rhs) == NEG)
2672 	    {
2673 	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2674 	      rhs = XEXP (rhs, 0);
2675 	    }
2676 	  else if (GET_CODE (rhs) == MULT
2677 		   && CONST_INT_P (XEXP (rhs, 1)))
2678 	    {
2679 	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2680 	      rhs = XEXP (rhs, 0);
2681 	    }
2682 	  else if (GET_CODE (rhs) == ASHIFT
2683 		   && CONST_INT_P (XEXP (rhs, 1))
2684 		   && INTVAL (XEXP (rhs, 1)) >= 0
2685 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2686 	    {
2687 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2688 					       GET_MODE_PRECISION (int_mode));
2689 	      negcoeff1 = -negcoeff1;
2690 	      rhs = XEXP (rhs, 0);
2691 	    }
2692 
2693 	  if (rtx_equal_p (lhs, rhs))
2694 	    {
2695 	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2696 	      rtx coeff;
2697 	      bool speed = optimize_function_for_speed_p (cfun);
2698 
2699 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2700 
2701 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2702 	      return (set_src_cost (tem, int_mode, speed)
2703 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2704 	    }
2705 	}
2706 
2707       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2708       if (GET_CODE (op1) == NEG)
2709 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2710 
2711       /* (-x - c) may be simplified as (-c - x).  */
2712       if (GET_CODE (op0) == NEG
2713 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2714 	{
2715 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2716 	  if (tem)
2717 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2718 	}
2719 
2720       if ((GET_CODE (op0) == CONST
2721 	   || GET_CODE (op0) == SYMBOL_REF
2722 	   || GET_CODE (op0) == LABEL_REF)
2723 	  && poly_int_rtx_p (op1, &offset))
2724 	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2725 
2726       /* Don't let a relocatable value get a negative coeff.  */
2727       if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
2728 	return simplify_gen_binary (PLUS, mode,
2729 				    op0,
2730 				    neg_poly_int_rtx (mode, op1));
2731 
2732       /* (x - (x & y)) -> (x & ~y) */
2733       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2734 	{
2735 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2736 	    {
2737 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2738 					GET_MODE (XEXP (op1, 1)));
2739 	      return simplify_gen_binary (AND, mode, op0, tem);
2740 	    }
2741 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2742 	    {
2743 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2744 					GET_MODE (XEXP (op1, 0)));
2745 	      return simplify_gen_binary (AND, mode, op0, tem);
2746 	    }
2747 	}
2748 
2749       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2750 	 by reversing the comparison code if valid.  */
2751       if (STORE_FLAG_VALUE == 1
2752 	  && trueop0 == const1_rtx
2753 	  && COMPARISON_P (op1)
2754 	  && (reversed = reversed_comparison (op1, mode)))
2755 	return reversed;
2756 
2757       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2758       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2759 	  && GET_CODE (op1) == MULT
2760 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2761 	{
2762 	  rtx in1, in2;
2763 
2764 	  in1 = XEXP (XEXP (op1, 0), 0);
2765 	  in2 = XEXP (op1, 1);
2766 	  return simplify_gen_binary (PLUS, mode,
2767 				      simplify_gen_binary (MULT, mode,
2768 							   in1, in2),
2769 				      op0);
2770 	}
2771 
2772       /* Canonicalize (minus (neg A) (mult B C)) to
2773 	 (minus (mult (neg B) C) A).  */
2774       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2775 	  && GET_CODE (op1) == MULT
2776 	  && GET_CODE (op0) == NEG)
2777 	{
2778 	  rtx in1, in2;
2779 
2780 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2781 	  in2 = XEXP (op1, 1);
2782 	  return simplify_gen_binary (MINUS, mode,
2783 				      simplify_gen_binary (MULT, mode,
2784 							   in1, in2),
2785 				      XEXP (op0, 0));
2786 	}
2787 
2788       /* If one of the operands is a PLUS or a MINUS, see if we can
2789 	 simplify this by the associative law.  This will, for example,
2790          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2791 	 Don't use the associative law for floating point.
2792 	 The inaccuracy makes it nonassociative,
2793 	 and subtle programs can break if operations are associated.  */
2794 
2795       if (INTEGRAL_MODE_P (mode)
2796 	  && (plus_minus_operand_p (op0)
2797 	      || plus_minus_operand_p (op1))
2798 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2799 	return tem;
2800 
2801       /* Handle vector series.  */
2802       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2803 	{
2804 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2805 	  if (tem)
2806 	    return tem;
2807 	}
2808       break;
2809 
2810     case MULT:
2811       if (trueop1 == constm1_rtx)
2812 	return simplify_gen_unary (NEG, mode, op0, mode);
2813 
2814       if (GET_CODE (op0) == NEG)
2815 	{
2816 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2817 	  /* If op1 is a MULT as well and simplify_unary_operation
2818 	     just moved the NEG to the second operand, simplify_gen_binary
2819 	     below could, through simplify_associative_operation, move
2820 	     the NEG around again and recurse endlessly.  */
2821 	  if (temp
2822 	      && GET_CODE (op1) == MULT
2823 	      && GET_CODE (temp) == MULT
2824 	      && XEXP (op1, 0) == XEXP (temp, 0)
2825 	      && GET_CODE (XEXP (temp, 1)) == NEG
2826 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2827 	    temp = NULL_RTX;
2828 	  if (temp)
2829 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2830 	}
2831       if (GET_CODE (op1) == NEG)
2832 	{
2833 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2834 	  /* If op0 is a MULT as well and simplify_unary_operation
2835 	     just moved the NEG to the second operand, simplify_gen_binary
2836 	     below could, through simplify_associative_operation, move
2837 	     the NEG around again and recurse endlessly.  */
2838 	  if (temp
2839 	      && GET_CODE (op0) == MULT
2840 	      && GET_CODE (temp) == MULT
2841 	      && XEXP (op0, 0) == XEXP (temp, 0)
2842 	      && GET_CODE (XEXP (temp, 1)) == NEG
2843 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2844 	    temp = NULL_RTX;
2845 	  if (temp)
2846 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2847 	}
2848 
2849       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2850 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2851 	 when the mode has signed zeros, since multiplying a negative
2852 	 number by 0 will give -0, not 0.  */
2853       if (!HONOR_NANS (mode)
2854 	  && !HONOR_SIGNED_ZEROS (mode)
2855 	  && trueop1 == CONST0_RTX (mode)
2856 	  && ! side_effects_p (op0))
2857 	return op1;
2858 
2859       /* In IEEE floating point, x*1 is not equivalent to x for
2860 	 signalling NaNs.  */
2861       if (!HONOR_SNANS (mode)
2862 	  && trueop1 == CONST1_RTX (mode))
2863 	return op0;
2864 
2865       /* Convert multiply by constant power of two into shift.  */
2866       if (CONST_SCALAR_INT_P (trueop1))
2867 	{
2868 	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2869 	  if (val >= 0)
2870 	    return simplify_gen_binary (ASHIFT, mode, op0,
2871 					gen_int_shift_amount (mode, val));
2872 	}
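      /* E.g. (mult:SI (reg:SI X) (const_int 8)) becomes
	 (ashift:SI (reg:SI X) (const_int 3)).  */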
2873 
2874       /* x*2 is x+x and x*(-1) is -x */
2875       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2876 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2877 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2878 	  && GET_MODE (op0) == mode)
2879 	{
2880 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2881 
2882 	  if (real_equal (d1, &dconst2))
2883 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2884 
2885 	  if (!HONOR_SNANS (mode)
2886 	      && real_equal (d1, &dconstm1))
2887 	    return simplify_gen_unary (NEG, mode, op0, mode);
2888 	}
2889 
2890       /* Optimize -x * -x as x * x.  */
2891       if (FLOAT_MODE_P (mode)
2892 	  && GET_CODE (op0) == NEG
2893 	  && GET_CODE (op1) == NEG
2894 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2895 	  && !side_effects_p (XEXP (op0, 0)))
2896 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2897 
2898       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2899       if (SCALAR_FLOAT_MODE_P (mode)
2900 	  && GET_CODE (op0) == ABS
2901 	  && GET_CODE (op1) == ABS
2902 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2903 	  && !side_effects_p (XEXP (op0, 0)))
2904 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2905 
2906       /* Reassociate multiplication, but for floating point MULTs
2907 	 only when the user specifies unsafe math optimizations.  */
2908       if (! FLOAT_MODE_P (mode)
2909 	  || flag_unsafe_math_optimizations)
2910 	{
2911 	  tem = simplify_associative_operation (code, mode, op0, op1);
2912 	  if (tem)
2913 	    return tem;
2914 	}
2915       break;
2916 
2917     case IOR:
2918       if (trueop1 == CONST0_RTX (mode))
2919 	return op0;
2920       if (INTEGRAL_MODE_P (mode)
2921 	  && trueop1 == CONSTM1_RTX (mode)
2922 	  && !side_effects_p (op0))
2923 	return op1;
2924       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2925 	return op0;
2926       /* A | (~A) -> -1 */
2927       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2928 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2929 	  && ! side_effects_p (op0)
2930 	  && SCALAR_INT_MODE_P (mode))
2931 	return constm1_rtx;
2932 
2933       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2934       if (CONST_INT_P (op1)
2935 	  && HWI_COMPUTABLE_MODE_P (mode)
2936 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2937 	  && !side_effects_p (op0))
2938 	return op1;
2939 
2940       /* Canonicalize (X & C1) | C2.  */
2941       if (GET_CODE (op0) == AND
2942 	  && CONST_INT_P (trueop1)
2943 	  && CONST_INT_P (XEXP (op0, 1)))
2944 	{
2945 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2946 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2947 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2948 
2949 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2950 	  if ((c1 & c2) == c1
2951 	      && !side_effects_p (XEXP (op0, 0)))
2952 	    return trueop1;
2953 
2954 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2955 	  if (((c1|c2) & mask) == mask)
2956 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2957 	}
2958 
2959       /* Convert (A & B) | A to A.  */
2960       if (GET_CODE (op0) == AND
2961 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2962 	      || rtx_equal_p (XEXP (op0, 1), op1))
2963 	  && ! side_effects_p (XEXP (op0, 0))
2964 	  && ! side_effects_p (XEXP (op0, 1)))
2965 	return op1;
2966 
2967       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2968          mode precision to (rotate A CX).  */
2969 
2970       if (GET_CODE (op1) == ASHIFT
2971           || GET_CODE (op1) == SUBREG)
2972         {
2973 	  opleft = op1;
2974 	  opright = op0;
2975 	}
2976       else
2977         {
2978 	  opright = op1;
2979 	  opleft = op0;
2980 	}
2981 
2982       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2983           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2984           && CONST_INT_P (XEXP (opleft, 1))
2985           && CONST_INT_P (XEXP (opright, 1))
2986           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2987 	      == GET_MODE_UNIT_PRECISION (mode)))
2988         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
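      /* E.g. (ior:SI (ashift:SI (reg:SI X) (const_int 24))
		      (lshiftrt:SI (reg:SI X) (const_int 8)))
	 becomes (rotate:SI (reg:SI X) (const_int 24)).  */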
2989 
2990       /* Same, but for ashift that has been "simplified" to a wider mode
2991         by simplify_shift_const.  */
2992 
2993       if (GET_CODE (opleft) == SUBREG
2994 	  && is_a <scalar_int_mode> (mode, &int_mode)
2995 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2996 				     &inner_mode)
2997           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2998           && GET_CODE (opright) == LSHIFTRT
2999           && GET_CODE (XEXP (opright, 0)) == SUBREG
3000 	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3001 	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3002           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3003                           SUBREG_REG (XEXP (opright, 0)))
3004           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3005           && CONST_INT_P (XEXP (opright, 1))
3006 	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3007 	      + INTVAL (XEXP (opright, 1))
3008 	      == GET_MODE_PRECISION (int_mode)))
3009 	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3010 			       XEXP (SUBREG_REG (opleft), 1));
3011 
3012       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3013 	 a (sign_extend (plus ...)).  Check whether OP1 is a CONST_INT and
3014 	 the PLUS does not affect any of the bits in OP1; if so, we can do
3015 	 the IOR as a PLUS and we can associate.  This is valid if OP1
3016          can be safely shifted left C bits.  */
3017       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3018           && GET_CODE (XEXP (op0, 0)) == PLUS
3019           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3020           && CONST_INT_P (XEXP (op0, 1))
3021           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3022         {
3023 	  int count = INTVAL (XEXP (op0, 1));
3024 	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3025 
3026           if (mask >> count == INTVAL (trueop1)
3027 	      && trunc_int_for_mode (mask, mode) == mask
3028               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3029 	    return simplify_gen_binary (ASHIFTRT, mode,
3030 					plus_constant (mode, XEXP (op0, 0),
3031 						       mask),
3032 					XEXP (op0, 1));
3033         }
3034 
3035       /* The following happens with bitfield merging.
3036          (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3037       if (GET_CODE (op0) == AND
3038 	  && GET_CODE (op1) == AND
3039 	  && CONST_INT_P (XEXP (op0, 1))
3040 	  && CONST_INT_P (XEXP (op1, 1))
3041 	  && (INTVAL (XEXP (op0, 1))
3042 	      == ~INTVAL (XEXP (op1, 1))))
3043 	{
3044 	  /* The IOR may be on both sides.  */
3045 	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
3046 	  if (GET_CODE (XEXP (op1, 0)) == IOR)
3047 	    top0 = op0, top1 = op1;
3048 	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
3049 	    top0 = op1, top1 = op0;
3050 	  if (top0 && top1)
3051 	    {
3052 	      /* X may be on either side of the inner IOR.  */
3053 	      rtx tem = NULL_RTX;
3054 	      if (rtx_equal_p (XEXP (top0, 0),
3055 			       XEXP (XEXP (top1, 0), 0)))
3056 		tem = XEXP (XEXP (top1, 0), 1);
3057 	      else if (rtx_equal_p (XEXP (top0, 0),
3058 				    XEXP (XEXP (top1, 0), 1)))
3059 		tem = XEXP (XEXP (top1, 0), 0);
3060 	      if (tem)
3061 		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3062 					    simplify_gen_binary
3063 					      (AND, mode, tem, XEXP (top1, 1)));
3064 	    }
3065 	}
3066 
3067       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3068       if (tem)
3069 	return tem;
3070 
3071       tem = simplify_associative_operation (code, mode, op0, op1);
3072       if (tem)
3073 	return tem;
3074 
3075       tem = simplify_logical_relational_operation (code, mode, op0, op1);
3076       if (tem)
3077 	return tem;
3078       break;
3079 
3080     case XOR:
3081       if (trueop1 == CONST0_RTX (mode))
3082 	return op0;
3083       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3084 	return simplify_gen_unary (NOT, mode, op0, mode);
3085       if (rtx_equal_p (trueop0, trueop1)
3086 	  && ! side_effects_p (op0)
3087 	  && GET_MODE_CLASS (mode) != MODE_CC)
3088 	 return CONST0_RTX (mode);
3089 
3090       /* Canonicalize XOR of the most significant bit to PLUS.  */
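      /* For instance, in QImode (xor X (const_int -128)) and
	 (plus X (const_int -128)) are interchangeable: both flip
	 bit 7, since any carry out of the sign bit is discarded.  */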
3091       if (CONST_SCALAR_INT_P (op1)
3092 	  && mode_signbit_p (mode, op1))
3093 	return simplify_gen_binary (PLUS, mode, op0, op1);
3094       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
3095       if (CONST_SCALAR_INT_P (op1)
3096 	  && GET_CODE (op0) == PLUS
3097 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
3098 	  && mode_signbit_p (mode, XEXP (op0, 1)))
3099 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3100 				    simplify_gen_binary (XOR, mode, op1,
3101 							 XEXP (op0, 1)));
3102 
3103       /* If we are XORing two things that have no bits in common,
3104 	 convert them into an IOR.  This helps to detect rotation encoded
3105 	 using those methods and possibly other simplifications.  */
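      /* For example, if A is known to use only the bits in 0x0f and
	 B only those in 0xf0, then (xor A B), (ior A B) and even
	 (plus A B) all compute the same value: with no bits in
	 common there are no carries or cancellations.  */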
3106 
3107       if (HWI_COMPUTABLE_MODE_P (mode)
3108 	  && (nonzero_bits (op0, mode)
3109 	      & nonzero_bits (op1, mode)) == 0)
3110 	return (simplify_gen_binary (IOR, mode, op0, op1));
3111 
3112       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3113 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3114 	 (NOT y).  */
3115       {
3116 	int num_negated = 0;
3117 
3118 	if (GET_CODE (op0) == NOT)
3119 	  num_negated++, op0 = XEXP (op0, 0);
3120 	if (GET_CODE (op1) == NOT)
3121 	  num_negated++, op1 = XEXP (op1, 0);
3122 
3123 	if (num_negated == 2)
3124 	  return simplify_gen_binary (XOR, mode, op0, op1);
3125 	else if (num_negated == 1)
3126 	  return simplify_gen_unary (NOT, mode,
3127 				     simplify_gen_binary (XOR, mode, op0, op1),
3128 				     mode);
3129       }
3130 
3131       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
3132 	 correspond to a machine insn or result in further simplifications
3133 	 if B is a constant.  */
3134 
3135       if (GET_CODE (op0) == AND
3136 	  && rtx_equal_p (XEXP (op0, 1), op1)
3137 	  && ! side_effects_p (op1))
3138 	return simplify_gen_binary (AND, mode,
3139 				    simplify_gen_unary (NOT, mode,
3140 							XEXP (op0, 0), mode),
3141 				    op1);
3142 
3143       else if (GET_CODE (op0) == AND
3144 	       && rtx_equal_p (XEXP (op0, 0), op1)
3145 	       && ! side_effects_p (op1))
3146 	return simplify_gen_binary (AND, mode,
3147 				    simplify_gen_unary (NOT, mode,
3148 							XEXP (op0, 1), mode),
3149 				    op1);
3150 
3151       /* Given (xor (ior (xor A B) C) D), where B, C and D are
3152 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3153 	 out bits inverted twice and not set by C.  Similarly, given
3154 	 (xor (and (xor A B) C) D), simplify without inverting C in
3155 	 the xor operand: (xor (and A C) (B&C)^D).
3156       */
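      /* A worked example in QImode, with B = 0x0f, C = 0x3c, D = 0x05:
	 (xor (ior (xor A 0x0f) 0x3c) 0x05)
	 -> (xor (ior A 0x3c) 0x06),
	 since (B & ~C) ^ D = (0x0f & 0xc3) ^ 0x05 = 0x03 ^ 0x05 = 0x06.  */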
3157       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3158 	       && GET_CODE (XEXP (op0, 0)) == XOR
3159 	       && CONST_INT_P (op1)
3160 	       && CONST_INT_P (XEXP (op0, 1))
3161 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3162 	{
3163 	  enum rtx_code op = GET_CODE (op0);
3164 	  rtx a = XEXP (XEXP (op0, 0), 0);
3165 	  rtx b = XEXP (XEXP (op0, 0), 1);
3166 	  rtx c = XEXP (op0, 1);
3167 	  rtx d = op1;
3168 	  HOST_WIDE_INT bval = INTVAL (b);
3169 	  HOST_WIDE_INT cval = INTVAL (c);
3170 	  HOST_WIDE_INT dval = INTVAL (d);
3171 	  HOST_WIDE_INT xcval;
3172 
3173 	  if (op == IOR)
3174 	    xcval = ~cval;
3175 	  else
3176 	    xcval = cval;
3177 
3178 	  return simplify_gen_binary (XOR, mode,
3179 				      simplify_gen_binary (op, mode, a, c),
3180 				      gen_int_mode ((bval & xcval) ^ dval,
3181 						    mode));
3182 	}
3183 
3184       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3185 	 we can transform like this:
3186             (A&B)^C == ~(A&B)&C | ~C&(A&B)
3187                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
3188                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
3189 	 Attempt a few simplifications when B and C are both constants.  */
3190       if (GET_CODE (op0) == AND
3191 	  && CONST_INT_P (op1)
3192 	  && CONST_INT_P (XEXP (op0, 1)))
3193 	{
3194 	  rtx a = XEXP (op0, 0);
3195 	  rtx b = XEXP (op0, 1);
3196 	  rtx c = op1;
3197 	  HOST_WIDE_INT bval = INTVAL (b);
3198 	  HOST_WIDE_INT cval = INTVAL (c);
3199 
3200 	  /* Instead of computing ~A&C, we compute its negated value,
3201 	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
3202 	     optimize for sure.  If it does not simplify, we still try
3203 	     to compute ~A&C below, but since that always allocates
3204 	     RTL, we don't try that before committing to returning a
3205 	     simplified expression.  */
3206 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3207 						  GEN_INT (~cval));
3208 
3209 	  if ((~cval & bval) == 0)
3210 	    {
3211 	      rtx na_c = NULL_RTX;
3212 	      if (n_na_c)
3213 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3214 	      else
3215 		{
3216 		  /* If ~A does not simplify, don't bother: we don't
3217 		     want to simplify 2 operations into 3, and if na_c
3218 		     were to simplify with na, n_na_c would have
3219 		     simplified as well.  */
3220 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
3221 		  if (na)
3222 		    na_c = simplify_gen_binary (AND, mode, na, c);
3223 		}
3224 
3225 	      /* Try to simplify ~A&C | ~B&C.  */
3226 	      if (na_c != NULL_RTX)
3227 		return simplify_gen_binary (IOR, mode, na_c,
3228 					    gen_int_mode (~bval & cval, mode));
3229 	    }
3230 	  else
3231 	    {
3232 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
3233 	      if (n_na_c == CONSTM1_RTX (mode))
3234 		{
3235 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3236 						    gen_int_mode (~cval & bval,
3237 								  mode));
3238 		  return simplify_gen_binary (IOR, mode, a_nc_b,
3239 					      gen_int_mode (~bval & cval,
3240 							    mode));
3241 		}
3242 	    }
3243 	}
3244 
3245       /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3246 	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3247 	 machines, and also has shorter instruction path length.  */
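      /* This is the classic masked bit-merge idiom: C selects bits
	 from B and ~C selects bits from A, so for example with
	 C = 0xf0 the result is (A & 0x0f) | (B & 0xf0).  */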
3248       if (GET_CODE (op0) == AND
3249 	  && GET_CODE (XEXP (op0, 0)) == XOR
3250 	  && CONST_INT_P (XEXP (op0, 1))
3251 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3252 	{
3253 	  rtx a = trueop1;
3254 	  rtx b = XEXP (XEXP (op0, 0), 1);
3255 	  rtx c = XEXP (op0, 1);
3256 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3257 	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3258 	  rtx bc = simplify_gen_binary (AND, mode, b, c);
3259 	  return simplify_gen_binary (IOR, mode, a_nc, bc);
3260 	}
3261       /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
3262       else if (GET_CODE (op0) == AND
3263 	  && GET_CODE (XEXP (op0, 0)) == XOR
3264 	  && CONST_INT_P (XEXP (op0, 1))
3265 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3266 	{
3267 	  rtx a = XEXP (XEXP (op0, 0), 0);
3268 	  rtx b = trueop1;
3269 	  rtx c = XEXP (op0, 1);
3270 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3271 	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3272 	  rtx ac = simplify_gen_binary (AND, mode, a, c);
3273 	  return simplify_gen_binary (IOR, mode, ac, b_nc);
3274 	}
3275 
3276       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3277 	 comparison if STORE_FLAG_VALUE is 1.  */
3278       if (STORE_FLAG_VALUE == 1
3279 	  && trueop1 == const1_rtx
3280 	  && COMPARISON_P (op0)
3281 	  && (reversed = reversed_comparison (op0, mode)))
3282 	return reversed;
3283 
3284       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3285 	 is (lt foo (const_int 0)), so we can perform the above
3286 	 simplification if STORE_FLAG_VALUE is 1.  */
3287 
3288       if (is_a <scalar_int_mode> (mode, &int_mode)
3289 	  && STORE_FLAG_VALUE == 1
3290 	  && trueop1 == const1_rtx
3291 	  && GET_CODE (op0) == LSHIFTRT
3292 	  && CONST_INT_P (XEXP (op0, 1))
3293 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3294 	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3295 
3296       /* (xor (comparison foo bar) (const_int sign-bit))
3297 	 when STORE_FLAG_VALUE is the sign bit.  */
3298       if (is_a <scalar_int_mode> (mode, &int_mode)
3299 	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3300 	  && trueop1 == const_true_rtx
3301 	  && COMPARISON_P (op0)
3302 	  && (reversed = reversed_comparison (op0, int_mode)))
3303 	return reversed;
3304 
3305       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3306       if (tem)
3307 	return tem;
3308 
3309       tem = simplify_associative_operation (code, mode, op0, op1);
3310       if (tem)
3311 	return tem;
3312       break;
3313 
3314     case AND:
3315       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3316 	return trueop1;
3317       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3318 	return op0;
3319       if (HWI_COMPUTABLE_MODE_P (mode))
3320 	{
3321 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3322 	  HOST_WIDE_INT nzop1;
3323 	  if (CONST_INT_P (trueop1))
3324 	    {
3325 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
3326 	      /* If we are turning off bits already known off in OP0, we need
3327 		 not do an AND.  */
3328 	      if ((nzop0 & ~val1) == 0)
3329 		return op0;
3330 	    }
3331 	  nzop1 = nonzero_bits (trueop1, mode);
3332 	  /* If we are clearing all the nonzero bits, the result is zero.  */
3333 	  if ((nzop1 & nzop0) == 0
3334 	      && !side_effects_p (op0) && !side_effects_p (op1))
3335 	    return CONST0_RTX (mode);
3336 	}
3337       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3338 	  && GET_MODE_CLASS (mode) != MODE_CC)
3339 	return op0;
3340       /* A & (~A) -> 0 */
3341       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3342 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3343 	  && ! side_effects_p (op0)
3344 	  && GET_MODE_CLASS (mode) != MODE_CC)
3345 	return CONST0_RTX (mode);
3346 
3347       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3348 	 there are no nonzero bits of C outside of X's mode.  */
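      /* E.g. (and:SI (sign_extend:SI (reg:QI X)) (const_int 0x7f))
	 becomes (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))),
	 which is valid because 0x7f has no bits outside QImode's mask,
	 so the result is known to be zero-extended.  */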
3349       if ((GET_CODE (op0) == SIGN_EXTEND
3350 	   || GET_CODE (op0) == ZERO_EXTEND)
3351 	  && CONST_INT_P (trueop1)
3352 	  && HWI_COMPUTABLE_MODE_P (mode)
3353 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3354 	      & UINTVAL (trueop1)) == 0)
3355 	{
3356 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3357 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3358 				     gen_int_mode (INTVAL (trueop1),
3359 						   imode));
3360 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3361 	}
3362 
3363       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
3364 	 we might be able to further simplify the AND with X and potentially
3365 	 remove the truncation altogether.  */
3366       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3367 	{
3368 	  rtx x = XEXP (op0, 0);
3369 	  machine_mode xmode = GET_MODE (x);
3370 	  tem = simplify_gen_binary (AND, xmode, x,
3371 				     gen_int_mode (INTVAL (trueop1), xmode));
3372 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3373 	}
3374 
3375       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3376       if (GET_CODE (op0) == IOR
3377 	  && CONST_INT_P (trueop1)
3378 	  && CONST_INT_P (XEXP (op0, 1)))
3379 	{
3380 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3381 	  return simplify_gen_binary (IOR, mode,
3382 				      simplify_gen_binary (AND, mode,
3383 							   XEXP (op0, 0), op1),
3384 				      gen_int_mode (tmp, mode));
3385 	}
3386 
3387       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3388 	 insn (and may simplify more).  */
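      /* A quick truth-table check: where a bit of A is 0 both sides
	 are 0, and where it is 1 both sides reduce to ~B.  */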
3389       if (GET_CODE (op0) == XOR
3390 	  && rtx_equal_p (XEXP (op0, 0), op1)
3391 	  && ! side_effects_p (op1))
3392 	return simplify_gen_binary (AND, mode,
3393 				    simplify_gen_unary (NOT, mode,
3394 							XEXP (op0, 1), mode),
3395 				    op1);
3396 
3397       if (GET_CODE (op0) == XOR
3398 	  && rtx_equal_p (XEXP (op0, 1), op1)
3399 	  && ! side_effects_p (op1))
3400 	return simplify_gen_binary (AND, mode,
3401 				    simplify_gen_unary (NOT, mode,
3402 							XEXP (op0, 0), mode),
3403 				    op1);
3404 
3405       /* Similarly for (~(A ^ B)) & A.  */
3406       if (GET_CODE (op0) == NOT
3407 	  && GET_CODE (XEXP (op0, 0)) == XOR
3408 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3409 	  && ! side_effects_p (op1))
3410 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3411 
3412       if (GET_CODE (op0) == NOT
3413 	  && GET_CODE (XEXP (op0, 0)) == XOR
3414 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3415 	  && ! side_effects_p (op1))
3416 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3417 
3418       /* Convert (A | B) & A to A.  */
3419       if (GET_CODE (op0) == IOR
3420 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3421 	      || rtx_equal_p (XEXP (op0, 1), op1))
3422 	  && ! side_effects_p (XEXP (op0, 0))
3423 	  && ! side_effects_p (XEXP (op0, 1)))
3424 	return op1;
3425 
3426       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3427 	 ((A & N) + B) & M -> (A + B) & M
3428 	 Similarly if (N & M) == 0,
3429 	 ((A | N) + B) & M -> (A + B) & M
3430 	 and for - instead of + and/or ^ instead of |.
3431          Also, if (N & M) == 0, then
3432 	 (A +- N) & M -> A & M.  */
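      /* The simplest instance is the last form: e.g.
	 (and (plus A (const_int 16)) (const_int 15))
	 -> (and A (const_int 15)),
	 since adding a multiple of 16 cannot change the low four bits.  */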
3433       if (CONST_INT_P (trueop1)
3434 	  && HWI_COMPUTABLE_MODE_P (mode)
3435 	  && ~UINTVAL (trueop1)
3436 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3437 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3438 	{
3439 	  rtx pmop[2];
3440 	  int which;
3441 
3442 	  pmop[0] = XEXP (op0, 0);
3443 	  pmop[1] = XEXP (op0, 1);
3444 
3445 	  if (CONST_INT_P (pmop[1])
3446 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3447 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3448 
3449 	  for (which = 0; which < 2; which++)
3450 	    {
3451 	      tem = pmop[which];
3452 	      switch (GET_CODE (tem))
3453 		{
3454 		case AND:
3455 		  if (CONST_INT_P (XEXP (tem, 1))
3456 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3457 		      == UINTVAL (trueop1))
3458 		    pmop[which] = XEXP (tem, 0);
3459 		  break;
3460 		case IOR:
3461 		case XOR:
3462 		  if (CONST_INT_P (XEXP (tem, 1))
3463 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3464 		    pmop[which] = XEXP (tem, 0);
3465 		  break;
3466 		default:
3467 		  break;
3468 		}
3469 	    }
3470 
3471 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3472 	    {
3473 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3474 					 pmop[0], pmop[1]);
3475 	      return simplify_gen_binary (code, mode, tem, op1);
3476 	    }
3477 	}
3478 
3479       /* (and X (ior (not X) Y)) -> (and X Y) */
3480       if (GET_CODE (op1) == IOR
3481 	  && GET_CODE (XEXP (op1, 0)) == NOT
3482 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3483        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3484 
3485       /* (and (ior (not X) Y) X) -> (and X Y) */
3486       if (GET_CODE (op0) == IOR
3487 	  && GET_CODE (XEXP (op0, 0)) == NOT
3488 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3489 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3490 
3491       /* (and X (ior Y (not X))) -> (and X Y) */
3492       if (GET_CODE (op1) == IOR
3493 	  && GET_CODE (XEXP (op1, 1)) == NOT
3494 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3495        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3496 
3497       /* (and (ior Y (not X)) X) -> (and X Y) */
3498       if (GET_CODE (op0) == IOR
3499 	  && GET_CODE (XEXP (op0, 1)) == NOT
3500 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3501 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3502 
3503       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3504       if (tem)
3505 	return tem;
3506 
3507       tem = simplify_associative_operation (code, mode, op0, op1);
3508       if (tem)
3509 	return tem;
3510       break;
3511 
3512     case UDIV:
3513       /* 0/x is 0 (or x&0 if x has side-effects).  */
3514       if (trueop0 == CONST0_RTX (mode)
3515 	  && !cfun->can_throw_non_call_exceptions)
3516 	{
3517 	  if (side_effects_p (op1))
3518 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3519 	  return trueop0;
3520 	}
3521       /* x/1 is x.  */
3522       if (trueop1 == CONST1_RTX (mode))
3523 	{
3524 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3525 	  if (tem)
3526 	    return tem;
3527 	}
3528       /* Convert divide by power of two into shift.  */
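      /* E.g. (udiv X (const_int 16)) becomes
	 (lshiftrt X (const_int 4)): for unsigned values, truncating
	 division by 2**n just drops the low n bits.  */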
3529       if (CONST_INT_P (trueop1)
3530 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3531 	return simplify_gen_binary (LSHIFTRT, mode, op0,
3532 				    gen_int_shift_amount (mode, val));
3533       break;
3534 
3535     case DIV:
3536       /* Handle floating point and integers separately.  */
3537       if (SCALAR_FLOAT_MODE_P (mode))
3538 	{
3539 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3540 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3541 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3542 	     zeros, since dividing 0 by a negative number gives -0.0.  */
3543 	  if (trueop0 == CONST0_RTX (mode)
3544 	      && !HONOR_NANS (mode)
3545 	      && !HONOR_SIGNED_ZEROS (mode)
3546 	      && ! side_effects_p (op1))
3547 	    return op0;
3548 	  /* x/1.0 is x.  */
3549 	  if (trueop1 == CONST1_RTX (mode)
3550 	      && !HONOR_SNANS (mode))
3551 	    return op0;
3552 
3553 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3554 	      && trueop1 != CONST0_RTX (mode))
3555 	    {
3556 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3557 
3558 	      /* x/-1.0 is -x.  */
3559 	      if (real_equal (d1, &dconstm1)
3560 		  && !HONOR_SNANS (mode))
3561 		return simplify_gen_unary (NEG, mode, op0, mode);
3562 
3563 	      /* Change FP division by a constant into multiplication.
3564 		 Only do this with -freciprocal-math.  */
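	      /* For example, x/2.0 becomes x*0.5.  For divisors whose
		 reciprocal is not exactly representable (say 3.0), the
		 product may differ from the quotient in the last ulp,
		 which is why -freciprocal-math is required.  */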
3565 	      if (flag_reciprocal_math
3566 		  && !real_equal (d1, &dconst0))
3567 		{
3568 		  REAL_VALUE_TYPE d;
3569 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3570 		  tem = const_double_from_real_value (d, mode);
3571 		  return simplify_gen_binary (MULT, mode, op0, tem);
3572 		}
3573 	    }
3574 	}
3575       else if (SCALAR_INT_MODE_P (mode))
3576 	{
3577 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3578 	  if (trueop0 == CONST0_RTX (mode)
3579 	      && !cfun->can_throw_non_call_exceptions)
3580 	    {
3581 	      if (side_effects_p (op1))
3582 		return simplify_gen_binary (AND, mode, op1, trueop0);
3583 	      return trueop0;
3584 	    }
3585 	  /* x/1 is x.  */
3586 	  if (trueop1 == CONST1_RTX (mode))
3587 	    {
3588 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3589 	      if (tem)
3590 		return tem;
3591 	    }
3592 	  /* x/-1 is -x.  */
3593 	  if (trueop1 == constm1_rtx)
3594 	    {
3595 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3596 	      if (x)
3597 		return simplify_gen_unary (NEG, mode, x, mode);
3598 	    }
3599 	}
3600       break;
3601 
3602     case UMOD:
3603       /* 0%x is 0 (or x&0 if x has side-effects).  */
3604       if (trueop0 == CONST0_RTX (mode))
3605 	{
3606 	  if (side_effects_p (op1))
3607 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3608 	  return trueop0;
3609 	}
3610       /* x%1 is 0 (or x&0 if x has side-effects).  */
3611       if (trueop1 == CONST1_RTX (mode))
3612 	{
3613 	  if (side_effects_p (op0))
3614 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3615 	  return CONST0_RTX (mode);
3616 	}
3617       /* Implement modulus by power of two as AND.  */
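      /* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)):
	 for unsigned values, the remainder modulo 2**n is just the
	 low n bits.  */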
3618       if (CONST_INT_P (trueop1)
3619 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3620 	return simplify_gen_binary (AND, mode, op0,
3621 				    gen_int_mode (UINTVAL (trueop1) - 1,
3622 						  mode));
3623       break;
3624 
3625     case MOD:
3626       /* 0%x is 0 (or x&0 if x has side-effects).  */
3627       if (trueop0 == CONST0_RTX (mode))
3628 	{
3629 	  if (side_effects_p (op1))
3630 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3631 	  return trueop0;
3632 	}
3633       /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3634       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3635 	{
3636 	  if (side_effects_p (op0))
3637 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3638 	  return CONST0_RTX (mode);
3639 	}
3640       break;
3641 
3642     case ROTATERT:
3643     case ROTATE:
3644       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3645 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3646 	 bitsize - 1, use the other direction of rotate with an
3647 	 amount of 1 .. bitsize / 2 - 1 instead.  */
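      /* For instance, with 32-bit units, (rotatert X 31) becomes
	 (rotate X 1) and (rotatert X 16) becomes (rotate X 16), while
	 (rotate X 16) is already canonical and is left alone.  */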
3648 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3649       if (CONST_INT_P (trueop1)
3650 	  && IN_RANGE (INTVAL (trueop1),
3651 		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3652 		       GET_MODE_UNIT_PRECISION (mode) - 1))
3653 	{
3654 	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3655 	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3656 	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3657 				      mode, op0, new_amount_rtx);
3658 	}
3659 #endif
3660       /* FALLTHRU */
3661     case ASHIFTRT:
3662       if (trueop1 == CONST0_RTX (mode))
3663 	return op0;
3664       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3665 	return op0;
3666       /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
3667       if (CONST_INT_P (trueop0)
3668 	  && HWI_COMPUTABLE_MODE_P (mode)
3669 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3670 	  && ! side_effects_p (op1))
3671 	return op0;
3672 
3673     canonicalize_shift:
3674       /* Given:
3675 	 scalar modes M1, M2
3676 	 scalar constants c1, c2
3677 	 size (M2) > size (M1)
3678 	 c1 == size (M2) - size (M1)
3679 	 optimize:
3680 	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3681 				 <low_part>)
3682 		      (const_int <c2>))
3683 	 to:
3684 	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3685 		    <low_part>).  */
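      /* A sketch with M2 = DImode, M1 = SImode, c1 = 32, c2 = 2:
	 (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32))
			 <low_part>)
		      (const_int 2))
	 becomes
	 (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 34)) <low_part>).  */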
3686       if ((code == ASHIFTRT || code == LSHIFTRT)
3687 	  && is_a <scalar_int_mode> (mode, &int_mode)
3688 	  && SUBREG_P (op0)
3689 	  && CONST_INT_P (op1)
3690 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3691 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3692 				     &inner_mode)
3693 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3694 	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3695 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3696 	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3697 	  && subreg_lowpart_p (op0))
3698 	{
3699 	  rtx tmp = gen_int_shift_amount
3700 	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3701 
3702 	 /* Combine would usually zero out the value when combining two
3703 	    shifts whose total count is greater than or equal to the
3704 	    mode's precision.  However, since we fold away one of the
3705 	    shifts here, combine won't see it, so we should immediately
3706 	    zero the result if it's out of range.  */
3707 	 if (code == LSHIFTRT
3708 	     && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
3709 	  tmp = const0_rtx;
3710 	 else
3711 	   tmp = simplify_gen_binary (code,
3712 				      inner_mode,
3713 				      XEXP (SUBREG_REG (op0), 0),
3714 				      tmp);
3715 
3716 	  return lowpart_subreg (int_mode, tmp, inner_mode);
3717 	}
3718 
3719       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3720 	{
3721 	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3722 	  if (val != INTVAL (op1))
3723 	    return simplify_gen_binary (code, mode, op0,
3724 					gen_int_shift_amount (mode, val));
3725 	}
3726       break;
3727 
3728     case ASHIFT:
3729     case SS_ASHIFT:
3730     case US_ASHIFT:
3731       if (trueop1 == CONST0_RTX (mode))
3732 	return op0;
3733       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3734 	return op0;
3735       goto canonicalize_shift;
3736 
3737     case LSHIFTRT:
3738       if (trueop1 == CONST0_RTX (mode))
3739 	return op0;
3740       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3741 	return op0;
3742       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
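      /* For a 32-bit inner mode where CLZ of zero is defined to be 32,
	 the CLZ result lies in [0, 32] and only the value 32 has bit 5
	 set, so shifting right by 5 yields 1 exactly when X == 0.  */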
3743       if (GET_CODE (op0) == CLZ
3744 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3745 	  && CONST_INT_P (trueop1)
3746 	  && STORE_FLAG_VALUE == 1
3747 	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3748 	{
3749 	  unsigned HOST_WIDE_INT zero_val = 0;
3750 
3751 	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3752 	      && zero_val == GET_MODE_PRECISION (inner_mode)
3753 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3754 	    return simplify_gen_relational (EQ, mode, inner_mode,
3755 					    XEXP (op0, 0), const0_rtx);
3756 	}
3757       goto canonicalize_shift;
3758 
3759     case SMIN:
3760       if (HWI_COMPUTABLE_MODE_P (mode)
3761 	  && mode_signbit_p (mode, trueop1)
3762 	  && ! side_effects_p (op0))
3763 	return op1;
3764       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3765 	return op0;
3766       tem = simplify_associative_operation (code, mode, op0, op1);
3767       if (tem)
3768 	return tem;
3769       break;
3770 
3771     case SMAX:
3772       if (HWI_COMPUTABLE_MODE_P (mode)
3773 	  && CONST_INT_P (trueop1)
3774 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3775 	  && ! side_effects_p (op0))
3776 	return op1;
3777       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3778 	return op0;
3779       tem = simplify_associative_operation (code, mode, op0, op1);
3780       if (tem)
3781 	return tem;
3782       break;
3783 
3784     case UMIN:
3785       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3786 	return op1;
3787       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3788 	return op0;
3789       tem = simplify_associative_operation (code, mode, op0, op1);
3790       if (tem)
3791 	return tem;
3792       break;
3793 
3794     case UMAX:
3795       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3796 	return op1;
3797       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3798 	return op0;
3799       tem = simplify_associative_operation (code, mode, op0, op1);
3800       if (tem)
3801 	return tem;
3802       break;
3803 
3804     case SS_PLUS:
3805     case US_PLUS:
3806     case SS_MINUS:
3807     case US_MINUS:
3808     case SS_MULT:
3809     case US_MULT:
3810     case SS_DIV:
3811     case US_DIV:
3812       /* ??? There are simplifications that can be done.  */
3813       return 0;
3814 
3815     case VEC_SERIES:
3816       if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3817 	return gen_vec_duplicate (mode, op0);
3818       if (valid_for_const_vector_p (mode, op0)
3819 	  && valid_for_const_vector_p (mode, op1))
3820 	return gen_const_vec_series (mode, op0, op1);
3821       return 0;
3822 
3823     case VEC_SELECT:
3824       if (!VECTOR_MODE_P (mode))
3825 	{
3826 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3827 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3828 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3829 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3830 
3831 	  /* We can't reason about selections made at runtime.  */
3832 	  if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3833 	    return 0;
3834 
3835 	  if (vec_duplicate_p (trueop0, &elt0))
3836 	    return elt0;
3837 
3838 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3839 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3840 						      (trueop1, 0, 0)));
3841 
3842 	  /* Extract a scalar element from a nested VEC_SELECT expression
3843 	     (with optional nested VEC_CONCAT expression).  Some targets
3844 	     (i386) extract a scalar element from a vector using a chain of
3845 	     nested VEC_SELECT expressions.  When the input operand is a
3846 	     memory operand, this operation can be simplified to a simple
3847 	     scalar load from an offset memory address.  */
3848 	  int n_elts;
3849 	  if (GET_CODE (trueop0) == VEC_SELECT
3850 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3851 		  .is_constant (&n_elts)))
3852 	    {
3853 	      rtx op0 = XEXP (trueop0, 0);
3854 	      rtx op1 = XEXP (trueop0, 1);
3855 
3856 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3857 	      int elem;
3858 
3859 	      rtvec vec;
3860 	      rtx tmp_op, tmp;
3861 
3862 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3863 	      gcc_assert (i < n_elts);
3864 
3865 	      /* Select the element pointed to by the nested selector.  */
3866 	      elem = INTVAL (XVECEXP (op1, 0, i));
3867 
3868 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3869 	      if (GET_CODE (op0) == VEC_CONCAT)
3870 		{
3871 		  rtx op00 = XEXP (op0, 0);
3872 		  rtx op01 = XEXP (op0, 1);
3873 
3874 		  machine_mode mode00, mode01;
3875 		  int n_elts00, n_elts01;
3876 
3877 		  mode00 = GET_MODE (op00);
3878 		  mode01 = GET_MODE (op01);
3879 
3880 		  /* Find out the number of elements of each operand.
3881 		     Since the concatenated result has a constant number
3882 		     of elements, the operands must too.  */
3883 		  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3884 		  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3885 
3886 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3887 
3888 		  /* Select the correct operand of the VEC_CONCAT and adjust
3889 		     the selector: ELEM indexes OP00 iff ELEM < N_ELTS00.  */
3890 		  if (elem < n_elts00)
3891 		    tmp_op = op00;
3892 		  else
3893 		    {
3894 		      tmp_op = op01;
3895 		      elem -= n_elts00;
3896 		    }
3897 		}
3898 	      else
3899 		tmp_op = op0;
3900 
3901 	      vec = rtvec_alloc (1);
3902 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3903 
3904 	      tmp = gen_rtx_fmt_ee (code, mode,
3905 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3906 	      return tmp;
3907 	    }
3908 	}
3909       else
3910 	{
3911 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3912 	  gcc_assert (GET_MODE_INNER (mode)
3913 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3914 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3915 
3916 	  if (vec_duplicate_p (trueop0, &elt0))
3917 	    /* It doesn't matter which elements are selected by trueop1,
3918 	       because they are all the same.  */
3919 	    return gen_vec_duplicate (mode, elt0);
3920 
3921 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3922 	    {
3923 	      unsigned n_elts = XVECLEN (trueop1, 0);
3924 	      rtvec v = rtvec_alloc (n_elts);
3925 	      unsigned int i;
3926 
3927 	      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3928 	      for (i = 0; i < n_elts; i++)
3929 		{
3930 		  rtx x = XVECEXP (trueop1, 0, i);
3931 
3932 		  if (!CONST_INT_P (x))
3933 		    return 0;
3934 
3935 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3936 						       INTVAL (x));
3937 		}
3938 
3939 	      return gen_rtx_CONST_VECTOR (mode, v);
3940 	    }
3941 
3942 	  /* Recognize the identity.  */
3943 	  if (GET_MODE (trueop0) == mode)
3944 	    {
3945 	      bool maybe_ident = true;
3946 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3947 		{
3948 		  rtx j = XVECEXP (trueop1, 0, i);
3949 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3950 		    {
3951 		      maybe_ident = false;
3952 		      break;
3953 		    }
3954 		}
3955 	      if (maybe_ident)
3956 		return trueop0;
3957 	    }
3958 
3959 	  /* If we build {a,b} then permute it, build the result directly.  */
3960 	  if (XVECLEN (trueop1, 0) == 2
3961 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3962 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3963 	      && GET_CODE (trueop0) == VEC_CONCAT
3964 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3965 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3966 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3967 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3968 	    {
3969 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3970 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3971 	      rtx subop0, subop1;
3972 
3973 	      gcc_assert (i0 < 4 && i1 < 4);
3974 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3975 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3976 
3977 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3978 	    }
3979 
3980 	  if (XVECLEN (trueop1, 0) == 2
3981 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3982 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3983 	      && GET_CODE (trueop0) == VEC_CONCAT
3984 	      && GET_MODE (trueop0) == mode)
3985 	    {
3986 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3987 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3988 	      rtx subop0, subop1;
3989 
3990 	      gcc_assert (i0 < 2 && i1 < 2);
3991 	      subop0 = XEXP (trueop0, i0);
3992 	      subop1 = XEXP (trueop0, i1);
3993 
3994 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3995 	    }
3996 
3997 	  /* If we select one half of a vec_concat, return that.  */
3998 	  int l0, l1;
3999 	  if (GET_CODE (trueop0) == VEC_CONCAT
4000 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4001 		  .is_constant (&l0))
4002 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4003 		  .is_constant (&l1))
4004 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4005 	    {
4006 	      rtx subop0 = XEXP (trueop0, 0);
4007 	      rtx subop1 = XEXP (trueop0, 1);
4008 	      machine_mode mode0 = GET_MODE (subop0);
4009 	      machine_mode mode1 = GET_MODE (subop1);
4010 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4011 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4012 		{
4013 		  bool success = true;
4014 		  for (int i = 1; i < l0; ++i)
4015 		    {
4016 		      rtx j = XVECEXP (trueop1, 0, i);
4017 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
4018 			{
4019 			  success = false;
4020 			  break;
4021 			}
4022 		    }
4023 		  if (success)
4024 		    return subop0;
4025 		}
4026 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4027 		{
4028 		  bool success = true;
4029 		  for (int i = 1; i < l1; ++i)
4030 		    {
4031 		      rtx j = XVECEXP (trueop1, 0, i);
4032 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4033 			{
4034 			  success = false;
4035 			  break;
4036 			}
4037 		    }
4038 		  if (success)
4039 		    return subop1;
4040 		}
4041 	    }
4042 	}
4043 
4044       if (XVECLEN (trueop1, 0) == 1
4045 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4046 	  && GET_CODE (trueop0) == VEC_CONCAT)
4047 	{
4048 	  rtx vec = trueop0;
4049 	  offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4050 
4051 	  /* Try to find the element in the VEC_CONCAT.  */
4052 	  while (GET_MODE (vec) != mode
4053 		 && GET_CODE (vec) == VEC_CONCAT)
4054 	    {
4055 	      poly_int64 vec_size;
4056 
4057 	      if (CONST_INT_P (XEXP (vec, 0)))
4058 	        {
4059 	          /* vec_concat of two const_ints doesn't make sense with
4060 	             respect to modes.  */
4061 	          if (CONST_INT_P (XEXP (vec, 1)))
4062 	            return 0;
4063 
4064 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4065 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4066 	        }
4067 	      else
4068 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4069 
4070 	      if (known_lt (offset, vec_size))
4071 		vec = XEXP (vec, 0);
4072 	      else if (known_ge (offset, vec_size))
4073 		{
4074 		  offset -= vec_size;
4075 		  vec = XEXP (vec, 1);
4076 		}
4077 	      else
4078 		break;
4079 	      vec = avoid_constant_pool_reference (vec);
4080 	    }
4081 
4082 	  if (GET_MODE (vec) == mode)
4083 	    return vec;
4084 	}
4085 
4086       /* If we select elements in a vec_merge that all come from the same
4087 	 operand, select from that operand directly.  */
4088       if (GET_CODE (op0) == VEC_MERGE)
4089 	{
4090 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4091 	  if (CONST_INT_P (trueop02))
4092 	    {
4093 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4094 	      bool all_operand0 = true;
4095 	      bool all_operand1 = true;
4096 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4097 		{
4098 		  rtx j = XVECEXP (trueop1, 0, i);
4099 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4100 		    all_operand1 = false;
4101 		  else
4102 		    all_operand0 = false;
4103 		}
4104 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4105 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4106 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4107 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4108 	    }
4109 	}
4110 
4111       /* If we have two nested selects that are inverses of each
4112 	 other, replace them with the source operand.  */
4113       if (GET_CODE (trueop0) == VEC_SELECT
4114 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
4115 	{
4116 	  rtx op0_subop1 = XEXP (trueop0, 1);
4117 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4118 	  gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4119 
4120 	  /* Apply the outer ordering vector to the inner one.  (The inner
4121 	     ordering vector is expressly permitted to be of a different
4122 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
4123 	     then the two VEC_SELECTs cancel.  */
4124 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4125 	    {
4126 	      rtx x = XVECEXP (trueop1, 0, i);
4127 	      if (!CONST_INT_P (x))
4128 		return 0;
4129 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4130 	      if (!CONST_INT_P (y) || i != INTVAL (y))
4131 		return 0;
4132 	    }
4133 	  return XEXP (trueop0, 0);
4134 	}
4135 
4136       return 0;
4137     case VEC_CONCAT:
4138       {
4139 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4140 				      ? GET_MODE (trueop0)
4141 				      : GET_MODE_INNER (mode));
4142 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4143 				      ? GET_MODE (trueop1)
4144 				      : GET_MODE_INNER (mode));
4145 
4146 	gcc_assert (VECTOR_MODE_P (mode));
4147 	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4148 			      + GET_MODE_SIZE (op1_mode),
4149 			      GET_MODE_SIZE (mode)));
4150 
4151 	if (VECTOR_MODE_P (op0_mode))
4152 	  gcc_assert (GET_MODE_INNER (mode)
4153 		      == GET_MODE_INNER (op0_mode));
4154 	else
4155 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4156 
4157 	if (VECTOR_MODE_P (op1_mode))
4158 	  gcc_assert (GET_MODE_INNER (mode)
4159 		      == GET_MODE_INNER (op1_mode));
4160 	else
4161 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4162 
4163 	unsigned int n_elts, in_n_elts;
4164 	if ((GET_CODE (trueop0) == CONST_VECTOR
4165 	     || CONST_SCALAR_INT_P (trueop0)
4166 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4167 	    && (GET_CODE (trueop1) == CONST_VECTOR
4168 		|| CONST_SCALAR_INT_P (trueop1)
4169 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
4170 	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4171 	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4172 	  {
4173 	    rtvec v = rtvec_alloc (n_elts);
4174 	    unsigned int i;
4175 	    for (i = 0; i < n_elts; i++)
4176 	      {
4177 		if (i < in_n_elts)
4178 		  {
4179 		    if (!VECTOR_MODE_P (op0_mode))
4180 		      RTVEC_ELT (v, i) = trueop0;
4181 		    else
4182 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4183 		  }
4184 		else
4185 		  {
4186 		    if (!VECTOR_MODE_P (op1_mode))
4187 		      RTVEC_ELT (v, i) = trueop1;
4188 		    else
4189 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4190 							   i - in_n_elts);
4191 		  }
4192 	      }
4193 
4194 	    return gen_rtx_CONST_VECTOR (mode, v);
4195 	  }
4196 
4197 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
4198 	   Restrict the transformation to avoid generating a VEC_SELECT with a
4199 	   mode unrelated to its operand.  */
4200 	if (GET_CODE (trueop0) == VEC_SELECT
4201 	    && GET_CODE (trueop1) == VEC_SELECT
4202 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4203 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
4204 	  {
4205 	    rtx par0 = XEXP (trueop0, 1);
4206 	    rtx par1 = XEXP (trueop1, 1);
4207 	    int len0 = XVECLEN (par0, 0);
4208 	    int len1 = XVECLEN (par1, 0);
4209 	    rtvec vec = rtvec_alloc (len0 + len1);
4210 	    for (int i = 0; i < len0; i++)
4211 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4212 	    for (int i = 0; i < len1; i++)
4213 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4214 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4215 					gen_rtx_PARALLEL (VOIDmode, vec));
4216 	  }
4217       }
4218       return 0;
4219 
4220     default:
4221       gcc_unreachable ();
4222     }
4223 
4224   if (mode == GET_MODE (op0)
4225       && mode == GET_MODE (op1)
4226       && vec_duplicate_p (op0, &elt0)
4227       && vec_duplicate_p (op1, &elt1))
4228     {
4229       /* Try applying the operator to ELT and see if that simplifies.
4230 	 We can duplicate the result if so.
4231 
4232 	 The reason we don't use simplify_gen_binary is that it isn't
4233 	 necessarily a win to convert things like:
4234 
4235 	   (plus:V (vec_duplicate:V (reg:S R1))
4236 		   (vec_duplicate:V (reg:S R2)))
4237 
4238 	 to:
4239 
4240 	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4241 
4242 	 The first might be done entirely in vector registers while the
4243 	 second might need a move between register files.  */
4244       tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4245 				       elt0, elt1);
4246       if (tem)
4247 	return gen_vec_duplicate (mode, tem);
4248     }
4249 
4250   return 0;
4251 }
4252 
4253 /* Return true if binary operation OP distributes over addition in operand
4254    OPNO, with the other operand being held constant.  OPNO counts from 1.  */
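/* For example, ASHIFT distributes over addition in operand 1, since
   (x + y) << c == (x << c) + (y << c) modulo the precision, but not in
   operand 2: c << (x + y) is a product of powers of two, not a sum.  */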
4255 
4256 static bool
4257 distributes_over_addition_p (rtx_code op, int opno)
4258 {
4259   switch (op)
4260     {
4261     case PLUS:
4262     case MINUS:
4263     case MULT:
4264       return true;
4265 
4266     case ASHIFT:
4267       return opno == 1;
4268 
4269     default:
4270       return false;
4271     }
4272 }
4273 
4274 rtx
4275 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4276 				 rtx op0, rtx op1)
4277 {
4278   if (VECTOR_MODE_P (mode)
4279       && code != VEC_CONCAT
4280       && GET_CODE (op0) == CONST_VECTOR
4281       && GET_CODE (op1) == CONST_VECTOR)
4282     {
4283       bool step_ok_p;
4284       if (CONST_VECTOR_STEPPED_P (op0)
4285 	  && CONST_VECTOR_STEPPED_P (op1))
4286 	/* We can operate directly on the encoding if:
4287 
4288 	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4289 	    implies
4290 	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4291 
4292 	   Addition and subtraction are the supported operators
4293 	   for which this is true.  */
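	/* For example, adding the stepped series {1, 2, 3, ...} and
	   {10, 20, 30, ...} gives {11, 22, 33, ...}, which is again
	   stepped, whereas multiplying them elementwise gives
	   {10, 40, 90, ...}, which is not.  */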
4294 	step_ok_p = (code == PLUS || code == MINUS);
4295       else if (CONST_VECTOR_STEPPED_P (op0))
4296 	/* We can operate directly on stepped encodings if:
4297 
4298 	     a3 - a2 == a2 - a1
4299 	   implies:
4300 	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4301 
4302 	   which is true if (x -> x op c) distributes over addition.  */
4303 	step_ok_p = distributes_over_addition_p (code, 1);
4304       else
4305 	/* Similarly in reverse.  */
4306 	step_ok_p = distributes_over_addition_p (code, 2);
4307       rtx_vector_builder builder;
4308       if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4309 	return 0;
4310 
4311       unsigned int count = builder.encoded_nelts ();
4312       for (unsigned int i = 0; i < count; i++)
4313 	{
4314 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4315 					     CONST_VECTOR_ELT (op0, i),
4316 					     CONST_VECTOR_ELT (op1, i));
4317 	  if (!x || !valid_for_const_vector_p (mode, x))
4318 	    return 0;
4319 	  builder.quick_push (x);
4320 	}
4321       return builder.build ();
4322     }
4323 
4324   if (VECTOR_MODE_P (mode)
4325       && code == VEC_CONCAT
4326       && (CONST_SCALAR_INT_P (op0)
4327 	  || CONST_FIXED_P (op0)
4328 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
4329       && (CONST_SCALAR_INT_P (op1)
4330 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
4331 	  || CONST_FIXED_P (op1)))
4332     {
4333       /* Both inputs have a constant number of elements, so the result
4334 	 must too.  */
4335       unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4336       rtvec v = rtvec_alloc (n_elts);
4337 
4338       gcc_assert (n_elts >= 2);
4339       if (n_elts == 2)
4340 	{
4341 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4342 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4343 
4344 	  RTVEC_ELT (v, 0) = op0;
4345 	  RTVEC_ELT (v, 1) = op1;
4346 	}
4347       else
4348 	{
4349 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4350 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4351 	  unsigned i;
4352 
4353 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4354 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4355 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4356 
4357 	  for (i = 0; i < op0_n_elts; ++i)
4358 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4359 	  for (i = 0; i < op1_n_elts; ++i)
4360 	    RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4361 	}
4362 
4363       return gen_rtx_CONST_VECTOR (mode, v);
4364     }
4365 
4366   if (SCALAR_FLOAT_MODE_P (mode)
4367       && CONST_DOUBLE_AS_FLOAT_P (op0)
4368       && CONST_DOUBLE_AS_FLOAT_P (op1)
4369       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4370     {
4371       if (code == AND
4372 	  || code == IOR
4373 	  || code == XOR)
4374 	{
4375 	  long tmp0[4];
4376 	  long tmp1[4];
4377 	  REAL_VALUE_TYPE r;
4378 	  int i;
4379 
4380 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4381 			  GET_MODE (op0));
4382 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4383 			  GET_MODE (op1));
4384 	  for (i = 0; i < 4; i++)
4385 	    {
4386 	      switch (code)
4387 	      {
4388 	      case AND:
4389 		tmp0[i] &= tmp1[i];
4390 		break;
4391 	      case IOR:
4392 		tmp0[i] |= tmp1[i];
4393 		break;
4394 	      case XOR:
4395 		tmp0[i] ^= tmp1[i];
4396 		break;
4397 	      default:
4398 		gcc_unreachable ();
4399 	      }
4400 	    }
4401 	   real_from_target (&r, tmp0, mode);
4402 	   return const_double_from_real_value (r, mode);
4403 	}
4404       else
4405 	{
4406 	  REAL_VALUE_TYPE f0, f1, value, result;
4407 	  const REAL_VALUE_TYPE *opr0, *opr1;
4408 	  bool inexact;
4409 
4410 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4411 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4412 
4413 	  if (HONOR_SNANS (mode)
4414 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4415 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4416 	    return 0;
4417 
4418 	  real_convert (&f0, mode, opr0);
4419 	  real_convert (&f1, mode, opr1);
4420 
4421 	  if (code == DIV
4422 	      && real_equal (&f1, &dconst0)
4423 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4424 	    return 0;
4425 
4426 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4427 	      && flag_trapping_math
4428 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4429 	    {
4430 	      int s0 = REAL_VALUE_NEGATIVE (f0);
4431 	      int s1 = REAL_VALUE_NEGATIVE (f1);
4432 
4433 	      switch (code)
4434 		{
4435 		case PLUS:
4436 		  /* Inf + -Inf = NaN plus exception.  */
4437 		  if (s0 != s1)
4438 		    return 0;
4439 		  break;
4440 		case MINUS:
4441 		  /* Inf - Inf = NaN plus exception.  */
4442 		  if (s0 == s1)
4443 		    return 0;
4444 		  break;
4445 		case DIV:
4446 		  /* Inf / Inf = NaN plus exception.  */
4447 		  return 0;
4448 		default:
4449 		  break;
4450 		}
4451 	    }
4452 
4453 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4454 	      && flag_trapping_math
4455 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4456 		  || (REAL_VALUE_ISINF (f1)
4457 		      && real_equal (&f0, &dconst0))))
4458 	    /* Inf * 0 = NaN plus exception.  */
4459 	    return 0;
4460 
4461 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4462 				     &f0, &f1);
4463 	  real_convert (&result, mode, &value);
4464 
4465 	  /* Don't constant fold this floating point operation if
4466 	     the result has overflowed and flag_trapping_math is set.  */
4467 
4468 	  if (flag_trapping_math
4469 	      && MODE_HAS_INFINITIES (mode)
4470 	      && REAL_VALUE_ISINF (result)
4471 	      && !REAL_VALUE_ISINF (f0)
4472 	      && !REAL_VALUE_ISINF (f1))
4473 	    /* Overflow plus exception.  */
4474 	    return 0;
4475 
4476 	  /* Don't constant fold this floating point operation if the
4477 	     result may depend upon the run-time rounding mode and
4478 	     flag_rounding_math is set, or if GCC's software emulation
4479 	     is unable to accurately represent the result.  */
4480 
4481 	  if ((flag_rounding_math
4482 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4483 	      && (inexact || !real_identical (&result, &value)))
4484 	    return NULL_RTX;
4485 
4486 	  return const_double_from_real_value (result, mode);
4487 	}
4488     }
4489 
4490   /* We can fold some multi-word operations.  */
4491   scalar_int_mode int_mode;
4492   if (is_a <scalar_int_mode> (mode, &int_mode)
4493       && CONST_SCALAR_INT_P (op0)
4494       && CONST_SCALAR_INT_P (op1)
4495       && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4496     {
4497       wide_int result;
4498       wi::overflow_type overflow;
4499       rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4500       rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4501 
4502 #if TARGET_SUPPORTS_WIDE_INT == 0
4503       /* This assert keeps the simplification from producing a result
4504 	 that cannot be represented in a CONST_DOUBLE but a lot of
4505 	 upstream callers expect that this function never fails to
4506 	 simplify something, so if you added this to the test
4507 	 above the code would die later anyway.  If this assert
4508 	 happens, you just need to make the port support wide int.  */
4509       gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4510 #endif
4511       switch (code)
4512 	{
4513 	case MINUS:
4514 	  result = wi::sub (pop0, pop1);
4515 	  break;
4516 
4517 	case PLUS:
4518 	  result = wi::add (pop0, pop1);
4519 	  break;
4520 
4521 	case MULT:
4522 	  result = wi::mul (pop0, pop1);
4523 	  break;
4524 
4525 	case DIV:
4526 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4527 	  if (overflow)
4528 	    return NULL_RTX;
4529 	  break;
4530 
4531 	case MOD:
4532 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4533 	  if (overflow)
4534 	    return NULL_RTX;
4535 	  break;
4536 
4537 	case UDIV:
4538 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4539 	  if (overflow)
4540 	    return NULL_RTX;
4541 	  break;
4542 
4543 	case UMOD:
4544 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4545 	  if (overflow)
4546 	    return NULL_RTX;
4547 	  break;
4548 
4549 	case AND:
4550 	  result = wi::bit_and (pop0, pop1);
4551 	  break;
4552 
4553 	case IOR:
4554 	  result = wi::bit_or (pop0, pop1);
4555 	  break;
4556 
4557 	case XOR:
4558 	  result = wi::bit_xor (pop0, pop1);
4559 	  break;
4560 
4561 	case SMIN:
4562 	  result = wi::smin (pop0, pop1);
4563 	  break;
4564 
4565 	case SMAX:
4566 	  result = wi::smax (pop0, pop1);
4567 	  break;
4568 
4569 	case UMIN:
4570 	  result = wi::umin (pop0, pop1);
4571 	  break;
4572 
4573 	case UMAX:
4574 	  result = wi::umax (pop0, pop1);
4575 	  break;
4576 
4577 	case LSHIFTRT:
4578 	case ASHIFTRT:
4579 	case ASHIFT:
4580 	  {
4581 	    wide_int wop1 = pop1;
4582 	    if (SHIFT_COUNT_TRUNCATED)
4583 	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4584 	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4585 	      return NULL_RTX;
4586 
4587 	    switch (code)
4588 	      {
4589 	      case LSHIFTRT:
4590 		result = wi::lrshift (pop0, wop1);
4591 		break;
4592 
4593 	      case ASHIFTRT:
4594 		result = wi::arshift (pop0, wop1);
4595 		break;
4596 
4597 	      case ASHIFT:
4598 		result = wi::lshift (pop0, wop1);
4599 		break;
4600 
4601 	      default:
4602 		gcc_unreachable ();
4603 	      }
4604 	    break;
4605 	  }
4606 	case ROTATE:
4607 	case ROTATERT:
4608 	  {
4609 	    if (wi::neg_p (pop1))
4610 	      return NULL_RTX;
4611 
4612 	    switch (code)
4613 	      {
4614 	      case ROTATE:
4615 		result = wi::lrotate (pop0, pop1);
4616 		break;
4617 
4618 	      case ROTATERT:
4619 		result = wi::rrotate (pop0, pop1);
4620 		break;
4621 
4622 	      default:
4623 		gcc_unreachable ();
4624 	      }
4625 	    break;
4626 	  }
4627 	default:
4628 	  return NULL_RTX;
4629 	}
4630       return immed_wide_int_const (result, int_mode);
4631     }
4632 
4633   /* Handle polynomial integers.  */
4634   if (NUM_POLY_INT_COEFFS > 1
4635       && is_a <scalar_int_mode> (mode, &int_mode)
4636       && poly_int_rtx_p (op0)
4637       && poly_int_rtx_p (op1))
4638     {
4639       poly_wide_int result;
4640       switch (code)
4641 	{
4642 	case PLUS:
4643 	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4644 	  break;
4645 
4646 	case MINUS:
4647 	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4648 	  break;
4649 
4650 	case MULT:
4651 	  if (CONST_SCALAR_INT_P (op1))
4652 	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4653 	  else
4654 	    return NULL_RTX;
4655 	  break;
4656 
4657 	case ASHIFT:
4658 	  if (CONST_SCALAR_INT_P (op1))
4659 	    {
4660 	      wide_int shift = rtx_mode_t (op1, mode);
4661 	      if (SHIFT_COUNT_TRUNCATED)
4662 		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4663 	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4664 		return NULL_RTX;
4665 	      result = wi::to_poly_wide (op0, mode) << shift;
4666 	    }
4667 	  else
4668 	    return NULL_RTX;
4669 	  break;
4670 
4671 	case IOR:
4672 	  if (!CONST_SCALAR_INT_P (op1)
4673 	      || !can_ior_p (wi::to_poly_wide (op0, mode),
4674 			     rtx_mode_t (op1, mode), &result))
4675 	    return NULL_RTX;
4676 	  break;
4677 
4678 	default:
4679 	  return NULL_RTX;
4680 	}
4681       return immed_wide_int_const (result, int_mode);
4682     }
4683 
4684   return NULL_RTX;
4685 }
4686 
4687 
4688 
4689 /* Return a positive integer if X should sort after Y.  Only the
4690    REG-vs-REG case can return 1; precedence differences are doubled.  */
4691 
4692 static int
4693 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4694 {
4695   int result;
4696 
4697   result = (commutative_operand_precedence (y)
4698 	    - commutative_operand_precedence (x));
4699   if (result)
4700     return result + result;
4701 
4702   /* Group together equal REGs to do more simplification.  */
4703   if (REG_P (x) && REG_P (y))
4704     return REGNO (x) > REGNO (y);
4705 
4706   return 0;
4707 }
4708 
4709 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4710    operands may be another PLUS or MINUS.
4711 
4712    Rather than test for specific cases, we do this by a brute-force method
4713    and do all possible simplifications until no more changes occur.  Then
4714    we rebuild the operation.
4715 
4716    May return NULL_RTX when no changes were made.  */
4717 
4718 static rtx
4719 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4720 		     rtx op1)
4721 {
4722   struct simplify_plus_minus_op_data
4723   {
4724     rtx op;
4725     short neg;
4726   } ops[16];
4727   rtx result, tem;
4728   int n_ops = 2;
4729   int changed, n_constants, canonicalized = 0;
4730   int i, j;
4731 
4732   memset (ops, 0, sizeof ops);
4733 
4734   /* Set up the two operands and then expand them until nothing has been
4735      changed.  If we run out of room in our array, give up; this should
4736      almost never happen.  */
4737 
4738   ops[0].op = op0;
4739   ops[0].neg = 0;
4740   ops[1].op = op1;
4741   ops[1].neg = (code == MINUS);
4742 
4743   do
4744     {
4745       changed = 0;
4746       n_constants = 0;
4747 
4748       for (i = 0; i < n_ops; i++)
4749 	{
4750 	  rtx this_op = ops[i].op;
4751 	  int this_neg = ops[i].neg;
4752 	  enum rtx_code this_code = GET_CODE (this_op);
4753 
4754 	  switch (this_code)
4755 	    {
4756 	    case PLUS:
4757 	    case MINUS:
4758 	      if (n_ops == ARRAY_SIZE (ops))
4759 		return NULL_RTX;
4760 
4761 	      ops[n_ops].op = XEXP (this_op, 1);
4762 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4763 	      n_ops++;
4764 
4765 	      ops[i].op = XEXP (this_op, 0);
4766 	      changed = 1;
4767 	      /* If this operand was negated then we will potentially
4768 		 canonicalize the expression.  Similarly if we don't
4769 		 place the operands adjacent we're re-ordering the
4770 		 expression and thus might be performing a
4771 		 canonicalization.  Ignore register re-ordering.
4772 		 ??? It might be better to shuffle the ops array here,
4773 		 but then (plus (plus A B) (plus C D)) wouldn't
4774 		 be seen as non-canonical.  */
4775 	      if (this_neg
4776 		  || (i != n_ops - 2
4777 		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4778 		canonicalized = 1;
4779 	      break;
4780 
4781 	    case NEG:
4782 	      ops[i].op = XEXP (this_op, 0);
4783 	      ops[i].neg = ! this_neg;
4784 	      changed = 1;
4785 	      canonicalized = 1;
4786 	      break;
4787 
4788 	    case CONST:
4789 	      if (n_ops != ARRAY_SIZE (ops)
4790 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4791 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4792 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4793 		{
4794 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4795 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4796 		  ops[n_ops].neg = this_neg;
4797 		  n_ops++;
4798 		  changed = 1;
4799 		  canonicalized = 1;
4800 		}
4801 	      break;
4802 
4803 	    case NOT:
4804 	      /* ~a -> (-a - 1) */
4805 	      if (n_ops != ARRAY_SIZE (ops))
4806 		{
4807 		  ops[n_ops].op = CONSTM1_RTX (mode);
4808 		  ops[n_ops++].neg = this_neg;
4809 		  ops[i].op = XEXP (this_op, 0);
4810 		  ops[i].neg = !this_neg;
4811 		  changed = 1;
4812 		  canonicalized = 1;
4813 		}
4814 	      break;
4815 
4816 	    CASE_CONST_SCALAR_INT:
4817 	    case CONST_POLY_INT:
4818 	      n_constants++;
4819 	      if (this_neg)
4820 		{
4821 		  ops[i].op = neg_poly_int_rtx (mode, this_op);
4822 		  ops[i].neg = 0;
4823 		  changed = 1;
4824 		  canonicalized = 1;
4825 		}
4826 	      break;
4827 
4828 	    default:
4829 	      break;
4830 	    }
4831 	}
4832     }
4833   while (changed);
4834 
4835   if (n_constants > 1)
4836     canonicalized = 1;
4837 
4838   gcc_assert (n_ops >= 2);
4839 
4840   /* If we only have two operands, we can avoid the loops.  */
4841   if (n_ops == 2)
4842     {
4843       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4844       rtx lhs, rhs;
4845 
4846       /* Get the two operands.  Be careful with the order, especially for
4847 	 the cases where code == MINUS.  */
4848       if (ops[0].neg && ops[1].neg)
4849 	{
4850 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4851 	  rhs = ops[1].op;
4852 	}
4853       else if (ops[0].neg)
4854 	{
4855 	  lhs = ops[1].op;
4856 	  rhs = ops[0].op;
4857 	}
4858       else
4859 	{
4860 	  lhs = ops[0].op;
4861 	  rhs = ops[1].op;
4862 	}
4863 
4864       return simplify_const_binary_operation (code, mode, lhs, rhs);
4865     }
4866 
4867   /* Now simplify each pair of operands until nothing changes.  */
4868   while (1)
4869     {
4870       /* Insertion sort is good enough for a small array.  */
4871       for (i = 1; i < n_ops; i++)
4872 	{
4873 	  struct simplify_plus_minus_op_data save;
4874 	  int cmp;
4875 
4876 	  j = i - 1;
4877 	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4878 	  if (cmp <= 0)
4879 	    continue;
4880 	  /* Just swapping registers doesn't count as canonicalization.  */
4881 	  if (cmp != 1)
4882 	    canonicalized = 1;
4883 
4884 	  save = ops[i];
4885 	  do
4886 	    ops[j + 1] = ops[j];
4887 	  while (j--
4888 		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4889 	  ops[j + 1] = save;
4890 	}
4891 
4892       changed = 0;
4893       for (i = n_ops - 1; i > 0; i--)
4894 	for (j = i - 1; j >= 0; j--)
4895 	  {
4896 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4897 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4898 
4899 	    if (lhs != 0 && rhs != 0)
4900 	      {
4901 		enum rtx_code ncode = PLUS;
4902 
4903 		if (lneg != rneg)
4904 		  {
4905 		    ncode = MINUS;
4906 		    if (lneg)
4907 		      std::swap (lhs, rhs);
4908 		  }
4909 		else if (swap_commutative_operands_p (lhs, rhs))
4910 		  std::swap (lhs, rhs);
4911 
4912 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4913 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4914 		  {
4915 		    rtx tem_lhs, tem_rhs;
4916 
4917 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4918 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4919 		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
4920 						     tem_rhs);
4921 
4922 		    if (tem && !CONSTANT_P (tem))
4923 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4924 		  }
4925 		else
4926 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4927 
4928 		if (tem)
4929 		  {
4930 		    /* Reject "simplifications" that just wrap the two
4931 		       arguments in a CONST.  Failure to do so can result
4932 		       in infinite recursion with simplify_binary_operation
4933 		       when it calls us to simplify CONST operations.
4934 		       Also, if we find such a simplification, don't try
4935 		       any more combinations with this rhs:  We must have
4936 		       something like symbol+offset, i.e. one of the
4937 		       trivial CONST expressions we handle later.  */
4938 		    if (GET_CODE (tem) == CONST
4939 			&& GET_CODE (XEXP (tem, 0)) == ncode
4940 			&& XEXP (XEXP (tem, 0), 0) == lhs
4941 			&& XEXP (XEXP (tem, 0), 1) == rhs)
4942 		      break;
4943 		    lneg &= rneg;
4944 		    if (GET_CODE (tem) == NEG)
4945 		      tem = XEXP (tem, 0), lneg = !lneg;
4946 		    if (poly_int_rtx_p (tem) && lneg)
4947 		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;
4948 
4949 		    ops[i].op = tem;
4950 		    ops[i].neg = lneg;
4951 		    ops[j].op = NULL_RTX;
4952 		    changed = 1;
4953 		    canonicalized = 1;
4954 		  }
4955 	      }
4956 	  }
4957 
4958       if (!changed)
4959 	break;
4960 
4961       /* Pack all the operands to the lower-numbered entries.  */
4962       for (i = 0, j = 0; j < n_ops; j++)
4963 	if (ops[j].op)
4964 	  {
4965 	    ops[i] = ops[j];
4966 	    i++;
4967 	  }
4968       n_ops = i;
4969     }
4970 
4971   /* If nothing changed, check that rematerialization of rtl instructions
4972      is still required.  */
4973   if (!canonicalized)
4974     {
4975       /* Perform rematerialization only if all operands are registers and
4976 	 all operations are PLUS.  */
4977       /* ??? Also disallow (non-global, non-frame) fixed registers to work
4978 	 around rs6000 and how it uses the CA register.  See PR67145.  */
4979       for (i = 0; i < n_ops; i++)
4980 	if (ops[i].neg
4981 	    || !REG_P (ops[i].op)
4982 	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4983 		&& fixed_regs[REGNO (ops[i].op)]
4984 		&& !global_regs[REGNO (ops[i].op)]
4985 		&& ops[i].op != frame_pointer_rtx
4986 		&& ops[i].op != arg_pointer_rtx
4987 		&& ops[i].op != stack_pointer_rtx))
4988 	  return NULL_RTX;
4989       goto gen_result;
4990     }
4991 
4992   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4993   if (n_ops == 2
4994       && CONST_INT_P (ops[1].op)
4995       && CONSTANT_P (ops[0].op)
4996       && ops[0].neg)
4997     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4998 
4999   /* We suppressed creation of trivial CONST expressions in the
5000      combination loop to avoid recursion.  Create one manually now.
5001      The combination loop should have ensured that there is exactly
5002      one CONST_INT, and the sort will have ensured that it is last
5003      in the array and that any other constant will be next-to-last.  */
5004 
5005   if (n_ops > 1
5006       && poly_int_rtx_p (ops[n_ops - 1].op)
5007       && CONSTANT_P (ops[n_ops - 2].op))
5008     {
5009       rtx value = ops[n_ops - 1].op;
5010       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5011 	value = neg_poly_int_rtx (mode, value);
5012       if (CONST_INT_P (value))
5013 	{
5014 	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5015 					     INTVAL (value));
5016 	  n_ops--;
5017 	}
5018     }
5019 
5020   /* Put a non-negated operand first, if possible.  */
5021 
5022   for (i = 0; i < n_ops && ops[i].neg; i++)
5023     continue;
5024   if (i == n_ops)
5025     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5026   else if (i != 0)
5027     {
5028       tem = ops[0].op;
5029       ops[0] = ops[i];
5030       ops[i].op = tem;
5031       ops[i].neg = 1;
5032     }
5033 
5034   /* Now make the result by performing the requested operations.  */
5035  gen_result:
5036   result = ops[0].op;
5037   for (i = 1; i < n_ops; i++)
5038     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5039 			     mode, result, ops[i].op);
5040 
5041   return result;
5042 }
5043 
5044 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
5045 static bool
5046 plus_minus_operand_p (const_rtx x)
5047 {
5048   return GET_CODE (x) == PLUS
5049          || GET_CODE (x) == MINUS
5050 	 || (GET_CODE (x) == CONST
5051 	     && GET_CODE (XEXP (x, 0)) == PLUS
5052 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5053 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5054 }
5055 
5056 /* Like simplify_binary_operation except used for relational operators.
5057    MODE is the mode of the result.  If MODE is VOIDmode, the operands must
5058    not both be VOIDmode.
5059 
5060    CMP_MODE specifies the mode in which the comparison is done, so it is
5061    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
5062    the operands or, if both are VOIDmode, the operands are compared in
5063    "infinite precision".  */
5064 rtx
5065 simplify_relational_operation (enum rtx_code code, machine_mode mode,
5066 			       machine_mode cmp_mode, rtx op0, rtx op1)
5067 {
5068   rtx tem, trueop0, trueop1;
5069 
5070   if (cmp_mode == VOIDmode)
5071     cmp_mode = GET_MODE (op0);
5072   if (cmp_mode == VOIDmode)
5073     cmp_mode = GET_MODE (op1);
5074 
5075   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5076   if (tem)
5077     {
5078       if (SCALAR_FLOAT_MODE_P (mode))
5079 	{
5080           if (tem == const0_rtx)
5081             return CONST0_RTX (mode);
5082 #ifdef FLOAT_STORE_FLAG_VALUE
5083 	  {
5084 	    REAL_VALUE_TYPE val;
5085 	    val = FLOAT_STORE_FLAG_VALUE (mode);
5086 	    return const_double_from_real_value (val, mode);
5087 	  }
5088 #else
5089 	  return NULL_RTX;
5090 #endif
5091 	}
5092       if (VECTOR_MODE_P (mode))
5093 	{
5094 	  if (tem == const0_rtx)
5095 	    return CONST0_RTX (mode);
5096 #ifdef VECTOR_STORE_FLAG_VALUE
5097 	  {
5098 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
5099 	    if (val == NULL_RTX)
5100 	      return NULL_RTX;
5101 	    if (val == const1_rtx)
5102 	      return CONST1_RTX (mode);
5103 
5104 	    return gen_const_vec_duplicate (mode, val);
5105 	  }
5106 #else
5107 	  return NULL_RTX;
5108 #endif
5109 	}
5110       /* For a vector comparison with a scalar int result, it is unknown
5111 	 whether the target means a comparison into an integral bitmask, or
5112 	 one whose whole result is const_true_rtx when all element
5113 	 comparisons are true, or when any element comparison is true.
5114 	 For const0_rtx all the cases are the same.  */
5115       if (VECTOR_MODE_P (cmp_mode)
5116 	  && SCALAR_INT_MODE_P (mode)
5117 	  && tem == const_true_rtx)
5118 	return NULL_RTX;
5119 
5120       return tem;
5121     }
5122 
5123   /* For the following tests, ensure const0_rtx is op1.  */
5124   if (swap_commutative_operands_p (op0, op1)
5125       || (op0 == const0_rtx && op1 != const0_rtx))
5126     std::swap (op0, op1), code = swap_condition (code);
5127 
5128   /* If op0 is a compare, extract the comparison arguments from it.  */
5129   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5130     return simplify_gen_relational (code, mode, VOIDmode,
5131 				    XEXP (op0, 0), XEXP (op0, 1));
5132 
5133   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
5134       || CC0_P (op0))
5135     return NULL_RTX;
5136 
5137   trueop0 = avoid_constant_pool_reference (op0);
5138   trueop1 = avoid_constant_pool_reference (op1);
5139   return simplify_relational_operation_1 (code, mode, cmp_mode,
5140 					  trueop0, trueop1);
5141 }
5142 
5143 /* This part of simplify_relational_operation is only used when CMP_MODE
5144    is not in class MODE_CC (i.e. it is a real comparison).
5145 
5146    MODE is the mode of the result, while CMP_MODE specifies the mode
5147    in which the comparison is done, so it is the mode of the operands.  */
5148 
5149 static rtx
5150 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
5151 				 machine_mode cmp_mode, rtx op0, rtx op1)
5152 {
5153   enum rtx_code op0code = GET_CODE (op0);
5154 
5155   if (op1 == const0_rtx && COMPARISON_P (op0))
5156     {
5157       /* If op0 is a comparison, extract the comparison arguments
5158          from it.  */
5159       if (code == NE)
5160 	{
5161 	  if (GET_MODE (op0) == mode)
5162 	    return simplify_rtx (op0);
5163 	  else
5164 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5165 					    XEXP (op0, 0), XEXP (op0, 1));
5166 	}
5167       else if (code == EQ)
5168 	{
5169 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5170 	  if (new_code != UNKNOWN)
5171 	    return simplify_gen_relational (new_code, mode, VOIDmode,
5172 					    XEXP (op0, 0), XEXP (op0, 1));
5173 	}
5174     }
5175 
5176   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5177      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
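  /* E.g. (ltu (plus a (const_int 1)) (const_int 1)) becomes
     (geu a (const_int -1)): a + 1 is unsigned-less-than 1 only when
     a was the all-ones value, which is exactly when a >=u -1 holds.  */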
5178   if ((code == LTU || code == GEU)
5179       && GET_CODE (op0) == PLUS
5180       && CONST_INT_P (XEXP (op0, 1))
5181       && (rtx_equal_p (op1, XEXP (op0, 0))
5182 	  || rtx_equal_p (op1, XEXP (op0, 1)))
5183       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5184       && XEXP (op0, 1) != const0_rtx)
5185     {
5186       rtx new_cmp
5187 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5188       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5189 				      cmp_mode, XEXP (op0, 0), new_cmp);
5190     }
5191 
5192   /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5193      transformed into (LTU a -C).  */
5194   if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5195       && CONST_INT_P (XEXP (op0, 1))
5196       && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5197       && XEXP (op0, 1) != const0_rtx)
5198     {
5199       rtx new_cmp
5200 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5201       return simplify_gen_relational (LTU, mode, cmp_mode,
5202 				       XEXP (op0, 0), new_cmp);
5203     }
5204 
5205   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
5206   if ((code == LTU || code == GEU)
5207       && GET_CODE (op0) == PLUS
5208       && rtx_equal_p (op1, XEXP (op0, 1))
5209       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
5210       && !rtx_equal_p (op1, XEXP (op0, 0)))
5211     return simplify_gen_relational (code, mode, cmp_mode, op0,
5212 				    copy_rtx (XEXP (op0, 0)));
5213 
5214   if (op1 == const0_rtx)
5215     {
5216       /* Canonicalize (GTU x 0) as (NE x 0).  */
5217       if (code == GTU)
5218         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5219       /* Canonicalize (LEU x 0) as (EQ x 0).  */
5220       if (code == LEU)
5221         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5222     }
5223   else if (op1 == const1_rtx)
5224     {
5225       switch (code)
5226         {
5227         case GE:
5228 	  /* Canonicalize (GE x 1) as (GT x 0).  */
5229 	  return simplify_gen_relational (GT, mode, cmp_mode,
5230 					  op0, const0_rtx);
5231 	case GEU:
5232 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
5233 	  return simplify_gen_relational (NE, mode, cmp_mode,
5234 					  op0, const0_rtx);
5235 	case LT:
5236 	  /* Canonicalize (LT x 1) as (LE x 0).  */
5237 	  return simplify_gen_relational (LE, mode, cmp_mode,
5238 					  op0, const0_rtx);
5239 	case LTU:
5240 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
5241 	  return simplify_gen_relational (EQ, mode, cmp_mode,
5242 					  op0, const0_rtx);
5243 	default:
5244 	  break;
5245 	}
5246     }
5247   else if (op1 == constm1_rtx)
5248     {
5249       /* Canonicalize (LE x -1) as (LT x 0).  */
5250       if (code == LE)
5251         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5252       /* Canonicalize (GT x -1) as (GE x 0).  */
5253       if (code == GT)
5254         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5255     }
5256 
5257   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
5258   if ((code == EQ || code == NE)
5259       && (op0code == PLUS || op0code == MINUS)
5260       && CONSTANT_P (op1)
5261       && CONSTANT_P (XEXP (op0, 1))
5262       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5263     {
5264       rtx x = XEXP (op0, 0);
5265       rtx c = XEXP (op0, 1);
5266       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5267       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5268 
5269       /* Detect an infinite recursive condition, where we oscillate at this
5270 	 simplification case between:
5271 	    A + B == C  <--->  C - B == A,
5272 	 where A, B, and C are all constants with non-simplifiable expressions,
5273 	 usually SYMBOL_REFs.  */
5274       if (GET_CODE (tem) == invcode
5275 	  && CONSTANT_P (x)
5276 	  && rtx_equal_p (c, XEXP (tem, 1)))
5277 	return NULL_RTX;
5278 
5279       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5280     }
5281 
5282   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
5283      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
5284   scalar_int_mode int_mode, int_cmp_mode;
5285   if (code == NE
5286       && op1 == const0_rtx
5287       && is_int_mode (mode, &int_mode)
5288       && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5289       /* ??? Work-around BImode bugs in the ia64 backend.  */
5290       && int_mode != BImode
5291       && int_cmp_mode != BImode
5292       && nonzero_bits (op0, int_cmp_mode) == 1
5293       && STORE_FLAG_VALUE == 1)
5294     return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5295 	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5296 	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
5297 
5298   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
5299   if ((code == EQ || code == NE)
5300       && op1 == const0_rtx
5301       && op0code == XOR)
5302     return simplify_gen_relational (code, mode, cmp_mode,
5303 				    XEXP (op0, 0), XEXP (op0, 1));
5304 
5305   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
5306   if ((code == EQ || code == NE)
5307       && op0code == XOR
5308       && rtx_equal_p (XEXP (op0, 0), op1)
5309       && !side_effects_p (XEXP (op0, 0)))
5310     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5311 				    CONST0_RTX (mode));
5312 
5313   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
5314   if ((code == EQ || code == NE)
5315       && op0code == XOR
5316       && rtx_equal_p (XEXP (op0, 1), op1)
5317       && !side_effects_p (XEXP (op0, 1)))
5318     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5319 				    CONST0_RTX (mode));
5320 
5321   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
5322   if ((code == EQ || code == NE)
5323       && op0code == XOR
5324       && CONST_SCALAR_INT_P (op1)
5325       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5326     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5327 				    simplify_gen_binary (XOR, cmp_mode,
5328 							 XEXP (op0, 1), op1));
5329 
5330   /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5331      constant folding if x/y is a constant.  */
5332   if ((code == EQ || code == NE)
5333       && (op0code == AND || op0code == IOR)
5334       && !side_effects_p (op1)
5335       && op1 != CONST0_RTX (cmp_mode))
5336     {
5337       /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5338 	 (eq/ne (and (not y) x) 0).  */
5339       if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5340 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5341 	{
5342 	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5343 					  cmp_mode);
5344 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5345 
5346 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5347 					  CONST0_RTX (cmp_mode));
5348 	}
5349 
5350       /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5351 	 (eq/ne (and (not x) y) 0).  */
5352       if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5353 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5354 	{
5355 	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5356 					  cmp_mode);
5357 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5358 
5359 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5360 					  CONST0_RTX (cmp_mode));
5361 	}
5362     }
5363 
5364   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
5365   if ((code == EQ || code == NE)
5366       && GET_CODE (op0) == BSWAP
5367       && CONST_SCALAR_INT_P (op1))
5368     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5369 				    simplify_gen_unary (BSWAP, cmp_mode,
5370 							op1, cmp_mode));
5371 
5372   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
5373   if ((code == EQ || code == NE)
5374       && GET_CODE (op0) == BSWAP
5375       && GET_CODE (op1) == BSWAP)
5376     return simplify_gen_relational (code, mode, cmp_mode,
5377 				    XEXP (op0, 0), XEXP (op1, 0));
5378 
5379   if (op0code == POPCOUNT && op1 == const0_rtx)
5380     switch (code)
5381       {
5382       case EQ:
5383       case LE:
5384       case LEU:
5385 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
5386 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5387 					XEXP (op0, 0), const0_rtx);
5388 
5389       case NE:
5390       case GT:
5391       case GTU:
5392 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
5393 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5394 					XEXP (op0, 0), const0_rtx);
5395 
5396       default:
5397 	break;
5398       }
5399 
5400   return NULL_RTX;
5401 }
5402 
5403 enum
5404 {
5405   CMP_EQ = 1,
5406   CMP_LT = 2,
5407   CMP_GT = 4,
5408   CMP_LTU = 8,
5409   CMP_GTU = 16
5410 };
5411 
5412 
5413 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5414    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
5415    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5416    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5417    For floating-point comparisons, assume that the operands were ordered.  */
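/* For instance, if KNOWN_RESULT is (CMP_GT | CMP_GTU), then LE and LEU
   yield const0_rtx while NE, GT and GTU yield const_true_rtx.  */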
5418 
5419 static rtx
5420 comparison_result (enum rtx_code code, int known_results)
5421 {
5422   switch (code)
5423     {
5424     case EQ:
5425     case UNEQ:
5426       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5427     case NE:
5428     case LTGT:
5429       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5430 
5431     case LT:
5432     case UNLT:
5433       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5434     case GE:
5435     case UNGE:
5436       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5437 
5438     case GT:
5439     case UNGT:
5440       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5441     case LE:
5442     case UNLE:
5443       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5444 
5445     case LTU:
5446       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5447     case GEU:
5448       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5449 
5450     case GTU:
5451       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5452     case LEU:
5453       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5454 
5455     case ORDERED:
5456       return const_true_rtx;
5457     case UNORDERED:
5458       return const0_rtx;
5459     default:
5460       gcc_unreachable ();
5461     }
5462 }
5463 
5464 /* Check if the given comparison (done in the given MODE) is actually
5465    a tautology or a contradiction.  If the mode is VOIDmode, the
5466    comparison is done in "infinite precision".  If no simplification
5467    is possible, this function returns zero.  Otherwise, it returns
5468    either const_true_rtx or const0_rtx.  */
5469 
5470 rtx
5471 simplify_const_relational_operation (enum rtx_code code,
5472 				     machine_mode mode,
5473 				     rtx op0, rtx op1)
5474 {
5475   rtx tem;
5476   rtx trueop0;
5477   rtx trueop1;
5478 
5479   gcc_assert (mode != VOIDmode
5480 	      || (GET_MODE (op0) == VOIDmode
5481 		  && GET_MODE (op1) == VOIDmode));
5482 
5483   /* If op0 is a compare, extract the comparison arguments from it.  */
5484   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5485     {
5486       op1 = XEXP (op0, 1);
5487       op0 = XEXP (op0, 0);
5488 
5489       if (GET_MODE (op0) != VOIDmode)
5490 	mode = GET_MODE (op0);
5491       else if (GET_MODE (op1) != VOIDmode)
5492 	mode = GET_MODE (op1);
5493       else
5494 	return 0;
5495     }
5496 
5497   /* We can't simplify MODE_CC values since we don't know what the
5498      actual comparison is.  */
5499   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5500     return 0;
5501 
5502   /* Make sure the constant is second.  */
5503   if (swap_commutative_operands_p (op0, op1))
5504     {
5505       std::swap (op0, op1);
5506       code = swap_condition (code);
5507     }
5508 
5509   trueop0 = avoid_constant_pool_reference (op0);
5510   trueop1 = avoid_constant_pool_reference (op1);
5511 
5512   /* For integer comparisons of A and B maybe we can simplify A - B and can
5513      then simplify a comparison of that with zero.  If A and B are both either
5514      a register or a CONST_INT, this can't help; testing for these cases will
5515      prevent infinite recursion here and speed things up.
5516 
5517      We can only do this for EQ and NE comparisons as otherwise we may
5518      lose or introduce overflow which we cannot disregard as undefined as
5519      we do not know the signedness of the operation on either the left or
5520      the right hand side of the comparison.  */
5521 
5522   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5523       && (code == EQ || code == NE)
5524       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5525 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
5526       && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5527       /* We cannot do this if tem is a nonzero address.  */
5528       && ! nonzero_address_p (tem))
5529     return simplify_const_relational_operation (signed_condition (code),
5530 						mode, tem, const0_rtx);
5531 
5532   if (! HONOR_NANS (mode) && code == ORDERED)
5533     return const_true_rtx;
5534 
5535   if (! HONOR_NANS (mode) && code == UNORDERED)
5536     return const0_rtx;
5537 
5538   /* For modes without NaNs, if the two operands are equal, we know the
5539      result except if they have side-effects.  Even with NaNs we know
5540      the result of unordered comparisons and, if signaling NaNs are
5541      irrelevant, also the result of LT/GT/LTGT.  */
5542   if ((! HONOR_NANS (trueop0)
5543        || code == UNEQ || code == UNLE || code == UNGE
5544        || ((code == LT || code == GT || code == LTGT)
5545 	   && ! HONOR_SNANS (trueop0)))
5546       && rtx_equal_p (trueop0, trueop1)
5547       && ! side_effects_p (trueop0))
5548     return comparison_result (code, CMP_EQ);
5549 
5550   /* If the operands are floating-point constants, see if we can fold
5551      the result.  */
5552   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5553       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5554       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5555     {
5556       const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5557       const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5558 
5559       /* Comparisons are unordered iff at least one of the values is NaN.  */
5560       if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5561 	switch (code)
5562 	  {
5563 	  case UNEQ:
5564 	  case UNLT:
5565 	  case UNGT:
5566 	  case UNLE:
5567 	  case UNGE:
5568 	  case NE:
5569 	  case UNORDERED:
5570 	    return const_true_rtx;
5571 	  case EQ:
5572 	  case LT:
5573 	  case GT:
5574 	  case LE:
5575 	  case GE:
5576 	  case LTGT:
5577 	  case ORDERED:
5578 	    return const0_rtx;
5579 	  default:
5580 	    return 0;
5581 	  }
5582 
5583       return comparison_result (code,
5584 				(real_equal (d0, d1) ? CMP_EQ :
5585 				 real_less (d0, d1) ? CMP_LT : CMP_GT));
5586     }
5587 
5588   /* Otherwise, see if the operands are both integers.  */
5589   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5590       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5591     {
5592       /* It would be nice if we really had a mode here.  However, the
5593 	 largest int representable on the target is as good as
5594 	 infinite.  */
5595       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5596       rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5597       rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5598 
5599       if (wi::eq_p (ptrueop0, ptrueop1))
5600 	return comparison_result (code, CMP_EQ);
5601       else
5602 	{
5603 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5604 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5605 	  return comparison_result (code, cr);
5606 	}
5607     }
5608 
5609   /* Optimize comparisons with upper and lower bounds.  */
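  /* E.g. if nonzero_bits shows that TRUEOP0 uses only the low 8 bits of
     a wider mode, MMAX becomes 255, so (gtu x (const_int 255)) folds to
     const0_rtx and (leu x (const_int 255)) folds to const_true_rtx.  */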
5610   scalar_int_mode int_mode;
5611   if (CONST_INT_P (trueop1)
5612       && is_a <scalar_int_mode> (mode, &int_mode)
5613       && HWI_COMPUTABLE_MODE_P (int_mode)
5614       && !side_effects_p (trueop0))
5615     {
5616       int sign;
5617       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5618       HOST_WIDE_INT val = INTVAL (trueop1);
5619       HOST_WIDE_INT mmin, mmax;
5620 
5621       if (code == GEU
5622 	  || code == LEU
5623 	  || code == GTU
5624 	  || code == LTU)
5625 	sign = 0;
5626       else
5627 	sign = 1;
5628 
5629       /* Get a reduced range if the sign bit is zero.  */
5630       if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5631 	{
5632 	  mmin = 0;
5633 	  mmax = nonzero;
5634 	}
5635       else
5636 	{
5637 	  rtx mmin_rtx, mmax_rtx;
5638 	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5639 
5640 	  mmin = INTVAL (mmin_rtx);
5641 	  mmax = INTVAL (mmax_rtx);
5642 	  if (sign)
5643 	    {
5644 	      unsigned int sign_copies
5645 		= num_sign_bit_copies (trueop0, int_mode);
5646 
5647 	      mmin >>= (sign_copies - 1);
5648 	      mmax >>= (sign_copies - 1);
5649 	    }
5650 	}
5651 
5652       switch (code)
5653 	{
5654 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5655 	case GEU:
5656 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5657 	    return const_true_rtx;
5658 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5659 	    return const0_rtx;
5660 	  break;
5661 	case GE:
5662 	  if (val <= mmin)
5663 	    return const_true_rtx;
5664 	  if (val > mmax)
5665 	    return const0_rtx;
5666 	  break;
5667 
5668 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5669 	case LEU:
5670 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5671 	    return const_true_rtx;
5672 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5673 	    return const0_rtx;
5674 	  break;
5675 	case LE:
5676 	  if (val >= mmax)
5677 	    return const_true_rtx;
5678 	  if (val < mmin)
5679 	    return const0_rtx;
5680 	  break;
5681 
5682 	case EQ:
5683 	  /* x == y is always false for y out of range.  */
5684 	  if (val < mmin || val > mmax)
5685 	    return const0_rtx;
5686 	  break;
5687 
5688 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5689 	case GTU:
5690 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5691 	    return const0_rtx;
5692 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5693 	    return const_true_rtx;
5694 	  break;
5695 	case GT:
5696 	  if (val >= mmax)
5697 	    return const0_rtx;
5698 	  if (val < mmin)
5699 	    return const_true_rtx;
5700 	  break;
5701 
5702 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5703 	case LTU:
5704 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5705 	    return const0_rtx;
5706 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5707 	    return const_true_rtx;
5708 	  break;
5709 	case LT:
5710 	  if (val <= mmin)
5711 	    return const0_rtx;
5712 	  if (val > mmax)
5713 	    return const_true_rtx;
5714 	  break;
5715 
5716 	case NE:
5717 	  /* x != y is always true for y out of range.  */
5718 	  if (val < mmin || val > mmax)
5719 	    return const_true_rtx;
5720 	  break;
5721 
5722 	default:
5723 	  break;
5724 	}
5725     }
5726 
5727   /* Optimize integer comparisons with zero.  */
5728   if (is_a <scalar_int_mode> (mode, &int_mode)
5729       && trueop1 == const0_rtx
5730       && !side_effects_p (trueop0))
5731     {
5732       /* Some addresses are known to be nonzero.  We don't know
5733 	 their sign, but equality comparisons are known.  */
5734       if (nonzero_address_p (trueop0))
5735 	{
5736 	  if (code == EQ || code == LEU)
5737 	    return const0_rtx;
5738 	  if (code == NE || code == GTU)
5739 	    return const_true_rtx;
5740 	}
5741 
5742       /* See if the first operand is an IOR with a constant.  If so, we
5743 	 may be able to determine the result of this comparison.  */
5744       if (GET_CODE (op0) == IOR)
5745 	{
5746 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5747 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5748 	    {
5749 	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5750 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5751 			      && (UINTVAL (inner_const)
5752 				  & (HOST_WIDE_INT_1U
5753 				     << sign_bitnum)));
5754 
5755 	      switch (code)
5756 		{
5757 		case EQ:
5758 		case LEU:
5759 		  return const0_rtx;
5760 		case NE:
5761 		case GTU:
5762 		  return const_true_rtx;
5763 		case LT:
5764 		case LE:
5765 		  if (has_sign)
5766 		    return const_true_rtx;
5767 		  break;
5768 		case GT:
5769 		case GE:
5770 		  if (has_sign)
5771 		    return const0_rtx;
5772 		  break;
5773 		default:
5774 		  break;
5775 		}
5776 	    }
5777 	}
5778     }
5779 
5780   /* Optimize comparison of ABS with zero.  */
5781   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5782       && (GET_CODE (trueop0) == ABS
5783 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5784 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5785     {
5786       switch (code)
5787 	{
5788 	case LT:
5789 	  /* Optimize abs(x) < 0.0.  */
5790 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5791 	    return const0_rtx;
5792 	  break;
5793 
5794 	case GE:
5795 	  /* Optimize abs(x) >= 0.0.  */
5796 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5797 	    return const_true_rtx;
5798 	  break;
5799 
5800 	case UNGE:
5801 	  /* Optimize ! (abs(x) < 0.0).  */
5802 	  return const_true_rtx;
5803 
5804 	default:
5805 	  break;
5806 	}
5807     }
5808 
5809   return 0;
5810 }
5811 
5812 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5813    where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5814    or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the
5815    expression can be simplified to that, or NULL_RTX if not.
5816    Assume X is compared against zero with CMP_CODE and the true
5817    arm is TRUE_VAL and the false arm is FALSE_VAL.  */
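/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO sets the value
   to 32 for SImode, (x == 0 ? 32 : clz (x)) simplifies to (clz (x)).  */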
5818 
5819 static rtx
5820 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5821 {
5822   if (cmp_code != EQ && cmp_code != NE)
5823     return NULL_RTX;
5824 
5825   /* Result on X == 0 and X != 0 respectively.  */
5826   rtx on_zero, on_nonzero;
5827   if (cmp_code == EQ)
5828     {
5829       on_zero = true_val;
5830       on_nonzero = false_val;
5831     }
5832   else
5833     {
5834       on_zero = false_val;
5835       on_nonzero = true_val;
5836     }
5837 
5838   rtx_code op_code = GET_CODE (on_nonzero);
5839   if ((op_code != CLZ && op_code != CTZ)
5840       || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5841       || !CONST_INT_P (on_zero))
5842     return NULL_RTX;
5843 
5844   HOST_WIDE_INT op_val;
5845   scalar_int_mode mode ATTRIBUTE_UNUSED
5846     = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5847   if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5848        || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5849       && op_val == INTVAL (on_zero))
5850     return on_nonzero;
5851 
5852   return NULL_RTX;
5853 }
5854 
5855 /* Try to simplify X given that it appears within operand OP of a
5856    VEC_MERGE operation whose mask is MASK.  X need not use the same
5857    vector mode as the VEC_MERGE, but it must have the same number of
5858    elements.
5859 
5860    Return the simplified X on success, otherwise return NULL_RTX.  */
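/* For example, simplifying (plus (vec_merge a b m) c) as operand 0 of a
   VEC_MERGE whose mask is also m yields (plus a c), since the merged-away
   b lanes cannot be observed there (provided b has no side effects).  */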
5861 
5862 rtx
5863 simplify_merge_mask (rtx x, rtx mask, int op)
5864 {
5865   gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5866   poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5867   if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5868     {
5869       if (side_effects_p (XEXP (x, 1 - op)))
5870 	return NULL_RTX;
5871 
5872       return XEXP (x, op);
5873     }
5874   if (UNARY_P (x)
5875       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5876       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
5877     {
5878       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5879       if (top0)
5880 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
5881 				   GET_MODE (XEXP (x, 0)));
5882     }
5883   if (BINARY_P (x)
5884       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5885       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5886       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5887       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
5888     {
5889       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5890       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5891       if (top0 || top1)
5892 	{
5893 	  if (COMPARISON_P (x))
5894 	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
5895 					    GET_MODE (XEXP (x, 0)) != VOIDmode
5896 					    ? GET_MODE (XEXP (x, 0))
5897 					    : GET_MODE (XEXP (x, 1)),
5898 					    top0 ? top0 : XEXP (x, 0),
5899 					    top1 ? top1 : XEXP (x, 1));
5900 	  else
5901 	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
5902 					top0 ? top0 : XEXP (x, 0),
5903 					top1 ? top1 : XEXP (x, 1));
5904 	}
5905     }
5906   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
5907       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5908       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5909       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5910       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
5911       && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
5912       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
5913     {
5914       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5915       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5916       rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
5917       if (top0 || top1 || top2)
5918 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
5919 				     GET_MODE (XEXP (x, 0)),
5920 				     top0 ? top0 : XEXP (x, 0),
5921 				     top1 ? top1 : XEXP (x, 1),
5922 				     top2 ? top2 : XEXP (x, 2));
5923     }
5924   return NULL_RTX;
5925 }
5926 
5927 
5928 /* Simplify CODE, an operation with result mode MODE and three operands,
5929    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5930    a constant.  Return 0 if no simplification is possible.  */
5931 
5932 rtx
5933 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5934 			    machine_mode op0_mode, rtx op0, rtx op1,
5935 			    rtx op2)
5936 {
5937   bool any_change = false;
5938   rtx tem, trueop2;
5939   scalar_int_mode int_mode, int_op0_mode;
5940   unsigned int n_elts;
5941 
5942   switch (code)
5943     {
5944     case FMA:
5945       /* Simplify negations around the multiplication.  */
5946       /* -a * -b + c  =>  a * b + c.  */
5947       if (GET_CODE (op0) == NEG)
5948 	{
5949 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5950 	  if (tem)
5951 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5952 	}
5953       else if (GET_CODE (op1) == NEG)
5954 	{
5955 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5956 	  if (tem)
5957 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5958 	}
5959 
5960       /* Canonicalize the two multiplication operands.  */
5961       /* a * -b + c  =>  -b * a + c.  */
5962       if (swap_commutative_operands_p (op0, op1))
5963 	std::swap (op0, op1), any_change = true;
5964 
5965       if (any_change)
5966 	return gen_rtx_FMA (mode, op0, op1, op2);
5967       return NULL_RTX;
5968 
5969     case SIGN_EXTRACT:
5970     case ZERO_EXTRACT:
5971       if (CONST_INT_P (op0)
5972 	  && CONST_INT_P (op1)
5973 	  && CONST_INT_P (op2)
5974 	  && is_a <scalar_int_mode> (mode, &int_mode)
5975 	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5976 	  && HWI_COMPUTABLE_MODE_P (int_mode))
5977 	{
5978 	  /* Extracting a bit-field from a constant.  */
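	  /* E.g. with !BITS_BIG_ENDIAN, (zero_extract (const_int 0xf0)
	     (const_int 4) (const_int 4)) folds to (const_int 0xf).  */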
5979 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5980 	  HOST_WIDE_INT op1val = INTVAL (op1);
5981 	  HOST_WIDE_INT op2val = INTVAL (op2);
5982 	  if (!BITS_BIG_ENDIAN)
5983 	    val >>= op2val;
5984 	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5985 	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5986 	  else
5987 	    /* Not enough information to calculate the bit position.  */
5988 	    break;
5989 
5990 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5991 	    {
5992 	      /* First zero-extend.  */
5993 	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
5994 	      /* If desired, propagate sign bit.  */
5995 	      if (code == SIGN_EXTRACT
5996 		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5997 		     != 0)
5998 		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5999 	    }
6000 
6001 	  return gen_int_mode (val, int_mode);
6002 	}
6003       break;
6004 
6005     case IF_THEN_ELSE:
6006       if (CONST_INT_P (op0))
6007 	return op0 != const0_rtx ? op1 : op2;
6008 
6009       /* Convert c ? a : a into "a".  */
6010       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6011 	return op1;
6012 
6013       /* Convert a != b ? a : b into "a".  */
6014       if (GET_CODE (op0) == NE
6015 	  && ! side_effects_p (op0)
6016 	  && ! HONOR_NANS (mode)
6017 	  && ! HONOR_SIGNED_ZEROS (mode)
6018 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6019 	       && rtx_equal_p (XEXP (op0, 1), op2))
6020 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6021 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6022 	return op1;
6023 
6024       /* Convert a == b ? a : b into "b".  */
6025       if (GET_CODE (op0) == EQ
6026 	  && ! side_effects_p (op0)
6027 	  && ! HONOR_NANS (mode)
6028 	  && ! HONOR_SIGNED_ZEROS (mode)
6029 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6030 	       && rtx_equal_p (XEXP (op0, 1), op2))
6031 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6032 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6033 	return op2;
6034 
6035       /* Convert (!c) != {0,...,0} ? a : b into
6036          c != {0,...,0} ? b : a for vector modes.  */
6037       if (VECTOR_MODE_P (GET_MODE (op1))
6038 	  && GET_CODE (op0) == NE
6039 	  && GET_CODE (XEXP (op0, 0)) == NOT
6040 	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6041 	{
6042 	  rtx cv = XEXP (op0, 1);
6043 	  int nunits;
6044 	  bool ok = true;
6045 	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6046 	    ok = false;
6047 	  else
6048 	    for (int i = 0; i < nunits; ++i)
6049 	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6050 		{
6051 		  ok = false;
6052 		  break;
6053 		}
6054 	  if (ok)
6055 	    {
6056 	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6057 					XEXP (XEXP (op0, 0), 0),
6058 					XEXP (op0, 1));
6059 	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6060 	      return retval;
6061 	    }
6062 	}
6063 
6064       /* Convert x == 0 ? N : clz (x) into clz (x) when
6065 	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6066 	 Similarly for ctz (x).  */
6067       if (COMPARISON_P (op0) && !side_effects_p (op0)
6068 	  && XEXP (op0, 1) == const0_rtx)
6069 	{
6070 	  rtx simplified
6071 	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6072 				     op1, op2);
6073 	  if (simplified)
6074 	    return simplified;
6075 	}
6076 
6077       if (COMPARISON_P (op0) && ! side_effects_p (op0))
6078 	{
6079 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6080 					? GET_MODE (XEXP (op0, 1))
6081 					: GET_MODE (XEXP (op0, 0)));
6082 	  rtx temp;
6083 
6084 	  /* Look for happy constants in op1 and op2.  */
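	  /* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt x y)
	     (const_int 1) (const_int 0)) collapses to (lt x y); with the
	     arms swapped, the reversed comparison is used instead.  */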
6085 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
6086 	    {
6087 	      HOST_WIDE_INT t = INTVAL (op1);
6088 	      HOST_WIDE_INT f = INTVAL (op2);
6089 
6090 	      if (t == STORE_FLAG_VALUE && f == 0)
6091 	        code = GET_CODE (op0);
6092 	      else if (t == 0 && f == STORE_FLAG_VALUE)
6093 		{
6094 		  enum rtx_code tmp;
6095 		  tmp = reversed_comparison_code (op0, NULL);
6096 		  if (tmp == UNKNOWN)
6097 		    break;
6098 		  code = tmp;
6099 		}
6100 	      else
6101 		break;
6102 
6103 	      return simplify_gen_relational (code, mode, cmp_mode,
6104 					      XEXP (op0, 0), XEXP (op0, 1));
6105 	    }
6106 
6107 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6108 						cmp_mode, XEXP (op0, 0),
6109 						XEXP (op0, 1));
6110 
6111 	  /* See if any simplifications were possible.  */
6112 	  if (temp)
6113 	    {
6114 	      if (CONST_INT_P (temp))
6115 		return temp == const0_rtx ? op2 : op1;
6116 	      else
6117 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6118 	    }
6119 	}
6120       break;
6121 
6122     case VEC_MERGE:
6123       gcc_assert (GET_MODE (op0) == mode);
6124       gcc_assert (GET_MODE (op1) == mode);
6125       gcc_assert (VECTOR_MODE_P (mode));
6126       trueop2 = avoid_constant_pool_reference (op2);
6127       if (CONST_INT_P (trueop2)
6128 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6129 	{
6130 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6131 	  unsigned HOST_WIDE_INT mask;
6132 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
6133 	    mask = -1;
6134 	  else
6135 	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6136 
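	  /* Each bit of SEL selects the corresponding element from OP0
	     when set and from OP1 when clear, so an all-zero selector
	     yields OP1 and an all-ones selector yields OP0.  */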
6137 	  if (!(sel & mask) && !side_effects_p (op0))
6138 	    return op1;
6139 	  if ((sel & mask) == mask && !side_effects_p (op1))
6140 	    return op0;
6141 
6142 	  rtx trueop0 = avoid_constant_pool_reference (op0);
6143 	  rtx trueop1 = avoid_constant_pool_reference (op1);
6144 	  if (GET_CODE (trueop0) == CONST_VECTOR
6145 	      && GET_CODE (trueop1) == CONST_VECTOR)
6146 	    {
6147 	      rtvec v = rtvec_alloc (n_elts);
6148 	      unsigned int i;
6149 
6150 	      for (i = 0; i < n_elts; i++)
6151 		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6152 				    ? CONST_VECTOR_ELT (trueop0, i)
6153 				    : CONST_VECTOR_ELT (trueop1, i));
6154 	      return gen_rtx_CONST_VECTOR (mode, v);
6155 	    }
6156 
6157 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6158 	     if no element from a appears in the result.  */
6159 	  if (GET_CODE (op0) == VEC_MERGE)
6160 	    {
6161 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
6162 	      if (CONST_INT_P (tem))
6163 		{
6164 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6165 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6166 		    return simplify_gen_ternary (code, mode, mode,
6167 						 XEXP (op0, 1), op1, op2);
6168 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6169 		    return simplify_gen_ternary (code, mode, mode,
6170 						 XEXP (op0, 0), op1, op2);
6171 		}
6172 	    }
6173 	  if (GET_CODE (op1) == VEC_MERGE)
6174 	    {
6175 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
6176 	      if (CONST_INT_P (tem))
6177 		{
6178 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6179 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6180 		    return simplify_gen_ternary (code, mode, mode,
6181 						 op0, XEXP (op1, 1), op2);
6182 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6183 		    return simplify_gen_ternary (code, mode, mode,
6184 						 op0, XEXP (op1, 0), op2);
6185 		}
6186 	    }
6187 
6188 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6189 	     with a.  */
6190 	  if (GET_CODE (op0) == VEC_DUPLICATE
6191 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6192 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6193 	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6194 	    {
6195 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6196 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
6197 		{
6198 		  if (XEXP (XEXP (op0, 0), 0) == op1
6199 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6200 		    return op1;
6201 		}
6202 	    }
6203 	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6204 	     (const_int N))
6205 	     with (vec_concat (X) (B)) if N == 1 or
6206 	     (vec_concat (A) (X)) if N == 2.  */
6207 	  if (GET_CODE (op0) == VEC_DUPLICATE
6208 	      && GET_CODE (op1) == CONST_VECTOR
6209 	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6210 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6211 	      && IN_RANGE (sel, 1, 2))
6212 	    {
6213 	      rtx newop0 = XEXP (op0, 0);
6214 	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6215 	      if (sel == 2)
6216 		std::swap (newop0, newop1);
6217 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6218 	    }
6219 	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6220 	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6221 	     Only applies for vectors of two elements.  */
6222 	  if (GET_CODE (op0) == VEC_DUPLICATE
6223 	      && GET_CODE (op1) == VEC_CONCAT
6224 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6225 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6226 	      && IN_RANGE (sel, 1, 2))
6227 	    {
6228 	      rtx newop0 = XEXP (op0, 0);
6229 	      rtx newop1 = XEXP (op1, 2 - sel);
6230 	      rtx otherop = XEXP (op1, sel - 1);
6231 	      if (sel == 2)
6232 		std::swap (newop0, newop1);
6233 	      /* Don't want to throw away the other part of the vec_concat if
6234 		 it has side-effects.  */
6235 	      if (!side_effects_p (otherop))
6236 		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6237 	    }
6238 
6239 	  /* Replace:
6240 
6241 	      (vec_merge:outer (vec_duplicate:outer x:inner)
6242 			       (subreg:outer y:inner 0)
6243 			       (const_int N))
6244 
6245 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6246 	     or (vec_concat:outer y:inner x:inner) if N == 2.
6247 
6248 	     Implicitly, this means we have a paradoxical subreg, but such
6249 	     a check is cheap, so make it anyway.
6250 
6251 	     Only applies for vectors of two elements.  */
6252 	  if (GET_CODE (op0) == VEC_DUPLICATE
6253 	      && GET_CODE (op1) == SUBREG
6254 	      && GET_MODE (op1) == GET_MODE (op0)
6255 	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6256 	      && paradoxical_subreg_p (op1)
6257 	      && subreg_lowpart_p (op1)
6258 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6259 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6260 	      && IN_RANGE (sel, 1, 2))
6261 	    {
6262 	      rtx newop0 = XEXP (op0, 0);
6263 	      rtx newop1 = SUBREG_REG (op1);
6264 	      if (sel == 2)
6265 		std::swap (newop0, newop1);
6266 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6267 	    }
6268 
6269 	  /* Same as above but with switched operands:
6270 		Replace (vec_merge:outer (subreg:outer x:inner 0)
6271 					 (vec_duplicate:outer y:inner)
6272 			       (const_int N))
6273 
6274 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6275 	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
6276 	  if (GET_CODE (op1) == VEC_DUPLICATE
6277 	      && GET_CODE (op0) == SUBREG
6278 	      && GET_MODE (op0) == GET_MODE (op1)
6279 	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6280 	      && paradoxical_subreg_p (op0)
6281 	      && subreg_lowpart_p (op0)
6282 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6283 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6284 	      && IN_RANGE (sel, 1, 2))
6285 	    {
6286 	      rtx newop0 = SUBREG_REG (op0);
6287 	      rtx newop1 = XEXP (op1, 0);
6288 	      if (sel == 2)
6289 		std::swap (newop0, newop1);
6290 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6291 	    }
6292 
6293 	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6294 				 (const_int n))
6295 	     with (vec_concat x y) or (vec_concat y x) depending on value
6296 	     of N.  */
6297 	  if (GET_CODE (op0) == VEC_DUPLICATE
6298 	      && GET_CODE (op1) == VEC_DUPLICATE
6299 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6300 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6301 	      && IN_RANGE (sel, 1, 2))
6302 	    {
6303 	      rtx newop0 = XEXP (op0, 0);
6304 	      rtx newop1 = XEXP (op1, 0);
6305 	      if (sel == 2)
6306 		std::swap (newop0, newop1);
6307 
6308 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6309 	    }
6310 	}
6311 
6312       if (rtx_equal_p (op0, op1)
6313 	  && !side_effects_p (op2) && !side_effects_p (op1))
6314 	return op0;
6315 
6316       if (!side_effects_p (op2))
6317 	{
6318 	  rtx top0
6319 	    = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6320 	  rtx top1
6321 	    = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6322 	  if (top0 || top1)
6323 	    return simplify_gen_ternary (code, mode, mode,
6324 					 top0 ? top0 : op0,
6325 					 top1 ? top1 : op1, op2);
6326 	}
6327 
6328       break;
6329 
6330     default:
6331       gcc_unreachable ();
6332     }
6333 
6334   return 0;
6335 }
6336 
6337 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6338    starting at byte FIRST_BYTE.  Return true on success and add the
6339    bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6340    that the bytes follow target memory order.  Leave BYTES unmodified
6341    on failure.
6342 
6343    MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
6344    BYTES before calling this function.  */
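/* For instance, on a little-endian target, encoding (const_int 0x12345678)
   in SImode pushes the bytes 0x78, 0x56, 0x34 and 0x12 in that order.  */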
6345 
6346 bool
6347 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6348 		   unsigned int first_byte, unsigned int num_bytes)
6349 {
6350   /* Check the mode is sensible.  */
6351   gcc_assert (GET_MODE (x) == VOIDmode
6352 	      ? is_a <scalar_int_mode> (mode)
6353 	      : mode == GET_MODE (x));
6354 
6355   if (GET_CODE (x) == CONST_VECTOR)
6356     {
6357       /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6358 	 is necessary.  The only complication is that MODE_VECTOR_BOOL
6359 	 vectors can have several elements per byte.  */
6360       unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6361 						   GET_MODE_NUNITS (mode));
6362       unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6363       if (elt_bits < BITS_PER_UNIT)
6364 	{
6365 	  /* This is the only case in which elements can be smaller than
6366 	     a byte.  */
6367 	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
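	  /* Pack elements into each byte least-significant-bit first,
	     so that element 0 of a byte lands in its lsb; this matches
	     the layout that native_decode_vector_rtx reads back.  */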
6368 	  for (unsigned int i = 0; i < num_bytes; ++i)
6369 	    {
6370 	      target_unit value = 0;
6371 	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6372 		{
6373 		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6374 		  elt += 1;
6375 		}
6376 	      bytes.quick_push (value);
6377 	    }
6378 	  return true;
6379 	}
6380 
6381       unsigned int start = bytes.length ();
6382       unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6383       /* Make FIRST_BYTE relative to ELT.  */
6384       first_byte %= elt_bytes;
6385       while (num_bytes > 0)
6386 	{
6387 	  /* Work out how many bytes we want from element ELT.  */
6388 	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6389 	  if (!native_encode_rtx (GET_MODE_INNER (mode),
6390 				  CONST_VECTOR_ELT (x, elt), bytes,
6391 				  first_byte, chunk_bytes))
6392 	    {
6393 	      bytes.truncate (start);
6394 	      return false;
6395 	    }
6396 	  elt += 1;
6397 	  first_byte = 0;
6398 	  num_bytes -= chunk_bytes;
6399 	}
6400       return true;
6401     }
6402 
6403   /* All subsequent cases are limited to scalars.  */
6404   scalar_mode smode;
6405   if (!is_a <scalar_mode> (mode, &smode))
6406     return false;
6407 
6408   /* Make sure that the region is in range.  */
6409   unsigned int end_byte = first_byte + num_bytes;
6410   unsigned int mode_bytes = GET_MODE_SIZE (smode);
6411   gcc_assert (end_byte <= mode_bytes);
6412 
6413   if (CONST_SCALAR_INT_P (x))
6414     {
6415       /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6416 	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
6417 	 position of each byte.  */
6418       rtx_mode_t value (x, smode);
6419       wide_int_ref value_wi (value);
6420       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6421 	{
6422 	  /* Always constant because the inputs are.  */
6423 	  unsigned int lsb
6424 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6425 	  /* Operate directly on the encoding rather than using
6426 	     wi::extract_uhwi, so that we preserve the sign or zero
6427 	     extension for modes that are not a whole number of bits in
6428 	     size.  (Zero extension is only used for the combination of
6429 	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
6430 	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6431 	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6432 	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6433 	  bytes.quick_push (uhwi >> shift);
6434 	}
6435       return true;
6436     }
6437 
6438   if (CONST_DOUBLE_P (x))
6439     {
6440       /* real_to_target produces an array of integers in target memory order.
6441 	 All integers before the last one have 32 bits; the last one may
6442 	 have 32 bits or fewer, depending on whether the mode bitsize
6443 	 is divisible by 32.  Each of these integers is then laid out
6444 	 in target memory as any other integer would be.  */
6445       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6446       real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6447 
6448       /* The (maximum) number of target bytes per element of el32.  */
6449       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6450       gcc_assert (bytes_per_el32 != 0);
6451 
6452       /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6453 	 handling above.  */
6454       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6455 	{
6456 	  unsigned int index = byte / bytes_per_el32;
6457 	  unsigned int subbyte = byte % bytes_per_el32;
6458 	  unsigned int int_bytes = MIN (bytes_per_el32,
6459 					mode_bytes - index * bytes_per_el32);
6460 	  /* Always constant because the inputs are.  */
6461 	  unsigned int lsb
6462 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6463 	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
6464 	}
6465       return true;
6466     }
6467 
6468   if (GET_CODE (x) == CONST_FIXED)
6469     {
6470       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6471 	{
6472 	  /* Always constant because the inputs are.  */
6473 	  unsigned int lsb
6474 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6475 	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6476 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
6477 	    {
6478 	      lsb -= HOST_BITS_PER_WIDE_INT;
6479 	      piece = CONST_FIXED_VALUE_HIGH (x);
6480 	    }
6481 	  bytes.quick_push (piece >> lsb);
6482 	}
6483       return true;
6484     }
6485 
6486   return false;
6487 }
6488 
6489 /* Read a vector of mode MODE from the target memory image given by BYTES,
6490    starting at byte FIRST_BYTE.  The vector is known to be encodable using
6491    NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6492    and BYTES is known to have enough bytes to supply NPATTERNS *
6493    NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
6494    BITS_PER_UNIT bits and the bytes are in target memory order.
6495 
6496    Return the vector on success, otherwise return NULL_RTX.  */
6497 
6498 rtx
6499 native_decode_vector_rtx (machine_mode mode, vec<target_unit> bytes,
6500 			  unsigned int first_byte, unsigned int npatterns,
6501 			  unsigned int nelts_per_pattern)
6502 {
6503   rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
6504 
6505   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6506 					       GET_MODE_NUNITS (mode));
6507   if (elt_bits < BITS_PER_UNIT)
6508     {
6509       /* This is the only case in which elements can be smaller than a byte.
6510 	 Element 0 is always in the lsb of the containing byte.  */
6511       gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6512       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6513 	{
6514 	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
6515 	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
6516 	  unsigned int lsb = bit_index % BITS_PER_UNIT;
6517 	  builder.quick_push (bytes[byte_index] & (1 << lsb)
6518 			      ? CONST1_RTX (BImode)
6519 			      : CONST0_RTX (BImode));
6520 	}
6521     }
6522   else
6523     {
6524       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6525 	{
6526 	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
6527 	  if (!x)
6528 	    return NULL_RTX;
6529 	  builder.quick_push (x);
6530 	  first_byte += elt_bits / BITS_PER_UNIT;
6531 	}
6532     }
6533   return builder.build ();
6534 }
6535 
6536 /* Read an rtx of mode MODE from the target memory image given by BYTES,
6537    starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
6538    bits and the bytes are in target memory order.  The image has enough
6539    values to specify all bytes of MODE.
6540 
6541    Return the rtx on success, otherwise return NULL_RTX.  */
6542 
6543 rtx
6544 native_decode_rtx (machine_mode mode, vec<target_unit> bytes,
6545 		   unsigned int first_byte)
6546 {
6547   if (VECTOR_MODE_P (mode))
6548     {
6549       /* If we know at compile time how many elements there are,
6550 	 pull each element directly from BYTES.  */
6551       unsigned int nelts;
6552       if (GET_MODE_NUNITS (mode).is_constant (&nelts))
6553 	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
6554       return NULL_RTX;
6555     }
6556 
6557   scalar_int_mode imode;
6558   if (is_a <scalar_int_mode> (mode, &imode)
6559       && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
6560     {
6561       /* Pull the bytes msb first, so that we can use simple
6562 	 shift-and-insert wide_int operations.  */
6563       unsigned int size = GET_MODE_SIZE (imode);
6564       wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
6565       for (unsigned int i = 0; i < size; ++i)
6566 	{
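	  /* Iteration I wants the byte that holds bits
	     [LSB, LSB + BITS_PER_UNIT) of the value; SUBBYTE is that
	     byte's index in target memory order.  */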
6567 	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
6568 	  /* Always constant because the inputs are.  */
6569 	  unsigned int subbyte
6570 	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
6571 	  result <<= BITS_PER_UNIT;
6572 	  result |= bytes[first_byte + subbyte];
6573 	}
6574       return immed_wide_int_const (result, imode);
6575     }
6576 
6577   scalar_float_mode fmode;
6578   if (is_a <scalar_float_mode> (mode, &fmode))
6579     {
6580       /* We need to build an array of integers in target memory order.
6581 	 All integers before the last one have 32 bits; the last one may
6582 	 have 32 bits or fewer, depending on whether the mode bitsize
6583 	 is divisible by 32.  */
6584       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6585       unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
6586       memset (el32, 0, num_el32 * sizeof (long));
6587 
6588       /* The (maximum) number of target bytes per element of el32.  */
6589       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6590       gcc_assert (bytes_per_el32 != 0);
6591 
6592       unsigned int mode_bytes = GET_MODE_SIZE (fmode);
6593       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6594 	{
6595 	  unsigned int index = byte / bytes_per_el32;
6596 	  unsigned int subbyte = byte % bytes_per_el32;
6597 	  unsigned int int_bytes = MIN (bytes_per_el32,
6598 					mode_bytes - index * bytes_per_el32);
6599 	  /* Always constant because the inputs are.  */
6600 	  unsigned int lsb
6601 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6602 	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
6603 	}
6604       REAL_VALUE_TYPE r;
6605       real_from_target (&r, el32, fmode);
6606       return const_double_from_real_value (r, fmode);
6607     }
6608 
6609   if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
6610     {
6611       scalar_mode smode = as_a <scalar_mode> (mode);
6612       FIXED_VALUE_TYPE f;
6613       f.data.low = 0;
6614       f.data.high = 0;
6615       f.mode = smode;
6616 
6617       unsigned int mode_bytes = GET_MODE_SIZE (smode);
6618       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
6619 	{
6620 	  /* Always constant because the inputs are.  */
6621 	  unsigned int lsb
6622 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6623 	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
6624 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
6625 	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
6626 	  else
6627 	    f.data.low |= unit << lsb;
6628 	}
6629       return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
6630     }
6631 
6632   return NULL_RTX;
6633 }
6634 
6635 /* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
6636    is to convert a runtime BYTE value into a constant one.  */
6637 
6638 static poly_uint64
6639 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
6640 {
6641   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
6642   machine_mode mode = GET_MODE (x);
6643   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6644 					       GET_MODE_NUNITS (mode));
6645   /* The number of bits needed to encode one element from each pattern.  */
6646   unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
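  /* For example, a vector encoded as two interleaved patterns of 8-bit
     elements repeats every 16 bits.  */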
6647 
6648   /* Identify the start point in terms of a sequence number and a byte offset
6649      within that sequence.  */
6650   poly_uint64 first_sequence;
6651   unsigned HOST_WIDE_INT subbit;
6652   if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
6653 		       &first_sequence, &subbit))
6654     {
6655       unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6656       if (nelts_per_pattern == 1)
6657 	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
6658 	   doesn't matter.  */
6659 	byte = subbit / BITS_PER_UNIT;
6660       else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
6661 	{
6662 	  /* The subreg drops the first element from each pattern and
6663 	     only uses the second element.  Find the first sequence
6664 	     that starts on a byte boundary.  */
6665 	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
6666 	  byte = subbit / BITS_PER_UNIT;
6667 	}
6668     }
6669   return byte;
6670 }
6671 
6672 /* Subroutine of simplify_subreg in which:
6673 
6674    - X is known to be a CONST_VECTOR
6675    - OUTERMODE is known to be a vector mode
6676 
6677    Try to handle the subreg by operating on the CONST_VECTOR encoding
6678    rather than on each individual element of the CONST_VECTOR.
6679 
6680    Return the simplified subreg on success, otherwise return NULL_RTX.  */
6681 
6682 static rtx
6683 simplify_const_vector_subreg (machine_mode outermode, rtx x,
6684 			      machine_mode innermode, unsigned int first_byte)
6685 {
6686   /* Paradoxical subregs of vectors have dubious semantics.  */
6687   if (paradoxical_subreg_p (outermode, innermode))
6688     return NULL_RTX;
6689 
6690   /* We can only preserve the semantics of a stepped pattern if the new
6691      vector element is the same as the original one.  */
6692   if (CONST_VECTOR_STEPPED_P (x)
6693       && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
6694     return NULL_RTX;
6695 
6696   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
6697   unsigned int x_elt_bits
6698     = vector_element_size (GET_MODE_BITSIZE (innermode),
6699 			   GET_MODE_NUNITS (innermode));
6700   unsigned int out_elt_bits
6701     = vector_element_size (GET_MODE_BITSIZE (outermode),
6702 			   GET_MODE_NUNITS (outermode));
6703 
6704   /* The number of bits needed to encode one element from every pattern
6705      of the original vector.  */
6706   unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
6707 
6708   /* The number of bits needed to encode one element from every pattern
6709      of the result.  */
6710   unsigned int out_sequence_bits
6711     = least_common_multiple (x_sequence_bits, out_elt_bits);
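  /* For example, if X uses 2 patterns of 8-bit elements (16 sequence
     bits) and the result has 32-bit elements, each result element spans
     two full input sequences, so OUT_SEQUENCE_BITS is lcm (16, 32) == 32.  */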
6712 
6713   /* Work out the number of interleaved patterns in the output vector
6714      and the number of encoded elements per pattern.  */
6715   unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
6716   unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
6717 
6718   /* The encoding scheme requires the number of elements to be a multiple
6719      of the number of patterns, so that each pattern appears at least once
6720      and so that the same number of elements appear from each pattern.  */
6721   bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
6722   unsigned int const_nunits;
6723   if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
6724       && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
6725     {
6726       /* Either the encoding is invalid, or applying it would give us
6727 	 more elements than we need.  Just encode each element directly.  */
6728       out_npatterns = const_nunits;
6729       nelts_per_pattern = 1;
6730     }
6731   else if (!ok_p)
6732     return NULL_RTX;
6733 
6734   /* Get enough bytes of X to form the new encoding.  */
6735   unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
6736   unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
6737   auto_vec<target_unit, 128> buffer (buffer_bytes);
6738   if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
6739     return NULL_RTX;
6740 
6741   /* Reencode the bytes as OUTERMODE.  */
6742   return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
6743 				   nelts_per_pattern);
6744 }
6745 
6746 /* Try to simplify a subreg of a constant by encoding the subreg region
6747    as a sequence of target bytes and reading them back in the new mode.
6748    Return the new value on success, otherwise return null.
6749 
6750    The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
6751    and byte offset FIRST_BYTE.  */
6752 
6753 static rtx
6754 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
6755 		       machine_mode innermode, unsigned int first_byte)
6756 {
6757   unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
6758   auto_vec<target_unit, 128> buffer (buffer_bytes);
6759 
6760   /* Some ports misuse CCmode.  */
6761   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
6762     return x;
6763 
6764   /* Paradoxical subregs read undefined values for bytes outside of the
6765      inner value.  However, we have traditionally always sign-extended
6766      integer constants and zero-extended others.  */
6767   unsigned int inner_bytes = buffer_bytes;
6768   if (paradoxical_subreg_p (outermode, innermode))
6769     {
6770       if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
6771 	return NULL_RTX;
6772 
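      /* Pick the filler for the undefined bytes: all-ones to sign-extend
	 negative integer constants, zero to zero-extend everything else.  */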
6773       target_unit filler = 0;
6774       if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
6775 	filler = -1;
6776 
6777       /* Add any leading bytes due to big-endian layout.  The number of
6778 	 bytes must be constant because both modes have constant size.  */
6779       unsigned int leading_bytes
6780 	= -byte_lowpart_offset (outermode, innermode).to_constant ();
6781       for (unsigned int i = 0; i < leading_bytes; ++i)
6782 	buffer.quick_push (filler);
6783 
6784       if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
6785 	return NULL_RTX;
6786 
6787       /* Add any trailing bytes due to little-endian layout.  */
6788       while (buffer.length () < buffer_bytes)
6789 	buffer.quick_push (filler);
6790     }
6791   else
6792     {
6793       if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
6794 	return NULL_RTX;
6795     }
6796   return native_decode_rtx (outermode, buffer, 0);
6797 }
6798 
6799 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
6800    Return 0 if no simplifications are possible.  */
6801 rtx
6802 simplify_subreg (machine_mode outermode, rtx op,
6803 		 machine_mode innermode, poly_uint64 byte)
6804 {
6805   /* Little bit of sanity checking.  */
6806   gcc_assert (innermode != VOIDmode);
6807   gcc_assert (outermode != VOIDmode);
6808   gcc_assert (innermode != BLKmode);
6809   gcc_assert (outermode != BLKmode);
6810 
6811   gcc_assert (GET_MODE (op) == innermode
6812 	      || GET_MODE (op) == VOIDmode);
6813 
6814   poly_uint64 outersize = GET_MODE_SIZE (outermode);
6815   if (!multiple_p (byte, outersize))
6816     return NULL_RTX;
6817 
6818   poly_uint64 innersize = GET_MODE_SIZE (innermode);
6819   if (maybe_ge (byte, innersize))
6820     return NULL_RTX;
6821 
6822   if (outermode == innermode && known_eq (byte, 0U))
6823     return op;
6824 
6825   if (GET_CODE (op) == CONST_VECTOR)
6826     byte = simplify_const_vector_byte_offset (op, byte);
6827 
6828   if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6829     {
6830       rtx elt;
6831 
6832       if (VECTOR_MODE_P (outermode)
6833 	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6834 	  && vec_duplicate_p (op, &elt))
6835 	return gen_vec_duplicate (outermode, elt);
6836 
6837       if (outermode == GET_MODE_INNER (innermode)
6838 	  && vec_duplicate_p (op, &elt))
6839 	return elt;
6840     }
6841 
6842   if (CONST_SCALAR_INT_P (op)
6843       || CONST_DOUBLE_AS_FLOAT_P (op)
6844       || CONST_FIXED_P (op)
6845       || GET_CODE (op) == CONST_VECTOR)
6846     {
6847       unsigned HOST_WIDE_INT cbyte;
6848       if (byte.is_constant (&cbyte))
6849 	{
6850 	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
6851 	    {
6852 	      rtx tmp = simplify_const_vector_subreg (outermode, op,
6853 						      innermode, cbyte);
6854 	      if (tmp)
6855 		return tmp;
6856 	    }
6857 
6858 	  fixed_size_mode fs_outermode;
6859 	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
6860 	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
6861 	}
6862     }
6863 
6864   /* Changing mode twice with SUBREG => just change it once,
6865      or not at all if changing back to the starting mode.  */
6866   if (GET_CODE (op) == SUBREG)
6867     {
6868       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6869       poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6870       rtx newx;
6871 
6872       if (outermode == innermostmode
6873 	  && known_eq (byte, 0U)
6874 	  && known_eq (SUBREG_BYTE (op), 0))
6875 	return SUBREG_REG (op);
6876 
6877       /* Work out the memory offset of the final OUTERMODE value relative
6878 	 to the inner value of OP.  */
6879       poly_int64 mem_offset = subreg_memory_offset (outermode,
6880 						    innermode, byte);
6881       poly_int64 op_mem_offset = subreg_memory_offset (op);
6882       poly_int64 final_offset = mem_offset + op_mem_offset;
6883 
6884       /* See whether the resulting subreg will be paradoxical.  */
6885       if (!paradoxical_subreg_p (outermode, innermostmode))
6886 	{
6887 	  /* Bail out in case the resulting subreg would be incorrect.  */
6888 	  if (maybe_lt (final_offset, 0)
6889 	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
6890 	      || !multiple_p (final_offset, outersize))
6891 	    return NULL_RTX;
6892 	}
6893       else
6894 	{
6895 	  poly_int64 required_offset = subreg_memory_offset (outermode,
6896 							     innermostmode, 0);
6897 	  if (maybe_ne (final_offset, required_offset))
6898 	    return NULL_RTX;
6899 	  /* Paradoxical subregs always have byte offset 0.  */
6900 	  final_offset = 0;
6901 	}
6902 
6903       /* Recurse for further possible simplifications.  */
6904       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6905 			      final_offset);
6906       if (newx)
6907 	return newx;
6908       if (validate_subreg (outermode, innermostmode,
6909 			   SUBREG_REG (op), final_offset))
6910 	{
6911 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6912 	  if (SUBREG_PROMOTED_VAR_P (op)
6913 	      && SUBREG_PROMOTED_SIGN (op) >= 0
6914 	      && GET_MODE_CLASS (outermode) == MODE_INT
6915 	      && known_ge (outersize, innersize)
6916 	      && known_le (outersize, innermostsize)
6917 	      && subreg_lowpart_p (newx))
6918 	    {
6919 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
6920 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6921 	    }
6922 	  return newx;
6923 	}
6924       return NULL_RTX;
6925     }
6926 
6927   /* SUBREG of a hard register => just change the register number
6928      and/or mode.  If the hard register is not valid in that mode,
6929      suppress this simplification.  If the hard register is the stack,
6930      frame, or argument pointer, leave this as a SUBREG.  */
6931 
6932   if (REG_P (op) && HARD_REGISTER_P (op))
6933     {
6934       unsigned int regno, final_regno;
6935 
6936       regno = REGNO (op);
6937       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6938       if (HARD_REGISTER_NUM_P (final_regno))
6939 	{
6940 	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6941 				      subreg_memory_offset (outermode,
6942 							    innermode, byte));
6943 
6944 	  /* Propagate the original regno.  We don't have any way to specify
6945 	     the offset inside the original regno, so do so only for the
6946 	     lowpart.  The information is used only by alias analysis, which
6947 	     cannot grok partial registers anyway.  */
6948 
6949 	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6950 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6951 	  return x;
6952 	}
6953     }
6954 
6955   /* If we have a SUBREG of a register that we are replacing and we are
6956      replacing it with a MEM, make a new MEM and try replacing the
6957      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
6958      or if we would be widening it.  */
6959 
6960   if (MEM_P (op)
6961       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6962       /* Allow splitting of volatile memory references in case we don't
6963          have an instruction to move the whole thing.  */
6964       && (! MEM_VOLATILE_P (op)
6965 	  || ! have_insn_for (SET, innermode))
6966       && known_le (outersize, innersize))
6967     return adjust_address_nv (op, outermode, byte);
6968 
6969   /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6970      of two parts.  */
6971   if (GET_CODE (op) == CONCAT
6972       || GET_CODE (op) == VEC_CONCAT)
6973     {
6974       poly_uint64 final_offset;
6975       rtx part, res;
6976 
6977       machine_mode part_mode = GET_MODE (XEXP (op, 0));
6978       if (part_mode == VOIDmode)
6979 	part_mode = GET_MODE_INNER (GET_MODE (op));
6980       poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6981       if (known_lt (byte, part_size))
6982 	{
6983 	  part = XEXP (op, 0);
6984 	  final_offset = byte;
6985 	}
6986       else if (known_ge (byte, part_size))
6987 	{
6988 	  part = XEXP (op, 1);
6989 	  final_offset = byte - part_size;
6990 	}
6991       else
6992 	return NULL_RTX;
6993 
6994       if (maybe_gt (final_offset + outersize, part_size))
6995 	return NULL_RTX;
6996 
6997       part_mode = GET_MODE (part);
6998       if (part_mode == VOIDmode)
6999 	part_mode = GET_MODE_INNER (GET_MODE (op));
7000       res = simplify_subreg (outermode, part, part_mode, final_offset);
7001       if (res)
7002 	return res;
7003       if (validate_subreg (outermode, part_mode, part, final_offset))
7004 	return gen_rtx_SUBREG (outermode, part, final_offset);
7005       return NULL_RTX;
7006     }
7007 
7008   /* Simplify
7009 	(subreg (vec_merge (X)
7010 			   (vector)
7011 			   (const_int ((1 << N) | M)))
7012 		(N * sizeof (outermode)))
7013      to
7014 	(subreg (X) (N * sizeof (outermode)))
7015    */
7016   unsigned int idx;
7017   if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7018       && idx < HOST_BITS_PER_WIDE_INT
7019       && GET_CODE (op) == VEC_MERGE
7020       && GET_MODE_INNER (innermode) == outermode
7021       && CONST_INT_P (XEXP (op, 2))
7022       && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7023     return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7024 
7025   /* A SUBREG resulting from a zero extension may fold to zero if
7026      it extracts higher bits than the ZERO_EXTEND's source provides.  */
7027   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7028     {
7029       poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7030       if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7031 	return CONST0_RTX (outermode);
7032     }
7033 
7034   scalar_int_mode int_outermode, int_innermode;
7035   if (is_a <scalar_int_mode> (outermode, &int_outermode)
7036       && is_a <scalar_int_mode> (innermode, &int_innermode)
7037       && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7038     {
7039       /* Handle polynomial integers.  The upper bits of a paradoxical
7040 	 subreg are undefined, so this is safe regardless of whether
7041 	 we're truncating or extending.  */
7042       if (CONST_POLY_INT_P (op))
7043 	{
7044 	  poly_wide_int val
7045 	    = poly_wide_int::from (const_poly_int_value (op),
7046 				   GET_MODE_PRECISION (int_outermode),
7047 				   SIGNED);
7048 	  return immed_wide_int_const (val, int_outermode);
7049 	}
7050 
7051       if (GET_MODE_PRECISION (int_outermode)
7052 	  < GET_MODE_PRECISION (int_innermode))
7053 	{
7054 	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7055 	  if (tem)
7056 	    return tem;
7057 	}
7058     }
7059 
7060   /* If OP is a vector comparison and the subreg is not changing the
7061      number of elements or the size of the elements, change the result
7062      of the comparison to the new mode.  */
7063   if (COMPARISON_P (op)
7064       && VECTOR_MODE_P (outermode)
7065       && VECTOR_MODE_P (innermode)
7066       && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7067       && known_eq (GET_MODE_UNIT_SIZE (outermode),
7068 		    GET_MODE_UNIT_SIZE (innermode)))
7069     return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7070 				    XEXP (op, 0), XEXP (op, 1));
7071   return NULL_RTX;
7072 }
7073 
7074 /* Make a SUBREG operation or equivalent if it folds.  */
7075 
7076 rtx
7077 simplify_gen_subreg (machine_mode outermode, rtx op,
7078 		     machine_mode innermode, poly_uint64 byte)
7079 {
7080   rtx newx;
7081 
7082   newx = simplify_subreg (outermode, op, innermode, byte);
7083   if (newx)
7084     return newx;
7085 
7086   if (GET_CODE (op) == SUBREG
7087       || GET_CODE (op) == CONCAT
7088       || GET_MODE (op) == VOIDmode)
7089     return NULL_RTX;
7090 
7091   if (validate_subreg (outermode, innermode, op, byte))
7092     return gen_rtx_SUBREG (outermode, op, byte);
7093 
7094   return NULL_RTX;
7095 }
7096 
7097 /* Generate a subreg that extracts the least significant part of EXPR
7098    (which has mode INNER_MODE) in mode OUTER_MODE.  */
7099 
7100 rtx
7101 lowpart_subreg (machine_mode outer_mode, rtx expr,
7102 		machine_mode inner_mode)
7103 {
7104   return simplify_gen_subreg (outer_mode, expr, inner_mode,
7105 			      subreg_lowpart_offset (outer_mode, inner_mode));
7106 }
7107 
7108 /* Simplify X, an rtx expression.
7109 
7110    Return the simplified expression or NULL if no simplifications
7111    were possible.
7112 
7113    This is the preferred entry point into the simplification routines;
7114    however, we still allow passes to call the more specific routines.
7115 
7116    Right now GCC has three (yes, three) major bodies of RTL simplification
7117    code that need to be unified.
7118 
7119 	1. fold_rtx in cse.c.  This code uses various CSE specific
7120 	   information to aid in RTL simplification.
7121 
7122 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
7123 	   it uses combine specific information to aid in RTL
7124 	   simplification.
7125 
7126 	3. The routines in this file.
7127 
7128 
7129    Long term we want to have only one body of simplification code; to
7130    get to that state I recommend the following steps:
7131 
7132 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
7133 	   which do not depend on pass-specific state into these routines.
7134 
7135 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
7136 	   use this routine whenever possible.
7137 
7138 	3. Allow for pass dependent state to be provided to these
7139 	   routines and add simplifications based on the pass dependent
7140 	   state.  Remove code from cse.c & combine.c that becomes
7141 	   redundant/dead.
7142 
7143     It will take time, but ultimately the compiler will be easier to
7144     maintain and improve.  It's totally silly that when we add a
7145     simplification it needs to be added to 4 places (3 for RTL
7146     simplification and 1 for tree simplification).  */
7147 
7148 rtx
7149 simplify_rtx (const_rtx x)
7150 {
7151   const enum rtx_code code = GET_CODE (x);
7152   const machine_mode mode = GET_MODE (x);
7153 
7154   switch (GET_RTX_CLASS (code))
7155     {
7156     case RTX_UNARY:
7157       return simplify_unary_operation (code, mode,
7158 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7159     case RTX_COMM_ARITH:
7160       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7161 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7162 
7163       /* Fall through.  */
7164 
7165     case RTX_BIN_ARITH:
7166       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7167 
7168     case RTX_TERNARY:
7169     case RTX_BITFIELD_OPS:
7170       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7171 					 XEXP (x, 0), XEXP (x, 1),
7172 					 XEXP (x, 2));
7173 
7174     case RTX_COMPARE:
7175     case RTX_COMM_COMPARE:
7176       return simplify_relational_operation (code, mode,
7177                                             ((GET_MODE (XEXP (x, 0))
7178                                              != VOIDmode)
7179                                             ? GET_MODE (XEXP (x, 0))
7180                                             : GET_MODE (XEXP (x, 1))),
7181                                             XEXP (x, 0),
7182                                             XEXP (x, 1));
7183 
7184     case RTX_EXTRA:
7185       if (code == SUBREG)
7186 	return simplify_subreg (mode, SUBREG_REG (x),
7187 				GET_MODE (SUBREG_REG (x)),
7188 				SUBREG_BYTE (x));
7189       break;
7190 
7191     case RTX_OBJ:
7192       if (code == LO_SUM)
7193 	{
7194 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
7195 	  if (GET_CODE (XEXP (x, 0)) == HIGH
7196 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7197 	    return XEXP (x, 1);
7198 	}
7199       break;
7200 
7201     default:
7202       break;
7203     }
7204   return NULL;
7205 }
7206 
7207 #if CHECKING_P
7208 
7209 namespace selftest {
7210 
7211 /* Make a unique pseudo REG of mode MODE for use by selftests.  */
7212 
7213 static rtx
7214 make_test_reg (machine_mode mode)
7215 {
7216   static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7217 
7218   return gen_rtx_REG (mode, test_reg_num++);
7219 }
7220 
7221 /* Test vector simplifications involving VEC_DUPLICATE in which the
7222    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
7223    register that holds one element of MODE.  */
7224 
7225 static void
7226 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7227 {
7228   scalar_mode inner_mode = GET_MODE_INNER (mode);
7229   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7230   poly_uint64 nunits = GET_MODE_NUNITS (mode);
7231   if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7232     {
7233       /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
7234       rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7235       rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7236       ASSERT_RTX_EQ (duplicate,
7237 		     simplify_unary_operation (NOT, mode,
7238 					       duplicate_not, mode));
7239 
7240       rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7241       rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7242       ASSERT_RTX_EQ (duplicate,
7243 		     simplify_unary_operation (NEG, mode,
7244 					       duplicate_neg, mode));
7245 
7246       /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
7247       ASSERT_RTX_EQ (duplicate,
7248 		     simplify_binary_operation (PLUS, mode, duplicate,
7249 						CONST0_RTX (mode)));
7250 
7251       ASSERT_RTX_EQ (duplicate,
7252 		     simplify_binary_operation (MINUS, mode, duplicate,
7253 						CONST0_RTX (mode)));
7254 
7255       ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7256 			 simplify_binary_operation (MINUS, mode, duplicate,
7257 						    duplicate));
7258     }
7259 
7260   /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
7261   rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7262   ASSERT_RTX_PTR_EQ (scalar_reg,
7263 		     simplify_binary_operation (VEC_SELECT, inner_mode,
7264 						duplicate, zero_par));
7265 
7266   unsigned HOST_WIDE_INT const_nunits;
7267   if (nunits.is_constant (&const_nunits))
7268     {
7269       /* And again with the final element.  */
7270       rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7271       rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7272       ASSERT_RTX_PTR_EQ (scalar_reg,
7273 			 simplify_binary_operation (VEC_SELECT, inner_mode,
7274 						    duplicate, last_par));
7275 
7276       /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
7277       rtx vector_reg = make_test_reg (mode);
7278       for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7279 	{
7280 	  if (i >= HOST_BITS_PER_WIDE_INT)
7281 	    break;
7282 	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7283 	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7284 	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7285 	  ASSERT_RTX_EQ (scalar_reg,
7286 			 simplify_gen_subreg (inner_mode, vm,
7287 					      mode, offset));
7288 	}
7289     }
7290 
7291   /* Test a scalar subreg of a VEC_DUPLICATE.  */
7292   poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
7293   ASSERT_RTX_EQ (scalar_reg,
7294 		 simplify_gen_subreg (inner_mode, duplicate,
7295 				      mode, offset));
7296 
7297   machine_mode narrower_mode;
7298   if (maybe_ne (nunits, 2U)
7299       && multiple_p (nunits, 2)
7300       && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
7301       && VECTOR_MODE_P (narrower_mode))
7302     {
7303       /* Test VEC_DUPLICATE of a vector.  */
7304       rtx_vector_builder nbuilder (narrower_mode, 2, 1);
7305       nbuilder.quick_push (const0_rtx);
7306       nbuilder.quick_push (const1_rtx);
7307       rtx_vector_builder builder (mode, 2, 1);
7308       builder.quick_push (const0_rtx);
7309       builder.quick_push (const1_rtx);
7310       ASSERT_RTX_EQ (builder.build (),
7311 		     simplify_unary_operation (VEC_DUPLICATE, mode,
7312 					       nbuilder.build (),
7313 					       narrower_mode));
7314 
7315       /* Test VEC_SELECT of a vector.  */
7316       rtx vec_par
7317 	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
7318       rtx narrower_duplicate
7319 	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
7320       ASSERT_RTX_EQ (narrower_duplicate,
7321 		     simplify_binary_operation (VEC_SELECT, narrower_mode,
7322 						duplicate, vec_par));
7323 
7324       /* Test a vector subreg of a VEC_DUPLICATE.  */
7325       poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
7326       ASSERT_RTX_EQ (narrower_duplicate,
7327 		     simplify_gen_subreg (narrower_mode, duplicate,
7328 					  mode, offset));
7329     }
7330 }
7331 
7332 /* Test vector simplifications involving VEC_SERIES in which the
7333    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
7334    register that holds one element of MODE.  */
7335 
7336 static void
7337 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7338 {
7339   /* Test unary cases with VEC_SERIES arguments.  */
7340   scalar_mode inner_mode = GET_MODE_INNER (mode);
7341   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7342   rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7343   rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7344   rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
7345   rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
7346   rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
7347   rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
7348   rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
7349 					 neg_scalar_reg);
7350   ASSERT_RTX_EQ (series_0_r,
7351 		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
7352   ASSERT_RTX_EQ (series_r_m1,
7353 		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
7354   ASSERT_RTX_EQ (series_r_r,
7355 		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
7356 
7357   /* Test that a VEC_SERIES with a zero step is simplified away.  */
7358   ASSERT_RTX_EQ (duplicate,
7359 		 simplify_binary_operation (VEC_SERIES, mode,
7360 					    scalar_reg, const0_rtx));
7361 
7362   /* Test PLUS and MINUS with VEC_SERIES.  */
7363   rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
7364   rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
7365   rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
7366   ASSERT_RTX_EQ (series_r_r,
7367 		 simplify_binary_operation (PLUS, mode, series_0_r,
7368 					    duplicate));
7369   ASSERT_RTX_EQ (series_r_1,
7370 		 simplify_binary_operation (PLUS, mode, duplicate,
7371 					    series_0_1));
7372   ASSERT_RTX_EQ (series_r_m1,
7373 		 simplify_binary_operation (PLUS, mode, duplicate,
7374 					    series_0_m1));
7375   ASSERT_RTX_EQ (series_0_r,
7376 		 simplify_binary_operation (MINUS, mode, series_r_r,
7377 					    duplicate));
7378   ASSERT_RTX_EQ (series_r_m1,
7379 		 simplify_binary_operation (MINUS, mode, duplicate,
7380 					    series_0_1));
7381   ASSERT_RTX_EQ (series_r_1,
7382 		 simplify_binary_operation (MINUS, mode, duplicate,
7383 					    series_0_m1));
7384   ASSERT_RTX_EQ (series_0_m1,
7385 		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
7386 					    constm1_rtx));
7387 
7388   /* Test NEG on constant vector series.  */
7389   ASSERT_RTX_EQ (series_0_m1,
7390 		 simplify_unary_operation (NEG, mode, series_0_1, mode));
7391   ASSERT_RTX_EQ (series_0_1,
7392 		 simplify_unary_operation (NEG, mode, series_0_m1, mode));
7393 
7394   /* Test PLUS and MINUS on constant vector series.  */
7395   rtx scalar2 = gen_int_mode (2, inner_mode);
7396   rtx scalar3 = gen_int_mode (3, inner_mode);
7397   rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
7398   rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
7399   rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
7400   ASSERT_RTX_EQ (series_1_1,
7401 		 simplify_binary_operation (PLUS, mode, series_0_1,
7402 					    CONST1_RTX (mode)));
7403   ASSERT_RTX_EQ (series_0_m1,
7404 		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
7405 					    series_0_m1));
7406   ASSERT_RTX_EQ (series_1_3,
7407 		 simplify_binary_operation (PLUS, mode, series_1_1,
7408 					    series_0_2));
7409   ASSERT_RTX_EQ (series_0_1,
7410 		 simplify_binary_operation (MINUS, mode, series_1_1,
7411 					    CONST1_RTX (mode)));
7412   ASSERT_RTX_EQ (series_1_1,
7413 		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
7414 					    series_0_m1));
7415   ASSERT_RTX_EQ (series_1_1,
7416 		 simplify_binary_operation (MINUS, mode, series_1_3,
7417 					    series_0_2));
7418 
7419   /* Test MULT between constant vectors.  */
7420   rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
7421   rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
7422   rtx scalar9 = gen_int_mode (9, inner_mode);
7423   rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
7424   ASSERT_RTX_EQ (series_0_2,
7425 		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
7426   ASSERT_RTX_EQ (series_3_9,
7427 		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
7428   if (!GET_MODE_NUNITS (mode).is_constant ())
7429     ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
7430 					     series_0_1));
7431 
7432   /* Test ASHIFT between constant vectors.  */
7433   ASSERT_RTX_EQ (series_0_2,
7434 		 simplify_binary_operation (ASHIFT, mode, series_0_1,
7435 					    CONST1_RTX (mode)));
7436   if (!GET_MODE_NUNITS (mode).is_constant ())
7437     ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
7438 					     series_0_1));
7439 }
7440 
7441 /* Verify simplify_merge_mask works correctly.  */
7442 
7443 static void
7444 test_vec_merge (machine_mode mode)
7445 {
7446   rtx op0 = make_test_reg (mode);
7447   rtx op1 = make_test_reg (mode);
7448   rtx op2 = make_test_reg (mode);
7449   rtx op3 = make_test_reg (mode);
7450   rtx op4 = make_test_reg (mode);
7451   rtx op5 = make_test_reg (mode);
7452   rtx mask1 = make_test_reg (SImode);
7453   rtx mask2 = make_test_reg (SImode);
7454   rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
7455   rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
7456   rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
7457 
7458   /* Simple vec_merge.  */
7459   ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
7460   ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
7461   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
7462   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
7463 
7464   /* Nested vec_merge.
7465      It's tempting to make this simplify right down to opN, but we don't
7466      because all the simplify_* functions assume that the operands have
7467      already been simplified.  */
7468   rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
7469   ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
7470   ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
7471 
7472   /* Intermediate unary op.  */
7473   rtx unop = gen_rtx_NOT (mode, vm1);
7474   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
7475 		 simplify_merge_mask (unop, mask1, 0));
7476   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
7477 		 simplify_merge_mask (unop, mask1, 1));
7478 
7479   /* Intermediate binary op.  */
7480   rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7481   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7482 		 simplify_merge_mask (binop, mask1, 0));
7483   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7484 		 simplify_merge_mask (binop, mask1, 1));
7485 
7486   /* Intermediate ternary op.  */
7487   rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7488   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7489 		 simplify_merge_mask (tenop, mask1, 0));
7490   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7491 		 simplify_merge_mask (tenop, mask1, 1));
7492 
7493   /* Side effects.  */
7494   rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7495   rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7496   ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7497   ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
7498 
7499   /* Called indirectly.  */
7500   ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7501 		 simplify_rtx (nvm));
7502 }
7503 
7504 /* Test subregs of integer vector constant X, trying elements in
7505    the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
7506    where NELTS is the number of elements in X.  Subregs involving
7507    elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */
7508 
7509 static void
7510 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
7511 			   unsigned int first_valid = 0)
7512 {
7513   machine_mode inner_mode = GET_MODE (x);
7514   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7515 
7516   for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
7517     {
7518       machine_mode outer_mode = (machine_mode) modei;
7519       if (!VECTOR_MODE_P (outer_mode))
7520 	continue;
7521 
7522       unsigned int outer_nunits;
7523       if (GET_MODE_INNER (outer_mode) == int_mode
7524 	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
7525 	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
7526 	{
7527 	  /* Test subregs in which the outer mode is a smaller,
7528 	     constant-sized vector of the same element type.  */
7529 	  unsigned int limit
7530 	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
7531 	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
7532 	    {
7533 	      rtx expected = NULL_RTX;
7534 	      if (elt >= first_valid)
7535 		{
7536 		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
7537 		  for (unsigned int i = 0; i < outer_nunits; ++i)
7538 		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
7539 		  expected = builder.build ();
7540 		}
7541 	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
7542 	      ASSERT_RTX_EQ (expected,
7543 			     simplify_subreg (outer_mode, x,
7544 					      inner_mode, byte));
7545 	    }
7546 	}
7547       else if (known_eq (GET_MODE_SIZE (outer_mode),
7548 			 GET_MODE_SIZE (inner_mode))
7549 	       && known_eq (elt_bias, 0U)
7550 	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
7551 		   || known_eq (GET_MODE_BITSIZE (outer_mode),
7552 				GET_MODE_NUNITS (outer_mode)))
7553 	       && (!FLOAT_MODE_P (outer_mode)
7554 		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
7555 		       == GET_MODE_UNIT_PRECISION (outer_mode)))
7556 	       && (GET_MODE_SIZE (inner_mode).is_constant ()
7557 		   || !CONST_VECTOR_STEPPED_P (x)))
7558 	{
7559 	  /* Try converting to OUTER_MODE and back.  */
7560 	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
7561 	  ASSERT_TRUE (outer_x != NULL_RTX);
7562 	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
7563 					     outer_mode, 0));
7564 	}
7565     }
7566 
7567   if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
7568     {
7569       /* Test each byte in the element range.  */
7570       unsigned int limit
7571 	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
7572       for (unsigned int i = 0; i < limit; ++i)
7573 	{
7574 	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
7575 	  rtx expected = NULL_RTX;
7576 	  if (elt >= first_valid)
7577 	    {
7578 	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
7579 	      if (BYTES_BIG_ENDIAN)
7580 		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
7581 	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
7582 	      wide_int shifted_elt
7583 		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
7584 	      expected = immed_wide_int_const (shifted_elt, QImode);
7585 	    }
7586 	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
7587 	  ASSERT_RTX_EQ (expected,
7588 			 simplify_subreg (QImode, x, inner_mode, byte));
7589 	}
7590     }
7591 }
7592 
7593 /* Test constant subregs of integer vector mode INNER_MODE, using 1
7594    element per pattern.  */
7595 
7596 static void
7597 test_vector_subregs_repeating (machine_mode inner_mode)
7598 {
7599   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7600   unsigned int min_nunits = constant_lower_bound (nunits);
7601   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7602   unsigned int count = gcd (min_nunits, 8);
7603 
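  /* Build the repeating series { 8, 7, ..., 9 - COUNT, 8, 7, ... }.  */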
7604   rtx_vector_builder builder (inner_mode, count, 1);
7605   for (unsigned int i = 0; i < count; ++i)
7606     builder.quick_push (gen_int_mode (8 - i, int_mode));
7607   rtx x = builder.build ();
7608 
7609   test_vector_subregs_modes (x);
7610   if (!nunits.is_constant ())
7611     test_vector_subregs_modes (x, nunits - min_nunits);
7612 }
7613 
7614 /* Test constant subregs of integer vector mode INNER_MODE, using 2
7615    elements per pattern.  */
7616 
7617 static void
7618 test_vector_subregs_fore_back (machine_mode inner_mode)
7619 {
7620   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
7621   unsigned int min_nunits = constant_lower_bound (nunits);
7622   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7623   unsigned int count = gcd (min_nunits, 4);
7624 
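  /* Build { 0, 1, ..., COUNT - 1 } for the first element of each pattern,
     followed by { 0, -1, ..., -(COUNT - 1) } for the second.  */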
7625   rtx_vector_builder builder (inner_mode, count, 2);
7626   for (unsigned int i = 0; i < count; ++i)
7627     builder.quick_push (gen_int_mode (i, int_mode));
7628   for (unsigned int i = 0; i < count; ++i)
7629     builder.quick_push (gen_int_mode (-(int) i, int_mode));
7630   rtx x = builder.build ();
7631 
7632   test_vector_subregs_modes (x);
7633   if (!nunits.is_constant ())
7634     test_vector_subregs_modes (x, nunits - min_nunits, count);
7635 }
7636 
7637 /* Test constant subregs of integer vector mode INNER_MODE, using 3
7638    elements per pattern.  */
7639 
7640 static void
7641 test_vector_subregs_stepped (machine_mode inner_mode)
7642 {
7643   /* Build { 0, 1, 2, 3, ... }.  */
7644   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
7645   rtx_vector_builder builder (inner_mode, 1, 3);
7646   for (unsigned int i = 0; i < 3; ++i)
7647     builder.quick_push (gen_int_mode (i, int_mode));
7648   rtx x = builder.build ();
7649 
7650   test_vector_subregs_modes (x);
7651 }
7652 
7653 /* Test constant subregs of integer vector mode INNER_MODE.  */
7654 
7655 static void
7656 test_vector_subregs (machine_mode inner_mode)
7657 {
7658   test_vector_subregs_repeating (inner_mode);
7659   test_vector_subregs_fore_back (inner_mode);
7660   test_vector_subregs_stepped (inner_mode);
7661 }
7662 
7663 /* Verify some simplifications involving vectors.  */
7664 
7665 static void
7666 test_vector_ops ()
7667 {
7668   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7669     {
7670       machine_mode mode = (machine_mode) i;
7671       if (VECTOR_MODE_P (mode))
7672 	{
7673 	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
7674 	  test_vector_ops_duplicate (mode, scalar_reg);
7675 	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7676 	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
7677 	    {
7678 	      test_vector_ops_series (mode, scalar_reg);
7679 	      test_vector_subregs (mode);
7680 	    }
7681 	  test_vec_merge (mode);
7682 	}
7683     }
7684 }
7685 
7686 template<unsigned int N>
7687 struct simplify_const_poly_int_tests
7688 {
7689   static void run ();
7690 };
7691 
7692 template<>
7693 struct simplify_const_poly_int_tests<1>
7694 {
7695   static void run () {}
7696 };
7697 
7698 /* Test various CONST_POLY_INT properties.  */
7699 
7700 template<unsigned int N>
7701 void
7702 simplify_const_poly_int_tests<N>::run ()
7703 {
7704   rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
7705   rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
7706   rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
7707   rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
7708   rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
7709   rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
7710   rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
7711   rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
7712   rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
7713   rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
7714   rtx two = GEN_INT (2);
7715   rtx six = GEN_INT (6);
7716   poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
7717 
7718   /* These tests only try limited operation combinations.  Fuller arithmetic
7719      testing is done directly on poly_ints.  */
7720   ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
7721   ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
7722   ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
7723   ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
7724   ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
7725   ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
7726   ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
7727   ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
7728   ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
7729   ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
7730   ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
7731 }
7732 
7733 /* Run all of the selftests within this file.  */
7734 
7735 void
7736 simplify_rtx_c_tests ()
7737 {
7738   test_vector_ops ();
7739   simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
7740 }
7741 
7742 } // namespace selftest
7743 
7744 #endif /* CHECKING_P */
7745