1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39 #include "rtlanal.h"
40 
41 /* Simplification and canonicalization of RTL.  */
42 
43 /* Much code operates on (low, high) pairs; the low value is an
44    unsigned wide int, the high value a signed wide int.  We
45    occasionally need to sign extend from low to high as if low were a
46    signed wide int.  */
47 #define HWI_SIGN_EXTEND(low) \
48   ((((HOST_WIDE_INT) (low)) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
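
/* For illustration (arbitrary example values): a low word whose top bit is
   set sign-extends to an all-ones high word, while a non-negative one
   extends to zero.  A minimal sketch:

     unsigned HOST_WIDE_INT lo
       = HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1);
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);

   leaves HI equal to HOST_WIDE_INT_M1, whereas
   HWI_SIGN_EXTEND (HOST_WIDE_INT_1U) evaluates to HOST_WIDE_INT_0.  */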
49 
50 static bool plus_minus_operand_p (const_rtx);
51 
52 /* Negate I, which satisfies poly_int_rtx_p.  MODE is the mode of I.  */
53 
54 static rtx
55 neg_poly_int_rtx (machine_mode mode, const_rtx i)
56 {
57   return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
58 }
59 
60 /* Test whether expression X is an immediate constant that represents
61    the most significant bit of machine mode MODE.  */
62 
63 bool
64 mode_signbit_p (machine_mode mode, const_rtx x)
65 {
66   unsigned HOST_WIDE_INT val;
67   unsigned int width;
68   scalar_int_mode int_mode;
69 
70   if (!is_int_mode (mode, &int_mode))
71     return false;
72 
73   width = GET_MODE_PRECISION (int_mode);
74   if (width == 0)
75     return false;
76 
77   if (width <= HOST_BITS_PER_WIDE_INT
78       && CONST_INT_P (x))
79     val = INTVAL (x);
80 #if TARGET_SUPPORTS_WIDE_INT
81   else if (CONST_WIDE_INT_P (x))
82     {
83       unsigned int i;
84       unsigned int elts = CONST_WIDE_INT_NUNITS (x);
85       if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
86 	return false;
87       for (i = 0; i < elts - 1; i++)
88 	if (CONST_WIDE_INT_ELT (x, i) != 0)
89 	  return false;
90       val = CONST_WIDE_INT_ELT (x, elts - 1);
91       width %= HOST_BITS_PER_WIDE_INT;
92       if (width == 0)
93 	width = HOST_BITS_PER_WIDE_INT;
94     }
95 #else
96   else if (width <= HOST_BITS_PER_DOUBLE_INT
97 	   && CONST_DOUBLE_AS_INT_P (x)
98 	   && CONST_DOUBLE_LOW (x) == 0)
99     {
100       val = CONST_DOUBLE_HIGH (x);
101       width -= HOST_BITS_PER_WIDE_INT;
102     }
103 #endif
104   else
105     /* X is not an integer constant.  */
106     return false;
107 
108   if (width < HOST_BITS_PER_WIDE_INT)
109     val &= (HOST_WIDE_INT_1U << width) - 1;
110   return val == (HOST_WIDE_INT_1U << (width - 1));
111 }
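
/* Illustrative example, assuming the usual 32-bit SImode: the sign-bit
   constant is 1 << 31, so

     mode_signbit_p (SImode, gen_int_mode (HOST_WIDE_INT_1U << 31, SImode))

   returns true, while any value with additional or different bits set
   does not.  */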
112 
113 /* Test whether VAL is equal to the most significant bit of mode MODE
114    (after masking with the mode mask of MODE).  Returns false if the
115    precision of MODE is too large to handle.  */
116 
117 bool
118 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
119 {
120   unsigned int width;
121   scalar_int_mode int_mode;
122 
123   if (!is_int_mode (mode, &int_mode))
124     return false;
125 
126   width = GET_MODE_PRECISION (int_mode);
127   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
128     return false;
129 
130   val &= GET_MODE_MASK (int_mode);
131   return val == (HOST_WIDE_INT_1U << (width - 1));
132 }
133 
134 /* Test whether the most significant bit of mode MODE is set in VAL.
135    Returns false if the precision of MODE is too large to handle.  */
136 bool
137 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
138 {
139   unsigned int width;
140 
141   scalar_int_mode int_mode;
142   if (!is_int_mode (mode, &int_mode))
143     return false;
144 
145   width = GET_MODE_PRECISION (int_mode);
146   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
147     return false;
148 
149   val &= HOST_WIDE_INT_1U << (width - 1);
150   return val != 0;
151 }
152 
153 /* Test whether the most significant bit of mode MODE is clear in VAL.
154    Returns false if the precision of MODE is too large to handle.  */
155 bool
156 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
157 {
158   unsigned int width;
159 
160   scalar_int_mode int_mode;
161   if (!is_int_mode (mode, &int_mode))
162     return false;
163 
164   width = GET_MODE_PRECISION (int_mode);
165   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166     return false;
167 
168   val &= HOST_WIDE_INT_1U << (width - 1);
169   return val == 0;
170 }
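
/* Illustrative examples, assuming 8-bit QImode: val_signbit_p (QImode, 0x80)
   and val_signbit_known_set_p (QImode, 0xf0) both return true, and
   val_signbit_known_clear_p (QImode, 0x7f) returns true because bit 7 is
   clear.  */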
171 
172 /* Make a binary operation by properly ordering the operands and
173    seeing if the expression folds.  */
174 
175 rtx
176 simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
177 				       rtx op0, rtx op1)
178 {
179   rtx tem;
180 
181   /* If this simplifies, do it.  */
182   tem = simplify_binary_operation (code, mode, op0, op1);
183   if (tem)
184     return tem;
185 
186   /* Put complex operands first and constants second if commutative.  */
187   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
188       && swap_commutative_operands_p (op0, op1))
189     std::swap (op0, op1);
190 
191   return gen_rtx_fmt_ee (code, mode, op0, op1);
192 }
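
/* A minimal usage sketch: constant operands are folded through
   simplify_binary_operation, so

     rtx sum = simplify_gen_binary (PLUS, SImode,
				    gen_int_mode (2, SImode),
				    gen_int_mode (3, SImode));

   is expected to yield (const_int 5) rather than a PLUS rtx, while
   non-foldable operands of a commutative code come back as a fresh binary
   rtx with any constant operand placed second.  */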
193 
194 /* If X is a MEM referencing the constant pool, return the real value.
195    Otherwise return X.  */
196 rtx
197 avoid_constant_pool_reference (rtx x)
198 {
199   rtx c, tmp, addr;
200   machine_mode cmode;
201   poly_int64 offset = 0;
202 
203   switch (GET_CODE (x))
204     {
205     case MEM:
206       break;
207 
208     case FLOAT_EXTEND:
209       /* Handle float extensions of constant pool references.  */
210       tmp = XEXP (x, 0);
211       c = avoid_constant_pool_reference (tmp);
212       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
213 	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
214 					     GET_MODE (x));
215       return x;
216 
217     default:
218       return x;
219     }
220 
221   if (GET_MODE (x) == BLKmode)
222     return x;
223 
224   addr = XEXP (x, 0);
225 
226   /* Call target hook to avoid the effects of -fpic etc....  */
227   addr = targetm.delegitimize_address (addr);
228 
229   /* Split the address into a base and integer offset.  */
230   addr = strip_offset (addr, &offset);
231 
232   if (GET_CODE (addr) == LO_SUM)
233     addr = XEXP (addr, 1);
234 
235   /* If this is a constant pool reference, we can turn it into its
236      constant and hope that simplifications happen.  */
237   if (GET_CODE (addr) == SYMBOL_REF
238       && CONSTANT_POOL_ADDRESS_P (addr))
239     {
240       c = get_pool_constant (addr);
241       cmode = get_pool_mode (addr);
242 
243       /* If we're accessing the constant in a different mode than it was
244          originally stored, attempt to fix that up via subreg simplifications.
245          If that fails we have no choice but to return the original memory.  */
246       if (known_eq (offset, 0) && cmode == GET_MODE (x))
247 	return c;
248       else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
249         {
250           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251           if (tem && CONSTANT_P (tem))
252             return tem;
253         }
254     }
255 
256   return x;
257 }
258 
259 /* Simplify a MEM based on its attributes.  This is the default
260    delegitimize_address target hook, and it's recommended that every
261    overrider call it.  */
262 
263 rtx
264 delegitimize_mem_from_attrs (rtx x)
265 {
266   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267      use their base addresses as equivalent.  */
268   if (MEM_P (x)
269       && MEM_EXPR (x)
270       && MEM_OFFSET_KNOWN_P (x))
271     {
272       tree decl = MEM_EXPR (x);
273       machine_mode mode = GET_MODE (x);
274       poly_int64 offset = 0;
275 
276       switch (TREE_CODE (decl))
277 	{
278 	default:
279 	  decl = NULL;
280 	  break;
281 
282 	case VAR_DECL:
283 	  break;
284 
285 	case ARRAY_REF:
286 	case ARRAY_RANGE_REF:
287 	case COMPONENT_REF:
288 	case BIT_FIELD_REF:
289 	case REALPART_EXPR:
290 	case IMAGPART_EXPR:
291 	case VIEW_CONVERT_EXPR:
292 	  {
293 	    poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
294 	    tree toffset;
295 	    int unsignedp, reversep, volatilep = 0;
296 
297 	    decl
298 	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
299 				     &unsignedp, &reversep, &volatilep);
300 	    if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
301 		|| !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
302 		|| (toffset && !poly_int_tree_p (toffset, &toffset_val)))
303 	      decl = NULL;
304 	    else
305 	      offset += bytepos + toffset_val;
306 	    break;
307 	  }
308 	}
309 
310       if (decl
311 	  && mode == GET_MODE (x)
312 	  && VAR_P (decl)
313 	  && (TREE_STATIC (decl)
314 	      || DECL_THREAD_LOCAL_P (decl))
315 	  && DECL_RTL_SET_P (decl)
316 	  && MEM_P (DECL_RTL (decl)))
317 	{
318 	  rtx newx;
319 
320 	  offset += MEM_OFFSET (x);
321 
322 	  newx = DECL_RTL (decl);
323 
324 	  if (MEM_P (newx))
325 	    {
326 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
327 	      poly_int64 n_offset, o_offset;
328 
329 	      /* Avoid creating a new MEM needlessly if we already had
330 		 the same address.  We do if there's no OFFSET and the
331 		 old address X is identical to NEWX, or if X is of the
332 		 form (plus NEWX OFFSET), or the NEWX is of the form
333 		 (plus Y (const_int Z)) and X is that with the offset
334 		 added: (plus Y (const_int Z+OFFSET)).  */
335 	      n = strip_offset (n, &n_offset);
336 	      o = strip_offset (o, &o_offset);
337 	      if (!(known_eq (o_offset, n_offset + offset)
338 		    && rtx_equal_p (o, n)))
339 		x = adjust_address_nv (newx, mode, offset);
340 	    }
341 	  else if (GET_MODE (x) == GET_MODE (newx)
342 		   && known_eq (offset, 0))
343 	    x = newx;
344 	}
345     }
346 
347   return x;
348 }
349 
350 /* Make a unary operation by first seeing if it folds and otherwise making
351    the specified operation.  */
352 
353 rtx
354 simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
355 				      machine_mode op_mode)
356 {
357   rtx tem;
358 
359   /* If this simplifies, use it.  */
360   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
361     return tem;
362 
363   return gen_rtx_fmt_e (code, mode, op);
364 }
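
/* Illustrative sketch: a constant operand folds immediately, e.g.

     simplify_gen_unary (NOT, SImode, const0_rtx, SImode)

   is expected to return constm1_rtx, whereas a non-constant operand simply
   comes back wrapped in the requested unary rtx.  */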
365 
366 /* Likewise for ternary operations.  */
367 
368 rtx
369 simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
370 					machine_mode op0_mode,
371 					rtx op0, rtx op1, rtx op2)
372 {
373   rtx tem;
374 
375   /* If this simplifies, use it.  */
376   if ((tem = simplify_ternary_operation (code, mode, op0_mode,
377 					 op0, op1, op2)) != 0)
378     return tem;
379 
380   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
381 }
382 
383 /* Likewise, for relational operations.
384    CMP_MODE specifies mode comparison is done in.  */
385 
386 rtx
387 simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
388 					   machine_mode cmp_mode,
389 					   rtx op0, rtx op1)
390 {
391   rtx tem;
392 
393   if ((tem = simplify_relational_operation (code, mode, cmp_mode,
394 					    op0, op1)) != 0)
395     return tem;
396 
397   return gen_rtx_fmt_ee (code, mode, op0, op1);
398 }
399 
400 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
401    and simplify the result.  If FN is non-NULL, call this callback on each
402    X; if it returns non-NULL, replace X with its return value and simplify the
403    result.  */
404 
405 rtx
406 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
407 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
408 {
409   enum rtx_code code = GET_CODE (x);
410   machine_mode mode = GET_MODE (x);
411   machine_mode op_mode;
412   const char *fmt;
413   rtx op0, op1, op2, newx, op;
414   rtvec vec, newvec;
415   int i, j;
416 
417   if (__builtin_expect (fn != NULL, 0))
418     {
419       newx = fn (x, old_rtx, data);
420       if (newx)
421 	return newx;
422     }
423   else if (rtx_equal_p (x, old_rtx))
424     return copy_rtx ((rtx) data);
425 
426   switch (GET_RTX_CLASS (code))
427     {
428     case RTX_UNARY:
429       op0 = XEXP (x, 0);
430       op_mode = GET_MODE (op0);
431       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
432       if (op0 == XEXP (x, 0))
433 	return x;
434       return simplify_gen_unary (code, mode, op0, op_mode);
435 
436     case RTX_BIN_ARITH:
437     case RTX_COMM_ARITH:
438       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
439       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
440       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
441 	return x;
442       return simplify_gen_binary (code, mode, op0, op1);
443 
444     case RTX_COMPARE:
445     case RTX_COMM_COMPARE:
446       op0 = XEXP (x, 0);
447       op1 = XEXP (x, 1);
448       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
449       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
450       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
451       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 	return x;
453       return simplify_gen_relational (code, mode, op_mode, op0, op1);
454 
455     case RTX_TERNARY:
456     case RTX_BITFIELD_OPS:
457       op0 = XEXP (x, 0);
458       op_mode = GET_MODE (op0);
459       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
461       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
462       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
463 	return x;
464       if (op_mode == VOIDmode)
465 	op_mode = GET_MODE (op0);
466       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
467 
468     case RTX_EXTRA:
469       if (code == SUBREG)
470 	{
471 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
472 	  if (op0 == SUBREG_REG (x))
473 	    return x;
474 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
475 				     GET_MODE (SUBREG_REG (x)),
476 				     SUBREG_BYTE (x));
477 	  return op0 ? op0 : x;
478 	}
479       break;
480 
481     case RTX_OBJ:
482       if (code == MEM)
483 	{
484 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
485 	  if (op0 == XEXP (x, 0))
486 	    return x;
487 	  return replace_equiv_address_nv (x, op0);
488 	}
489       else if (code == LO_SUM)
490 	{
491 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 
494 	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
495 	  if (GET_CODE (op0) == HIGH)
496 	    {
497 	      rtx base0, base1, offset0, offset1;
498 	      split_const (XEXP (op0, 0), &base0, &offset0);
499 	      split_const (op1, &base1, &offset1);
500 	      if (rtx_equal_p (base0, base1))
501 		return op1;
502 	    }
503 
504 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
505 	    return x;
506 	  return gen_rtx_LO_SUM (mode, op0, op1);
507 	}
508       break;
509 
510     default:
511       break;
512     }
513 
514   newx = x;
515   fmt = GET_RTX_FORMAT (code);
516   for (i = 0; fmt[i]; i++)
517     switch (fmt[i])
518       {
519       case 'E':
520 	vec = XVEC (x, i);
521 	newvec = XVEC (newx, i);
522 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
523 	  {
524 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
525 					  old_rtx, fn, data);
526 	    if (op != RTVEC_ELT (vec, j))
527 	      {
528 		if (newvec == vec)
529 		  {
530 		    newvec = shallow_copy_rtvec (vec);
531 		    if (x == newx)
532 		      newx = shallow_copy_rtx (x);
533 		    XVEC (newx, i) = newvec;
534 		  }
535 		RTVEC_ELT (newvec, j) = op;
536 	      }
537 	  }
538 	break;
539 
540       case 'e':
541 	if (XEXP (x, i))
542 	  {
543 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
544 	    if (op != XEXP (x, i))
545 	      {
546 		if (x == newx)
547 		  newx = shallow_copy_rtx (x);
548 		XEXP (newx, i) = op;
549 	      }
550 	  }
551 	break;
552       }
553   return newx;
554 }
555 
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557    resulting RTX.  Return a new RTX which is as simplified as possible.  */
558 
559 rtx
560 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
561 {
562   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
563 }
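
/* A small usage sketch: given X = (plus:SI (reg:SI 100) (const_int 7)) and
   REG100 standing for that (reg:SI 100) rtx,

     rtx y = simplify_replace_rtx (x, reg100, const0_rtx);

   replaces the register with zero and folds the PLUS, so Y is expected to
   be (const_int 7).  */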
564 
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566    Only handle cases where the truncated value is inherently an rvalue.
567 
568    RTL provides two ways of truncating a value:
569 
570    1. a lowpart subreg.  This form is only a truncation when both
571       the outer and inner modes (here MODE and OP_MODE respectively)
572       are scalar integers, and only then when the subreg is used as
573       an rvalue.
574 
575       It is only valid to form such truncating subregs if the
576       truncation requires no action by the target.  The onus for
577       proving this is on the creator of the subreg -- e.g. the
578       caller to simplify_subreg or simplify_gen_subreg -- and typically
579       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
580 
581    2. a TRUNCATE.  This form handles both scalar and compound integers.
582 
583    The first form is preferred where valid.  However, the TRUNCATE
584    handling in simplify_unary_operation turns the second form into the
585    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586    so it is generally safe to form rvalue truncations using:
587 
588       simplify_gen_unary (TRUNCATE, ...)
589 
590    and leave simplify_unary_operation to work out which representation
591    should be used.
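   For instance, the low SImode part of a DImode value X (modes chosen only
   for illustration) can be requested with

      simplify_gen_unary (TRUNCATE, SImode, X, DImode)

   and whether the result is represented as a lowpart subreg or as an
   explicit TRUNCATE is then left to simplify_unary_operation.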
592 
593    Because of the proof requirements on (1), simplify_truncation must
594    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595    regardless of whether the outer truncation came from a SUBREG or a
596    TRUNCATE.  For example, if the caller has proven that an SImode
597    truncation of:
598 
599       (and:DI X Y)
600 
601    is a no-op and can be represented as a subreg, it does not follow
602    that SImode truncations of X and Y are also no-ops.  On a target
603    like 64-bit MIPS that requires SImode values to be stored in
604    sign-extended form, an SImode truncation of:
605 
606       (and:DI (reg:DI X) (const_int 63))
607 
608    is trivially a no-op because only the lower 6 bits can be set.
609    However, X is still an arbitrary 64-bit number and so we cannot
610    assume that truncating it too is a no-op.  */
611 
612 rtx
613 simplify_context::simplify_truncation (machine_mode mode, rtx op,
614 				       machine_mode op_mode)
615 {
616   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
617   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
618   scalar_int_mode int_mode, int_op_mode, subreg_mode;
619 
620   gcc_assert (precision <= op_precision);
621 
622   /* Optimize truncations of zero and sign extended values.  */
623   if (GET_CODE (op) == ZERO_EXTEND
624       || GET_CODE (op) == SIGN_EXTEND)
625     {
626       /* There are three possibilities.  If MODE is the same as the
627 	 origmode, we can omit both the extension and the subreg.
628 	 If MODE is not larger than the origmode, we can apply the
629 	 truncation without the extension.  Finally, if the outermode
630 	 is larger than the origmode, we can just extend to the appropriate
631 	 mode.  */
632       machine_mode origmode = GET_MODE (XEXP (op, 0));
633       if (mode == origmode)
634 	return XEXP (op, 0);
635       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 	return simplify_gen_unary (TRUNCATE, mode,
637 				   XEXP (op, 0), origmode);
638       else
639 	return simplify_gen_unary (GET_CODE (op), mode,
640 				   XEXP (op, 0), origmode);
641     }
642 
643   /* If the machine can perform operations in the truncated mode, distribute
644      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
646   if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
648       && (GET_CODE (op) == PLUS
649 	  || GET_CODE (op) == MINUS
650 	  || GET_CODE (op) == MULT))
651     {
652       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
653       if (op0)
654 	{
655 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
656 	  if (op1)
657 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
658 	}
659     }
660 
661   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
662      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
663      the outer subreg is effectively a truncation to the original mode.  */
664   if ((GET_CODE (op) == LSHIFTRT
665        || GET_CODE (op) == ASHIFTRT)
666       /* Ensure that OP_MODE is at least twice as wide as MODE
667 	 to avoid the possibility that an outer LSHIFTRT shifts by more
668 	 than the sign extension's sign_bit_copies and introduces zeros
669 	 into the high bits of the result.  */
670       && 2 * precision <= op_precision
671       && CONST_INT_P (XEXP (op, 1))
672       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
673       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
674       && UINTVAL (XEXP (op, 1)) < precision)
675     return simplify_gen_binary (ASHIFTRT, mode,
676 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
677 
678   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
679      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
680      the outer subreg is effectively a truncation to the original mode.  */
681   if ((GET_CODE (op) == LSHIFTRT
682        || GET_CODE (op) == ASHIFTRT)
683       && CONST_INT_P (XEXP (op, 1))
684       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
685       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
686       && UINTVAL (XEXP (op, 1)) < precision)
687     return simplify_gen_binary (LSHIFTRT, mode,
688 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
689 
690   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
691      (ashift:QI (x:QI) C), where C is a suitable small constant and
692      the outer subreg is effectively a truncation to the original mode.  */
693   if (GET_CODE (op) == ASHIFT
694       && CONST_INT_P (XEXP (op, 1))
695       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
697       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698       && UINTVAL (XEXP (op, 1)) < precision)
699     return simplify_gen_binary (ASHIFT, mode,
700 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
701 
702   /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
703      (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
704      and C2.  */
705   if (GET_CODE (op) == AND
706       && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
707 	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
708       && CONST_INT_P (XEXP (XEXP (op, 0), 1))
709       && CONST_INT_P (XEXP (op, 1)))
710     {
711       rtx op0 = (XEXP (XEXP (op, 0), 0));
712       rtx shift_op = XEXP (XEXP (op, 0), 1);
713       rtx mask_op = XEXP (op, 1);
714       unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
715       unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
716 
717       if (shift < precision
718 	  /* If doing this transform works for an X with all bits set,
719 	     it works for any X.  */
720 	  && ((GET_MODE_MASK (mode) >> shift) & mask)
721 	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
722 	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
723 	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
724 	{
725 	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
726 	  return simplify_gen_binary (AND, mode, op0, mask_op);
727 	}
728     }
729 
730   /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
731      (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
732      changing len.  */
733   if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
734       && REG_P (XEXP (op, 0))
735       && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
736       && CONST_INT_P (XEXP (op, 1))
737       && CONST_INT_P (XEXP (op, 2)))
738     {
739       rtx op0 = XEXP (op, 0);
740       unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
741       unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
742       if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
743 	{
744 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
745 	  if (op0)
746 	    {
747 	      pos -= op_precision - precision;
748 	      return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
749 					   XEXP (op, 1), GEN_INT (pos));
750 	    }
751 	}
752       else if (!BITS_BIG_ENDIAN && precision >= len + pos)
753 	{
754 	  op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 	  if (op0)
756 	    return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
757 					 XEXP (op, 1), XEXP (op, 2));
758 	}
759     }
760 
761   /* Recognize a word extraction from a multi-word subreg.  */
762   if ((GET_CODE (op) == LSHIFTRT
763        || GET_CODE (op) == ASHIFTRT)
764       && SCALAR_INT_MODE_P (mode)
765       && SCALAR_INT_MODE_P (op_mode)
766       && precision >= BITS_PER_WORD
767       && 2 * precision <= op_precision
768       && CONST_INT_P (XEXP (op, 1))
769       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
770       && UINTVAL (XEXP (op, 1)) < op_precision)
771     {
772       poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
773       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
774       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
775 				  (WORDS_BIG_ENDIAN
776 				   ? byte - shifted_bytes
777 				   : byte + shifted_bytes));
778     }
779 
780   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
781      and try replacing the TRUNCATE and shift with it.  Don't do this
782      if the MEM has a mode-dependent address.  */
783   if ((GET_CODE (op) == LSHIFTRT
784        || GET_CODE (op) == ASHIFTRT)
785       && is_a <scalar_int_mode> (mode, &int_mode)
786       && is_a <scalar_int_mode> (op_mode, &int_op_mode)
787       && MEM_P (XEXP (op, 0))
788       && CONST_INT_P (XEXP (op, 1))
789       && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
790       && INTVAL (XEXP (op, 1)) > 0
791       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
792       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
793 				     MEM_ADDR_SPACE (XEXP (op, 0)))
794       && ! MEM_VOLATILE_P (XEXP (op, 0))
795       && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
796 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
797     {
798       poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
799       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800       return adjust_address_nv (XEXP (op, 0), int_mode,
801 				(WORDS_BIG_ENDIAN
802 				 ? byte - shifted_bytes
803 				 : byte + shifted_bytes));
804     }
805 
806   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
807      (OP:SI foo:SI) if OP is NEG or ABS.  */
808   if ((GET_CODE (op) == ABS
809        || GET_CODE (op) == NEG)
810       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
811 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
812       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813     return simplify_gen_unary (GET_CODE (op), mode,
814 			       XEXP (XEXP (op, 0), 0), mode);
815 
816   /* Simplifications of (truncate:A (subreg:B X 0)).  */
817   if (GET_CODE (op) == SUBREG
818       && is_a <scalar_int_mode> (mode, &int_mode)
819       && SCALAR_INT_MODE_P (op_mode)
820       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
821       && subreg_lowpart_p (op))
822     {
823       /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X).  */
824       if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
825 	{
826 	  rtx inner = XEXP (SUBREG_REG (op), 0);
827 	  if (GET_MODE_PRECISION (int_mode)
828 	      <= GET_MODE_PRECISION (subreg_mode))
829 	    return simplify_gen_unary (TRUNCATE, int_mode, inner,
830 				       GET_MODE (inner));
831 	  else
832 	    /* If subreg above is paradoxical and C is narrower
833 	       than A, return (subreg:A (truncate:C X) 0).  */
834 	    return simplify_gen_subreg (int_mode, SUBREG_REG (op),
835 					subreg_mode, 0);
836 	}
837 
838       /* Simplifications of (truncate:A (subreg:B X:C 0)) with
839 	 paradoxical subregs (B is wider than C).  */
840       if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
841 	{
842 	  unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
843 	  unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
844 	  if (int_op_prec > subreg_prec)
845 	    {
846 	      if (int_mode == subreg_mode)
847 		return SUBREG_REG (op);
848 	      if (GET_MODE_PRECISION (int_mode) < subreg_prec)
849 		return simplify_gen_unary (TRUNCATE, int_mode,
850 					   SUBREG_REG (op), subreg_mode);
851 	    }
852 	  /* Simplification of (truncate:A (subreg:B X:C 0)) where
853  	     A is narrower than B and B is narrower than C.  */
854 	  else if (int_op_prec < subreg_prec
855 		   && GET_MODE_PRECISION (int_mode) < int_op_prec)
856 	    return simplify_gen_unary (TRUNCATE, int_mode,
857 				       SUBREG_REG (op), subreg_mode);
858 	}
859     }
860 
861   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
862   if (GET_CODE (op) == TRUNCATE)
863     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 			       GET_MODE (XEXP (op, 0)));
865 
866   /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867      in mode A.  */
868   if (GET_CODE (op) == IOR
869       && SCALAR_INT_MODE_P (mode)
870       && SCALAR_INT_MODE_P (op_mode)
871       && CONST_INT_P (XEXP (op, 1))
872       && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873     return constm1_rtx;
874 
875   return NULL_RTX;
876 }
877 
878 /* Try to simplify a unary operation CODE whose output mode is to be
879    MODE with input operand OP whose mode was originally OP_MODE.
880    Return zero if no simplification can be made.  */
881 rtx
882 simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
883 					    rtx op, machine_mode op_mode)
884 {
885   rtx trueop, tem;
886 
887   trueop = avoid_constant_pool_reference (op);
888 
889   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890   if (tem)
891     return tem;
892 
893   return simplify_unary_operation_1 (code, mode, op);
894 }
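
/* Illustrative only: with a constant operand the folding is done by
   simplify_const_unary_operation, e.g.

     simplify_unary_operation (NEG, SImode, gen_int_mode (5, SImode), SImode)

   is expected to return (const_int -5); NULL_RTX is returned whenever no
   simplification applies.  */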
895 
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897    to be exact.  */
898 
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
901 {
902   machine_mode op0_mode = GET_MODE (XEXP (op, 0));
903   /* Constants can reach here with -frounding-math; if they do, then
904      the conversion isn't exact.  */
905   if (op0_mode == VOIDmode)
906     return false;
907   int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
908   int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
909   int in_bits = in_prec;
910   if (HWI_COMPUTABLE_MODE_P (op0_mode))
911     {
912       unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
913       if (GET_CODE (op) == FLOAT)
914 	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
915       else if (GET_CODE (op) == UNSIGNED_FLOAT)
916 	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
917       else
918 	gcc_unreachable ();
919       in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
920     }
921   return in_bits <= out_bits;
922 }
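
/* For example, a 32-bit SImode operand always fits in the 53-bit
   significand of an IEEE DFmode, so (float:DF (reg:SI ...)) is exact,
   whereas a DImode operand converted to SFmode (24-bit significand)
   generally is not, unless nonzero_bits/num_sign_bit_copies prove the
   value narrow enough.  */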
923 
924 /* Perform some simplifications we can do even if the operands
925    aren't constant.  */
926 rtx
927 simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
928 					      rtx op)
929 {
930   enum rtx_code reversed;
931   rtx temp, elt, base, step;
932   scalar_int_mode inner, int_mode, op_mode, op0_mode;
933 
934   switch (code)
935     {
936     case NOT:
937       /* (not (not X)) == X.  */
938       if (GET_CODE (op) == NOT)
939 	return XEXP (op, 0);
940 
941       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
942 	 comparison is all ones.   */
943       if (COMPARISON_P (op)
944 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
945 	  && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
946 	return simplify_gen_relational (reversed, mode, VOIDmode,
947 					XEXP (op, 0), XEXP (op, 1));
948 
949       /* (not (plus X -1)) can become (neg X).  */
950       if (GET_CODE (op) == PLUS
951 	  && XEXP (op, 1) == constm1_rtx)
952 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
953 
954       /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
955 	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
956 	 and MODE_VECTOR_INT.  */
957       if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
958 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
959 				    CONSTM1_RTX (mode));
960 
961       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
962       if (GET_CODE (op) == XOR
963 	  && CONST_INT_P (XEXP (op, 1))
964 	  && (temp = simplify_unary_operation (NOT, mode,
965 					       XEXP (op, 1), mode)) != 0)
966 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
967 
968       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
969       if (GET_CODE (op) == PLUS
970 	  && CONST_INT_P (XEXP (op, 1))
971 	  && mode_signbit_p (mode, XEXP (op, 1))
972 	  && (temp = simplify_unary_operation (NOT, mode,
973 					       XEXP (op, 1), mode)) != 0)
974 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
975 
976 
977       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
978 	 operands other than 1, but that is not valid.  We could do a
979 	 similar simplification for (not (lshiftrt C X)) where C is
980 	 just the sign bit, but this doesn't seem common enough to
981 	 bother with.  */
982       if (GET_CODE (op) == ASHIFT
983 	  && XEXP (op, 0) == const1_rtx)
984 	{
985 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
986 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
987 	}
988 
989       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
990 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
991 	 so we can perform the above simplification.  */
992       if (STORE_FLAG_VALUE == -1
993 	  && is_a <scalar_int_mode> (mode, &int_mode)
994 	  && GET_CODE (op) == ASHIFTRT
995 	  && CONST_INT_P (XEXP (op, 1))
996 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
997 	return simplify_gen_relational (GE, int_mode, VOIDmode,
998 					XEXP (op, 0), const0_rtx);
999 
1000 
1001       if (partial_subreg_p (op)
1002 	  && subreg_lowpart_p (op)
1003 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
1004 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1005 	{
1006 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1007 	  rtx x;
1008 
1009 	  x = gen_rtx_ROTATE (inner_mode,
1010 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
1011 						  inner_mode),
1012 			      XEXP (SUBREG_REG (op), 1));
1013 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1014 	  if (temp)
1015 	    return temp;
1016 	}
1017 
1018       /* Apply De Morgan's laws to reduce number of patterns for machines
1019 	 with negating logical insns (and-not, nand, etc.).  If result has
1020 	 only one NOT, put it first, since that is how the patterns are
1021 	 coded.  */
1022       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1023 	{
1024 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1025 	  machine_mode op_mode;
1026 
1027 	  op_mode = GET_MODE (in1);
1028 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1029 
1030 	  op_mode = GET_MODE (in2);
1031 	  if (op_mode == VOIDmode)
1032 	    op_mode = mode;
1033 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1034 
1035 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1036 	    std::swap (in1, in2);
1037 
1038 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1039 				 mode, in1, in2);
1040 	}
1041 
1042       /* (not (bswap x)) -> (bswap (not x)).  */
1043       if (GET_CODE (op) == BSWAP)
1044 	{
1045 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1046 	  return simplify_gen_unary (BSWAP, mode, x, mode);
1047 	}
1048       break;
1049 
1050     case NEG:
1051       /* (neg (neg X)) == X.  */
1052       if (GET_CODE (op) == NEG)
1053 	return XEXP (op, 0);
1054 
1055       /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1056 	 If comparison is not reversible use
1057 	 x ? y : (neg y).  */
1058       if (GET_CODE (op) == IF_THEN_ELSE)
1059 	{
1060 	  rtx cond = XEXP (op, 0);
1061 	  rtx true_rtx = XEXP (op, 1);
1062 	  rtx false_rtx = XEXP (op, 2);
1063 
1064 	  if ((GET_CODE (true_rtx) == NEG
1065 	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1066 	       || (GET_CODE (false_rtx) == NEG
1067 		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1068 	    {
1069 	      if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1070 		temp = reversed_comparison (cond, mode);
1071 	      else
1072 		{
1073 		  temp = cond;
1074 		  std::swap (true_rtx, false_rtx);
1075 		}
1076 	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
1077 					    mode, temp, true_rtx, false_rtx);
1078 	    }
1079 	}
1080 
1081       /* (neg (plus X 1)) can become (not X).  */
1082       if (GET_CODE (op) == PLUS
1083 	  && XEXP (op, 1) == const1_rtx)
1084 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1085 
1086       /* Similarly, (neg (not X)) is (plus X 1).  */
1087       if (GET_CODE (op) == NOT)
1088 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1089 				    CONST1_RTX (mode));
1090 
1091       /* (neg (minus X Y)) can become (minus Y X).  This transformation
1092 	 isn't safe for modes with signed zeros, since if X and Y are
1093 	 both +0, (minus Y X) is the same as (minus X Y).  If the
1094 	 rounding mode is towards +infinity (or -infinity) then the two
1095 	 expressions will be rounded differently.  */
1096       if (GET_CODE (op) == MINUS
1097 	  && !HONOR_SIGNED_ZEROS (mode)
1098 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1099 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1100 
1101       if (GET_CODE (op) == PLUS
1102 	  && !HONOR_SIGNED_ZEROS (mode)
1103 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 	{
1105 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
1106 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
1107 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1108 	    {
1109 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1110 	      if (temp)
1111 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1112 	    }
1113 
1114 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
1115 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1116 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1117 	}
1118 
1119       /* (neg (mult A B)) becomes (mult A (neg B)).
1120 	 This works even for floating-point values.  */
1121       if (GET_CODE (op) == MULT
1122 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1123 	{
1124 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1125 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1126 	}
1127 
1128       /* NEG commutes with ASHIFT since it is multiplication.  Only do
1129 	 this if we can then eliminate the NEG (e.g., if the operand
1130 	 is a constant).  */
1131       if (GET_CODE (op) == ASHIFT)
1132 	{
1133 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1134 	  if (temp)
1135 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1136 	}
1137 
1138       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1139 	 C is equal to the width of MODE minus 1.  */
1140       if (GET_CODE (op) == ASHIFTRT
1141 	  && CONST_INT_P (XEXP (op, 1))
1142 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1143 	return simplify_gen_binary (LSHIFTRT, mode,
1144 				    XEXP (op, 0), XEXP (op, 1));
1145 
1146       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1147 	 C is equal to the width of MODE minus 1.  */
1148       if (GET_CODE (op) == LSHIFTRT
1149 	  && CONST_INT_P (XEXP (op, 1))
1150 	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1151 	return simplify_gen_binary (ASHIFTRT, mode,
1152 				    XEXP (op, 0), XEXP (op, 1));
1153 
1154       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1155       if (GET_CODE (op) == XOR
1156 	  && XEXP (op, 1) == const1_rtx
1157 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1158 	return plus_constant (mode, XEXP (op, 0), -1);
1159 
1160       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1161       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1162       if (GET_CODE (op) == LT
1163 	  && XEXP (op, 1) == const0_rtx
1164 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1165 	{
1166 	  int_mode = as_a <scalar_int_mode> (mode);
1167 	  int isize = GET_MODE_PRECISION (inner);
1168 	  if (STORE_FLAG_VALUE == 1)
1169 	    {
1170 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1171 					  gen_int_shift_amount (inner,
1172 								isize - 1));
1173 	      if (int_mode == inner)
1174 		return temp;
1175 	      if (GET_MODE_PRECISION (int_mode) > isize)
1176 		return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1177 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1178 	    }
1179 	  else if (STORE_FLAG_VALUE == -1)
1180 	    {
1181 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1182 					  gen_int_shift_amount (inner,
1183 								isize - 1));
1184 	      if (int_mode == inner)
1185 		return temp;
1186 	      if (GET_MODE_PRECISION (int_mode) > isize)
1187 		return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1188 	      return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1189 	    }
1190 	}
1191 
1192       if (vec_series_p (op, &base, &step))
1193 	{
1194 	  /* Only create a new series if we can simplify both parts.  In other
1195 	     cases this isn't really a simplification, and it's not necessarily
1196 	     a win to replace a vector operation with a scalar operation.  */
1197 	  scalar_mode inner_mode = GET_MODE_INNER (mode);
1198 	  base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1199 	  if (base)
1200 	    {
1201 	      step = simplify_unary_operation (NEG, inner_mode,
1202 					       step, inner_mode);
1203 	      if (step)
1204 		return gen_vec_series (mode, base, step);
1205 	    }
1206 	}
1207       break;
1208 
1209     case TRUNCATE:
1210       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1211 	 with the umulXi3_highpart patterns.  */
1212       if (GET_CODE (op) == LSHIFTRT
1213 	  && GET_CODE (XEXP (op, 0)) == MULT)
1214 	break;
1215 
1216       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1217 	{
1218 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1219 	    {
1220 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1221 	      if (temp)
1222 		return temp;
1223 	    }
1224 	  /* We can't handle truncation to a partial integer mode here
1225 	     because we don't know the real bitsize of the partial
1226 	     integer mode.  */
1227 	  break;
1228 	}
1229 
1230       if (GET_MODE (op) != VOIDmode)
1231 	{
1232 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1233 	  if (temp)
1234 	    return temp;
1235 	}
1236 
1237       /* If we know that the value is already truncated, we can
1238 	 replace the TRUNCATE with a SUBREG.  */
1239       if (known_eq (GET_MODE_NUNITS (mode), 1)
1240 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1241 	      || truncated_to_mode (mode, op)))
1242 	{
1243 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 	  if (temp)
1245 	    return temp;
1246 	}
1247 
1248       /* A truncate of a comparison can be replaced with a subreg if
1249          STORE_FLAG_VALUE permits.  This is like the previous test,
1250          but it works even if the comparison is done in a mode larger
1251          than HOST_BITS_PER_WIDE_INT.  */
1252       if (HWI_COMPUTABLE_MODE_P (mode)
1253 	  && COMPARISON_P (op)
1254 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
1255 	  && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1256 	{
1257 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1258 	  if (temp)
1259 	    return temp;
1260 	}
1261 
1262       /* A truncate of a memory is just loading the low part of the memory
1263 	 if we are not changing the meaning of the address. */
1264       if (GET_CODE (op) == MEM
1265 	  && !VECTOR_MODE_P (mode)
1266 	  && !MEM_VOLATILE_P (op)
1267 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1268 	{
1269 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1270 	  if (temp)
1271 	    return temp;
1272 	}
1273 
1274       /* Check for useless truncation.  */
1275       if (GET_MODE (op) == mode)
1276 	return op;
1277       break;
1278 
1279     case FLOAT_TRUNCATE:
1280       /* Check for useless truncation.  */
1281       if (GET_MODE (op) == mode)
1282 	return op;
1283 
1284       if (DECIMAL_FLOAT_MODE_P (mode))
1285 	break;
1286 
1287       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1288       if (GET_CODE (op) == FLOAT_EXTEND
1289 	  && GET_MODE (XEXP (op, 0)) == mode)
1290 	return XEXP (op, 0);
1291 
1292       /* (float_truncate:SF (float_truncate:DF foo:XF))
1293          = (float_truncate:SF foo:XF).
1294 	 This may eliminate double rounding and change the result,
	 so it is unsafe.
1295 
1296          (float_truncate:SF (float_extend:XF foo:DF))
1297          = (float_truncate:SF foo:DF).
1298 
1299          (float_truncate:DF (float_extend:XF foo:SF))
1300          = (float_extend:DF foo:SF).  */
1301       if ((GET_CODE (op) == FLOAT_TRUNCATE
1302 	   && flag_unsafe_math_optimizations)
1303 	  || GET_CODE (op) == FLOAT_EXTEND)
1304 	return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1305 	  			   > GET_MODE_UNIT_SIZE (mode)
1306 	  			   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1307 				   mode,
1308 				   XEXP (op, 0), mode);
1309 
1310       /*  (float_truncate (float x)) is (float x)  */
1311       if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1312 	  && (flag_unsafe_math_optimizations
1313 	      || exact_int_to_float_conversion_p (op)))
1314 	return simplify_gen_unary (GET_CODE (op), mode,
1315 				   XEXP (op, 0),
1316 				   GET_MODE (XEXP (op, 0)));
1317 
1318       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1319 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1320       if ((GET_CODE (op) == ABS
1321 	   || GET_CODE (op) == NEG)
1322 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1323 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1324 	return simplify_gen_unary (GET_CODE (op), mode,
1325 				   XEXP (XEXP (op, 0), 0), mode);
1326 
1327       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1328 	 is (float_truncate:SF x).  */
1329       if (GET_CODE (op) == SUBREG
1330 	  && subreg_lowpart_p (op)
1331 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1332 	return SUBREG_REG (op);
1333       break;
1334 
1335     case FLOAT_EXTEND:
1336       /* Check for useless extension.  */
1337       if (GET_MODE (op) == mode)
1338 	return op;
1339 
1340       if (DECIMAL_FLOAT_MODE_P (mode))
1341 	break;
1342 
1343       /*  (float_extend (float_extend x)) is (float_extend x)
1344 
1345 	  (float_extend (float x)) is (float x) assuming that double
1346 	  rounding can't happen.
1347           */
1348       if (GET_CODE (op) == FLOAT_EXTEND
1349 	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1350 	      && exact_int_to_float_conversion_p (op)))
1351 	return simplify_gen_unary (GET_CODE (op), mode,
1352 				   XEXP (op, 0),
1353 				   GET_MODE (XEXP (op, 0)));
1354 
1355       break;
1356 
1357     case ABS:
1358       /* (abs (neg <foo>)) -> (abs <foo>) */
1359       if (GET_CODE (op) == NEG)
1360 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1361 				   GET_MODE (XEXP (op, 0)));
1362 
1363       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1364          do nothing.  */
1365       if (GET_MODE (op) == VOIDmode)
1366 	break;
1367 
1368       /* If operand is something known to be positive, ignore the ABS.  */
1369       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1370 	  || val_signbit_known_clear_p (GET_MODE (op),
1371 					nonzero_bits (op, GET_MODE (op))))
1372 	return op;
1373 
1374       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1375       if (is_a <scalar_int_mode> (mode, &int_mode)
1376 	  && (num_sign_bit_copies (op, int_mode)
1377 	      == GET_MODE_PRECISION (int_mode)))
1378 	return gen_rtx_NEG (int_mode, op);
1379 
1380       break;
1381 
1382     case FFS:
1383       /* (ffs (*_extend <X>)) = (ffs <X>) */
1384       if (GET_CODE (op) == SIGN_EXTEND
1385 	  || GET_CODE (op) == ZERO_EXTEND)
1386 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1387 				   GET_MODE (XEXP (op, 0)));
1388       break;
1389 
1390     case POPCOUNT:
1391       switch (GET_CODE (op))
1392 	{
1393 	case BSWAP:
1394 	case ZERO_EXTEND:
1395 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1396 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1397 				     GET_MODE (XEXP (op, 0)));
1398 
1399 	case ROTATE:
1400 	case ROTATERT:
1401 	  /* Rotations don't affect popcount.  */
1402 	  if (!side_effects_p (XEXP (op, 1)))
1403 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1404 				       GET_MODE (XEXP (op, 0)));
1405 	  break;
1406 
1407 	default:
1408 	  break;
1409 	}
1410       break;
1411 
1412     case PARITY:
1413       switch (GET_CODE (op))
1414 	{
1415 	case NOT:
1416 	case BSWAP:
1417 	case ZERO_EXTEND:
1418 	case SIGN_EXTEND:
1419 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1420 				     GET_MODE (XEXP (op, 0)));
1421 
1422 	case ROTATE:
1423 	case ROTATERT:
1424 	  /* Rotations don't affect parity.  */
1425 	  if (!side_effects_p (XEXP (op, 1)))
1426 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1427 				       GET_MODE (XEXP (op, 0)));
1428 	  break;
1429 
1430 	case PARITY:
1431 	  /* (parity (parity x)) -> parity (x).  */
1432 	  return op;
1433 
1434 	default:
1435 	  break;
1436 	}
1437       break;
1438 
1439     case BSWAP:
1440       /* (bswap (bswap x)) -> x.  */
1441       if (GET_CODE (op) == BSWAP)
1442 	return XEXP (op, 0);
1443       break;
1444 
1445     case FLOAT:
1446       /* (float (sign_extend <X>)) = (float <X>).  */
1447       if (GET_CODE (op) == SIGN_EXTEND)
1448 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1449 				   GET_MODE (XEXP (op, 0)));
1450       break;
1451 
1452     case SIGN_EXTEND:
1453       /* Check for useless extension.  */
1454       if (GET_MODE (op) == mode)
1455 	return op;
1456 
1457       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1458 	 becomes just the MINUS if its mode is MODE.  This allows
1459 	 folding switch statements on machines using casesi (such as
1460 	 the VAX).  */
1461       if (GET_CODE (op) == TRUNCATE
1462 	  && GET_MODE (XEXP (op, 0)) == mode
1463 	  && GET_CODE (XEXP (op, 0)) == MINUS
1464 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1465 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1466 	return XEXP (op, 0);
1467 
1468       /* Extending a widening multiplication should be canonicalized to
1469 	 a wider widening multiplication.  */
1470       if (GET_CODE (op) == MULT)
1471 	{
1472 	  rtx lhs = XEXP (op, 0);
1473 	  rtx rhs = XEXP (op, 1);
1474 	  enum rtx_code lcode = GET_CODE (lhs);
1475 	  enum rtx_code rcode = GET_CODE (rhs);
1476 
1477 	  /* Widening multiplies usually extend both operands, but sometimes
1478 	     they use a shift to extract a portion of a register.  */
1479 	  if ((lcode == SIGN_EXTEND
1480 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1481 	      && (rcode == SIGN_EXTEND
1482 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1483 	    {
1484 	      machine_mode lmode = GET_MODE (lhs);
1485 	      machine_mode rmode = GET_MODE (rhs);
1486 	      int bits;
1487 
1488 	      if (lcode == ASHIFTRT)
1489 		/* Number of bits not shifted off the end.  */
1490 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1491 			- INTVAL (XEXP (lhs, 1)));
1492 	      else /* lcode == SIGN_EXTEND */
1493 		/* Size of inner mode.  */
1494 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1495 
1496 	      if (rcode == ASHIFTRT)
1497 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1498 			 - INTVAL (XEXP (rhs, 1)));
1499 	      else /* rcode == SIGN_EXTEND */
1500 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1501 
1502 	      /* We can only widen multiplies if the result is mathematically
1503 		 equivalent.  I.e. if overflow was impossible.  */
1504 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1505 		return simplify_gen_binary
1506 			 (MULT, mode,
1507 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1508 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1509 	    }
1510 	}
1511 
1512       /* Check for a sign extension of a subreg of a promoted
1513 	 variable, where the promotion is sign-extended, and the
1514 	 target mode is the same as the variable's promotion.  */
1515       if (GET_CODE (op) == SUBREG
1516 	  && SUBREG_PROMOTED_VAR_P (op)
1517 	  && SUBREG_PROMOTED_SIGNED_P (op))
1518 	{
1519 	  rtx subreg = SUBREG_REG (op);
1520 	  machine_mode subreg_mode = GET_MODE (subreg);
1521 	  if (!paradoxical_subreg_p (mode, subreg_mode))
1522 	    {
1523 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1524 	      if (temp)
1525 		{
1526 		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
1527 		  if (partial_subreg_p (temp))
1528 		    {
1529 		      SUBREG_PROMOTED_VAR_P (temp) = 1;
1530 		      SUBREG_PROMOTED_SET (temp, 1);
1531 		    }
1532 		  return temp;
1533 		}
1534 	    }
1535 	  else
1536 	    /* Sign-extending a sign-extended subreg.  */
1537 	    return simplify_gen_unary (SIGN_EXTEND, mode,
1538 				       subreg, subreg_mode);
1539 	}
1540 
1541       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1542 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1543       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1544 	{
1545 	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1546 		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1547 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1548 				     GET_MODE (XEXP (op, 0)));
1549 	}
1550 
1551       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1552 	 is (sign_extend:M (subreg:O <X>)) if there is a mode O with
1553 	 GET_MODE_PRECISION (N) - I bits.
1554 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1555 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1556       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1557 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1558 	  && is_a <scalar_int_mode> (mode, &int_mode)
1559 	  && CONST_INT_P (XEXP (op, 1))
1560 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1561 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1562 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1563 	{
1564 	  scalar_int_mode tmode;
1565 	  gcc_assert (GET_MODE_PRECISION (int_mode)
1566 		      > GET_MODE_PRECISION (op_mode));
1567 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1568 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1569 	    {
1570 	      rtx inner =
1571 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1572 	      if (inner)
1573 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1574 					   ? SIGN_EXTEND : ZERO_EXTEND,
1575 					   int_mode, inner, tmode);
1576 	    }
1577 	}
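
      /* E.g. (sign_extend:DI (ashiftrt:SI (ashift:SI X (const_int 24))
					   (const_int 24)))
	 becomes a SIGN_EXTEND:DI of the QImode lowpart of X: the shift pair
	 merely sign-extends the low 8 bits of X within SImode, which the
	 wider extension can do directly.  */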
1578 
1579       /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1580          (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0,
	 since the logical shift already clears the sign bit of the result,
	 making the two extensions equivalent.  */
1581       if (GET_CODE (op) == LSHIFTRT
1582 	  && CONST_INT_P (XEXP (op, 1))
1583 	  && XEXP (op, 1) != const0_rtx)
1584 	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1585 
1586       /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1587 	 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1588 	 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1589 	 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1590 	 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1591 	 wider than O.  */
1592       if (GET_CODE (op) == TRUNCATE
1593 	  && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1594 	  && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1595 	{
1596 	  scalar_int_mode m_mode, n_mode, o_mode;
1597 	  rtx old_shift = XEXP (op, 0);
1598 	  if (is_a <scalar_int_mode> (mode, &m_mode)
1599 	      && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1600 	      && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1601 	      && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1602 		 == INTVAL (XEXP (old_shift, 1)))
1603 	    {
1604 	      rtx new_shift = simplify_gen_binary (ASHIFTRT,
1605 						   GET_MODE (old_shift),
1606 						   XEXP (old_shift, 0),
1607 						   XEXP (old_shift, 1));
1608 	      if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1609 		return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1610 					   GET_MODE (new_shift));
1611 	      if (mode != GET_MODE (new_shift))
1612 		return simplify_gen_unary (TRUNCATE, mode, new_shift,
1613 					   GET_MODE (new_shift));
1614 	      return new_shift;
1615 	    }
1616 	}
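
      /* For instance,
	   (sign_extend:DI (truncate:SI (lshiftrt:DI X (const_int 32))))
	 becomes (ashiftrt:DI X (const_int 32)): both forms leave bits 32-63
	 of X in the low half of the result and replicate X's sign bit in
	 the high half.  */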
1617 
1618 #if defined(POINTERS_EXTEND_UNSIGNED)
1619       /* As we do not know which address space the pointer is referring to,
1620 	 we can do this only if the target does not support different pointer
1621 	 or address modes depending on the address space.  */
1622       if (target_default_pointer_address_modes_p ()
1623 	  && ! POINTERS_EXTEND_UNSIGNED
1624 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1625 	  && (CONSTANT_P (op)
1626 	      || (GET_CODE (op) == SUBREG
1627 		  && REG_P (SUBREG_REG (op))
1628 		  && REG_POINTER (SUBREG_REG (op))
1629 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1630 	  && !targetm.have_ptr_extend ())
1631 	{
1632 	  temp
1633 	    = convert_memory_address_addr_space_1 (Pmode, op,
1634 						   ADDR_SPACE_GENERIC, false,
1635 						   true);
1636 	  if (temp)
1637 	    return temp;
1638 	}
1639 #endif
1640       break;
1641 
1642     case ZERO_EXTEND:
1643       /* Check for useless extension.  */
1644       if (GET_MODE (op) == mode)
1645 	return op;
1646 
1647       /* Check for a zero extension of a subreg of a promoted
1648 	 variable, where the promotion is zero-extended, and the
1649 	 target mode is the same as the variable's promotion.  */
1650       if (GET_CODE (op) == SUBREG
1651 	  && SUBREG_PROMOTED_VAR_P (op)
1652 	  && SUBREG_PROMOTED_UNSIGNED_P (op))
1653 	{
1654 	  rtx subreg = SUBREG_REG (op);
1655 	  machine_mode subreg_mode = GET_MODE (subreg);
1656 	  if (!paradoxical_subreg_p (mode, subreg_mode))
1657 	    {
1658 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1659 	      if (temp)
1660 		{
1661 		  /* Preserve SUBREG_PROMOTED_VAR_P.  */
1662 		  if (partial_subreg_p (temp))
1663 		    {
1664 		      SUBREG_PROMOTED_VAR_P (temp) = 1;
1665 		      SUBREG_PROMOTED_SET (temp, 0);
1666 		    }
1667 		  return temp;
1668 		}
1669 	    }
1670 	  else
1671 	    /* Zero-extending a zero-extended subreg.  */
1672 	    return simplify_gen_unary (ZERO_EXTEND, mode,
1673 				       subreg, subreg_mode);
1674 	}
1675 
1676       /* Extending a widening multiplication should be canonicalized to
1677 	 a wider widening multiplication.  */
1678       if (GET_CODE (op) == MULT)
1679 	{
1680 	  rtx lhs = XEXP (op, 0);
1681 	  rtx rhs = XEXP (op, 1);
1682 	  enum rtx_code lcode = GET_CODE (lhs);
1683 	  enum rtx_code rcode = GET_CODE (rhs);
1684 
1685 	  /* Widening multiplies usually extend both operands, but sometimes
1686 	     they use a shift to extract a portion of a register.  */
1687 	  if ((lcode == ZERO_EXTEND
1688 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1689 	      && (rcode == ZERO_EXTEND
1690 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1691 	    {
1692 	      machine_mode lmode = GET_MODE (lhs);
1693 	      machine_mode rmode = GET_MODE (rhs);
1694 	      int bits;
1695 
1696 	      if (lcode == LSHIFTRT)
1697 		/* Number of bits not shifted off the end.  */
1698 		bits = (GET_MODE_UNIT_PRECISION (lmode)
1699 			- INTVAL (XEXP (lhs, 1)));
1700 	      else /* lcode == ZERO_EXTEND */
1701 		/* Size of inner mode.  */
1702 		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1703 
1704 	      if (rcode == LSHIFTRT)
1705 		bits += (GET_MODE_UNIT_PRECISION (rmode)
1706 			 - INTVAL (XEXP (rhs, 1)));
1707 	      else /* rcode == ZERO_EXTEND */
1708 		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1709 
1710 	      /* We can only widen multiplies if the result is mathematically
1711 		 equivalent, i.e. if overflow was impossible.  */
1712 	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1713 		return simplify_gen_binary
1714 			 (MULT, mode,
1715 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1716 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1717 	    }
1718 	}
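
      /* The unsigned analogue of the SIGN_EXTEND case above: e.g. with
	 QImode operands,
	   (zero_extend:SI (mult:HI (zero_extend:HI (reg:QI a))
				    (zero_extend:HI (reg:QI b))))
	 becomes
	   (mult:SI (zero_extend:SI (reg:QI a))
		    (zero_extend:SI (reg:QI b)))
	 since 255 * 255 = 65025 still fits in 16 bits.  */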
1719 
1720       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1721       if (GET_CODE (op) == ZERO_EXTEND)
1722 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1723 				   GET_MODE (XEXP (op, 0)));
1724 
1725       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1726 	 is (zero_extend:M (subreg:O <X>)) if there is a mode O with
1727 	 GET_MODE_PRECISION (N) - I bits.  */
1728       if (GET_CODE (op) == LSHIFTRT
1729 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1730 	  && is_a <scalar_int_mode> (mode, &int_mode)
1731 	  && CONST_INT_P (XEXP (op, 1))
1732 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1733 	  && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1734 	      GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1735 	{
1736 	  scalar_int_mode tmode;
1737 	  if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1738 				 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1739 	    {
1740 	      rtx inner =
1741 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1742 	      if (inner)
1743 		return simplify_gen_unary (ZERO_EXTEND, int_mode,
1744 					   inner, tmode);
1745 	    }
1746 	}
1747 
1748       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1749 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1750 	 of mode N.  E.g.
1751 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1752 	 (and:SI (reg:SI) (const_int 63)).  */
1753       if (partial_subreg_p (op)
1754 	  && is_a <scalar_int_mode> (mode, &int_mode)
1755 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1756 	  && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1757 	  && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1758 	  && subreg_lowpart_p (op)
1759 	  && (nonzero_bits (SUBREG_REG (op), op0_mode)
1760 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1761 	{
1762 	  if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1763 	    return SUBREG_REG (op);
1764 	  return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1765 				     op0_mode);
1766 	}
1767 
1768 #if defined(POINTERS_EXTEND_UNSIGNED)
1769       /* As we do not know which address space the pointer is referring to,
1770 	 we can do this only if the target does not support different pointer
1771 	 or address modes depending on the address space.  */
1772       if (target_default_pointer_address_modes_p ()
1773 	  && POINTERS_EXTEND_UNSIGNED > 0
1774 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1775 	  && (CONSTANT_P (op)
1776 	      || (GET_CODE (op) == SUBREG
1777 		  && REG_P (SUBREG_REG (op))
1778 		  && REG_POINTER (SUBREG_REG (op))
1779 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1780 	  && !targetm.have_ptr_extend ())
1781 	{
1782 	  temp
1783 	    = convert_memory_address_addr_space_1 (Pmode, op,
1784 						   ADDR_SPACE_GENERIC, false,
1785 						   true);
1786 	  if (temp)
1787 	    return temp;
1788 	}
1789 #endif
1790       break;
1791 
1792     default:
1793       break;
1794     }
1795 
1796   if (VECTOR_MODE_P (mode)
1797       && vec_duplicate_p (op, &elt)
1798       && code != VEC_DUPLICATE)
1799     {
1800       if (code == SIGN_EXTEND || code == ZERO_EXTEND)
1801 	/* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1802 	   operations by promoting VEC_DUPLICATE to the root of the expression
1803 	   (as far as possible).  */
1804 	temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
1805 				   elt, GET_MODE_INNER (GET_MODE (op)));
1806       else
1807 	/* Try applying the operator to ELT and see if that simplifies.
1808 	   We can duplicate the result if so.
1809 
1810 	   The reason we traditionally haven't used simplify_gen_unary
1811 	   for these codes is that it didn't necessarily seem to be a
1812 	   win to convert things like:
1813 
1814 	     (neg:V (vec_duplicate:V (reg:S R)))
1815 
1816 	   to:
1817 
1818 	     (vec_duplicate:V (neg:S (reg:S R)))
1819 
1820 	   The first might be done entirely in vector registers while the
1821 	   second might need a move between register files.
1822 
1823 	   However, there are also cases where promoting the vec_duplicate is
1824 	   more efficient, and there is definite value in having a canonical
1825 	   form when matching instruction patterns.  We should consider
1826 	   extending the simplify_gen_unary code above to more cases.  */
1827 	temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1828 					 elt, GET_MODE_INNER (GET_MODE (op)));
1829       if (temp)
1830 	return gen_vec_duplicate (mode, temp);
1831     }
1832 
1833   return 0;
1834 }
1835 
1836 /* Try to compute the value of a unary operation CODE whose output mode is to
1837    be MODE with input operand OP whose mode was originally OP_MODE.
1838    Return zero if the value cannot be computed.  */
1839 rtx
1840 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1841 				rtx op, machine_mode op_mode)
1842 {
1843   scalar_int_mode result_mode;
1844 
1845   if (code == VEC_DUPLICATE)
1846     {
1847       gcc_assert (VECTOR_MODE_P (mode));
1848       if (GET_MODE (op) != VOIDmode)
1849       {
1850 	if (!VECTOR_MODE_P (GET_MODE (op)))
1851 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1852 	else
1853 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1854 						(GET_MODE (op)));
1855       }
1856       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1857 	return gen_const_vec_duplicate (mode, op);
1858       if (GET_CODE (op) == CONST_VECTOR
1859 	  && (CONST_VECTOR_DUPLICATE_P (op)
1860 	      || CONST_VECTOR_NUNITS (op).is_constant ()))
1861 	{
1862 	  unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1863 				    ? CONST_VECTOR_NPATTERNS (op)
1864 				    : CONST_VECTOR_NUNITS (op).to_constant ());
1865 	  gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1866 	  rtx_vector_builder builder (mode, npatterns, 1);
1867 	  for (unsigned i = 0; i < npatterns; i++)
1868 	    builder.quick_push (CONST_VECTOR_ELT (op, i));
1869 	  return builder.build ();
1870 	}
1871     }
1872 
1873   if (VECTOR_MODE_P (mode)
1874       && GET_CODE (op) == CONST_VECTOR
1875       && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1876     {
1877       gcc_assert (GET_MODE (op) == op_mode);
1878 
1879       rtx_vector_builder builder;
1880       if (!builder.new_unary_operation (mode, op, false))
1881 	return 0;
1882 
1883       unsigned int count = builder.encoded_nelts ();
1884       for (unsigned int i = 0; i < count; i++)
1885 	{
1886 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1887 					    CONST_VECTOR_ELT (op, i),
1888 					    GET_MODE_INNER (op_mode));
1889 	  if (!x || !valid_for_const_vector_p (mode, x))
1890 	    return 0;
1891 	  builder.quick_push (x);
1892 	}
1893       return builder.build ();
1894     }
1895 
1896   /* The order of these tests is critical so that, for example, we don't
1897      check the wrong mode (input vs. output) for a conversion operation,
1898      such as FIX.  At some point, this should be simplified.  */
1899 
1900   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1901     {
1902       REAL_VALUE_TYPE d;
1903 
1904       if (op_mode == VOIDmode)
1905 	{
1906 	  /* A CONST_INT has VOIDmode as its mode.  We assume that all
1907 	     the bits of the constant are significant, though this is
1908 	     a dangerous assumption: CONST_INTs are often created and
1909 	     used with garbage in the bits outside of the precision of
1910 	     the implied mode of the const_int.  */
1911 	  op_mode = MAX_MODE_INT;
1912 	}
1913 
1914       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1915 
1916       /* Avoid the folding if flag_signaling_nans is on and the
1917          operand is a signaling NaN.  */
1918       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1919         return 0;
1920 
1921       d = real_value_truncate (mode, d);
1922 
1923       /* Avoid the folding if flag_rounding_math is on and the
1924 	 conversion is not exact.  */
1925       if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1926 	{
1927 	  bool fail = false;
1928 	  wide_int w = real_to_integer (&d, &fail,
1929 					GET_MODE_PRECISION
1930 					  (as_a <scalar_int_mode> (op_mode)));
1931 	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1932 	    return 0;
1933 	}
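
      /* E.g. under -frounding-math, (float:SF (const_int 16777217)) is not
	 folded: 2^24 + 1 is not exactly representable in SFmode, so the
	 result would depend on the runtime rounding mode.  */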
1934 
1935       return const_double_from_real_value (d, mode);
1936     }
1937   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1938     {
1939       REAL_VALUE_TYPE d;
1940 
1941       if (op_mode == VOIDmode)
1942 	{
1943 	  /* A CONST_INT has VOIDmode as its mode.  We assume that all
1944 	     the bits of the constant are significant, though this is
1945 	     a dangerous assumption: CONST_INTs are often created and
1946 	     used with garbage in the bits outside of the precision of
1947 	     the implied mode of the const_int.  */
1948 	  op_mode = MAX_MODE_INT;
1949 	}
1950 
1951       real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1952 
1953       /* Avoid the folding if flag_signaling_nans is on and the
1954          operand is a signaling NaN.  */
1955       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1956         return 0;
1957 
1958       d = real_value_truncate (mode, d);
1959 
1960       /* Avoid the folding if flag_rounding_math is on and the
1961 	 conversion is not exact.  */
1962       if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1963 	{
1964 	  bool fail = false;
1965 	  wide_int w = real_to_integer (&d, &fail,
1966 					GET_MODE_PRECISION
1967 					  (as_a <scalar_int_mode> (op_mode)));
1968 	  if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
1969 	    return 0;
1970 	}
1971 
1972       return const_double_from_real_value (d, mode);
1973     }
1974 
1975   if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1976     {
1977       unsigned int width = GET_MODE_PRECISION (result_mode);
1978       if (width > MAX_BITSIZE_MODE_ANY_INT)
1979 	return 0;
1980 
1981       wide_int result;
1982       scalar_int_mode imode = (op_mode == VOIDmode
1983 			       ? result_mode
1984 			       : as_a <scalar_int_mode> (op_mode));
1985       rtx_mode_t op0 = rtx_mode_t (op, imode);
1986       int int_value;
1987 
1988 #if TARGET_SUPPORTS_WIDE_INT == 0
1989       /* This assert keeps the simplification from producing a result
1990 	 that cannot be represented in a CONST_DOUBLE.  A lot of
1991 	 upstream callers expect that this function never fails to
1992 	 simplify something, so if you added this to the test above
1993 	 the code would die later anyway.  If this assert triggers,
1994 	 you just need to make the port support wide int.  */
1995       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1996 #endif
1997 
1998       switch (code)
1999 	{
2000 	case NOT:
2001 	  result = wi::bit_not (op0);
2002 	  break;
2003 
2004 	case NEG:
2005 	  result = wi::neg (op0);
2006 	  break;
2007 
2008 	case ABS:
2009 	  result = wi::abs (op0);
2010 	  break;
2011 
2012 	case FFS:
2013 	  result = wi::shwi (wi::ffs (op0), result_mode);
2014 	  break;
2015 
2016 	case CLZ:
2017 	  if (wi::ne_p (op0, 0))
2018 	    int_value = wi::clz (op0);
2019 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2020 	    return NULL_RTX;
2021 	  result = wi::shwi (int_value, result_mode);
2022 	  break;
2023 
2024 	case CLRSB:
2025 	  result = wi::shwi (wi::clrsb (op0), result_mode);
2026 	  break;
2027 
2028 	case CTZ:
2029 	  if (wi::ne_p (op0, 0))
2030 	    int_value = wi::ctz (op0);
2031 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2032 	    return NULL_RTX;
2033 	  result = wi::shwi (int_value, result_mode);
2034 	  break;
2035 
2036 	case POPCOUNT:
2037 	  result = wi::shwi (wi::popcount (op0), result_mode);
2038 	  break;
2039 
2040 	case PARITY:
2041 	  result = wi::shwi (wi::parity (op0), result_mode);
2042 	  break;
2043 
2044 	case BSWAP:
2045 	  result = wide_int (op0).bswap ();
2046 	  break;
2047 
2048 	case TRUNCATE:
2049 	case ZERO_EXTEND:
2050 	  result = wide_int::from (op0, width, UNSIGNED);
2051 	  break;
2052 
2053 	case SIGN_EXTEND:
2054 	  result = wide_int::from (op0, width, SIGNED);
2055 	  break;
2056 
2057 	case SS_NEG:
2058 	  if (wi::only_sign_bit_p (op0))
2059 	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2060 	  else
2061 	    result = wi::neg (op0);
2062 	  break;
2063 
2064 	case SS_ABS:
2065 	  if (wi::only_sign_bit_p (op0))
2066 	    result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2067 	  else
2068 	    result = wi::abs (op0);
2069 	  break;
2070 
2071 	case SQRT:
2072 	default:
2073 	  return 0;
2074 	}
2075 
2076       return immed_wide_int_const (result, result_mode);
2077     }
2078 
2079   else if (CONST_DOUBLE_AS_FLOAT_P (op)
2080 	   && SCALAR_FLOAT_MODE_P (mode)
2081 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
2082     {
2083       REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
2084       switch (code)
2085 	{
2086 	case SQRT:
2087 	  return 0;
2088 	case ABS:
2089 	  d = real_value_abs (&d);
2090 	  break;
2091 	case NEG:
2092 	  d = real_value_negate (&d);
2093 	  break;
2094 	case FLOAT_TRUNCATE:
2095 	  /* Don't perform the operation if flag_signaling_nans is on
2096 	     and the operand is a signaling NaN.  */
2097 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2098 	    return NULL_RTX;
2099 	  /* Or if flag_rounding_math is on and the truncation is not
2100 	     exact.  */
2101 	  if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2102 	      && !exact_real_truncate (mode, &d))
2103 	    return NULL_RTX;
2104 	  d = real_value_truncate (mode, d);
2105 	  break;
2106 	case FLOAT_EXTEND:
2107 	  /* Don't perform the operation if flag_signaling_nans is on
2108 	     and the operand is a signaling NaN.  */
2109 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2110 	    return NULL_RTX;
2111 	  /* All this does is change the mode, unless we are changing
2112 	     the mode class.  */
2113 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
2114 	    real_convert (&d, mode, &d);
2115 	  break;
2116 	case FIX:
2117 	  /* Don't perform the operation if flag_signaling_nans is on
2118 	     and the operand is a signaling NaN.  */
2119 	  if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2120 	    return NULL_RTX;
2121 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
2122 	  break;
2123 	case NOT:
2124 	  {
2125 	    long tmp[4];
2126 	    int i;
2127 
2128 	    real_to_target (tmp, &d, GET_MODE (op));
2129 	    for (i = 0; i < 4; i++)
2130 	      tmp[i] = ~tmp[i];
2131 	    real_from_target (&d, tmp, mode);
2132 	    break;
2133 	  }
2134 	default:
2135 	  gcc_unreachable ();
2136 	}
2137       return const_double_from_real_value (d, mode);
2138     }
2139   else if (CONST_DOUBLE_AS_FLOAT_P (op)
2140 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2141 	   && is_int_mode (mode, &result_mode))
2142     {
2143       unsigned int width = GET_MODE_PRECISION (result_mode);
2144       if (width > MAX_BITSIZE_MODE_ANY_INT)
2145 	return 0;
2146 
2147       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2148 	 operators are intentionally left unspecified (to ease implementation
2149 	 by target backends), for consistency, this routine implements the
2150 	 same semantics for constant folding as used by the middle-end.  */
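
      /* For example, (fix:SI X) for an X larger than 2147483647.0 folds to
	 (const_int 0x7fffffff), and (unsigned_fix:SI X) for a negative or
	 NaN X folds to (const_int 0); out-of-range values saturate rather
	 than wrap.  */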
2151 
2152       /* This was formerly used only for non-IEEE float.
2153 	 eggert@twinsun.com says it is safe for IEEE also.  */
2154       REAL_VALUE_TYPE t;
2155       const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2156       wide_int wmax, wmin;
2157       /* The FAIL flag is part of the ABI of real_to_integer, but we
2158 	 check things before making this call.  */
2159       bool fail;
2160 
2161       switch (code)
2162 	{
2163 	case FIX:
2164 	  if (REAL_VALUE_ISNAN (*x))
2165 	    return const0_rtx;
2166 
2167 	  /* Test against the signed upper bound.  */
2168 	  wmax = wi::max_value (width, SIGNED);
2169 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
2170 	  if (real_less (&t, x))
2171 	    return immed_wide_int_const (wmax, mode);
2172 
2173 	  /* Test against the signed lower bound.  */
2174 	  wmin = wi::min_value (width, SIGNED);
2175 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
2176 	  if (real_less (x, &t))
2177 	    return immed_wide_int_const (wmin, mode);
2178 
2179 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2180 				       mode);
2181 
2182 	case UNSIGNED_FIX:
2183 	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2184 	    return const0_rtx;
2185 
2186 	  /* Test against the unsigned upper bound.  */
2187 	  wmax = wi::max_value (width, UNSIGNED);
2188 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2189 	  if (real_less (&t, x))
2190 	    return immed_wide_int_const (wmax, mode);
2191 
2192 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
2193 				       mode);
2194 
2195 	default:
2196 	  gcc_unreachable ();
2197 	}
2198     }
2199 
2200   /* Handle polynomial integers.  */
2201   else if (CONST_POLY_INT_P (op))
2202     {
2203       poly_wide_int result;
2204       switch (code)
2205 	{
2206 	case NEG:
2207 	  result = -const_poly_int_value (op);
2208 	  break;
2209 
2210 	case NOT:
2211 	  result = ~const_poly_int_value (op);
2212 	  break;
2213 
2214 	default:
2215 	  return NULL_RTX;
2216 	}
2217       return immed_wide_int_const (result, mode);
2218     }
2219 
2220   return NULL_RTX;
2221 }
2222 
2223 /* Subroutine of simplify_binary_operation to simplify a binary operation
2224    CODE that can commute with byte swapping, with result mode MODE and
2225    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
2226    Return zero if no simplification or canonicalization is possible.  */
2227 
2228 rtx
2229 simplify_context::simplify_byte_swapping_operation (rtx_code code,
2230 						    machine_mode mode,
2231 						    rtx op0, rtx op1)
2232 {
2233   rtx tem;
2234 
2235   /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
2236   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2237     {
2238       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2239 				 simplify_gen_unary (BSWAP, mode, op1, mode));
2240       return simplify_gen_unary (BSWAP, mode, tem, mode);
2241     }
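
  /* For example, in SImode
       (and (bswap X) (const_int 0xff))
     becomes
       (bswap (and X (const_int 0xff000000)))
     so the mask is byte-swapped along with the operand it applies to.  */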
2242 
2243   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
2244   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2245     {
2246       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2247       return simplify_gen_unary (BSWAP, mode, tem, mode);
2248     }
2249 
2250   return NULL_RTX;
2251 }
2252 
2253 /* Subroutine of simplify_binary_operation to simplify a commutative,
2254    associative binary operation CODE with result mode MODE, operating
2255    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2256    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2257    canonicalization is possible.  */
2258 
2259 rtx
2260 simplify_context::simplify_associative_operation (rtx_code code,
2261 						  machine_mode mode,
2262 						  rtx op0, rtx op1)
2263 {
2264   rtx tem;
2265 
2266   /* Normally expressions simplified by simplify-rtx.c are combined
2267      at most from a few machine instructions and therefore the
2268      expressions should be fairly small.  During var-tracking
2269      we can see arbitrarily large expressions, though, and reassociating
2270      those can be quadratic, so punt after encountering max_assoc_count
2271      simplify_associative_operation calls during the outermost simplify_*
2272      call.  */
2273   if (++assoc_count >= max_assoc_count)
2274     return NULL_RTX;
2275 
2276   /* Linearize the operator to the left.  */
2277   if (GET_CODE (op1) == code)
2278     {
2279       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
2280       if (GET_CODE (op0) == code)
2281 	{
2282 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2283 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2284 	}
2285 
2286       /* "a op (b op c)" becomes "(b op c) op a".  */
2287       if (! swap_commutative_operands_p (op1, op0))
2288 	return simplify_gen_binary (code, mode, op1, op0);
2289 
2290       std::swap (op0, op1);
2291     }
2292 
2293   if (GET_CODE (op0) == code)
2294     {
2295       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2296       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2297 	{
2298 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2299 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2300 	}
2301 
2302       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2303       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2304       if (tem != 0)
2305         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2306 
2307       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2308       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2309       if (tem != 0)
2310         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2311     }
2312 
2313   return 0;
2314 }
2315 
2316 /* Return a mask describing the COMPARISON.  The mask has one bit per
   primitive outcome: 8 for LT, 4 for GT, 2 for EQ and 1 for UNORDERED;
   a compound comparison is the inclusive OR of the outcomes it accepts.  */
2317 static int
2318 comparison_to_mask (enum rtx_code comparison)
2319 {
2320   switch (comparison)
2321     {
2322     case LT:
2323       return 8;
2324     case GT:
2325       return 4;
2326     case EQ:
2327       return 2;
2328     case UNORDERED:
2329       return 1;
2330 
2331     case LTGT:
2332       return 12;
2333     case LE:
2334       return 10;
2335     case GE:
2336       return 6;
2337     case UNLT:
2338       return 9;
2339     case UNGT:
2340       return 5;
2341     case UNEQ:
2342       return 3;
2343 
2344     case ORDERED:
2345       return 14;
2346     case NE:
2347       return 13;
2348     case UNLE:
2349       return 11;
2350     case UNGE:
2351       return 7;
2352 
2353     default:
2354       gcc_unreachable ();
2355     }
2356 }
2357 
2358 /* Return a comparison corresponding to the MASK.  */
2359 static enum rtx_code
2360 mask_to_comparison (int mask)
2361 {
2362   switch (mask)
2363     {
2364     case 8:
2365       return LT;
2366     case 4:
2367       return GT;
2368     case 2:
2369       return EQ;
2370     case 1:
2371       return UNORDERED;
2372 
2373     case 12:
2374       return LTGT;
2375     case 10:
2376       return LE;
2377     case 6:
2378       return GE;
2379     case 9:
2380       return UNLT;
2381     case 5:
2382       return UNGT;
2383     case 3:
2384       return UNEQ;
2385 
2386     case 14:
2387       return ORDERED;
2388     case 13:
2389       return NE;
2390     case 11:
2391       return UNLE;
2392     case 7:
2393       return UNGE;
2394 
2395     default:
2396       gcc_unreachable ();
2397     }
2398 }
2399 
2400 /* Return true if CODE is valid for comparisons of mode MODE, false
2401    otherwise.
2402 
2403    It is always safe to return false, even if the code was valid for the
2404    given mode as that will merely suppress optimizations.  */
2405 
2406 static bool
2407 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2408 {
2409   switch (code)
2410     {
2411       /* These are valid for integral, floating and vector modes.  */
2412       case NE:
2413       case EQ:
2414       case GE:
2415       case GT:
2416       case LE:
2417       case LT:
2418 	return (INTEGRAL_MODE_P (mode)
2419 		|| FLOAT_MODE_P (mode)
2420 		|| VECTOR_MODE_P (mode));
2421 
2422       /* These are valid for floating point modes.  */
2423       case LTGT:
2424       case UNORDERED:
2425       case ORDERED:
2426       case UNEQ:
2427       case UNGE:
2428       case UNGT:
2429       case UNLE:
2430       case UNLT:
2431 	return FLOAT_MODE_P (mode);
2432 
2433       /* These are filtered out in simplify_logical_relational_operation,
2434 	 but we check for them too as a matter of safety.  They are valid
2435 	 for integral and vector modes.  */
2436       case GEU:
2437       case GTU:
2438       case LEU:
2439       case LTU:
2440 	return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2441 
2442       default:
2443 	gcc_unreachable ();
2444     }
2445 }
2446 
2447 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2448    false/true value of comparison with MODE where comparison operands
2449    have CMP_MODE.  */
2450 
2451 static rtx
2452 relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2453 {
2454   if (SCALAR_FLOAT_MODE_P (mode))
2455     {
2456       if (res == const0_rtx)
2457         return CONST0_RTX (mode);
2458 #ifdef FLOAT_STORE_FLAG_VALUE
2459       REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2460       return const_double_from_real_value (val, mode);
2461 #else
2462       return NULL_RTX;
2463 #endif
2464     }
2465   if (VECTOR_MODE_P (mode))
2466     {
2467       if (res == const0_rtx)
2468 	return CONST0_RTX (mode);
2469 #ifdef VECTOR_STORE_FLAG_VALUE
2470       rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2471       if (val == NULL_RTX)
2472 	return NULL_RTX;
2473       if (val == const1_rtx)
2474 	return CONST1_RTX (mode);
2475 
2476       return gen_const_vec_duplicate (mode, val);
2477 #else
2478       return NULL_RTX;
2479 #endif
2480     }
2481   /* For a vector comparison with a scalar int result, it is unknown
2482      whether the target means a comparison into an integral bitmask, or
2483      a comparison where the whole result is const_true_rtx only if all
2484      element comparisons are true, or one where the whole result is
2485      const_true_rtx if any element comparison is true.  For const0_rtx
     all the cases are the same.  */
2486   if (VECTOR_MODE_P (cmp_mode)
2487       && SCALAR_INT_MODE_P (mode)
2488       && res == const_true_rtx)
2489     return NULL_RTX;
2490 
2491   return res;
2492 }
2493 
2494 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2495    and OP1, which should be both relational operations.  Return 0 if no such
2496    simplification is possible.  */
2497 rtx
2498 simplify_context::simplify_logical_relational_operation (rtx_code code,
2499 							 machine_mode mode,
2500 							 rtx op0, rtx op1)
2501 {
2502   /* We only handle IOR of two relational operations.  */
2503   if (code != IOR)
2504     return 0;
2505 
2506   if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2507     return 0;
2508 
2509   if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2510 	&& rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2511     return 0;
2512 
2513   enum rtx_code code0 = GET_CODE (op0);
2514   enum rtx_code code1 = GET_CODE (op1);
2515 
2516   /* We don't handle unsigned comparisons currently.  */
2517   if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2518     return 0;
2519   if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2520     return 0;
2521 
2522   int mask0 = comparison_to_mask (code0);
2523   int mask1 = comparison_to_mask (code1);
2524 
2525   int mask = mask0 | mask1;
2526 
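  /* For instance, (ior (lt X Y) (eq X Y)) gives mask 8 | 2 == 10, which maps
     back to LE, so the result is (le X Y).  A mask of 15 accepts every
     outcome and is therefore always true.  */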
2527   if (mask == 15)
2528     return relational_result (mode, GET_MODE (op0), const_true_rtx);
2529 
2530   code = mask_to_comparison (mask);
2531 
2532   /* Many comparison codes are only valid for certain mode classes.  */
2533   if (!comparison_code_valid_for_mode (code, mode))
2534     return 0;
2535 
2536   op0 = XEXP (op1, 0);
2537   op1 = XEXP (op1, 1);
2538 
2539   return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2540 }
2541 
2542 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2543    and OP1.  Return 0 if no simplification is possible.
2544 
2545    Don't use this for relational operations such as EQ or LT.
2546    Use simplify_relational_operation instead.  */
2547 rtx
2548 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2549 					     rtx op0, rtx op1)
2550 {
2551   rtx trueop0, trueop1;
2552   rtx tem;
2553 
2554   /* Relational operations don't work here.  We must know the mode
2555      of the operands in order to do the comparison correctly.
2556      Assuming a full word can give incorrect results.
2557      Consider comparing 128 with -128 in QImode.  */
2558   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2559   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2560 
2561   /* Make sure the constant is second.  */
2562   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2563       && swap_commutative_operands_p (op0, op1))
2564     std::swap (op0, op1);
2565 
2566   trueop0 = avoid_constant_pool_reference (op0);
2567   trueop1 = avoid_constant_pool_reference (op1);
2568 
2569   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2570   if (tem)
2571     return tem;
2572   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2573 
2574   if (tem)
2575     return tem;
2576 
2577   /* If the above steps did not result in a simplification and op0 or op1
2578      were constant pool references, use the referenced constants directly.  */
2579   if (trueop0 != op0 || trueop1 != op1)
2580     return simplify_gen_binary (code, mode, trueop0, trueop1);
2581 
2582   return NULL_RTX;
2583 }
2584 
2585 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2586    which OP0 and OP1 are both vector series or vector duplicates
2587    (which are really just series with a step of 0).  If so, try to
2588    form a new series by applying CODE to the bases and to the steps.
2589    Return null if no simplification is possible.
2590 
2591    MODE is the mode of the operation and is known to be a vector
2592    integer mode.  */
2593 
2594 rtx
2595 simplify_context::simplify_binary_operation_series (rtx_code code,
2596 						    machine_mode mode,
2597 						    rtx op0, rtx op1)
2598 {
2599   rtx base0, step0;
2600   if (vec_duplicate_p (op0, &base0))
2601     step0 = const0_rtx;
2602   else if (!vec_series_p (op0, &base0, &step0))
2603     return NULL_RTX;
2604 
2605   rtx base1, step1;
2606   if (vec_duplicate_p (op1, &base1))
2607     step1 = const0_rtx;
2608   else if (!vec_series_p (op1, &base1, &step1))
2609     return NULL_RTX;
2610 
2611   /* Only create a new series if we can simplify both parts.  In other
2612      cases this isn't really a simplification, and it's not necessarily
2613      a win to replace a vector operation with a scalar operation.  */
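  /* For instance, adding (vec_duplicate:V4SI (const_int 3)) to
     (vec_series:V4SI (const_int 1) (const_int 2)), i.e. {3,3,3,3} + {1,3,5,7},
     yields (vec_series:V4SI (const_int 4) (const_int 2)), i.e. {4,6,8,10}:
     the bases and the steps (a duplicate having a step of 0) are combined
     independently.  */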
2614   scalar_mode inner_mode = GET_MODE_INNER (mode);
2615   rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2616   if (!new_base)
2617     return NULL_RTX;
2618 
2619   rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2620   if (!new_step)
2621     return NULL_RTX;
2622 
2623   return gen_vec_series (mode, new_base, new_step);
2624 }
2625 
2626 /* Subroutine of simplify_binary_operation_1.  Un-distribute a binary
2627    operation CODE with result mode MODE, operating on OP0 and OP1.
2628    e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2629    Returns NULL_RTX if no simplification is possible.  */
2630 
2631 rtx
2632 simplify_context::simplify_distributive_operation (rtx_code code,
2633 						   machine_mode mode,
2634 						   rtx op0, rtx op1)
2635 {
2636   enum rtx_code op = GET_CODE (op0);
2637   gcc_assert (GET_CODE (op1) == op);
2638 
2639   if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2640       && ! side_effects_p (XEXP (op0, 1)))
2641     return simplify_gen_binary (op, mode,
2642 				simplify_gen_binary (code, mode,
2643 						     XEXP (op0, 0),
2644 						     XEXP (op1, 0)),
2645 				XEXP (op0, 1));
2646 
2647   if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2648     {
2649       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2650 	  && ! side_effects_p (XEXP (op0, 0)))
2651 	return simplify_gen_binary (op, mode,
2652 				    simplify_gen_binary (code, mode,
2653 							 XEXP (op0, 1),
2654 							 XEXP (op1, 1)),
2655 				    XEXP (op0, 0));
2656       if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2657 	  && ! side_effects_p (XEXP (op0, 0)))
2658 	return simplify_gen_binary (op, mode,
2659 				    simplify_gen_binary (code, mode,
2660 							 XEXP (op0, 1),
2661 							 XEXP (op1, 0)),
2662 				    XEXP (op0, 0));
2663       if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2664 	  && ! side_effects_p (XEXP (op0, 1)))
2665 	return simplify_gen_binary (op, mode,
2666 				    simplify_gen_binary (code, mode,
2667 							 XEXP (op0, 0),
2668 							 XEXP (op1, 1)),
2669 				    XEXP (op0, 1));
2670     }
2671 
2672   return NULL_RTX;
2673 }
2674 
2675 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2676    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2677    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2678    actual constants.  */
2679 
2680 rtx
2681 simplify_context::simplify_binary_operation_1 (rtx_code code,
2682 					       machine_mode mode,
2683 					       rtx op0, rtx op1,
2684 					       rtx trueop0, rtx trueop1)
2685 {
2686   rtx tem, reversed, opleft, opright, elt0, elt1;
2687   HOST_WIDE_INT val;
2688   scalar_int_mode int_mode, inner_mode;
2689   poly_int64 offset;
2690 
2691   /* Even if we can't compute a constant result,
2692      there are some cases worth simplifying.  */
2693 
2694   switch (code)
2695     {
2696     case PLUS:
2697       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2698 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2699 	 when x is -0 and the rounding mode is not towards -infinity,
2700 	 since (-0) + 0 is then 0.  */
2701       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2702 	return op0;
2703 
2704       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2705 	 transformations are safe even for IEEE.  */
2706       if (GET_CODE (op0) == NEG)
2707 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2708       else if (GET_CODE (op1) == NEG)
2709 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2710 
2711       /* (~a) + 1 -> -a */
2712       if (INTEGRAL_MODE_P (mode)
2713 	  && GET_CODE (op0) == NOT
2714 	  && trueop1 == const1_rtx)
2715 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2716 
2717       /* Handle both-operands-constant cases.  We can only add
2718 	 CONST_INTs to constants since the sum of relocatable symbols
2719 	 can't be handled by most assemblers.  Don't add CONST_INT
2720 	 to CONST_INT since overflow won't be computed properly if wider
2721 	 than HOST_BITS_PER_WIDE_INT.  */
2722 
2723       if ((GET_CODE (op0) == CONST
2724 	   || GET_CODE (op0) == SYMBOL_REF
2725 	   || GET_CODE (op0) == LABEL_REF)
2726 	  && poly_int_rtx_p (op1, &offset))
2727 	return plus_constant (mode, op0, offset);
2728       else if ((GET_CODE (op1) == CONST
2729 		|| GET_CODE (op1) == SYMBOL_REF
2730 		|| GET_CODE (op1) == LABEL_REF)
2731 	       && poly_int_rtx_p (op0, &offset))
2732 	return plus_constant (mode, op1, offset);
2733 
2734       /* See if this is something like X * C + X or vice versa or
2735 	 if the multiplication is written as a shift.  If so, we can
2736 	 distribute and make a new multiply, shift, or maybe just
2737 	 have X (e.g. for (plus (neg X) (mult X 2))).  But don't make
2738 	 something more expensive than we had before.  */
2739 
2740       if (is_a <scalar_int_mode> (mode, &int_mode))
2741 	{
2742 	  rtx lhs = op0, rhs = op1;
2743 
2744 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2745 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2746 
2747 	  if (GET_CODE (lhs) == NEG)
2748 	    {
2749 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2750 	      lhs = XEXP (lhs, 0);
2751 	    }
2752 	  else if (GET_CODE (lhs) == MULT
2753 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2754 	    {
2755 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2756 	      lhs = XEXP (lhs, 0);
2757 	    }
2758 	  else if (GET_CODE (lhs) == ASHIFT
2759 		   && CONST_INT_P (XEXP (lhs, 1))
2760                    && INTVAL (XEXP (lhs, 1)) >= 0
2761 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2762 	    {
2763 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2764 					    GET_MODE_PRECISION (int_mode));
2765 	      lhs = XEXP (lhs, 0);
2766 	    }
2767 
2768 	  if (GET_CODE (rhs) == NEG)
2769 	    {
2770 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2771 	      rhs = XEXP (rhs, 0);
2772 	    }
2773 	  else if (GET_CODE (rhs) == MULT
2774 		   && CONST_INT_P (XEXP (rhs, 1)))
2775 	    {
2776 	      coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2777 	      rhs = XEXP (rhs, 0);
2778 	    }
2779 	  else if (GET_CODE (rhs) == ASHIFT
2780 		   && CONST_INT_P (XEXP (rhs, 1))
2781 		   && INTVAL (XEXP (rhs, 1)) >= 0
2782 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2783 	    {
2784 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2785 					    GET_MODE_PRECISION (int_mode));
2786 	      rhs = XEXP (rhs, 0);
2787 	    }
2788 
2789 	  if (rtx_equal_p (lhs, rhs))
2790 	    {
2791 	      rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2792 	      rtx coeff;
2793 	      bool speed = optimize_function_for_speed_p (cfun);
2794 
2795 	      coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2796 
2797 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2798 	      return (set_src_cost (tem, int_mode, speed)
2799 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2800 	    }
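
	  /* For example, (plus (mult X (const_int 3)) X) has coefficients
	     3 and 1 and becomes (mult X (const_int 4)), while
	     (plus (ashift X (const_int 2)) (neg X)) has coefficients 4 and -1
	     and becomes (mult X (const_int 3)), provided the new form is no
	     more expensive than the original.  */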
2801 
2802 	  /* Optimize (X - 1) * Y + Y to X * Y.  */
2803 	  lhs = op0;
2804 	  rhs = op1;
2805 	  if (GET_CODE (op0) == MULT)
2806 	    {
2807 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
2808 		    && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
2809 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
2810 		       && XEXP (XEXP (op0, 0), 1) == const1_rtx))
2811 		  && rtx_equal_p (XEXP (op0, 1), op1))
2812 		lhs = XEXP (XEXP (op0, 0), 0);
2813 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2814 			 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
2815 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
2816 			    && XEXP (XEXP (op0, 1), 1) == const1_rtx))
2817 		       && rtx_equal_p (XEXP (op0, 0), op1))
2818 		lhs = XEXP (XEXP (op0, 1), 0);
2819 	    }
2820 	  else if (GET_CODE (op1) == MULT)
2821 	    {
2822 	      if (((GET_CODE (XEXP (op1, 0)) == PLUS
2823 		    && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
2824 		   || (GET_CODE (XEXP (op1, 0)) == MINUS
2825 		       && XEXP (XEXP (op1, 0), 1) == const1_rtx))
2826 		  && rtx_equal_p (XEXP (op1, 1), op0))
2827 		rhs = XEXP (XEXP (op1, 0), 0);
2828 	      else if (((GET_CODE (XEXP (op1, 1)) == PLUS
2829 			 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
2830 			|| (GET_CODE (XEXP (op1, 1)) == MINUS
2831 			    && XEXP (XEXP (op1, 1), 1) == const1_rtx))
2832 		       && rtx_equal_p (XEXP (op1, 0), op0))
2833 		rhs = XEXP (XEXP (op1, 1), 0);
2834 	    }
2835 	  if (lhs != op0 || rhs != op1)
2836 	    return simplify_gen_binary (MULT, int_mode, lhs, rhs);
2837 	}
2838 
2839       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2840       if (CONST_SCALAR_INT_P (op1)
2841 	  && GET_CODE (op0) == XOR
2842 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2843 	  && mode_signbit_p (mode, op1))
2844 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2845 				    simplify_gen_binary (XOR, mode, op1,
2846 							 XEXP (op0, 1)));
2847 
2848       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2849       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2850 	  && GET_CODE (op0) == MULT
2851 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2852 	{
2853 	  rtx in1, in2;
2854 
2855 	  in1 = XEXP (XEXP (op0, 0), 0);
2856 	  in2 = XEXP (op0, 1);
2857 	  return simplify_gen_binary (MINUS, mode, op1,
2858 				      simplify_gen_binary (MULT, mode,
2859 							   in1, in2));
2860 	}
2861 
2862       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2863 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2864 	 is 1.  */
2865       if (COMPARISON_P (op0)
2866 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2867 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2868 	  && (reversed = reversed_comparison (op0, mode)))
2869 	return
2870 	  simplify_gen_unary (NEG, mode, reversed, mode);
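
      /* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
	 evaluates to 0 when A == B and to -1 otherwise, which is exactly
	 (neg (ne A B)).  */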
2871 
2872       /* If one of the operands is a PLUS or a MINUS, see if we can
2873 	 simplify this by the associative law.
2874 	 Don't use the associative law for floating point.
2875 	 The inaccuracy makes it nonassociative,
2876 	 and subtle programs can break if operations are associated.  */
2877 
2878       if (INTEGRAL_MODE_P (mode)
2879 	  && (plus_minus_operand_p (op0)
2880 	      || plus_minus_operand_p (op1))
2881 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2882 	return tem;
2883 
2884       /* Reassociate floating point addition only when the user
2885 	 specifies associative math operations.  */
2886       if (FLOAT_MODE_P (mode)
2887 	  && flag_associative_math)
2888 	{
2889 	  tem = simplify_associative_operation (code, mode, op0, op1);
2890 	  if (tem)
2891 	    return tem;
2892 	}
2893 
2894       /* Handle vector series.  */
2895       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2896 	{
2897 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
2898 	  if (tem)
2899 	    return tem;
2900 	}
2901       break;
2902 
2903     case COMPARE:
2904       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2905       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2906 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2907 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2908 	{
2909 	  rtx xop00 = XEXP (op0, 0);
2910 	  rtx xop10 = XEXP (op1, 0);
2911 
2912 	  if (REG_P (xop00) && REG_P (xop10)
2913 	      && REGNO (xop00) == REGNO (xop10)
2914 	      && GET_MODE (xop00) == mode
2915 	      && GET_MODE (xop10) == mode
2916 	      && GET_MODE_CLASS (mode) == MODE_CC)
2917 	    return xop00;
2918 	}
2919       break;
2920 
2921     case MINUS:
2922       /* We can't assume x-x is 0 even with non-IEEE floating point,
2923 	 but since it is zero except in very strange circumstances, we
2924 	 will treat it as zero with -ffinite-math-only.  */
2925       if (rtx_equal_p (trueop0, trueop1)
2926 	  && ! side_effects_p (op0)
2927 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2928 	return CONST0_RTX (mode);
2929 
2930       /* Change subtraction from zero into negation.  (0 - x) is the
2931 	 same as -x when x is NaN, infinite, or finite and nonzero.
2932 	 But if the mode has signed zeros, and does not round towards
2933 	 -infinity, then 0 - 0 is 0, not -0.  */
2934       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2935 	return simplify_gen_unary (NEG, mode, op1, mode);
2936 
2937       /* (-1 - a) is ~a, unless the expression contains symbolic
2938 	 constants, in which case not retaining additions and
2939 	 subtractions could cause invalid assembly to be produced.  */
2940       if (trueop0 == constm1_rtx
2941 	  && !contains_symbolic_reference_p (op1))
2942 	return simplify_gen_unary (NOT, mode, op1, mode);
2943 
2944       /* Subtracting 0 has no effect unless the mode has signaling NaNs,
2945 	 or has signed zeros and supports rounding towards -infinity.
2946 	 In such a case, 0 - 0 is -0.  */
2947       if (!(HONOR_SIGNED_ZEROS (mode)
2948 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2949 	  && !HONOR_SNANS (mode)
2950 	  && trueop1 == CONST0_RTX (mode))
2951 	return op0;
2952 
2953       /* See if this is something like X * C - X or vice versa or
2954 	 if the multiplication is written as a shift.  If so, we can
2955 	 distribute and make a new multiply, shift, or maybe just
2956 	 have X (if C is 2 in the example above).  But don't make
2957 	 something more expensive than we had before.  */
2958 
2959       if (is_a <scalar_int_mode> (mode, &int_mode))
2960 	{
2961 	  rtx lhs = op0, rhs = op1;
2962 
2963 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2964 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2965 
2966 	  if (GET_CODE (lhs) == NEG)
2967 	    {
2968 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2969 	      lhs = XEXP (lhs, 0);
2970 	    }
2971 	  else if (GET_CODE (lhs) == MULT
2972 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2973 	    {
2974 	      coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2975 	      lhs = XEXP (lhs, 0);
2976 	    }
2977 	  else if (GET_CODE (lhs) == ASHIFT
2978 		   && CONST_INT_P (XEXP (lhs, 1))
2979 		   && INTVAL (XEXP (lhs, 1)) >= 0
2980 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2981 	    {
2982 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2983 					    GET_MODE_PRECISION (int_mode));
2984 	      lhs = XEXP (lhs, 0);
2985 	    }
2986 
2987 	  if (GET_CODE (rhs) == NEG)
2988 	    {
2989 	      negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2990 	      rhs = XEXP (rhs, 0);
2991 	    }
2992 	  else if (GET_CODE (rhs) == MULT
2993 		   && CONST_INT_P (XEXP (rhs, 1)))
2994 	    {
2995 	      negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2996 	      rhs = XEXP (rhs, 0);
2997 	    }
2998 	  else if (GET_CODE (rhs) == ASHIFT
2999 		   && CONST_INT_P (XEXP (rhs, 1))
3000 		   && INTVAL (XEXP (rhs, 1)) >= 0
3001 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
3002 	    {
3003 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
3004 					       GET_MODE_PRECISION (int_mode));
3005 	      negcoeff1 = -negcoeff1;
3006 	      rhs = XEXP (rhs, 0);
3007 	    }
3008 
3009 	  if (rtx_equal_p (lhs, rhs))
3010 	    {
3011 	      rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
3012 	      rtx coeff;
3013 	      bool speed = optimize_function_for_speed_p (cfun);
3014 
3015 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
3016 
3017 	      tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
3018 	      return (set_src_cost (tem, int_mode, speed)
3019 		      <= set_src_cost (orig, int_mode, speed) ? tem : 0);
3020 	    }
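
	  /* Likewise here: e.g. (minus (mult X (const_int 3)) X) becomes
	     (mult X (const_int 2)), and (minus (ashift X (const_int 2)) X)
	     becomes (mult X (const_int 3)), again subject to the cost check
	     above.  */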
3021 
3022 	  /* Optimize (X + 1) * Y - Y to X * Y.  */
3023 	  lhs = op0;
3024 	  if (GET_CODE (op0) == MULT)
3025 	    {
3026 	      if (((GET_CODE (XEXP (op0, 0)) == PLUS
3027 		    && XEXP (XEXP (op0, 0), 1) == const1_rtx)
3028 		   || (GET_CODE (XEXP (op0, 0)) == MINUS
3029 		       && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
3030 		  && rtx_equal_p (XEXP (op0, 1), op1))
3031 		lhs = XEXP (XEXP (op0, 0), 0);
3032 	      else if (((GET_CODE (XEXP (op0, 1)) == PLUS
3033 			 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
3034 			|| (GET_CODE (XEXP (op0, 1)) == MINUS
3035 			    && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
3036 		       && rtx_equal_p (XEXP (op0, 0), op1))
3037 		lhs = XEXP (XEXP (op0, 1), 0);
3038 	    }
3039 	  if (lhs != op0)
3040 	    return simplify_gen_binary (MULT, int_mode, lhs, op1);
3041 	}
3042 
3043       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
3044       if (GET_CODE (op1) == NEG)
3045 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3046 
3047       /* (-x - c) may be simplified as (-c - x).  */
3048       if (GET_CODE (op0) == NEG
3049 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
3050 	{
3051 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
3052 	  if (tem)
3053 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
3054 	}
3055 
3056       if ((GET_CODE (op0) == CONST
3057 	   || GET_CODE (op0) == SYMBOL_REF
3058 	   || GET_CODE (op0) == LABEL_REF)
3059 	  && poly_int_rtx_p (op1, &offset))
3060 	return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
3061 
3062       /* Don't let a relocatable value get a negative coeff.  */
3063       if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
3064 	return simplify_gen_binary (PLUS, mode,
3065 				    op0,
3066 				    neg_poly_int_rtx (mode, op1));
3067 
3068       /* (x - (x & y)) -> (x & ~y) */
3069       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
3070 	{
3071 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
3072 	    {
3073 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
3074 					GET_MODE (XEXP (op1, 1)));
3075 	      return simplify_gen_binary (AND, mode, op0, tem);
3076 	    }
3077 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
3078 	    {
3079 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
3080 					GET_MODE (XEXP (op1, 0)));
3081 	      return simplify_gen_binary (AND, mode, op0, tem);
3082 	    }
3083 	}
3084 
3085       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3086 	 by reversing the comparison code if valid.  */
3087       if (STORE_FLAG_VALUE == 1
3088 	  && trueop0 == const1_rtx
3089 	  && COMPARISON_P (op1)
3090 	  && (reversed = reversed_comparison (op1, mode)))
3091 	return reversed;
3092 
3093       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
3094       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3095 	  && GET_CODE (op1) == MULT
3096 	  && GET_CODE (XEXP (op1, 0)) == NEG)
3097 	{
3098 	  rtx in1, in2;
3099 
3100 	  in1 = XEXP (XEXP (op1, 0), 0);
3101 	  in2 = XEXP (op1, 1);
3102 	  return simplify_gen_binary (PLUS, mode,
3103 				      simplify_gen_binary (MULT, mode,
3104 							   in1, in2),
3105 				      op0);
3106 	}
3107 
3108       /* Canonicalize (minus (neg A) (mult B C)) to
3109 	 (minus (mult (neg B) C) A).  */
3110       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3111 	  && GET_CODE (op1) == MULT
3112 	  && GET_CODE (op0) == NEG)
3113 	{
3114 	  rtx in1, in2;
3115 
3116 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
3117 	  in2 = XEXP (op1, 1);
3118 	  return simplify_gen_binary (MINUS, mode,
3119 				      simplify_gen_binary (MULT, mode,
3120 							   in1, in2),
3121 				      XEXP (op0, 0));
3122 	}
3123 
3124       /* If one of the operands is a PLUS or a MINUS, see if we can
3125 	 simplify this by the associative law.  This will, for example,
3126          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3127 	 Don't use the associative law for floating point.
3128 	 The inaccuracy makes it nonassociative,
3129 	 and subtle programs can break if operations are associated.  */
3130 
3131       if (INTEGRAL_MODE_P (mode)
3132 	  && (plus_minus_operand_p (op0)
3133 	      || plus_minus_operand_p (op1))
3134 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3135 	return tem;
3136 
3137       /* Handle vector series.  */
3138       if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3139 	{
3140 	  tem = simplify_binary_operation_series (code, mode, op0, op1);
3141 	  if (tem)
3142 	    return tem;
3143 	}
3144       break;
3145 
3146     case MULT:
3147       if (trueop1 == constm1_rtx)
3148 	return simplify_gen_unary (NEG, mode, op0, mode);
3149 
3150       if (GET_CODE (op0) == NEG)
3151 	{
3152 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3153           /* If op1 is a MULT as well and simplify_unary_operation
3154              just moved the NEG to the second operand, simplify_gen_binary
3155              below could, through simplify_associative_operation, move
3156              the NEG around again and recurse endlessly.  */
3157 	  if (temp
3158 	      && GET_CODE (op1) == MULT
3159 	      && GET_CODE (temp) == MULT
3160 	      && XEXP (op1, 0) == XEXP (temp, 0)
3161 	      && GET_CODE (XEXP (temp, 1)) == NEG
3162 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3163 	    temp = NULL_RTX;
3164 	  if (temp)
3165 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3166 	}
3167       if (GET_CODE (op1) == NEG)
3168 	{
3169 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3170           /* If op0 is a MULT as well and simplify_unary_operation
3171              just moved the NEG to the second operand, simplify_gen_binary
3172              below could, through simplify_associative_operation, move
3173              the NEG around again and recurse endlessly.  */
3174 	  if (temp
3175 	      && GET_CODE (op0) == MULT
3176 	      && GET_CODE (temp) == MULT
3177 	      && XEXP (op0, 0) == XEXP (temp, 0)
3178 	      && GET_CODE (XEXP (temp, 1)) == NEG
3179 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3180 	    temp = NULL_RTX;
3181 	  if (temp)
3182 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3183 	}
3184 
3185       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
3186 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
3187 	 when the mode has signed zeros, since multiplying a negative
3188 	 number by 0 will give -0, not 0.  */
3189       if (!HONOR_NANS (mode)
3190 	  && !HONOR_SIGNED_ZEROS (mode)
3191 	  && trueop1 == CONST0_RTX (mode)
3192 	  && ! side_effects_p (op0))
3193 	return op1;
3194 
3195       /* In IEEE floating point, x*1 is not equivalent to x for
3196 	 signalling NaNs.  */
3197       if (!HONOR_SNANS (mode)
3198 	  && trueop1 == CONST1_RTX (mode))
3199 	return op0;
3200 
3201       /* Convert multiply by constant power of two into shift.  */
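      /* For example, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)).  */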
3202       if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3203 	{
3204 	  val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3205 	  if (val >= 0)
3206 	    return simplify_gen_binary (ASHIFT, mode, op0,
3207 					gen_int_shift_amount (mode, val));
3208 	}
3209 
3210       /* x*2 is x+x and x*(-1) is -x */
3211       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3212 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3213 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3214 	  && GET_MODE (op0) == mode)
3215 	{
3216 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3217 
3218 	  if (real_equal (d1, &dconst2))
3219 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3220 
3221 	  if (!HONOR_SNANS (mode)
3222 	      && real_equal (d1, &dconstm1))
3223 	    return simplify_gen_unary (NEG, mode, op0, mode);
3224 	}
3225 
3226       /* Optimize -x * -x as x * x.  */
3227       if (FLOAT_MODE_P (mode)
3228 	  && GET_CODE (op0) == NEG
3229 	  && GET_CODE (op1) == NEG
3230 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3231 	  && !side_effects_p (XEXP (op0, 0)))
3232 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3233 
3234       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
3235       if (SCALAR_FLOAT_MODE_P (mode)
3236 	  && GET_CODE (op0) == ABS
3237 	  && GET_CODE (op1) == ABS
3238 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3239 	  && !side_effects_p (XEXP (op0, 0)))
3240 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3241 
3242       /* Reassociate multiplication, but for floating point MULTs
3243 	 only when the user specifies unsafe math optimizations.  */
3244       if (! FLOAT_MODE_P (mode)
3245 	  || flag_unsafe_math_optimizations)
3246 	{
3247 	  tem = simplify_associative_operation (code, mode, op0, op1);
3248 	  if (tem)
3249 	    return tem;
3250 	}
3251       break;
3252 
3253     case IOR:
3254       if (trueop1 == CONST0_RTX (mode))
3255 	return op0;
3256       if (INTEGRAL_MODE_P (mode)
3257 	  && trueop1 == CONSTM1_RTX (mode)
3258 	  && !side_effects_p (op0))
3259 	return op1;
3260       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3261 	return op0;
3262       /* A | (~A) -> -1 */
3263       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3264 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3265 	  && ! side_effects_p (op0)
3266 	  && SCALAR_INT_MODE_P (mode))
3267 	return constm1_rtx;
3268 
3269       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
3270       if (CONST_INT_P (op1)
3271 	  && HWI_COMPUTABLE_MODE_P (mode)
3272 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3273 	  && !side_effects_p (op0))
3274 	return op1;
3275 
3276       /* Canonicalize (X & C1) | C2.  */
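      /* As an illustration, with QImode constants: ((x & 0x0f) | 0xff)
         is simply 0xff (when X has no side effects) because
         0x0f & 0xff == 0x0f, and ((x & 0xf0) | 0x0f) becomes
         (x | 0x0f) because 0xf0 | 0x0f covers every bit of the mode.  */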
3277       if (GET_CODE (op0) == AND
3278 	  && CONST_INT_P (trueop1)
3279 	  && CONST_INT_P (XEXP (op0, 1)))
3280 	{
3281 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3282 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3283 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
3284 
3285 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
3286 	  if ((c1 & c2) == c1
3287 	      && !side_effects_p (XEXP (op0, 0)))
3288 	    return trueop1;
3289 
3290 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
3291 	  if (((c1|c2) & mask) == mask)
3292 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3293 	}
3294 
3295       /* Convert (A & B) | A to A.  */
3296       if (GET_CODE (op0) == AND
3297 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3298 	      || rtx_equal_p (XEXP (op0, 1), op1))
3299 	  && ! side_effects_p (XEXP (op0, 0))
3300 	  && ! side_effects_p (XEXP (op0, 1)))
3301 	return op1;
3302 
3303       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3304          unit precision of the mode to (rotate A CX).  */
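      /* For example, in SImode (ior (ashift x (const_int 8))
         (lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)),
         since 8 + 24 equals the 32-bit precision.  */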
3305 
3306       if (GET_CODE (op1) == ASHIFT
3307           || GET_CODE (op1) == SUBREG)
3308         {
3309 	  opleft = op1;
3310 	  opright = op0;
3311 	}
3312       else
3313         {
3314 	  opright = op1;
3315 	  opleft = op0;
3316 	}
3317 
3318       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3319           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3320           && CONST_INT_P (XEXP (opleft, 1))
3321           && CONST_INT_P (XEXP (opright, 1))
3322           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3323 	      == GET_MODE_UNIT_PRECISION (mode)))
3324         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
3325 
3326       /* Same, but for ashift that has been "simplified" to a wider mode
3327         by simplify_shift_const.  */
3328 
3329       if (GET_CODE (opleft) == SUBREG
3330 	  && is_a <scalar_int_mode> (mode, &int_mode)
3331 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3332 				     &inner_mode)
3333           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3334           && GET_CODE (opright) == LSHIFTRT
3335           && GET_CODE (XEXP (opright, 0)) == SUBREG
3336 	  && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3337 	  && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3338           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3339                           SUBREG_REG (XEXP (opright, 0)))
3340           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3341           && CONST_INT_P (XEXP (opright, 1))
3342 	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3343 	      + INTVAL (XEXP (opright, 1))
3344 	      == GET_MODE_PRECISION (int_mode)))
3345 	return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3346 			       XEXP (SUBREG_REG (opleft), 1));
3347 
3348       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3349          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
3350 	 the PLUS does not affect any of the bits in OP1: then we can do
3351 	 the IOR as a PLUS and we can associate.  This is valid if OP1
3352          can be safely shifted left C bits.  */
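      /* As an illustrative example, (ior (ashiftrt (plus x
         (const_int 256)) (const_int 8)) (const_int 1)) can become
         (ashiftrt (plus x (const_int 512)) (const_int 8)) when bit 8
         of the PLUS result is known to be zero: adding another 256
         then cannot carry and simply sets the bit that the IOR with 1
         would have set after the shift.  */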
3353       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3354           && GET_CODE (XEXP (op0, 0)) == PLUS
3355           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3356           && CONST_INT_P (XEXP (op0, 1))
3357           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3358         {
3359 	  int count = INTVAL (XEXP (op0, 1));
3360 	  HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3361 
3362           if (mask >> count == INTVAL (trueop1)
3363 	      && trunc_int_for_mode (mask, mode) == mask
3364               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3365 	    return simplify_gen_binary (ASHIFTRT, mode,
3366 					plus_constant (mode, XEXP (op0, 0),
3367 						       mask),
3368 					XEXP (op0, 1));
3369         }
3370 
3371       /* The following happens with bitfield merging.
3372          (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
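      /* E.g. with C == 0x0f in QImode:
         (x & 0x0f) | ((x | y) & 0xf0) -> x | (y & 0xf0); X's bits are
         preserved either way, so only Y's contribution under ~C
         remains on the right-hand side.  */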
3373       if (GET_CODE (op0) == AND
3374 	  && GET_CODE (op1) == AND
3375 	  && CONST_INT_P (XEXP (op0, 1))
3376 	  && CONST_INT_P (XEXP (op1, 1))
3377 	  && (INTVAL (XEXP (op0, 1))
3378 	      == ~INTVAL (XEXP (op1, 1))))
3379 	{
3380 	  /* The IOR may be on both sides.  */
3381 	  rtx top0 = NULL_RTX, top1 = NULL_RTX;
3382 	  if (GET_CODE (XEXP (op1, 0)) == IOR)
3383 	    top0 = op0, top1 = op1;
3384 	  else if (GET_CODE (XEXP (op0, 0)) == IOR)
3385 	    top0 = op1, top1 = op0;
3386 	  if (top0 && top1)
3387 	    {
3388 	      /* X may be on either side of the inner IOR.  */
3389 	      rtx tem = NULL_RTX;
3390 	      if (rtx_equal_p (XEXP (top0, 0),
3391 			       XEXP (XEXP (top1, 0), 0)))
3392 		tem = XEXP (XEXP (top1, 0), 1);
3393 	      else if (rtx_equal_p (XEXP (top0, 0),
3394 				    XEXP (XEXP (top1, 0), 1)))
3395 		tem = XEXP (XEXP (top1, 0), 0);
3396 	      if (tem)
3397 		return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3398 					    simplify_gen_binary
3399 					      (AND, mode, tem, XEXP (top1, 1)));
3400 	    }
3401 	}
3402 
3403       /* Convert (ior (and A C) (and B C)) into (and (ior A B) C).  */
3404       if (GET_CODE (op0) == GET_CODE (op1)
3405 	  && (GET_CODE (op0) == AND
3406 	      || GET_CODE (op0) == IOR
3407 	      || GET_CODE (op0) == LSHIFTRT
3408 	      || GET_CODE (op0) == ASHIFTRT
3409 	      || GET_CODE (op0) == ASHIFT
3410 	      || GET_CODE (op0) == ROTATE
3411 	      || GET_CODE (op0) == ROTATERT))
3412 	{
3413 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3414 	  if (tem)
3415 	    return tem;
3416 	}
3417 
3418       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3419       if (tem)
3420 	return tem;
3421 
3422       tem = simplify_associative_operation (code, mode, op0, op1);
3423       if (tem)
3424 	return tem;
3425 
3426       tem = simplify_logical_relational_operation (code, mode, op0, op1);
3427       if (tem)
3428 	return tem;
3429       break;
3430 
3431     case XOR:
3432       if (trueop1 == CONST0_RTX (mode))
3433 	return op0;
3434       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3435 	return simplify_gen_unary (NOT, mode, op0, mode);
3436       if (rtx_equal_p (trueop0, trueop1)
3437 	  && ! side_effects_p (op0)
3438 	  && GET_MODE_CLASS (mode) != MODE_CC)
3439 	 return CONST0_RTX (mode);
3440 
3441       /* Canonicalize XOR of the most significant bit to PLUS.  */
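      /* For instance, in QImode (xor x (const_int -128)) becomes
         (plus x (const_int -128)): adding the sign bit flips it, and
         any carry falls outside the 8-bit mode.  */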
3442       if (CONST_SCALAR_INT_P (op1)
3443 	  && mode_signbit_p (mode, op1))
3444 	return simplify_gen_binary (PLUS, mode, op0, op1);
3445       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
3446       if (CONST_SCALAR_INT_P (op1)
3447 	  && GET_CODE (op0) == PLUS
3448 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
3449 	  && mode_signbit_p (mode, XEXP (op0, 1)))
3450 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3451 				    simplify_gen_binary (XOR, mode, op1,
3452 							 XEXP (op0, 1)));
3453 
3454       /* If we are XORing two things that have no bits in common,
3455 	 convert them into an IOR.  This helps to detect rotation encoded
3456 	 using those methods and possibly other simplifications.  */
3457 
3458       if (HWI_COMPUTABLE_MODE_P (mode)
3459 	  && (nonzero_bits (op0, mode)
3460 	      & nonzero_bits (op1, mode)) == 0)
3461 	return (simplify_gen_binary (IOR, mode, op0, op1));
3462 
3463       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3464 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3465 	 (NOT y).  */
3466       {
3467 	int num_negated = 0;
3468 
3469 	if (GET_CODE (op0) == NOT)
3470 	  num_negated++, op0 = XEXP (op0, 0);
3471 	if (GET_CODE (op1) == NOT)
3472 	  num_negated++, op1 = XEXP (op1, 0);
3473 
3474 	if (num_negated == 2)
3475 	  return simplify_gen_binary (XOR, mode, op0, op1);
3476 	else if (num_negated == 1)
3477 	  return simplify_gen_unary (NOT, mode,
3478 				     simplify_gen_binary (XOR, mode, op0, op1),
3479 				     mode);
3480       }
3481 
3482       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
3483 	 correspond to a machine insn or result in further simplifications
3484 	 if B is a constant.  */
3485 
3486       if (GET_CODE (op0) == AND
3487 	  && rtx_equal_p (XEXP (op0, 1), op1)
3488 	  && ! side_effects_p (op1))
3489 	return simplify_gen_binary (AND, mode,
3490 				    simplify_gen_unary (NOT, mode,
3491 							XEXP (op0, 0), mode),
3492 				    op1);
3493 
3494       else if (GET_CODE (op0) == AND
3495 	       && rtx_equal_p (XEXP (op0, 0), op1)
3496 	       && ! side_effects_p (op1))
3497 	return simplify_gen_binary (AND, mode,
3498 				    simplify_gen_unary (NOT, mode,
3499 							XEXP (op0, 1), mode),
3500 				    op1);
3501 
3502       /* Given (xor (ior (xor A B) C) D), where B, C and D are
3503 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3504 	 out bits inverted twice and not set by C.  Similarly, given
3505 	 (xor (and (xor A B) C) D), simplify without inverting C in
3506 	 the xor operand: (xor (and A C) (B&C)^D).
3507       */
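      /* As a worked example with small constants: B == 0x06, C == 0x03
         and D == 0x05 give B&~C == 0x04 and (B&~C)^D == 0x01, so
         (xor (ior (xor A 0x06) 0x03) 0x05) simplifies to
         (xor (ior A 0x03) 0x01).  */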
3508       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3509 	       && GET_CODE (XEXP (op0, 0)) == XOR
3510 	       && CONST_INT_P (op1)
3511 	       && CONST_INT_P (XEXP (op0, 1))
3512 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3513 	{
3514 	  enum rtx_code op = GET_CODE (op0);
3515 	  rtx a = XEXP (XEXP (op0, 0), 0);
3516 	  rtx b = XEXP (XEXP (op0, 0), 1);
3517 	  rtx c = XEXP (op0, 1);
3518 	  rtx d = op1;
3519 	  HOST_WIDE_INT bval = INTVAL (b);
3520 	  HOST_WIDE_INT cval = INTVAL (c);
3521 	  HOST_WIDE_INT dval = INTVAL (d);
3522 	  HOST_WIDE_INT xcval;
3523 
3524 	  if (op == IOR)
3525 	    xcval = ~cval;
3526 	  else
3527 	    xcval = cval;
3528 
3529 	  return simplify_gen_binary (XOR, mode,
3530 				      simplify_gen_binary (op, mode, a, c),
3531 				      gen_int_mode ((bval & xcval) ^ dval,
3532 						    mode));
3533 	}
3534 
3535       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3536 	 we can transform like this:
3537             (A&B)^C == ~(A&B)&C | ~C&(A&B)
3538                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
3539                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
3540 	 Attempt a few simplifications when B and C are both constants.  */
3541       if (GET_CODE (op0) == AND
3542 	  && CONST_INT_P (op1)
3543 	  && CONST_INT_P (XEXP (op0, 1)))
3544 	{
3545 	  rtx a = XEXP (op0, 0);
3546 	  rtx b = XEXP (op0, 1);
3547 	  rtx c = op1;
3548 	  HOST_WIDE_INT bval = INTVAL (b);
3549 	  HOST_WIDE_INT cval = INTVAL (c);
3550 
3551           /* Instead of computing ~A&C, we compute its negation, A|~C
3552              (by De Morgan, ~A&C == ~(A|~C)).  If that yields -1, ~A&C
3553              is zero, so we can optimize for sure.  If it does not
3554              simplify, we still try to compute ~A&C below, but since
3555              that always allocates RTL, we don't try that before
3556              committing to returning a simplified expression.  */
3557 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3558 						  GEN_INT (~cval));
3559 
3560 	  if ((~cval & bval) == 0)
3561 	    {
3562 	      rtx na_c = NULL_RTX;
3563 	      if (n_na_c)
3564 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3565 	      else
3566 		{
3567 		  /* If ~A does not simplify, don't bother: we don't
3568 		     want to simplify 2 operations into 3, and if na_c
3569 		     were to simplify with na, n_na_c would have
3570 		     simplified as well.  */
3571 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
3572 		  if (na)
3573 		    na_c = simplify_gen_binary (AND, mode, na, c);
3574 		}
3575 
3576 	      /* Try to simplify ~A&C | ~B&C.  */
3577 	      if (na_c != NULL_RTX)
3578 		return simplify_gen_binary (IOR, mode, na_c,
3579 					    gen_int_mode (~bval & cval, mode));
3580 	    }
3581 	  else
3582 	    {
3583 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
3584 	      if (n_na_c == CONSTM1_RTX (mode))
3585 		{
3586 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3587 						    gen_int_mode (~cval & bval,
3588 								  mode));
3589 		  return simplify_gen_binary (IOR, mode, a_nc_b,
3590 					      gen_int_mode (~bval & cval,
3591 							    mode));
3592 		}
3593 	    }
3594 	}
3595 
3596       /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3597 	 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3598 	 machines, and also has shorter instruction path length.  */
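      /* With C == 0x0f this is a bit-field merge: the result takes the
         low nibble from B and the remaining bits from A, i.e.
         (ior (and A 0xf0) (and B 0x0f)).  */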
3599       if (GET_CODE (op0) == AND
3600 	  && GET_CODE (XEXP (op0, 0)) == XOR
3601 	  && CONST_INT_P (XEXP (op0, 1))
3602 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3603 	{
3604 	  rtx a = trueop1;
3605 	  rtx b = XEXP (XEXP (op0, 0), 1);
3606 	  rtx c = XEXP (op0, 1);
3607 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3608 	  rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3609 	  rtx bc = simplify_gen_binary (AND, mode, b, c);
3610 	  return simplify_gen_binary (IOR, mode, a_nc, bc);
3611 	}
3612       /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C))  */
3613       else if (GET_CODE (op0) == AND
3614 	  && GET_CODE (XEXP (op0, 0)) == XOR
3615 	  && CONST_INT_P (XEXP (op0, 1))
3616 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3617 	{
3618 	  rtx a = XEXP (XEXP (op0, 0), 0);
3619 	  rtx b = trueop1;
3620 	  rtx c = XEXP (op0, 1);
3621 	  rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3622 	  rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3623 	  rtx ac = simplify_gen_binary (AND, mode, a, c);
3624 	  return simplify_gen_binary (IOR, mode, ac, b_nc);
3625 	}
3626 
3627       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3628 	 comparison if STORE_FLAG_VALUE is 1.  */
3629       if (STORE_FLAG_VALUE == 1
3630 	  && trueop1 == const1_rtx
3631 	  && COMPARISON_P (op0)
3632 	  && (reversed = reversed_comparison (op0, mode)))
3633 	return reversed;
3634 
3635       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3636 	 is (lt foo (const_int 0)), so we can perform the above
3637 	 simplification if STORE_FLAG_VALUE is 1.  */
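      /* For example, in SImode (xor (lshiftrt x (const_int 31))
         (const_int 1)) becomes (ge x (const_int 0)).  */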
3638 
3639       if (is_a <scalar_int_mode> (mode, &int_mode)
3640 	  && STORE_FLAG_VALUE == 1
3641 	  && trueop1 == const1_rtx
3642 	  && GET_CODE (op0) == LSHIFTRT
3643 	  && CONST_INT_P (XEXP (op0, 1))
3644 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3645 	return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3646 
3647       /* (xor (comparison foo bar) (const_int sign-bit))
3648 	 when STORE_FLAG_VALUE is the sign bit.  */
3649       if (is_a <scalar_int_mode> (mode, &int_mode)
3650 	  && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3651 	  && trueop1 == const_true_rtx
3652 	  && COMPARISON_P (op0)
3653 	  && (reversed = reversed_comparison (op0, int_mode)))
3654 	return reversed;
3655 
3656       /* Convert (xor (and A C) (and B C)) into (and (xor A B) C).  */
3657       if (GET_CODE (op0) == GET_CODE (op1)
3658 	  && (GET_CODE (op0) == AND
3659 	      || GET_CODE (op0) == LSHIFTRT
3660 	      || GET_CODE (op0) == ASHIFTRT
3661 	      || GET_CODE (op0) == ASHIFT
3662 	      || GET_CODE (op0) == ROTATE
3663 	      || GET_CODE (op0) == ROTATERT))
3664 	{
3665 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3666 	  if (tem)
3667 	    return tem;
3668 	}
3669 
3670       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3671       if (tem)
3672 	return tem;
3673 
3674       tem = simplify_associative_operation (code, mode, op0, op1);
3675       if (tem)
3676 	return tem;
3677       break;
3678 
3679     case AND:
3680       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3681 	return trueop1;
3682       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3683 	return op0;
3684       if (HWI_COMPUTABLE_MODE_P (mode))
3685 	{
3686 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3687 	  HOST_WIDE_INT nzop1;
3688 	  if (CONST_INT_P (trueop1))
3689 	    {
3690 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
3691 	      /* If we are turning off bits already known off in OP0, we need
3692 		 not do an AND.  */
3693 	      if ((nzop0 & ~val1) == 0)
3694 		return op0;
3695 	    }
3696 	  nzop1 = nonzero_bits (trueop1, mode);
3697 	  /* If we are clearing all the nonzero bits, the result is zero.  */
3698 	  if ((nzop1 & nzop0) == 0
3699 	      && !side_effects_p (op0) && !side_effects_p (op1))
3700 	    return CONST0_RTX (mode);
3701 	}
3702       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3703 	  && GET_MODE_CLASS (mode) != MODE_CC)
3704 	return op0;
3705       /* A & (~A) -> 0 */
3706       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3707 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3708 	  && ! side_effects_p (op0)
3709 	  && GET_MODE_CLASS (mode) != MODE_CC)
3710 	return CONST0_RTX (mode);
3711 
3712       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3713 	 there are no nonzero bits of C outside of X's mode.  */
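      /* For example, (and (sign_extend:SI (reg:QI x)) (const_int 0x7f))
         becomes (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))),
         since 0x7f has no bits outside QImode.  */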
3714       if ((GET_CODE (op0) == SIGN_EXTEND
3715 	   || GET_CODE (op0) == ZERO_EXTEND)
3716 	  && CONST_INT_P (trueop1)
3717 	  && HWI_COMPUTABLE_MODE_P (mode)
3718 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3719 	      & UINTVAL (trueop1)) == 0)
3720 	{
3721 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3722 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3723 				     gen_int_mode (INTVAL (trueop1),
3724 						   imode));
3725 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3726 	}
3727 
3728       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
3729 	 we might be able to further simplify the AND with X and potentially
3730 	 remove the truncation altogether.  */
3731       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3732 	{
3733 	  rtx x = XEXP (op0, 0);
3734 	  machine_mode xmode = GET_MODE (x);
3735 	  tem = simplify_gen_binary (AND, xmode, x,
3736 				     gen_int_mode (INTVAL (trueop1), xmode));
3737 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3738 	}
3739 
3740       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3741       if (GET_CODE (op0) == IOR
3742 	  && CONST_INT_P (trueop1)
3743 	  && CONST_INT_P (XEXP (op0, 1)))
3744 	{
3745 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3746 	  return simplify_gen_binary (IOR, mode,
3747 				      simplify_gen_binary (AND, mode,
3748 							   XEXP (op0, 0), op1),
3749 				      gen_int_mode (tmp, mode));
3750 	}
3751 
3752       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3753 	 insn (and may simplify more).  */
3754       if (GET_CODE (op0) == XOR
3755 	  && rtx_equal_p (XEXP (op0, 0), op1)
3756 	  && ! side_effects_p (op1))
3757 	return simplify_gen_binary (AND, mode,
3758 				    simplify_gen_unary (NOT, mode,
3759 							XEXP (op0, 1), mode),
3760 				    op1);
3761 
3762       if (GET_CODE (op0) == XOR
3763 	  && rtx_equal_p (XEXP (op0, 1), op1)
3764 	  && ! side_effects_p (op1))
3765 	return simplify_gen_binary (AND, mode,
3766 				    simplify_gen_unary (NOT, mode,
3767 							XEXP (op0, 0), mode),
3768 				    op1);
3769 
3770       /* Similarly for (~(A ^ B)) & A.  */
3771       if (GET_CODE (op0) == NOT
3772 	  && GET_CODE (XEXP (op0, 0)) == XOR
3773 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3774 	  && ! side_effects_p (op1))
3775 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3776 
3777       if (GET_CODE (op0) == NOT
3778 	  && GET_CODE (XEXP (op0, 0)) == XOR
3779 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3780 	  && ! side_effects_p (op1))
3781 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3782 
3783       /* Convert (A | B) & A to A.  */
3784       if (GET_CODE (op0) == IOR
3785 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3786 	      || rtx_equal_p (XEXP (op0, 1), op1))
3787 	  && ! side_effects_p (XEXP (op0, 0))
3788 	  && ! side_effects_p (XEXP (op0, 1)))
3789 	return op1;
3790 
3791       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3792 	 ((A & N) + B) & M -> (A + B) & M
3793 	 Similarly if (N & M) == 0,
3794 	 ((A | N) + B) & M -> (A + B) & M
3795 	 and for - instead of + and/or ^ instead of |.
3796          Also, if (N & M) == 0, then
3797 	 (A +- N) & M -> A & M.  */
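      /* E.g. with M == 0x0f: ((A & 0xff) + B) & 0x0f -> (A + B) & 0x0f
         because 0xff keeps every bit below the mask boundary, while
         ((A | 0xf0) + B) & 0x0f -> (A + B) & 0x0f and
         (A + 0xf0) & 0x0f -> A & 0x0f because 0xf0 cannot change the
         low four bits of the sum.  */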
3798       if (CONST_INT_P (trueop1)
3799 	  && HWI_COMPUTABLE_MODE_P (mode)
3800 	  && ~UINTVAL (trueop1)
3801 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3802 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3803 	{
3804 	  rtx pmop[2];
3805 	  int which;
3806 
3807 	  pmop[0] = XEXP (op0, 0);
3808 	  pmop[1] = XEXP (op0, 1);
3809 
3810 	  if (CONST_INT_P (pmop[1])
3811 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3812 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3813 
3814 	  for (which = 0; which < 2; which++)
3815 	    {
3816 	      tem = pmop[which];
3817 	      switch (GET_CODE (tem))
3818 		{
3819 		case AND:
3820 		  if (CONST_INT_P (XEXP (tem, 1))
3821 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3822 		      == UINTVAL (trueop1))
3823 		    pmop[which] = XEXP (tem, 0);
3824 		  break;
3825 		case IOR:
3826 		case XOR:
3827 		  if (CONST_INT_P (XEXP (tem, 1))
3828 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3829 		    pmop[which] = XEXP (tem, 0);
3830 		  break;
3831 		default:
3832 		  break;
3833 		}
3834 	    }
3835 
3836 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3837 	    {
3838 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3839 					 pmop[0], pmop[1]);
3840 	      return simplify_gen_binary (code, mode, tem, op1);
3841 	    }
3842 	}
3843 
3844       /* (and X (ior (not X) Y)) -> (and X Y) */
3845       if (GET_CODE (op1) == IOR
3846 	  && GET_CODE (XEXP (op1, 0)) == NOT
3847 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3848        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3849 
3850       /* (and (ior (not X) Y) X) -> (and X Y) */
3851       if (GET_CODE (op0) == IOR
3852 	  && GET_CODE (XEXP (op0, 0)) == NOT
3853 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3854 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3855 
3856       /* (and X (ior Y (not X))) -> (and X Y) */
3857       if (GET_CODE (op1) == IOR
3858 	  && GET_CODE (XEXP (op1, 1)) == NOT
3859 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3860        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3861 
3862       /* (and (ior Y (not X)) X) -> (and X Y) */
3863       if (GET_CODE (op0) == IOR
3864 	  && GET_CODE (XEXP (op0, 1)) == NOT
3865 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3866 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3867 
3868       /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C).  */
3869       if (GET_CODE (op0) == GET_CODE (op1)
3870 	  && (GET_CODE (op0) == AND
3871 	      || GET_CODE (op0) == IOR
3872 	      || GET_CODE (op0) == LSHIFTRT
3873 	      || GET_CODE (op0) == ASHIFTRT
3874 	      || GET_CODE (op0) == ASHIFT
3875 	      || GET_CODE (op0) == ROTATE
3876 	      || GET_CODE (op0) == ROTATERT))
3877 	{
3878 	  tem = simplify_distributive_operation (code, mode, op0, op1);
3879 	  if (tem)
3880 	    return tem;
3881 	}
3882 
3883       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3884       if (tem)
3885 	return tem;
3886 
3887       tem = simplify_associative_operation (code, mode, op0, op1);
3888       if (tem)
3889 	return tem;
3890       break;
3891 
3892     case UDIV:
3893       /* 0/x is 0 (or x&0 if x has side-effects).  */
3894       if (trueop0 == CONST0_RTX (mode)
3895 	  && !cfun->can_throw_non_call_exceptions)
3896 	{
3897 	  if (side_effects_p (op1))
3898 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3899 	  return trueop0;
3900 	}
3901       /* x/1 is x.  */
3902       if (trueop1 == CONST1_RTX (mode))
3903 	{
3904 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3905 	  if (tem)
3906 	    return tem;
3907 	}
3908       /* Convert divide by power of two into shift.  */
3909       if (CONST_INT_P (trueop1)
3910 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3911 	return simplify_gen_binary (LSHIFTRT, mode, op0,
3912 				    gen_int_shift_amount (mode, val));
3913       break;
3914 
3915     case DIV:
3916       /* Handle floating point and integers separately.  */
3917       if (SCALAR_FLOAT_MODE_P (mode))
3918 	{
3919 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3920 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3921 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3922              zeros, since dividing 0 by a negative number gives -0.0.  */
3923 	  if (trueop0 == CONST0_RTX (mode)
3924 	      && !HONOR_NANS (mode)
3925 	      && !HONOR_SIGNED_ZEROS (mode)
3926 	      && ! side_effects_p (op1))
3927 	    return op0;
3928 	  /* x/1.0 is x.  */
3929 	  if (trueop1 == CONST1_RTX (mode)
3930 	      && !HONOR_SNANS (mode))
3931 	    return op0;
3932 
3933 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3934 	      && trueop1 != CONST0_RTX (mode))
3935 	    {
3936 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3937 
3938 	      /* x/-1.0 is -x.  */
3939 	      if (real_equal (d1, &dconstm1)
3940 		  && !HONOR_SNANS (mode))
3941 		return simplify_gen_unary (NEG, mode, op0, mode);
3942 
3943 	      /* Change FP division by a constant into multiplication.
3944 		 Only do this with -freciprocal-math.  */
3945 	      if (flag_reciprocal_math
3946 		  && !real_equal (d1, &dconst0))
3947 		{
3948 		  REAL_VALUE_TYPE d;
3949 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3950 		  tem = const_double_from_real_value (d, mode);
3951 		  return simplify_gen_binary (MULT, mode, op0, tem);
3952 		}
3953 	    }
3954 	}
3955       else if (SCALAR_INT_MODE_P (mode))
3956 	{
3957 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3958 	  if (trueop0 == CONST0_RTX (mode)
3959 	      && !cfun->can_throw_non_call_exceptions)
3960 	    {
3961 	      if (side_effects_p (op1))
3962 		return simplify_gen_binary (AND, mode, op1, trueop0);
3963 	      return trueop0;
3964 	    }
3965 	  /* x/1 is x.  */
3966 	  if (trueop1 == CONST1_RTX (mode))
3967 	    {
3968 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3969 	      if (tem)
3970 		return tem;
3971 	    }
3972 	  /* x/-1 is -x.  */
3973 	  if (trueop1 == constm1_rtx)
3974 	    {
3975 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3976 	      if (x)
3977 		return simplify_gen_unary (NEG, mode, x, mode);
3978 	    }
3979 	}
3980       break;
3981 
3982     case UMOD:
3983       /* 0%x is 0 (or x&0 if x has side-effects).  */
3984       if (trueop0 == CONST0_RTX (mode))
3985 	{
3986 	  if (side_effects_p (op1))
3987 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3988 	  return trueop0;
3989 	}
3990       /* x%1 is 0 (or x&0 if x has side-effects).  */
3991       if (trueop1 == CONST1_RTX (mode))
3992 	{
3993 	  if (side_effects_p (op0))
3994 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3995 	  return CONST0_RTX (mode);
3996 	}
3997       /* Implement modulus by power of two as AND.  */
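      /* For example, (umod x (const_int 16)) becomes
         (and x (const_int 15)).  */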
3998       if (CONST_INT_P (trueop1)
3999 	  && exact_log2 (UINTVAL (trueop1)) > 0)
4000 	return simplify_gen_binary (AND, mode, op0,
4001 				    gen_int_mode (UINTVAL (trueop1) - 1,
4002 						  mode));
4003       break;
4004 
4005     case MOD:
4006       /* 0%x is 0 (or x&0 if x has side-effects).  */
4007       if (trueop0 == CONST0_RTX (mode))
4008 	{
4009 	  if (side_effects_p (op1))
4010 	    return simplify_gen_binary (AND, mode, op1, trueop0);
4011 	  return trueop0;
4012 	}
4013       /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
4014       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
4015 	{
4016 	  if (side_effects_p (op0))
4017 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4018 	  return CONST0_RTX (mode);
4019 	}
4020       break;
4021 
4022     case ROTATERT:
4023     case ROTATE:
4024       if (trueop1 == CONST0_RTX (mode))
4025 	return op0;
4026       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
4027 	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
4028 	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
4029 	 amount instead.  */
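      /* For example, in SImode (rotatert x (const_int 25)) becomes
         (rotate x (const_int 7)), and (rotate x (const_int 24)) becomes
         (rotatert x (const_int 8)).  */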
4030 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
4031       if (CONST_INT_P (trueop1)
4032 	  && IN_RANGE (INTVAL (trueop1),
4033 		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
4034 		       GET_MODE_UNIT_PRECISION (mode) - 1))
4035 	{
4036 	  int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
4037 	  rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
4038 	  return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
4039 				      mode, op0, new_amount_rtx);
4040 	}
4041 #endif
4042       /* FALLTHRU */
4043     case ASHIFTRT:
4044       if (trueop1 == CONST0_RTX (mode))
4045 	return op0;
4046       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4047 	return op0;
4048       /* Rotating ~0 always results in ~0.  */
4049       if (CONST_INT_P (trueop0)
4050 	  && HWI_COMPUTABLE_MODE_P (mode)
4051 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4052 	  && ! side_effects_p (op1))
4053 	return op0;
4054 
4055     canonicalize_shift:
4056       /* Given:
4057 	 scalar modes M1, M2
4058 	 scalar constants c1, c2
4059 	 size (M2) > size (M1)
4060 	 c1 == size (M2) - size (M1)
4061 	 optimize:
4062 	 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4063 				 <low_part>)
4064 		      (const_int <c2>))
4065 	 to:
4066 	 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4067 		    <low_part>).  */
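      /* For instance, taking M1 == SImode and M2 == DImode as
         illustrative choices, (lshiftrt:SI (subreg:SI (lshiftrt:DI
         (reg:DI r) (const_int 32)) <low_part>) (const_int 5)) becomes
         (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 37)) <low_part>).  */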
4068       if ((code == ASHIFTRT || code == LSHIFTRT)
4069 	  && is_a <scalar_int_mode> (mode, &int_mode)
4070 	  && SUBREG_P (op0)
4071 	  && CONST_INT_P (op1)
4072 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
4073 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
4074 				     &inner_mode)
4075 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
4076 	  && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
4077 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
4078 	      == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
4079 	  && subreg_lowpart_p (op0))
4080 	{
4081 	  rtx tmp = gen_int_shift_amount
4082 	    (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
4083 
4084          /* Combine would usually zero out the value when it combines two
4085             local shifts and the resulting shift amount is larger than or
4086             equal to the mode's precision.  However, since we fold away one
4087             of the shifts here, combine won't see it, so we should
4088             immediately zero the result if it is out of range.  */
4089 	 if (code == LSHIFTRT
4090 	     && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4091 	  tmp = const0_rtx;
4092 	 else
4093 	   tmp = simplify_gen_binary (code,
4094 				      inner_mode,
4095 				      XEXP (SUBREG_REG (op0), 0),
4096 				      tmp);
4097 
4098 	  return lowpart_subreg (int_mode, tmp, inner_mode);
4099 	}
4100 
4101       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4102 	{
4103 	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4104 	  if (val != INTVAL (op1))
4105 	    return simplify_gen_binary (code, mode, op0,
4106 					gen_int_shift_amount (mode, val));
4107 	}
4108       break;
4109 
4110     case SS_ASHIFT:
4111       if (CONST_INT_P (trueop0)
4112 	  && HWI_COMPUTABLE_MODE_P (mode)
4113 	  && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
4114 	      || mode_signbit_p (mode, trueop0))
4115 	  && ! side_effects_p (op1))
4116 	return op0;
4117       goto simplify_ashift;
4118 
4119     case US_ASHIFT:
4120       if (CONST_INT_P (trueop0)
4121 	  && HWI_COMPUTABLE_MODE_P (mode)
4122 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4123 	  && ! side_effects_p (op1))
4124 	return op0;
4125       /* FALLTHRU */
4126 
4127     case ASHIFT:
4128 simplify_ashift:
4129       if (trueop1 == CONST0_RTX (mode))
4130 	return op0;
4131       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4132 	return op0;
4133       if (mem_depth
4134 	  && code == ASHIFT
4135 	  && CONST_INT_P (trueop1)
4136 	  && is_a <scalar_int_mode> (mode, &int_mode)
4137 	  && IN_RANGE (UINTVAL (trueop1),
4138 		       1, GET_MODE_PRECISION (int_mode) - 1))
4139 	{
4140 	  auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4141 		    << UINTVAL (trueop1));
4142 	  rtx new_op1 = immed_wide_int_const (c, int_mode);
4143 	  return simplify_gen_binary (MULT, int_mode, op0, new_op1);
4144 	}
4145       goto canonicalize_shift;
4146 
4147     case LSHIFTRT:
4148       if (trueop1 == CONST0_RTX (mode))
4149 	return op0;
4150       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4151 	return op0;
4152       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
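      /* On a target where CLZ of zero is defined to be the mode
         precision, e.g. 32 for SImode, (lshiftrt (clz:SI x)
         (const_int 5)) becomes (eq x (const_int 0)): only the CLZ
         value 32 has bit 5 set.  */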
4153       if (GET_CODE (op0) == CLZ
4154 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4155 	  && CONST_INT_P (trueop1)
4156 	  && STORE_FLAG_VALUE == 1
4157 	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4158 	{
4159 	  unsigned HOST_WIDE_INT zero_val = 0;
4160 
4161 	  if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4162 	      && zero_val == GET_MODE_PRECISION (inner_mode)
4163 	      && INTVAL (trueop1) == exact_log2 (zero_val))
4164 	    return simplify_gen_relational (EQ, mode, inner_mode,
4165 					    XEXP (op0, 0), const0_rtx);
4166 	}
4167       goto canonicalize_shift;
4168 
4169     case SMIN:
4170       if (HWI_COMPUTABLE_MODE_P (mode)
4171 	  && mode_signbit_p (mode, trueop1)
4172 	  && ! side_effects_p (op0))
4173 	return op1;
4174       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4175 	return op0;
4176       tem = simplify_associative_operation (code, mode, op0, op1);
4177       if (tem)
4178 	return tem;
4179       break;
4180 
4181     case SMAX:
4182       if (HWI_COMPUTABLE_MODE_P (mode)
4183 	  && CONST_INT_P (trueop1)
4184 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4185 	  && ! side_effects_p (op0))
4186 	return op1;
4187       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4188 	return op0;
4189       tem = simplify_associative_operation (code, mode, op0, op1);
4190       if (tem)
4191 	return tem;
4192       break;
4193 
4194     case UMIN:
4195       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4196 	return op1;
4197       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4198 	return op0;
4199       tem = simplify_associative_operation (code, mode, op0, op1);
4200       if (tem)
4201 	return tem;
4202       break;
4203 
4204     case UMAX:
4205       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4206 	return op1;
4207       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4208 	return op0;
4209       tem = simplify_associative_operation (code, mode, op0, op1);
4210       if (tem)
4211 	return tem;
4212       break;
4213 
4214     case SS_PLUS:
4215     case US_PLUS:
4216     case SS_MINUS:
4217     case US_MINUS:
4218       /* Simplify x +/- 0 to x, if possible.  */
4219       if (trueop1 == CONST0_RTX (mode))
4220 	return op0;
4221       return 0;
4222 
4223     case SS_MULT:
4224     case US_MULT:
4225       /* Simplify x * 0 to 0, if possible.  */
4226       if (trueop1 == CONST0_RTX (mode)
4227 	  && !side_effects_p (op0))
4228 	return op1;
4229 
4230       /* Simplify x * 1 to x, if possible.  */
4231       if (trueop1 == CONST1_RTX (mode))
4232 	return op0;
4233       return 0;
4234 
4235     case SMUL_HIGHPART:
4236     case UMUL_HIGHPART:
4237       /* Simplify x * 0 to 0, if possible.  */
4238       if (trueop1 == CONST0_RTX (mode)
4239 	  && !side_effects_p (op0))
4240 	return op1;
4241       return 0;
4242 
4243     case SS_DIV:
4244     case US_DIV:
4245       /* Simplify x / 1 to x, if possible.  */
4246       if (trueop1 == CONST1_RTX (mode))
4247 	return op0;
4248       return 0;
4249 
4250     case VEC_SERIES:
4251       if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4252 	return gen_vec_duplicate (mode, op0);
4253       if (valid_for_const_vector_p (mode, op0)
4254 	  && valid_for_const_vector_p (mode, op1))
4255 	return gen_const_vec_series (mode, op0, op1);
4256       return 0;
4257 
4258     case VEC_SELECT:
4259       if (!VECTOR_MODE_P (mode))
4260 	{
4261 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4262 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
4263 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
4264 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
4265 
4266 	  /* We can't reason about selections made at runtime.  */
4267 	  if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4268 	    return 0;
4269 
4270 	  if (vec_duplicate_p (trueop0, &elt0))
4271 	    return elt0;
4272 
4273 	  if (GET_CODE (trueop0) == CONST_VECTOR)
4274 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
4275 						      (trueop1, 0, 0)));
4276 
4277           /* Extract a scalar element from a nested VEC_SELECT expression
4278              (with an optional nested VEC_CONCAT expression).  Some targets
4279              (i386) extract a scalar element from a vector using a chain of
4280              nested VEC_SELECT expressions.  When the input operand is a
4281              memory operand, this operation can be simplified to a simple
4282              scalar load from an offsetted memory address.  */
4283 	  int n_elts;
4284 	  if (GET_CODE (trueop0) == VEC_SELECT
4285 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4286 		  .is_constant (&n_elts)))
4287 	    {
4288 	      rtx op0 = XEXP (trueop0, 0);
4289 	      rtx op1 = XEXP (trueop0, 1);
4290 
4291 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
4292 	      int elem;
4293 
4294 	      rtvec vec;
4295 	      rtx tmp_op, tmp;
4296 
4297 	      gcc_assert (GET_CODE (op1) == PARALLEL);
4298 	      gcc_assert (i < n_elts);
4299 
4300               /* Select the element pointed to by the nested selector.  */
4301 	      elem = INTVAL (XVECEXP (op1, 0, i));
4302 
4303 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
4304 	      if (GET_CODE (op0) == VEC_CONCAT)
4305 		{
4306 		  rtx op00 = XEXP (op0, 0);
4307 		  rtx op01 = XEXP (op0, 1);
4308 
4309 		  machine_mode mode00, mode01;
4310 		  int n_elts00, n_elts01;
4311 
4312 		  mode00 = GET_MODE (op00);
4313 		  mode01 = GET_MODE (op01);
4314 
4315 		  /* Find out the number of elements of each operand.
4316 		     Since the concatenated result has a constant number
4317 		     of elements, the operands must too.  */
4318 		  n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4319 		  n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4320 
4321 		  gcc_assert (n_elts == n_elts00 + n_elts01);
4322 
4323 		  /* Select correct operand of VEC_CONCAT
4324 		     and adjust selector. */
4325 		  if (elem < n_elts01)
4326 		    tmp_op = op00;
4327 		  else
4328 		    {
4329 		      tmp_op = op01;
4330 		      elem -= n_elts00;
4331 		    }
4332 		}
4333 	      else
4334 		tmp_op = op0;
4335 
4336 	      vec = rtvec_alloc (1);
4337 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
4338 
4339 	      tmp = gen_rtx_fmt_ee (code, mode,
4340 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4341 	      return tmp;
4342 	    }
4343 	}
4344       else
4345 	{
4346 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4347 	  gcc_assert (GET_MODE_INNER (mode)
4348 		      == GET_MODE_INNER (GET_MODE (trueop0)));
4349 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
4350 
4351 	  if (vec_duplicate_p (trueop0, &elt0))
4352 	    /* It doesn't matter which elements are selected by trueop1,
4353 	       because they are all the same.  */
4354 	    return gen_vec_duplicate (mode, elt0);
4355 
4356 	  if (GET_CODE (trueop0) == CONST_VECTOR)
4357 	    {
4358 	      unsigned n_elts = XVECLEN (trueop1, 0);
4359 	      rtvec v = rtvec_alloc (n_elts);
4360 	      unsigned int i;
4361 
4362 	      gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4363 	      for (i = 0; i < n_elts; i++)
4364 		{
4365 		  rtx x = XVECEXP (trueop1, 0, i);
4366 
4367 		  if (!CONST_INT_P (x))
4368 		    return 0;
4369 
4370 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4371 						       INTVAL (x));
4372 		}
4373 
4374 	      return gen_rtx_CONST_VECTOR (mode, v);
4375 	    }
4376 
4377 	  /* Recognize the identity.  */
4378 	  if (GET_MODE (trueop0) == mode)
4379 	    {
4380 	      bool maybe_ident = true;
4381 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4382 		{
4383 		  rtx j = XVECEXP (trueop1, 0, i);
4384 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
4385 		    {
4386 		      maybe_ident = false;
4387 		      break;
4388 		    }
4389 		}
4390 	      if (maybe_ident)
4391 		return trueop0;
4392 	    }
4393 
4394 	  /* If we select a low-part subreg, return that.  */
4395 	  if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
4396 	    {
4397 	      rtx new_rtx = lowpart_subreg (mode, trueop0,
4398 					    GET_MODE (trueop0));
4399 	      if (new_rtx != NULL_RTX)
4400 		return new_rtx;
4401 	    }
4402 
4403 	  /* If we build {a,b} then permute it, build the result directly.  */
4404 	  if (XVECLEN (trueop1, 0) == 2
4405 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4406 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4407 	      && GET_CODE (trueop0) == VEC_CONCAT
4408 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4409 	      && GET_MODE (XEXP (trueop0, 0)) == mode
4410 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4411 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
4412 	    {
4413 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4414 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4415 	      rtx subop0, subop1;
4416 
4417 	      gcc_assert (i0 < 4 && i1 < 4);
4418 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4419 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4420 
4421 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4422 	    }
4423 
4424 	  if (XVECLEN (trueop1, 0) == 2
4425 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4426 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4427 	      && GET_CODE (trueop0) == VEC_CONCAT
4428 	      && GET_MODE (trueop0) == mode)
4429 	    {
4430 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4431 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4432 	      rtx subop0, subop1;
4433 
4434 	      gcc_assert (i0 < 2 && i1 < 2);
4435 	      subop0 = XEXP (trueop0, i0);
4436 	      subop1 = XEXP (trueop0, i1);
4437 
4438 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4439 	    }
4440 
4441 	  /* If we select one half of a vec_concat, return that.  */
4442 	  int l0, l1;
4443 	  if (GET_CODE (trueop0) == VEC_CONCAT
4444 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4445 		  .is_constant (&l0))
4446 	      && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4447 		  .is_constant (&l1))
4448 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4449 	    {
4450 	      rtx subop0 = XEXP (trueop0, 0);
4451 	      rtx subop1 = XEXP (trueop0, 1);
4452 	      machine_mode mode0 = GET_MODE (subop0);
4453 	      machine_mode mode1 = GET_MODE (subop1);
4454 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4455 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4456 		{
4457 		  bool success = true;
4458 		  for (int i = 1; i < l0; ++i)
4459 		    {
4460 		      rtx j = XVECEXP (trueop1, 0, i);
4461 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
4462 			{
4463 			  success = false;
4464 			  break;
4465 			}
4466 		    }
4467 		  if (success)
4468 		    return subop0;
4469 		}
4470 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4471 		{
4472 		  bool success = true;
4473 		  for (int i = 1; i < l1; ++i)
4474 		    {
4475 		      rtx j = XVECEXP (trueop1, 0, i);
4476 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4477 			{
4478 			  success = false;
4479 			  break;
4480 			}
4481 		    }
4482 		  if (success)
4483 		    return subop1;
4484 		}
4485 	    }
4486 
4487           /* Simplify vec_select of a subreg of X to just a vec_select of X
4488              when X has the same component mode as the vec_select.  */
4489 	  unsigned HOST_WIDE_INT subreg_offset = 0;
4490 	  if (GET_CODE (trueop0) == SUBREG
4491 	      && GET_MODE_INNER (mode)
4492 		 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
4493 	      && GET_MODE_NUNITS (mode).is_constant (&l1)
4494 	      && constant_multiple_p (subreg_memory_offset (trueop0),
4495 				      GET_MODE_UNIT_BITSIZE (mode),
4496 				      &subreg_offset))
4497 	    {
4498 	      poly_uint64 nunits
4499 		= GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
4500 	      bool success = true;
4501 	      for (int i = 0; i != l1; i++)
4502 		{
4503 		  rtx idx = XVECEXP (trueop1, 0, i);
4504 		  if (!CONST_INT_P (idx)
4505 		      || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
4506 		    {
4507 		      success = false;
4508 		      break;
4509 		    }
4510 		}
4511 
4512 	      if (success)
4513 		{
4514 		  rtx par = trueop1;
4515 		  if (subreg_offset)
4516 		    {
4517 		      rtvec vec = rtvec_alloc (l1);
4518 		      for (int i = 0; i < l1; i++)
4519 			RTVEC_ELT (vec, i)
4520 			  = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
4521 				     + subreg_offset);
4522 		      par = gen_rtx_PARALLEL (VOIDmode, vec);
4523 		    }
4524 		  return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
4525 		}
4526 	    }
4527 	}
4528 
4529       if (XVECLEN (trueop1, 0) == 1
4530 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4531 	  && GET_CODE (trueop0) == VEC_CONCAT)
4532 	{
4533 	  rtx vec = trueop0;
4534 	  offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4535 
4536 	  /* Try to find the element in the VEC_CONCAT.  */
4537 	  while (GET_MODE (vec) != mode
4538 		 && GET_CODE (vec) == VEC_CONCAT)
4539 	    {
4540 	      poly_int64 vec_size;
4541 
4542 	      if (CONST_INT_P (XEXP (vec, 0)))
4543 	        {
4544 	          /* vec_concat of two const_ints doesn't make sense with
4545 	             respect to modes.  */
4546 	          if (CONST_INT_P (XEXP (vec, 1)))
4547 	            return 0;
4548 
4549 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4550 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4551 	        }
4552 	      else
4553 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4554 
4555 	      if (known_lt (offset, vec_size))
4556 		vec = XEXP (vec, 0);
4557 	      else if (known_ge (offset, vec_size))
4558 		{
4559 		  offset -= vec_size;
4560 		  vec = XEXP (vec, 1);
4561 		}
4562 	      else
4563 		break;
4564 	      vec = avoid_constant_pool_reference (vec);
4565 	    }
4566 
4567 	  if (GET_MODE (vec) == mode)
4568 	    return vec;
4569 	}
4570 
4571       /* If we select elements in a vec_merge that all come from the same
4572 	 operand, select from that operand directly.  */
4573       if (GET_CODE (op0) == VEC_MERGE)
4574 	{
4575 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4576 	  if (CONST_INT_P (trueop02))
4577 	    {
4578 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4579 	      bool all_operand0 = true;
4580 	      bool all_operand1 = true;
4581 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4582 		{
4583 		  rtx j = XVECEXP (trueop1, 0, i);
4584 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4585 		    all_operand1 = false;
4586 		  else
4587 		    all_operand0 = false;
4588 		}
4589 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4590 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4591 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4592 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4593 	    }
4594 	}
4595 
4596       /* If we have two nested selects that are inverses of each
4597 	 other, replace them with the source operand.  */
4598       if (GET_CODE (trueop0) == VEC_SELECT
4599 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
4600 	{
4601 	  rtx op0_subop1 = XEXP (trueop0, 1);
4602 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4603 	  gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4604 
4605 	  /* Apply the outer ordering vector to the inner one.  (The inner
4606 	     ordering vector is expressly permitted to be of a different
4607 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
4608 	     then the two VEC_SELECTs cancel.  */
4609 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4610 	    {
4611 	      rtx x = XVECEXP (trueop1, 0, i);
4612 	      if (!CONST_INT_P (x))
4613 		return 0;
4614 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4615 	      if (!CONST_INT_P (y) || i != INTVAL (y))
4616 		return 0;
4617 	    }
4618 	  return XEXP (trueop0, 0);
4619 	}
4620 
4621       return 0;
4622     case VEC_CONCAT:
4623       {
4624 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4625 				      ? GET_MODE (trueop0)
4626 				      : GET_MODE_INNER (mode));
4627 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4628 				      ? GET_MODE (trueop1)
4629 				      : GET_MODE_INNER (mode));
4630 
4631 	gcc_assert (VECTOR_MODE_P (mode));
4632 	gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4633 			      + GET_MODE_SIZE (op1_mode),
4634 			      GET_MODE_SIZE (mode)));
4635 
4636 	if (VECTOR_MODE_P (op0_mode))
4637 	  gcc_assert (GET_MODE_INNER (mode)
4638 		      == GET_MODE_INNER (op0_mode));
4639 	else
4640 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4641 
4642 	if (VECTOR_MODE_P (op1_mode))
4643 	  gcc_assert (GET_MODE_INNER (mode)
4644 		      == GET_MODE_INNER (op1_mode));
4645 	else
4646 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4647 
4648 	unsigned int n_elts, in_n_elts;
4649 	if ((GET_CODE (trueop0) == CONST_VECTOR
4650 	     || CONST_SCALAR_INT_P (trueop0)
4651 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4652 	    && (GET_CODE (trueop1) == CONST_VECTOR
4653 		|| CONST_SCALAR_INT_P (trueop1)
4654 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1))
4655 	    && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4656 	    && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4657 	  {
4658 	    rtvec v = rtvec_alloc (n_elts);
4659 	    unsigned int i;
4660 	    for (i = 0; i < n_elts; i++)
4661 	      {
4662 		if (i < in_n_elts)
4663 		  {
4664 		    if (!VECTOR_MODE_P (op0_mode))
4665 		      RTVEC_ELT (v, i) = trueop0;
4666 		    else
4667 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4668 		  }
4669 		else
4670 		  {
4671 		    if (!VECTOR_MODE_P (op1_mode))
4672 		      RTVEC_ELT (v, i) = trueop1;
4673 		    else
4674 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4675 							   i - in_n_elts);
4676 		  }
4677 	      }
4678 
4679 	    return gen_rtx_CONST_VECTOR (mode, v);
4680 	  }
4681 
4682 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
4683 	   Restrict the transformation to avoid generating a VEC_SELECT with a
4684 	   mode unrelated to its operand.  */
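	/* E.g. (vec_concat (vec_select X (parallel [0 1]))
		(vec_select X (parallel [2 3]))) becomes
	   (vec_select X (parallel [0 1 2 3])).  */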
4685 	if (GET_CODE (trueop0) == VEC_SELECT
4686 	    && GET_CODE (trueop1) == VEC_SELECT
4687 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4688 	    && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
4689 	       == GET_MODE_INNER(mode))
4690 	  {
4691 	    rtx par0 = XEXP (trueop0, 1);
4692 	    rtx par1 = XEXP (trueop1, 1);
4693 	    int len0 = XVECLEN (par0, 0);
4694 	    int len1 = XVECLEN (par1, 0);
4695 	    rtvec vec = rtvec_alloc (len0 + len1);
4696 	    for (int i = 0; i < len0; i++)
4697 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4698 	    for (int i = 0; i < len1; i++)
4699 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4700 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4701 					gen_rtx_PARALLEL (VOIDmode, vec));
4702 	  }
4703       }
4704       return 0;
4705 
4706     default:
4707       gcc_unreachable ();
4708     }
4709 
4710   if (mode == GET_MODE (op0)
4711       && mode == GET_MODE (op1)
4712       && vec_duplicate_p (op0, &elt0)
4713       && vec_duplicate_p (op1, &elt1))
4714     {
4715       /* Try applying the operator to ELT and see if that simplifies.
4716 	 We can duplicate the result if so.
4717 
4718 	 The reason we don't use simplify_gen_binary is that it isn't
4719 	 necessarily a win to convert things like:
4720 
4721 	   (plus:V (vec_duplicate:V (reg:S R1))
4722 		   (vec_duplicate:V (reg:S R2)))
4723 
4724 	 to:
4725 
4726 	   (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4727 
4728 	 The first might be done entirely in vector registers while the
4729 	 second might need a move between register files.  */
4730       tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4731 				       elt0, elt1);
4732       if (tem)
4733 	return gen_vec_duplicate (mode, tem);
4734     }
4735 
4736   return 0;
4737 }
4738 
4739 /* Return true if binary operation OP distributes over addition in operand
4740    OPNO, with the other operand being held constant.  OPNO counts from 1.  */
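/* For example, ASHIFT distributes over addition only in its first
   operand: (a + b) << c is (a << c) + (b << c) under modular
   arithmetic, whereas c << (a + b) is not (c << a) + (c << b).  */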
4741 
4742 static bool
4743 distributes_over_addition_p (rtx_code op, int opno)
4744 {
4745   switch (op)
4746     {
4747     case PLUS:
4748     case MINUS:
4749     case MULT:
4750       return true;
4751 
4752     case ASHIFT:
4753       return opno == 1;
4754 
4755     default:
4756       return false;
4757     }
4758 }
4759 
4760 rtx
4761 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4762 				 rtx op0, rtx op1)
4763 {
4764   if (VECTOR_MODE_P (mode)
4765       && code != VEC_CONCAT
4766       && GET_CODE (op0) == CONST_VECTOR
4767       && GET_CODE (op1) == CONST_VECTOR)
4768     {
4769       bool step_ok_p;
4770       if (CONST_VECTOR_STEPPED_P (op0)
4771 	  && CONST_VECTOR_STEPPED_P (op1))
4772 	/* We can operate directly on the encoding if:
4773 
4774 	      a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4775 	    implies
4776 	      (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4777 
4778 	   Addition and subtraction are the supported operators
4779 	   for which this is true.  */
4780 	step_ok_p = (code == PLUS || code == MINUS);
4781       else if (CONST_VECTOR_STEPPED_P (op0))
4782 	/* We can operate directly on stepped encodings if:
4783 
4784 	     a3 - a2 == a2 - a1
4785 	   implies:
4786 	     (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4787 
4788 	   which is true if (x -> x op c) distributes over addition.  */
4789 	step_ok_p = distributes_over_addition_p (code, 1);
4790       else
4791 	/* Similarly in reverse.  */
4792 	step_ok_p = distributes_over_addition_p (code, 2);
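      /* For instance, multiplying the stepped series {1, 3, 5, ...} by
	 the constant 4 gives {4, 12, 20, ...}, which is still stepped
	 (the difference between consecutive elements stays constant).  */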
4793       rtx_vector_builder builder;
4794       if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4795 	return 0;
4796 
4797       unsigned int count = builder.encoded_nelts ();
4798       for (unsigned int i = 0; i < count; i++)
4799 	{
4800 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4801 					     CONST_VECTOR_ELT (op0, i),
4802 					     CONST_VECTOR_ELT (op1, i));
4803 	  if (!x || !valid_for_const_vector_p (mode, x))
4804 	    return 0;
4805 	  builder.quick_push (x);
4806 	}
4807       return builder.build ();
4808     }
4809 
4810   if (VECTOR_MODE_P (mode)
4811       && code == VEC_CONCAT
4812       && (CONST_SCALAR_INT_P (op0)
4813 	  || CONST_FIXED_P (op0)
4814 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
4815       && (CONST_SCALAR_INT_P (op1)
4816 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
4817 	  || CONST_FIXED_P (op1)))
4818     {
4819       /* Both inputs have a constant number of elements, so the result
4820 	 must too.  */
4821       unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4822       rtvec v = rtvec_alloc (n_elts);
4823 
4824       gcc_assert (n_elts >= 2);
4825       if (n_elts == 2)
4826 	{
4827 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4828 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4829 
4830 	  RTVEC_ELT (v, 0) = op0;
4831 	  RTVEC_ELT (v, 1) = op1;
4832 	}
4833       else
4834 	{
4835 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4836 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4837 	  unsigned i;
4838 
4839 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4840 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4841 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4842 
4843 	  for (i = 0; i < op0_n_elts; ++i)
4844 	    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4845 	  for (i = 0; i < op1_n_elts; ++i)
4846 	    RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4847 	}
4848 
4849       return gen_rtx_CONST_VECTOR (mode, v);
4850     }
4851 
4852   if (SCALAR_FLOAT_MODE_P (mode)
4853       && CONST_DOUBLE_AS_FLOAT_P (op0)
4854       && CONST_DOUBLE_AS_FLOAT_P (op1)
4855       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4856     {
4857       if (code == AND
4858 	  || code == IOR
4859 	  || code == XOR)
4860 	{
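	  /* Fold the bit operation on the target representation of the
	     two floats, e.g. ANDing away the sign bit as produced by an
	     ABS idiom.  */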
4861 	  long tmp0[4];
4862 	  long tmp1[4];
4863 	  REAL_VALUE_TYPE r;
4864 	  int i;
4865 
4866 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4867 			  GET_MODE (op0));
4868 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4869 			  GET_MODE (op1));
4870 	  for (i = 0; i < 4; i++)
4871 	    {
4872 	      switch (code)
4873 	      {
4874 	      case AND:
4875 		tmp0[i] &= tmp1[i];
4876 		break;
4877 	      case IOR:
4878 		tmp0[i] |= tmp1[i];
4879 		break;
4880 	      case XOR:
4881 		tmp0[i] ^= tmp1[i];
4882 		break;
4883 	      default:
4884 		gcc_unreachable ();
4885 	      }
4886 	    }
4887 	   real_from_target (&r, tmp0, mode);
4888 	   return const_double_from_real_value (r, mode);
4889 	}
4890       else
4891 	{
4892 	  REAL_VALUE_TYPE f0, f1, value, result;
4893 	  const REAL_VALUE_TYPE *opr0, *opr1;
4894 	  bool inexact;
4895 
4896 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4897 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4898 
4899 	  if (HONOR_SNANS (mode)
4900 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4901 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4902 	    return 0;
4903 
4904 	  real_convert (&f0, mode, opr0);
4905 	  real_convert (&f1, mode, opr1);
4906 
4907 	  if (code == DIV
4908 	      && real_equal (&f1, &dconst0)
4909 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4910 	    return 0;
4911 
4912 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4913 	      && flag_trapping_math
4914 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4915 	    {
4916 	      int s0 = REAL_VALUE_NEGATIVE (f0);
4917 	      int s1 = REAL_VALUE_NEGATIVE (f1);
4918 
4919 	      switch (code)
4920 		{
4921 		case PLUS:
4922 		  /* Inf + -Inf = NaN plus exception.  */
4923 		  if (s0 != s1)
4924 		    return 0;
4925 		  break;
4926 		case MINUS:
4927 		  /* Inf - Inf = NaN plus exception.  */
4928 		  if (s0 == s1)
4929 		    return 0;
4930 		  break;
4931 		case DIV:
4932 		  /* Inf / Inf = NaN plus exception.  */
4933 		  return 0;
4934 		default:
4935 		  break;
4936 		}
4937 	    }
4938 
4939 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4940 	      && flag_trapping_math
4941 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4942 		  || (REAL_VALUE_ISINF (f1)
4943 		      && real_equal (&f0, &dconst0))))
4944 	    /* Inf * 0 = NaN plus exception.  */
4945 	    return 0;
4946 
4947 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4948 				     &f0, &f1);
4949 	  real_convert (&result, mode, &value);
4950 
4951 	  /* Don't constant fold this floating point operation if
4952 	     the result has overflowed and flag_trapping_math.  */
4953 
4954 	  if (flag_trapping_math
4955 	      && MODE_HAS_INFINITIES (mode)
4956 	      && REAL_VALUE_ISINF (result)
4957 	      && !REAL_VALUE_ISINF (f0)
4958 	      && !REAL_VALUE_ISINF (f1))
4959 	    /* Overflow plus exception.  */
4960 	    return 0;
4961 
4962 	  /* Don't constant fold this floating point operation if the
4963 	     result may depend upon the run-time rounding mode and
4964 	     flag_rounding_math is set, or if GCC's software emulation
4965 	     is unable to accurately represent the result.  */
4966 
4967 	  if ((flag_rounding_math
4968 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4969 	      && (inexact || !real_identical (&result, &value)))
4970 	    return NULL_RTX;
4971 
4972 	  return const_double_from_real_value (result, mode);
4973 	}
4974     }
4975 
4976   /* We can fold some multi-word operations.  */
4977   scalar_int_mode int_mode;
4978   if (is_a <scalar_int_mode> (mode, &int_mode)
4979       && CONST_SCALAR_INT_P (op0)
4980       && CONST_SCALAR_INT_P (op1)
4981       && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4982     {
4983       wide_int result;
4984       wi::overflow_type overflow;
4985       rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4986       rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4987 
4988 #if TARGET_SUPPORTS_WIDE_INT == 0
4989       /* This assert keeps the simplification from producing a result
4990 	 that cannot be represented in a CONST_DOUBLE but a lot of
4991 	 upstream callers expect that this function never fails to
4992 	 simplify something, so if you added this to the test
4993 	 above the code would die later anyway.  If this assert
4994 	 happens, you just need to make the port support wide int.  */
4995       gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4996 #endif
4997       switch (code)
4998 	{
4999 	case MINUS:
5000 	  result = wi::sub (pop0, pop1);
5001 	  break;
5002 
5003 	case PLUS:
5004 	  result = wi::add (pop0, pop1);
5005 	  break;
5006 
5007 	case MULT:
5008 	  result = wi::mul (pop0, pop1);
5009 	  break;
5010 
5011 	case DIV:
5012 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
5013 	  if (overflow)
5014 	    return NULL_RTX;
5015 	  break;
5016 
5017 	case MOD:
5018 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
5019 	  if (overflow)
5020 	    return NULL_RTX;
5021 	  break;
5022 
5023 	case UDIV:
5024 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
5025 	  if (overflow)
5026 	    return NULL_RTX;
5027 	  break;
5028 
5029 	case UMOD:
5030 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
5031 	  if (overflow)
5032 	    return NULL_RTX;
5033 	  break;
5034 
5035 	case AND:
5036 	  result = wi::bit_and (pop0, pop1);
5037 	  break;
5038 
5039 	case IOR:
5040 	  result = wi::bit_or (pop0, pop1);
5041 	  break;
5042 
5043 	case XOR:
5044 	  result = wi::bit_xor (pop0, pop1);
5045 	  break;
5046 
5047 	case SMIN:
5048 	  result = wi::smin (pop0, pop1);
5049 	  break;
5050 
5051 	case SMAX:
5052 	  result = wi::smax (pop0, pop1);
5053 	  break;
5054 
5055 	case UMIN:
5056 	  result = wi::umin (pop0, pop1);
5057 	  break;
5058 
5059 	case UMAX:
5060 	  result = wi::umax (pop0, pop1);
5061 	  break;
5062 
5063 	case LSHIFTRT:
5064 	case ASHIFTRT:
5065 	case ASHIFT:
5066 	case SS_ASHIFT:
5067 	case US_ASHIFT:
5068 	  {
5069 	    wide_int wop1 = pop1;
5070 	    if (SHIFT_COUNT_TRUNCATED)
5071 	      wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
5072 	    else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
5073 	      return NULL_RTX;
5074 
5075 	    switch (code)
5076 	      {
5077 	      case LSHIFTRT:
5078 		result = wi::lrshift (pop0, wop1);
5079 		break;
5080 
5081 	      case ASHIFTRT:
5082 		result = wi::arshift (pop0, wop1);
5083 		break;
5084 
5085 	      case ASHIFT:
5086 		result = wi::lshift (pop0, wop1);
5087 		break;
5088 
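	      /* The saturating shifts below clamp instead of wrapping when
		 nonzero bits would be shifted out; e.g. in QImode,
		 (ss_ashift 0x40 2) yields the signed maximum 0x7f.  */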
5089 	      case SS_ASHIFT:
5090 		if (wi::leu_p (wop1, wi::clrsb (pop0)))
5091 		  result = wi::lshift (pop0, wop1);
5092 		else if (wi::neg_p (pop0))
5093 		  result = wi::min_value (int_mode, SIGNED);
5094 		else
5095 		  result = wi::max_value (int_mode, SIGNED);
5096 		break;
5097 
5098 	      case US_ASHIFT:
5099 		if (wi::eq_p (pop0, 0))
5100 		  result = pop0;
5101 		else if (wi::leu_p (wop1, wi::clz (pop0)))
5102 		  result = wi::lshift (pop0, wop1);
5103 		else
5104 		  result = wi::max_value (int_mode, UNSIGNED);
5105 		break;
5106 
5107 	      default:
5108 		gcc_unreachable ();
5109 	      }
5110 	    break;
5111 	  }
5112 	case ROTATE:
5113 	case ROTATERT:
5114 	  {
5115 	    if (wi::neg_p (pop1))
5116 	      return NULL_RTX;
5117 
5118 	    switch (code)
5119 	      {
5120 	      case ROTATE:
5121 		result = wi::lrotate (pop0, pop1);
5122 		break;
5123 
5124 	      case ROTATERT:
5125 		result = wi::rrotate (pop0, pop1);
5126 		break;
5127 
5128 	      default:
5129 		gcc_unreachable ();
5130 	      }
5131 	    break;
5132 	  }
5133 
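	/* The saturating additions and subtractions below clamp on
	   overflow rather than wrapping; e.g. in QImode (ss_plus 100 100)
	   folds to 127 and (us_minus 10 20) folds to 0.  */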
5134 	case SS_PLUS:
5135 	  result = wi::add (pop0, pop1, SIGNED, &overflow);
5136  clamp_signed_saturation:
5137 	  if (overflow == wi::OVF_OVERFLOW)
5138 	    result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
5139 	  else if (overflow == wi::OVF_UNDERFLOW)
5140 	    result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
5141 	  else if (overflow != wi::OVF_NONE)
5142 	    return NULL_RTX;
5143 	  break;
5144 
5145 	case US_PLUS:
5146 	  result = wi::add (pop0, pop1, UNSIGNED, &overflow);
5147  clamp_unsigned_saturation:
5148 	  if (overflow != wi::OVF_NONE)
5149 	    result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5150 	  break;
5151 
5152 	case SS_MINUS:
5153 	  result = wi::sub (pop0, pop1, SIGNED, &overflow);
5154 	  goto clamp_signed_saturation;
5155 
5156 	case US_MINUS:
5157 	  result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
5158 	  if (overflow != wi::OVF_NONE)
5159 	    result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5160 	  break;
5161 
5162 	case SS_MULT:
5163 	  result = wi::mul (pop0, pop1, SIGNED, &overflow);
5164 	  goto clamp_signed_saturation;
5165 
5166 	case US_MULT:
5167 	  result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
5168 	  goto clamp_unsigned_saturation;
5169 
5170 	case SMUL_HIGHPART:
5171 	  result = wi::mul_high (pop0, pop1, SIGNED);
5172 	  break;
5173 
5174 	case UMUL_HIGHPART:
5175 	  result = wi::mul_high (pop0, pop1, UNSIGNED);
5176 	  break;
5177 
5178 	default:
5179 	  return NULL_RTX;
5180 	}
5181       return immed_wide_int_const (result, int_mode);
5182     }
5183 
5184   /* Handle polynomial integers.  */
5185   if (NUM_POLY_INT_COEFFS > 1
5186       && is_a <scalar_int_mode> (mode, &int_mode)
5187       && poly_int_rtx_p (op0)
5188       && poly_int_rtx_p (op1))
5189     {
5190       poly_wide_int result;
5191       switch (code)
5192 	{
5193 	case PLUS:
5194 	  result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
5195 	  break;
5196 
5197 	case MINUS:
5198 	  result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
5199 	  break;
5200 
5201 	case MULT:
5202 	  if (CONST_SCALAR_INT_P (op1))
5203 	    result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
5204 	  else
5205 	    return NULL_RTX;
5206 	  break;
5207 
5208 	case ASHIFT:
5209 	  if (CONST_SCALAR_INT_P (op1))
5210 	    {
5211 	      wide_int shift = rtx_mode_t (op1, mode);
5212 	      if (SHIFT_COUNT_TRUNCATED)
5213 		shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
5214 	      else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
5215 		return NULL_RTX;
5216 	      result = wi::to_poly_wide (op0, mode) << shift;
5217 	    }
5218 	  else
5219 	    return NULL_RTX;
5220 	  break;
5221 
5222 	case IOR:
5223 	  if (!CONST_SCALAR_INT_P (op1)
5224 	      || !can_ior_p (wi::to_poly_wide (op0, mode),
5225 			     rtx_mode_t (op1, mode), &result))
5226 	    return NULL_RTX;
5227 	  break;
5228 
5229 	default:
5230 	  return NULL_RTX;
5231 	}
5232       return immed_wide_int_const (result, int_mode);
5233     }
5234 
5235   return NULL_RTX;
5236 }
5237 
5238 
5239 
5240 /* Return a positive integer if X should sort after Y.  The value
5241    returned is 1 if and only if X and Y are both regs.  */
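/* (The precedence difference is doubled so that it can never be 1;
   the caller treats a return value of exactly 1 as "just swapping
   registers", which does not count as a canonicalization.)  */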
5242 
5243 static int
5244 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
5245 {
5246   int result;
5247 
5248   result = (commutative_operand_precedence (y)
5249 	    - commutative_operand_precedence (x));
5250   if (result)
5251     return result + result;
5252 
5253   /* Group together equal REGs to do more simplification.  */
5254   if (REG_P (x) && REG_P (y))
5255     return REGNO (x) > REGNO (y);
5256 
5257   return 0;
5258 }
5259 
5260 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5261    operands may be another PLUS or MINUS.
5262 
5263    Rather than test for specific case, we do this by a brute-force method
5264    and do all possible simplifications until no more changes occur.  Then
5265    we rebuild the operation.
5266 
5267    May return NULL_RTX when no changes were made.  */
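/* For example, (minus (plus A B) (neg C)) is flattened into the terms
   A, B and C, each with a positive sign (the MINUS of a NEG cancels),
   and, if no pairwise simplification applies, is rebuilt at the end as
   a chain of PLUS operations.  */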
5268 
5269 rtx
5270 simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
5271 				       rtx op0, rtx op1)
5272 {
5273   struct simplify_plus_minus_op_data
5274   {
5275     rtx op;
5276     short neg;
5277   } ops[16];
5278   rtx result, tem;
5279   int n_ops = 2;
5280   int changed, n_constants, canonicalized = 0;
5281   int i, j;
5282 
5283   memset (ops, 0, sizeof ops);
5284 
5285   /* Set up the two operands and then expand them until nothing has been
5286      changed.  If we run out of room in our array, give up; this should
5287      almost never happen.  */
5288 
5289   ops[0].op = op0;
5290   ops[0].neg = 0;
5291   ops[1].op = op1;
5292   ops[1].neg = (code == MINUS);
5293 
5294   do
5295     {
5296       changed = 0;
5297       n_constants = 0;
5298 
5299       for (i = 0; i < n_ops; i++)
5300 	{
5301 	  rtx this_op = ops[i].op;
5302 	  int this_neg = ops[i].neg;
5303 	  enum rtx_code this_code = GET_CODE (this_op);
5304 
5305 	  switch (this_code)
5306 	    {
5307 	    case PLUS:
5308 	    case MINUS:
5309 	      if (n_ops == ARRAY_SIZE (ops))
5310 		return NULL_RTX;
5311 
5312 	      ops[n_ops].op = XEXP (this_op, 1);
5313 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
5314 	      n_ops++;
5315 
5316 	      ops[i].op = XEXP (this_op, 0);
5317 	      changed = 1;
5318 	      /* If this operand was negated then we will potentially
5319 		 canonicalize the expression.  Similarly if we don't
5320 		 place the operands adjacent we're re-ordering the
5321 		 expression and thus might be performing a
5322 		 canonicalization.  Ignore register re-ordering.
5323 		 ??? It might be better to shuffle the ops array here,
5324 		 but then (plus (plus (A, B), plus (C, D))) wouldn't
5325 		 be seen as non-canonical.  */
5326 	      if (this_neg
5327 		  || (i != n_ops - 2
5328 		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
5329 		canonicalized = 1;
5330 	      break;
5331 
5332 	    case NEG:
5333 	      ops[i].op = XEXP (this_op, 0);
5334 	      ops[i].neg = ! this_neg;
5335 	      changed = 1;
5336 	      canonicalized = 1;
5337 	      break;
5338 
5339 	    case CONST:
5340 	      if (n_ops != ARRAY_SIZE (ops)
5341 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
5342 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
5343 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
5344 		{
5345 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
5346 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
5347 		  ops[n_ops].neg = this_neg;
5348 		  n_ops++;
5349 		  changed = 1;
5350 		  canonicalized = 1;
5351 		}
5352 	      break;
5353 
5354 	    case NOT:
5355 	      /* ~a -> (-a - 1) */
5356 	      if (n_ops != ARRAY_SIZE (ops))
5357 		{
5358 		  ops[n_ops].op = CONSTM1_RTX (mode);
5359 		  ops[n_ops++].neg = this_neg;
5360 		  ops[i].op = XEXP (this_op, 0);
5361 		  ops[i].neg = !this_neg;
5362 		  changed = 1;
5363 		  canonicalized = 1;
5364 		}
5365 	      break;
5366 
5367 	    CASE_CONST_SCALAR_INT:
5368 	    case CONST_POLY_INT:
5369 	      n_constants++;
5370 	      if (this_neg)
5371 		{
5372 		  ops[i].op = neg_poly_int_rtx (mode, this_op);
5373 		  ops[i].neg = 0;
5374 		  changed = 1;
5375 		  canonicalized = 1;
5376 		}
5377 	      break;
5378 
5379 	    default:
5380 	      break;
5381 	    }
5382 	}
5383     }
5384   while (changed);
5385 
5386   if (n_constants > 1)
5387     canonicalized = 1;
5388 
5389   gcc_assert (n_ops >= 2);
5390 
5391   /* If we only have two operands, we can avoid the loops.  */
5392   if (n_ops == 2)
5393     {
5394       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
5395       rtx lhs, rhs;
5396 
5397       /* Get the two operands.  Be careful with the order, especially for
5398 	 the cases where code == MINUS.  */
5399       if (ops[0].neg && ops[1].neg)
5400 	{
5401 	  lhs = gen_rtx_NEG (mode, ops[0].op);
5402 	  rhs = ops[1].op;
5403 	}
5404       else if (ops[0].neg)
5405 	{
5406 	  lhs = ops[1].op;
5407 	  rhs = ops[0].op;
5408 	}
5409       else
5410 	{
5411 	  lhs = ops[0].op;
5412 	  rhs = ops[1].op;
5413 	}
5414 
5415       return simplify_const_binary_operation (code, mode, lhs, rhs);
5416     }
5417 
5418   /* Now simplify each pair of operands until nothing changes.  */
5419   while (1)
5420     {
5421       /* Insertion sort is good enough for a small array.  */
5422       for (i = 1; i < n_ops; i++)
5423 	{
5424 	  struct simplify_plus_minus_op_data save;
5425 	  int cmp;
5426 
5427 	  j = i - 1;
5428 	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5429 	  if (cmp <= 0)
5430 	    continue;
5431 	  /* Just swapping registers doesn't count as canonicalization.  */
5432 	  if (cmp != 1)
5433 	    canonicalized = 1;
5434 
5435 	  save = ops[i];
5436 	  do
5437 	    ops[j + 1] = ops[j];
5438 	  while (j--
5439 		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5440 	  ops[j + 1] = save;
5441 	}
5442 
5443       changed = 0;
5444       for (i = n_ops - 1; i > 0; i--)
5445 	for (j = i - 1; j >= 0; j--)
5446 	  {
5447 	    rtx lhs = ops[j].op, rhs = ops[i].op;
5448 	    int lneg = ops[j].neg, rneg = ops[i].neg;
5449 
5450 	    if (lhs != 0 && rhs != 0)
5451 	      {
5452 		enum rtx_code ncode = PLUS;
5453 
5454 		if (lneg != rneg)
5455 		  {
5456 		    ncode = MINUS;
5457 		    if (lneg)
5458 		      std::swap (lhs, rhs);
5459 		  }
5460 		else if (swap_commutative_operands_p (lhs, rhs))
5461 		  std::swap (lhs, rhs);
5462 
5463 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5464 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5465 		  {
5466 		    rtx tem_lhs, tem_rhs;
5467 
5468 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5469 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5470 		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
5471 						     tem_rhs);
5472 
5473 		    if (tem && !CONSTANT_P (tem))
5474 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
5475 		  }
5476 		else
5477 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5478 
5479 		if (tem)
5480 		  {
5481 		    /* Reject "simplifications" that just wrap the two
5482 		       arguments in a CONST.  Failure to do so can result
5483 		       in infinite recursion with simplify_binary_operation
5484 		       when it calls us to simplify CONST operations.
5485 		       Also, if we find such a simplification, don't try
5486 		       any more combinations with this rhs:  We must have
5487 		       something like symbol+offset, ie. one of the
5488 		       trivial CONST expressions we handle later.  */
5489 		    if (GET_CODE (tem) == CONST
5490 			&& GET_CODE (XEXP (tem, 0)) == ncode
5491 			&& XEXP (XEXP (tem, 0), 0) == lhs
5492 			&& XEXP (XEXP (tem, 0), 1) == rhs)
5493 		      break;
5494 		    lneg &= rneg;
5495 		    if (GET_CODE (tem) == NEG)
5496 		      tem = XEXP (tem, 0), lneg = !lneg;
5497 		    if (poly_int_rtx_p (tem) && lneg)
5498 		      tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5499 
5500 		    ops[i].op = tem;
5501 		    ops[i].neg = lneg;
5502 		    ops[j].op = NULL_RTX;
5503 		    changed = 1;
5504 		    canonicalized = 1;
5505 		  }
5506 	      }
5507 	  }
5508 
5509       if (!changed)
5510 	break;
5511 
5512       /* Pack all the operands to the lower-numbered entries.  */
5513       for (i = 0, j = 0; j < n_ops; j++)
5514 	if (ops[j].op)
5515 	  {
5516 	    ops[i] = ops[j];
5517 	    i++;
5518 	  }
5519       n_ops = i;
5520     }
5521 
5522   /* If nothing changed, check that rematerialization of rtl instructions
5523      is still required.  */
5524   if (!canonicalized)
5525     {
5526       /* Perform rematerialization only if all operands are registers and
5527 	 all operations are PLUS.  */
5528       /* ??? Also disallow (non-global, non-frame) fixed registers to work
5529 	 around rs6000 and how it uses the CA register.  See PR67145.  */
5530       for (i = 0; i < n_ops; i++)
5531 	if (ops[i].neg
5532 	    || !REG_P (ops[i].op)
5533 	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5534 		&& fixed_regs[REGNO (ops[i].op)]
5535 		&& !global_regs[REGNO (ops[i].op)]
5536 		&& ops[i].op != frame_pointer_rtx
5537 		&& ops[i].op != arg_pointer_rtx
5538 		&& ops[i].op != stack_pointer_rtx))
5539 	  return NULL_RTX;
5540       goto gen_result;
5541     }
5542 
5543   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
5544   if (n_ops == 2
5545       && CONST_INT_P (ops[1].op)
5546       && CONSTANT_P (ops[0].op)
5547       && ops[0].neg)
5548     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5549 
5550   /* We suppressed creation of trivial CONST expressions in the
5551      combination loop to avoid recursion.  Create one manually now.
5552      The combination loop should have ensured that there is exactly
5553      one CONST_INT, and the sort will have ensured that it is last
5554      in the array and that any other constant will be next-to-last.  */
5555 
5556   if (n_ops > 1
5557       && poly_int_rtx_p (ops[n_ops - 1].op)
5558       && CONSTANT_P (ops[n_ops - 2].op))
5559     {
5560       rtx value = ops[n_ops - 1].op;
5561       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5562 	value = neg_poly_int_rtx (mode, value);
5563       if (CONST_INT_P (value))
5564 	{
5565 	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5566 					     INTVAL (value));
5567 	  n_ops--;
5568 	}
5569     }
5570 
5571   /* Put a non-negated operand first, if possible.  */
5572 
5573   for (i = 0; i < n_ops && ops[i].neg; i++)
5574     continue;
5575   if (i == n_ops)
5576     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5577   else if (i != 0)
5578     {
5579       tem = ops[0].op;
5580       ops[0] = ops[i];
5581       ops[i].op = tem;
5582       ops[i].neg = 1;
5583     }
5584 
5585   /* Now make the result by performing the requested operations.  */
5586  gen_result:
5587   result = ops[0].op;
5588   for (i = 1; i < n_ops; i++)
5589     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5590 			     mode, result, ops[i].op);
5591 
5592   return result;
5593 }
5594 
5595 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
5596 static bool
5597 plus_minus_operand_p (const_rtx x)
5598 {
5599   return GET_CODE (x) == PLUS
5600          || GET_CODE (x) == MINUS
5601 	 || (GET_CODE (x) == CONST
5602 	     && GET_CODE (XEXP (x, 0)) == PLUS
5603 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5604 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5605 }
5606 
5607 /* Like simplify_binary_operation except used for relational operators.
5608    MODE is the mode of the result.  If MODE is VOIDmode, the operands must
5609    not both be VOIDmode as well.
5610 
5611    CMP_MODE specifies in which mode the comparison is done in, so it is
5612    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
5613    the operands or, if both are VOIDmode, the operands are compared in
5614    "infinite precision".  */
5615 rtx
5616 simplify_context::simplify_relational_operation (rtx_code code,
5617 						 machine_mode mode,
5618 						 machine_mode cmp_mode,
5619 						 rtx op0, rtx op1)
5620 {
5621   rtx tem, trueop0, trueop1;
5622 
5623   if (cmp_mode == VOIDmode)
5624     cmp_mode = GET_MODE (op0);
5625   if (cmp_mode == VOIDmode)
5626     cmp_mode = GET_MODE (op1);
5627 
5628   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5629   if (tem)
5630     return relational_result (mode, cmp_mode, tem);
5631 
5632   /* For the following tests, ensure const0_rtx is op1.  */
5633   if (swap_commutative_operands_p (op0, op1)
5634       || (op0 == const0_rtx && op1 != const0_rtx))
5635     std::swap (op0, op1), code = swap_condition (code);
5636 
5637   /* If op0 is a compare, extract the comparison arguments from it.  */
5638   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5639     return simplify_gen_relational (code, mode, VOIDmode,
5640 				    XEXP (op0, 0), XEXP (op0, 1));
5641 
5642   if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
5643     return NULL_RTX;
5644 
5645   trueop0 = avoid_constant_pool_reference (op0);
5646   trueop1 = avoid_constant_pool_reference (op1);
5647   return simplify_relational_operation_1 (code, mode, cmp_mode,
5648 		  			  trueop0, trueop1);
5649 }
5650 
5651 /* This part of simplify_relational_operation is only used when CMP_MODE
5652    is not in class MODE_CC (i.e. it is a real comparison).
5653 
5654    MODE is the mode of the result, while CMP_MODE specifies in which
5655    mode the comparison is done in, so it is the mode of the operands.  */
5656 
5657 rtx
5658 simplify_context::simplify_relational_operation_1 (rtx_code code,
5659 						   machine_mode mode,
5660 						   machine_mode cmp_mode,
5661 						   rtx op0, rtx op1)
5662 {
5663   enum rtx_code op0code = GET_CODE (op0);
5664 
5665   if (op1 == const0_rtx && COMPARISON_P (op0))
5666     {
5667       /* If op0 is a comparison, extract the comparison arguments
5668          from it.  */
5669       if (code == NE)
5670 	{
5671 	  if (GET_MODE (op0) == mode)
5672 	    return simplify_rtx (op0);
5673 	  else
5674 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5675 					    XEXP (op0, 0), XEXP (op0, 1));
5676 	}
5677       else if (code == EQ)
5678 	{
5679 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5680 	  if (new_code != UNKNOWN)
5681 	    return simplify_gen_relational (new_code, mode, VOIDmode,
5682 					    XEXP (op0, 0), XEXP (op0, 1));
5683 	}
5684     }
5685 
5686   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5687      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
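  /* E.g. in QImode, (ltu (plus a 5) 5) is true exactly when the addition
     wrapped around, i.e. when a >= 0xfb, which is (geu a (const_int -5)).  */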
5688   if ((code == LTU || code == GEU)
5689       && GET_CODE (op0) == PLUS
5690       && CONST_INT_P (XEXP (op0, 1))
5691       && (rtx_equal_p (op1, XEXP (op0, 0))
5692 	  || rtx_equal_p (op1, XEXP (op0, 1)))
5693       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5694       && XEXP (op0, 1) != const0_rtx)
5695     {
5696       rtx new_cmp
5697 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5698       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5699 				      cmp_mode, XEXP (op0, 0), new_cmp);
5700     }
5701 
5702   /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5703      transformed into (LTU a -C).  */
5704   if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5705       && CONST_INT_P (XEXP (op0, 1))
5706       && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5707       && XEXP (op0, 1) != const0_rtx)
5708     {
5709       rtx new_cmp
5710 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5711       return simplify_gen_relational (LTU, mode, cmp_mode,
5712 				       XEXP (op0, 0), new_cmp);
5713     }
5714 
5715   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
5716   if ((code == LTU || code == GEU)
5717       && GET_CODE (op0) == PLUS
5718       && rtx_equal_p (op1, XEXP (op0, 1))
5719       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
5720       && !rtx_equal_p (op1, XEXP (op0, 0)))
5721     return simplify_gen_relational (code, mode, cmp_mode, op0,
5722 				    copy_rtx (XEXP (op0, 0)));
5723 
5724   if (op1 == const0_rtx)
5725     {
5726       /* Canonicalize (GTU x 0) as (NE x 0).  */
5727       if (code == GTU)
5728         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5729       /* Canonicalize (LEU x 0) as (EQ x 0).  */
5730       if (code == LEU)
5731         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5732     }
5733   else if (op1 == const1_rtx)
5734     {
5735       switch (code)
5736         {
5737         case GE:
5738 	  /* Canonicalize (GE x 1) as (GT x 0).  */
5739 	  return simplify_gen_relational (GT, mode, cmp_mode,
5740 					  op0, const0_rtx);
5741 	case GEU:
5742 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
5743 	  return simplify_gen_relational (NE, mode, cmp_mode,
5744 					  op0, const0_rtx);
5745 	case LT:
5746 	  /* Canonicalize (LT x 1) as (LE x 0).  */
5747 	  return simplify_gen_relational (LE, mode, cmp_mode,
5748 					  op0, const0_rtx);
5749 	case LTU:
5750 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
5751 	  return simplify_gen_relational (EQ, mode, cmp_mode,
5752 					  op0, const0_rtx);
5753 	default:
5754 	  break;
5755 	}
5756     }
5757   else if (op1 == constm1_rtx)
5758     {
5759       /* Canonicalize (LE x -1) as (LT x 0).  */
5760       if (code == LE)
5761         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5762       /* Canonicalize (GT x -1) as (GE x 0).  */
5763       if (code == GT)
5764         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5765     }
5766 
5767   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
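  /* E.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
     (eq x (const_int 4)).  */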
5768   if ((code == EQ || code == NE)
5769       && (op0code == PLUS || op0code == MINUS)
5770       && CONSTANT_P (op1)
5771       && CONSTANT_P (XEXP (op0, 1))
5772       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5773     {
5774       rtx x = XEXP (op0, 0);
5775       rtx c = XEXP (op0, 1);
5776       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5777       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5778 
5779       /* Detect an infinite recursive condition, where we oscillate at this
5780 	 simplification case between:
5781 	    A + B == C  <--->  C - B == A,
5782 	 where A, B, and C are all constants with non-simplifiable expressions,
5783 	 usually SYMBOL_REFs.  */
5784       if (GET_CODE (tem) == invcode
5785 	  && CONSTANT_P (x)
5786 	  && rtx_equal_p (c, XEXP (tem, 1)))
5787 	return NULL_RTX;
5788 
5789       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5790     }
5791 
5792   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5793      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
5794   scalar_int_mode int_mode, int_cmp_mode;
5795   if (code == NE
5796       && op1 == const0_rtx
5797       && is_int_mode (mode, &int_mode)
5798       && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5799       /* ??? Work-around BImode bugs in the ia64 backend.  */
5800       && int_mode != BImode
5801       && int_cmp_mode != BImode
5802       && nonzero_bits (op0, int_cmp_mode) == 1
5803       && STORE_FLAG_VALUE == 1)
5804     return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5805 	   ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5806 	   : lowpart_subreg (int_mode, op0, int_cmp_mode);
5807 
5808   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
5809   if ((code == EQ || code == NE)
5810       && op1 == const0_rtx
5811       && op0code == XOR)
5812     return simplify_gen_relational (code, mode, cmp_mode,
5813 				    XEXP (op0, 0), XEXP (op0, 1));
5814 
5815   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
5816   if ((code == EQ || code == NE)
5817       && op0code == XOR
5818       && rtx_equal_p (XEXP (op0, 0), op1)
5819       && !side_effects_p (XEXP (op0, 0)))
5820     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5821 				    CONST0_RTX (mode));
5822 
5823   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
5824   if ((code == EQ || code == NE)
5825       && op0code == XOR
5826       && rtx_equal_p (XEXP (op0, 1), op1)
5827       && !side_effects_p (XEXP (op0, 1)))
5828     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5829 				    CONST0_RTX (mode));
5830 
5831   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
5832   if ((code == EQ || code == NE)
5833       && op0code == XOR
5834       && CONST_SCALAR_INT_P (op1)
5835       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5836     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5837 				    simplify_gen_binary (XOR, cmp_mode,
5838 							 XEXP (op0, 1), op1));
5839 
5840   /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction,
5841      or to allow constant folding if x/y is a constant.  */
5842   if ((code == EQ || code == NE)
5843       && (op0code == AND || op0code == IOR)
5844       && !side_effects_p (op1)
5845       && op1 != CONST0_RTX (cmp_mode))
5846     {
5847       /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5848 	 (eq/ne (and (not y) x) 0).  */
5849       if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5850 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5851 	{
5852 	  rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5853 					  cmp_mode);
5854 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5855 
5856 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5857 					  CONST0_RTX (cmp_mode));
5858 	}
5859 
5860       /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5861 	 (eq/ne (and (not x) y) 0).  */
5862       if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5863 	  || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5864 	{
5865 	  rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5866 					  cmp_mode);
5867 	  rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5868 
5869 	  return simplify_gen_relational (code, mode, cmp_mode, lhs,
5870 					  CONST0_RTX (cmp_mode));
5871 	}
5872     }
5873 
5874   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
5875   if ((code == EQ || code == NE)
5876       && GET_CODE (op0) == BSWAP
5877       && CONST_SCALAR_INT_P (op1))
5878     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5879 				    simplify_gen_unary (BSWAP, cmp_mode,
5880 							op1, cmp_mode));
5881 
5882   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
5883   if ((code == EQ || code == NE)
5884       && GET_CODE (op0) == BSWAP
5885       && GET_CODE (op1) == BSWAP)
5886     return simplify_gen_relational (code, mode, cmp_mode,
5887 				    XEXP (op0, 0), XEXP (op1, 0));
5888 
5889   if (op0code == POPCOUNT && op1 == const0_rtx)
5890     switch (code)
5891       {
5892       case EQ:
5893       case LE:
5894       case LEU:
5895 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
5896 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5897 					XEXP (op0, 0), const0_rtx);
5898 
5899       case NE:
5900       case GT:
5901       case GTU:
5902 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
5903 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5904 					XEXP (op0, 0), const0_rtx);
5905 
5906       default:
5907 	break;
5908       }
5909 
5910   return NULL_RTX;
5911 }
5912 
5913 enum
5914 {
5915   CMP_EQ = 1,
5916   CMP_LT = 2,
5917   CMP_GT = 4,
5918   CMP_LTU = 8,
5919   CMP_GTU = 16
5920 };
5921 
5922 
5923 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5924    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5925    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5926    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5927    For floating-point comparisons, assume that the operands were ordered.  */
5928 
5929 static rtx
5930 comparison_result (enum rtx_code code, int known_results)
5931 {
5932   switch (code)
5933     {
5934     case EQ:
5935     case UNEQ:
5936       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5937     case NE:
5938     case LTGT:
5939       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5940 
5941     case LT:
5942     case UNLT:
5943       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5944     case GE:
5945     case UNGE:
5946       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5947 
5948     case GT:
5949     case UNGT:
5950       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5951     case LE:
5952     case UNLE:
5953       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5954 
5955     case LTU:
5956       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5957     case GEU:
5958       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5959 
5960     case GTU:
5961       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5962     case LEU:
5963       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5964 
5965     case ORDERED:
5966       return const_true_rtx;
5967     case UNORDERED:
5968       return const0_rtx;
5969     default:
5970       gcc_unreachable ();
5971     }
5972 }
5973 
5974 /* Check if the given comparison (done in the given MODE) is actually
5975    a tautology or a contradiction.  If the mode is VOIDmode, the
5976    comparison is done in "infinite precision".  If no simplification
5977    is possible, this function returns zero.  Otherwise, it returns
5978    either const_true_rtx or const0_rtx.  */
5979 
5980 rtx
5981 simplify_const_relational_operation (enum rtx_code code,
5982 				     machine_mode mode,
5983 				     rtx op0, rtx op1)
5984 {
5985   rtx tem;
5986   rtx trueop0;
5987   rtx trueop1;
5988 
5989   gcc_assert (mode != VOIDmode
5990 	      || (GET_MODE (op0) == VOIDmode
5991 		  && GET_MODE (op1) == VOIDmode));
5992 
5993   /* If op0 is a compare, extract the comparison arguments from it.  */
5994   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5995     {
5996       op1 = XEXP (op0, 1);
5997       op0 = XEXP (op0, 0);
5998 
5999       if (GET_MODE (op0) != VOIDmode)
6000 	mode = GET_MODE (op0);
6001       else if (GET_MODE (op1) != VOIDmode)
6002 	mode = GET_MODE (op1);
6003       else
6004 	return 0;
6005     }
6006 
6007   /* We can't simplify MODE_CC values since we don't know what the
6008      actual comparison is.  */
6009   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6010     return 0;
6011 
6012   /* Make sure the constant is second.  */
6013   if (swap_commutative_operands_p (op0, op1))
6014     {
6015       std::swap (op0, op1);
6016       code = swap_condition (code);
6017     }
6018 
6019   trueop0 = avoid_constant_pool_reference (op0);
6020   trueop1 = avoid_constant_pool_reference (op1);
6021 
6022   /* For integer comparisons of A and B maybe we can simplify A - B and can
6023      then simplify a comparison of that with zero.  If A and B are both either
6024      a register or a CONST_INT, this can't help; testing for these cases will
6025      prevent infinite recursion here and speed things up.
6026 
6027      We can only do this for EQ and NE comparisons as otherwise we may
6028      lose or introduce overflow which we cannot disregard as undefined as
6029      we do not know the signedness of the operation on either the left or
6030      the right hand side of the comparison.  */
6031 
6032   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
6033       && (code == EQ || code == NE)
6034       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
6035 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
6036       && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
6037       /* We cannot do this if tem is a nonzero address.  */
6038       && ! nonzero_address_p (tem))
6039     return simplify_const_relational_operation (signed_condition (code),
6040 						mode, tem, const0_rtx);
6041 
6042   if (! HONOR_NANS (mode) && code == ORDERED)
6043     return const_true_rtx;
6044 
6045   if (! HONOR_NANS (mode) && code == UNORDERED)
6046     return const0_rtx;
6047 
6048   /* For modes without NaNs, if the two operands are equal, we know the
6049      result except if they have side-effects.  Even with NaNs we know
6050      the result of unordered comparisons and, if signaling NaNs are
6051      irrelevant, also the result of LT/GT/LTGT.  */
6052   if ((! HONOR_NANS (trueop0)
6053        || code == UNEQ || code == UNLE || code == UNGE
6054        || ((code == LT || code == GT || code == LTGT)
6055 	   && ! HONOR_SNANS (trueop0)))
6056       && rtx_equal_p (trueop0, trueop1)
6057       && ! side_effects_p (trueop0))
6058     return comparison_result (code, CMP_EQ);
6059 
6060   /* If the operands are floating-point constants, see if we can fold
6061      the result.  */
6062   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
6063       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
6064       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
6065     {
6066       const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
6067       const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
6068 
6069       /* Comparisons are unordered iff at least one of the values is NaN.  */
6070       if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
6071 	switch (code)
6072 	  {
6073 	  case UNEQ:
6074 	  case UNLT:
6075 	  case UNGT:
6076 	  case UNLE:
6077 	  case UNGE:
6078 	  case NE:
6079 	  case UNORDERED:
6080 	    return const_true_rtx;
6081 	  case EQ:
6082 	  case LT:
6083 	  case GT:
6084 	  case LE:
6085 	  case GE:
6086 	  case LTGT:
6087 	  case ORDERED:
6088 	    return const0_rtx;
6089 	  default:
6090 	    return 0;
6091 	  }
6092 
6093       return comparison_result (code,
6094 				(real_equal (d0, d1) ? CMP_EQ :
6095 				 real_less (d0, d1) ? CMP_LT : CMP_GT));
6096     }
6097 
6098   /* Otherwise, see if the operands are both integers.  */
6099   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
6100       && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
6101     {
6102       /* It would be nice if we really had a mode here.  However, the
6103 	 largest int representable on the target is as good as
6104 	 infinite.  */
6105       machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
6106       rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
6107       rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
6108 
6109       if (wi::eq_p (ptrueop0, ptrueop1))
6110 	return comparison_result (code, CMP_EQ);
6111       else
6112 	{
6113 	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
6114 	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
6115 	  return comparison_result (code, cr);
6116 	}
6117     }
6118 
6119   /* Optimize comparisons with upper and lower bounds.  */
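  /* For example, if nonzero_bits shows that only the low four bits of
     TRUEOP0 can be set (so its value is at most 15), a comparison such
     as (gtu x (const_int 20)) folds to const0_rtx.  */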
6120   scalar_int_mode int_mode;
6121   if (CONST_INT_P (trueop1)
6122       && is_a <scalar_int_mode> (mode, &int_mode)
6123       && HWI_COMPUTABLE_MODE_P (int_mode)
6124       && !side_effects_p (trueop0))
6125     {
6126       int sign;
6127       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
6128       HOST_WIDE_INT val = INTVAL (trueop1);
6129       HOST_WIDE_INT mmin, mmax;
6130 
6131       if (code == GEU
6132 	  || code == LEU
6133 	  || code == GTU
6134 	  || code == LTU)
6135 	sign = 0;
6136       else
6137 	sign = 1;
6138 
6139       /* Get a reduced range if the sign bit is zero.  */
6140       if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
6141 	{
6142 	  mmin = 0;
6143 	  mmax = nonzero;
6144 	}
6145       else
6146 	{
6147 	  rtx mmin_rtx, mmax_rtx;
6148 	  get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
6149 
6150 	  mmin = INTVAL (mmin_rtx);
6151 	  mmax = INTVAL (mmax_rtx);
6152 	  if (sign)
6153 	    {
6154 	      unsigned int sign_copies
6155 		= num_sign_bit_copies (trueop0, int_mode);
6156 
6157 	      mmin >>= (sign_copies - 1);
6158 	      mmax >>= (sign_copies - 1);
6159 	    }
6160 	}
6161 
6162       switch (code)
6163 	{
6164 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
6165 	case GEU:
6166 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6167 	    return const_true_rtx;
6168 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6169 	    return const0_rtx;
6170 	  break;
6171 	case GE:
6172 	  if (val <= mmin)
6173 	    return const_true_rtx;
6174 	  if (val > mmax)
6175 	    return const0_rtx;
6176 	  break;
6177 
6178 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
6179 	case LEU:
6180 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6181 	    return const_true_rtx;
6182 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6183 	    return const0_rtx;
6184 	  break;
6185 	case LE:
6186 	  if (val >= mmax)
6187 	    return const_true_rtx;
6188 	  if (val < mmin)
6189 	    return const0_rtx;
6190 	  break;
6191 
6192 	case EQ:
6193 	  /* x == y is always false for y out of range.  */
6194 	  if (val < mmin || val > mmax)
6195 	    return const0_rtx;
6196 	  break;
6197 
6198 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
6199 	case GTU:
6200 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6201 	    return const0_rtx;
6202 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6203 	    return const_true_rtx;
6204 	  break;
6205 	case GT:
6206 	  if (val >= mmax)
6207 	    return const0_rtx;
6208 	  if (val < mmin)
6209 	    return const_true_rtx;
6210 	  break;
6211 
6212 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
6213 	case LTU:
6214 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6215 	    return const0_rtx;
6216 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6217 	    return const_true_rtx;
6218 	  break;
6219 	case LT:
6220 	  if (val <= mmin)
6221 	    return const0_rtx;
6222 	  if (val > mmax)
6223 	    return const_true_rtx;
6224 	  break;
6225 
6226 	case NE:
6227 	  /* x != y is always true for y out of range.  */
6228 	  if (val < mmin || val > mmax)
6229 	    return const_true_rtx;
6230 	  break;
6231 
6232 	default:
6233 	  break;
6234 	}
6235     }
6236 
6237   /* Optimize integer comparisons with zero.  */
6238   if (is_a <scalar_int_mode> (mode, &int_mode)
6239       && trueop1 == const0_rtx
6240       && !side_effects_p (trueop0))
6241     {
6242       /* Some addresses are known to be nonzero.  We don't know
6243 	 their sign, but equality comparisons are known.  */
6244       if (nonzero_address_p (trueop0))
6245 	{
6246 	  if (code == EQ || code == LEU)
6247 	    return const0_rtx;
6248 	  if (code == NE || code == GTU)
6249 	    return const_true_rtx;
6250 	}
6251 
6252       /* See if the first operand is an IOR with a constant.  If so, we
6253 	 may be able to determine the result of this comparison.  */
6254       if (GET_CODE (op0) == IOR)
6255 	{
6256 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
6257 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
6258 	    {
6259 	      int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
6260 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
6261 			      && (UINTVAL (inner_const)
6262 				  & (HOST_WIDE_INT_1U
6263 				     << sign_bitnum)));
6264 
6265 	      switch (code)
6266 		{
6267 		case EQ:
6268 		case LEU:
6269 		  return const0_rtx;
6270 		case NE:
6271 		case GTU:
6272 		  return const_true_rtx;
6273 		case LT:
6274 		case LE:
6275 		  if (has_sign)
6276 		    return const_true_rtx;
6277 		  break;
6278 		case GT:
6279 		case GE:
6280 		  if (has_sign)
6281 		    return const0_rtx;
6282 		  break;
6283 		default:
6284 		  break;
6285 		}
6286 	    }
6287 	}
6288     }
6289 
6290   /* Optimize comparison of ABS with zero.  */
6291   if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
6292       && (GET_CODE (trueop0) == ABS
6293 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
6294 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
6295     {
6296       switch (code)
6297 	{
6298 	case LT:
6299 	  /* Optimize abs(x) < 0.0.  */
6300 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
6301 	    return const0_rtx;
6302 	  break;
6303 
6304 	case GE:
6305 	  /* Optimize abs(x) >= 0.0.  */
6306 	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
6307 	    return const_true_rtx;
6308 	  break;
6309 
6310 	case UNGE:
6311 	  /* Optimize ! (abs(x) < 0.0).  */
6312 	  return const_true_rtx;
6313 
6314 	default:
6315 	  break;
6316 	}
6317     }
6318 
6319   return 0;
6320 }
6321 
6322 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6323    where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6324    or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the expression
6325    can be simplified to that, or NULL_RTX if not.
6326    Assume X is compared against zero with CMP_CODE and the true
6327    arm is TRUE_VAL and the false arm is FALSE_VAL.  */
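/* For example, (eq x (const_int 0)) ? (const_int 32) : (ctz:SI x) can be
   collapsed to (ctz:SI x) on a target whose CTZ_DEFINED_VALUE_AT_ZERO
   says that CTZ of zero is 32 in SImode.  */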
6328 
6329 rtx
6330 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
6331 					 rtx true_val, rtx false_val)
6332 {
6333   if (cmp_code != EQ && cmp_code != NE)
6334     return NULL_RTX;
6335 
6336   /* Result on X == 0 and X != 0 respectively.  */
6337   rtx on_zero, on_nonzero;
6338   if (cmp_code == EQ)
6339     {
6340       on_zero = true_val;
6341       on_nonzero = false_val;
6342     }
6343   else
6344     {
6345       on_zero = false_val;
6346       on_nonzero = true_val;
6347     }
6348 
6349   rtx_code op_code = GET_CODE (on_nonzero);
6350   if ((op_code != CLZ && op_code != CTZ)
6351       || !rtx_equal_p (XEXP (on_nonzero, 0), x)
6352       || !CONST_INT_P (on_zero))
6353     return NULL_RTX;
6354 
6355   HOST_WIDE_INT op_val;
6356   scalar_int_mode mode ATTRIBUTE_UNUSED
6357     = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
6358   if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
6359        || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
6360       && op_val == INTVAL (on_zero))
6361     return on_nonzero;
6362 
6363   return NULL_RTX;
6364 }
6365 
6366 /* Try to simplify X given that it appears within operand OP of a
6367    VEC_MERGE operation whose mask is MASK.  X need not use the same
6368    vector mode as the VEC_MERGE, but it must have the same number of
6369    elements.
6370 
6371    Return the simplified X on success, otherwise return NULL_RTX.  */
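/* For example, when simplifying operand 0 of an outer
   (vec_merge ... ... M), an inner (vec_merge A B M) with the same mask
   can be replaced by A, since only the lanes selected by M are used.  */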
6372 
6373 rtx
6374 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
6375 {
6376   gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
6377   poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
6378   if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
6379     {
6380       if (side_effects_p (XEXP (x, 1 - op)))
6381 	return NULL_RTX;
6382 
6383       return XEXP (x, op);
6384     }
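  /* Otherwise recurse into the operands: for a unary operation on a
     vector with the same number of elements, push the mask into the
     operand and rebuild the operation if anything simplified.  */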
6385   if (UNARY_P (x)
6386       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6387       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6388     {
6389       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6390       if (top0)
6391 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6392 				   GET_MODE (XEXP (x, 0)));
6393     }
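  /* Likewise for binary operations and comparisons whose operands are
     vectors with the same number of elements.  */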
6394   if (BINARY_P (x)
6395       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6396       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6397       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6398       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6399     {
6400       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6401       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6402       if (top0 || top1)
6403 	{
6404 	  if (COMPARISON_P (x))
6405 	    return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6406 					    GET_MODE (XEXP (x, 0)) != VOIDmode
6407 					    ? GET_MODE (XEXP (x, 0))
6408 					    : GET_MODE (XEXP (x, 1)),
6409 					    top0 ? top0 : XEXP (x, 0),
6410 					    top1 ? top1 : XEXP (x, 1));
6411 	  else
6412 	    return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6413 					top0 ? top0 : XEXP (x, 0),
6414 					top1 ? top1 : XEXP (x, 1));
6415 	}
6416     }
6417   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6418       && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6419       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6420       && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6421       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6422       && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6423       && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6424     {
6425       rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6426       rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6427       rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6428       if (top0 || top1 || top2)
6429 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6430 				     GET_MODE (XEXP (x, 0)),
6431 				     top0 ? top0 : XEXP (x, 0),
6432 				     top1 ? top1 : XEXP (x, 1),
6433 				     top2 ? top2 : XEXP (x, 2));
6434     }
6435   return NULL_RTX;
6436 }
6437 
6438 
6439 /* Simplify CODE, an operation with result mode MODE and three operands,
6440    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
6441    a constant.  Return 0 if no simplification is possible.  */
6442 
6443 rtx
6444 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
6445 					      machine_mode op0_mode,
6446 					      rtx op0, rtx op1, rtx op2)
6447 {
6448   bool any_change = false;
6449   rtx tem, trueop2;
6450   scalar_int_mode int_mode, int_op0_mode;
6451   unsigned int n_elts;
6452 
6453   switch (code)
6454     {
6455     case FMA:
6456       /* Simplify negations around the multiplication.  */
6457       /* -a * -b + c  =>  a * b + c.  */
6458       if (GET_CODE (op0) == NEG)
6459 	{
6460 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
6461 	  if (tem)
6462 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6463 	}
6464       else if (GET_CODE (op1) == NEG)
6465 	{
6466 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
6467 	  if (tem)
6468 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6469 	}
6470 
6471       /* Canonicalize the two multiplication operands.  */
6472       /* a * -b + c  =>  -b * a + c.  */
6473       if (swap_commutative_operands_p (op0, op1))
6474 	std::swap (op0, op1), any_change = true;
6475 
6476       if (any_change)
6477 	return gen_rtx_FMA (mode, op0, op1, op2);
6478       return NULL_RTX;
6479 
6480     case SIGN_EXTRACT:
6481     case ZERO_EXTRACT:
6482       if (CONST_INT_P (op0)
6483 	  && CONST_INT_P (op1)
6484 	  && CONST_INT_P (op2)
6485 	  && is_a <scalar_int_mode> (mode, &int_mode)
6486 	  && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6487 	  && HWI_COMPUTABLE_MODE_P (int_mode))
6488 	{
6489 	  /* Extracting a bit-field from a constant.  */
6490 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
6491 	  HOST_WIDE_INT op1val = INTVAL (op1);
6492 	  HOST_WIDE_INT op2val = INTVAL (op2);
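	  /* OP1 is the number of bits to extract and OP2 the position of
	     the field's first bit; shift VAL so that the field starts at
	     bit 0.  */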
6493 	  if (!BITS_BIG_ENDIAN)
6494 	    val >>= op2val;
6495 	  else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6496 	    val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6497 	  else
6498 	    /* Not enough information to calculate the bit position.  */
6499 	    break;
6500 
6501 	  if (HOST_BITS_PER_WIDE_INT != op1val)
6502 	    {
6503 	      /* First zero-extend.  */
6504 	      val &= (HOST_WIDE_INT_1U << op1val) - 1;
6505 	      /* If desired, propagate sign bit.  */
6506 	      if (code == SIGN_EXTRACT
6507 		  && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6508 		     != 0)
6509 		val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6510 	    }
6511 
6512 	  return gen_int_mode (val, int_mode);
6513 	}
6514       break;
6515 
6516     case IF_THEN_ELSE:
6517       if (CONST_INT_P (op0))
6518 	return op0 != const0_rtx ? op1 : op2;
6519 
6520       /* Convert c ? a : a into "a".  */
6521       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6522 	return op1;
6523 
6524       /* Convert a != b ? a : b into "a".  */
6525       if (GET_CODE (op0) == NE
6526 	  && ! side_effects_p (op0)
6527 	  && ! HONOR_NANS (mode)
6528 	  && ! HONOR_SIGNED_ZEROS (mode)
6529 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6530 	       && rtx_equal_p (XEXP (op0, 1), op2))
6531 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6532 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6533 	return op1;
6534 
6535       /* Convert a == b ? a : b into "b".  */
6536       if (GET_CODE (op0) == EQ
6537 	  && ! side_effects_p (op0)
6538 	  && ! HONOR_NANS (mode)
6539 	  && ! HONOR_SIGNED_ZEROS (mode)
6540 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
6541 	       && rtx_equal_p (XEXP (op0, 1), op2))
6542 	      || (rtx_equal_p (XEXP (op0, 0), op2)
6543 		  && rtx_equal_p (XEXP (op0, 1), op1))))
6544 	return op2;
6545 
6546       /* Convert (!c) != {0,...,0} ? a : b into
6547          c != {0,...,0} ? b : a for vector modes.  */
6548       if (VECTOR_MODE_P (GET_MODE (op1))
6549 	  && GET_CODE (op0) == NE
6550 	  && GET_CODE (XEXP (op0, 0)) == NOT
6551 	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6552 	{
6553 	  rtx cv = XEXP (op0, 1);
6554 	  int nunits;
6555 	  bool ok = true;
6556 	  if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6557 	    ok = false;
6558 	  else
6559 	    for (int i = 0; i < nunits; ++i)
6560 	      if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6561 		{
6562 		  ok = false;
6563 		  break;
6564 		}
6565 	  if (ok)
6566 	    {
6567 	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6568 					XEXP (XEXP (op0, 0), 0),
6569 					XEXP (op0, 1));
6570 	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6571 	      return retval;
6572 	    }
6573 	}
6574 
6575       /* Convert x == 0 ? N : clz (x) into clz (x) when
6576 	 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6577 	 Similarly for ctz (x).  */
6578       if (COMPARISON_P (op0) && !side_effects_p (op0)
6579 	  && XEXP (op0, 1) == const0_rtx)
6580 	{
6581 	  rtx simplified
6582 	    = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6583 				     op1, op2);
6584 	  if (simplified)
6585 	    return simplified;
6586 	}
6587 
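      /* If the condition is itself a comparison, try to fold the whole
	 IF_THEN_ELSE: when the arms are STORE_FLAG_VALUE and zero the
	 result is just the comparison (or its reverse), and if the
	 comparison folds to a constant we can pick an arm directly.  */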
6588       if (COMPARISON_P (op0) && ! side_effects_p (op0))
6589 	{
6590 	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6591 					? GET_MODE (XEXP (op0, 1))
6592 					: GET_MODE (XEXP (op0, 0)));
6593 	  rtx temp;
6594 
6595 	  /* Look for happy constants in op1 and op2.  */
6596 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
6597 	    {
6598 	      HOST_WIDE_INT t = INTVAL (op1);
6599 	      HOST_WIDE_INT f = INTVAL (op2);
6600 
6601 	      if (t == STORE_FLAG_VALUE && f == 0)
6602 	        code = GET_CODE (op0);
6603 	      else if (t == 0 && f == STORE_FLAG_VALUE)
6604 		{
6605 		  enum rtx_code tmp;
6606 		  tmp = reversed_comparison_code (op0, NULL);
6607 		  if (tmp == UNKNOWN)
6608 		    break;
6609 		  code = tmp;
6610 		}
6611 	      else
6612 		break;
6613 
6614 	      return simplify_gen_relational (code, mode, cmp_mode,
6615 					      XEXP (op0, 0), XEXP (op0, 1));
6616 	    }
6617 
6618 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6619 			  			cmp_mode, XEXP (op0, 0),
6620 						XEXP (op0, 1));
6621 
6622 	  /* See if any simplifications were possible.  */
6623 	  if (temp)
6624 	    {
6625 	      if (CONST_INT_P (temp))
6626 		return temp == const0_rtx ? op2 : op1;
6627 	      else if (temp)
6628 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6629 	    }
6630 	}
6631       break;
6632 
6633     case VEC_MERGE:
6634       gcc_assert (GET_MODE (op0) == mode);
6635       gcc_assert (GET_MODE (op1) == mode);
6636       gcc_assert (VECTOR_MODE_P (mode));
6637       trueop2 = avoid_constant_pool_reference (op2);
6638       if (CONST_INT_P (trueop2)
6639 	  && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6640 	{
6641 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6642 	  unsigned HOST_WIDE_INT mask;
6643 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
6644 	    mask = -1;
6645 	  else
6646 	    mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6647 
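	  /* Bit I of SEL selects element I from OP0; a clear bit selects
	     the corresponding element of OP1.  MASK covers the elements
	     that actually exist in MODE.  */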
6648 	  if (!(sel & mask) && !side_effects_p (op0))
6649 	    return op1;
6650 	  if ((sel & mask) == mask && !side_effects_p (op1))
6651 	    return op0;
6652 
6653 	  rtx trueop0 = avoid_constant_pool_reference (op0);
6654 	  rtx trueop1 = avoid_constant_pool_reference (op1);
6655 	  if (GET_CODE (trueop0) == CONST_VECTOR
6656 	      && GET_CODE (trueop1) == CONST_VECTOR)
6657 	    {
6658 	      rtvec v = rtvec_alloc (n_elts);
6659 	      unsigned int i;
6660 
6661 	      for (i = 0; i < n_elts; i++)
6662 		RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6663 				    ? CONST_VECTOR_ELT (trueop0, i)
6664 				    : CONST_VECTOR_ELT (trueop1, i));
6665 	      return gen_rtx_CONST_VECTOR (mode, v);
6666 	    }
6667 
6668 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6669 	     if no element from a appears in the result.  */
6670 	  if (GET_CODE (op0) == VEC_MERGE)
6671 	    {
6672 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
6673 	      if (CONST_INT_P (tem))
6674 		{
6675 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6676 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6677 		    return simplify_gen_ternary (code, mode, mode,
6678 						 XEXP (op0, 1), op1, op2);
6679 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6680 		    return simplify_gen_ternary (code, mode, mode,
6681 						 XEXP (op0, 0), op1, op2);
6682 		}
6683 	    }
6684 	  if (GET_CODE (op1) == VEC_MERGE)
6685 	    {
6686 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
6687 	      if (CONST_INT_P (tem))
6688 		{
6689 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6690 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6691 		    return simplify_gen_ternary (code, mode, mode,
6692 						 op0, XEXP (op1, 1), op2);
6693 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6694 		    return simplify_gen_ternary (code, mode, mode,
6695 						 op0, XEXP (op1, 0), op2);
6696 		}
6697 	    }
6698 
6699 	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6700 	     with a.  */
6701 	  if (GET_CODE (op0) == VEC_DUPLICATE
6702 	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6703 	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6704 	      && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6705 	    {
6706 	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6707 	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
6708 		{
6709 		  if (XEXP (XEXP (op0, 0), 0) == op1
6710 		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6711 		    return op1;
6712 		}
6713 	    }
6714 	  /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6715 	     (const_int N))
6716 	     with (vec_concat (X) (B)) if N == 1 or
6717 	     (vec_concat (A) (X)) if N == 2.  */
6718 	  if (GET_CODE (op0) == VEC_DUPLICATE
6719 	      && GET_CODE (op1) == CONST_VECTOR
6720 	      && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6721 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6722 	      && IN_RANGE (sel, 1, 2))
6723 	    {
6724 	      rtx newop0 = XEXP (op0, 0);
6725 	      rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6726 	      if (sel == 2)
6727 		std::swap (newop0, newop1);
6728 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6729 	    }
6730 	  /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6731 	     with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6732 	     Only applies for vectors of two elements.  */
6733 	  if (GET_CODE (op0) == VEC_DUPLICATE
6734 	      && GET_CODE (op1) == VEC_CONCAT
6735 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6736 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6737 	      && IN_RANGE (sel, 1, 2))
6738 	    {
6739 	      rtx newop0 = XEXP (op0, 0);
6740 	      rtx newop1 = XEXP (op1, 2 - sel);
6741 	      rtx otherop = XEXP (op1, sel - 1);
6742 	      if (sel == 2)
6743 		std::swap (newop0, newop1);
6744 	      /* Don't want to throw away the other part of the vec_concat if
6745 		 it has side-effects.  */
6746 	      if (!side_effects_p (otherop))
6747 		return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6748 	    }
6749 
6750 	  /* Replace:
6751 
6752 	      (vec_merge:outer (vec_duplicate:outer x:inner)
6753 			       (subreg:outer y:inner 0)
6754 			       (const_int N))
6755 
6756 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6757 	     or (vec_concat:outer y:inner x:inner) if N == 2.
6758 
6759 	     Implicitly, this means we have a paradoxical subreg, but such
6760 	     a check is cheap, so make it anyway.
6761 
6762 	     Only applies for vectors of two elements.  */
6763 	  if (GET_CODE (op0) == VEC_DUPLICATE
6764 	      && GET_CODE (op1) == SUBREG
6765 	      && GET_MODE (op1) == GET_MODE (op0)
6766 	      && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6767 	      && paradoxical_subreg_p (op1)
6768 	      && subreg_lowpart_p (op1)
6769 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6770 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6771 	      && IN_RANGE (sel, 1, 2))
6772 	    {
6773 	      rtx newop0 = XEXP (op0, 0);
6774 	      rtx newop1 = SUBREG_REG (op1);
6775 	      if (sel == 2)
6776 		std::swap (newop0, newop1);
6777 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6778 	    }
6779 
6780 	  /* Same as above but with switched operands:
6781 		Replace (vec_merge:outer (subreg:outer x:inner 0)
6782 					 (vec_duplicate:outer y:inner)
6783 			       (const_int N))
6784 
6785 	     with (vec_concat:outer x:inner y:inner) if N == 1,
6786 	     or (vec_concat:outer y:inner x:inner) if N == 2.  */
6787 	  if (GET_CODE (op1) == VEC_DUPLICATE
6788 	      && GET_CODE (op0) == SUBREG
6789 	      && GET_MODE (op0) == GET_MODE (op1)
6790 	      && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6791 	      && paradoxical_subreg_p (op0)
6792 	      && subreg_lowpart_p (op0)
6793 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6794 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6795 	      && IN_RANGE (sel, 1, 2))
6796 	    {
6797 	      rtx newop0 = SUBREG_REG (op0);
6798 	      rtx newop1 = XEXP (op1, 0);
6799 	      if (sel == 2)
6800 		std::swap (newop0, newop1);
6801 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6802 	    }
6803 
6804 	  /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6805 				 (const_int n))
6806 	     with (vec_concat x y) or (vec_concat y x) depending on value
6807 	     of N.  */
6808 	  if (GET_CODE (op0) == VEC_DUPLICATE
6809 	      && GET_CODE (op1) == VEC_DUPLICATE
6810 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6811 	      && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6812 	      && IN_RANGE (sel, 1, 2))
6813 	    {
6814 	      rtx newop0 = XEXP (op0, 0);
6815 	      rtx newop1 = XEXP (op1, 0);
6816 	      if (sel == 2)
6817 		std::swap (newop0, newop1);
6818 
6819 	      return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6820 	    }
6821 	}
6822 
6823       if (rtx_equal_p (op0, op1)
6824 	  && !side_effects_p (op2) && !side_effects_p (op1))
6825 	return op0;
6826 
6827       if (!side_effects_p (op2))
6828 	{
6829 	  rtx top0
6830 	    = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6831 	  rtx top1
6832 	    = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6833 	  if (top0 || top1)
6834 	    return simplify_gen_ternary (code, mode, mode,
6835 					 top0 ? top0 : op0,
6836 					 top1 ? top1 : op1, op2);
6837 	}
6838 
6839       break;
6840 
6841     default:
6842       gcc_unreachable ();
6843     }
6844 
6845   return 0;
6846 }
6847 
6848 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6849    starting at byte FIRST_BYTE.  Return true on success and add the
6850    bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6851    that the bytes follow target memory order.  Leave BYTES unmodified
6852    on failure.
6853 
6854    MODE is the mode of X.  The caller must reserve NUM_BYTES bytes in
6855    BYTES before calling this function.  */
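/* A minimal usage sketch (assuming a target where SImode occupies four
   bytes):

     auto_vec<target_unit, 16> bytes (4);
     rtx decoded = NULL_RTX;
     if (native_encode_rtx (SImode, GEN_INT (0x1234), bytes, 0, 4))
       decoded = native_decode_rtx (SImode, bytes, 0);

   Decoding the freshly encoded bytes should reproduce the original
   constant.  */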
6856 
6857 bool
6858 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6859 		   unsigned int first_byte, unsigned int num_bytes)
6860 {
6861   /* Check the mode is sensible.  */
6862   gcc_assert (GET_MODE (x) == VOIDmode
6863 	      ? is_a <scalar_int_mode> (mode)
6864 	      : mode == GET_MODE (x));
6865 
6866   if (GET_CODE (x) == CONST_VECTOR)
6867     {
6868       /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6869 	 is necessary.  The only complication is that MODE_VECTOR_BOOL
6870 	 vectors can have several elements per byte.  */
6871       unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6872 						   GET_MODE_NUNITS (mode));
6873       unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6874       if (elt_bits < BITS_PER_UNIT)
6875 	{
6876 	  /* This is the only case in which elements can be smaller than
6877 	     a byte.  */
6878 	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6879 	  for (unsigned int i = 0; i < num_bytes; ++i)
6880 	    {
6881 	      target_unit value = 0;
6882 	      for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6883 		{
6884 		  value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6885 		  elt += 1;
6886 		}
6887 	      bytes.quick_push (value);
6888 	    }
6889 	  return true;
6890 	}
6891 
6892       unsigned int start = bytes.length ();
6893       unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6894       /* Make FIRST_BYTE relative to ELT.  */
6895       first_byte %= elt_bytes;
6896       while (num_bytes > 0)
6897 	{
6898 	  /* Work out how many bytes we want from element ELT.  */
6899 	  unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6900 	  if (!native_encode_rtx (GET_MODE_INNER (mode),
6901 				  CONST_VECTOR_ELT (x, elt), bytes,
6902 				  first_byte, chunk_bytes))
6903 	    {
6904 	      bytes.truncate (start);
6905 	      return false;
6906 	    }
6907 	  elt += 1;
6908 	  first_byte = 0;
6909 	  num_bytes -= chunk_bytes;
6910 	}
6911       return true;
6912     }
6913 
6914   /* All subsequent cases are limited to scalars.  */
6915   scalar_mode smode;
6916   if (!is_a <scalar_mode> (mode, &smode))
6917     return false;
6918 
6919   /* Make sure that the region is in range.  */
6920   unsigned int end_byte = first_byte + num_bytes;
6921   unsigned int mode_bytes = GET_MODE_SIZE (smode);
6922   gcc_assert (end_byte <= mode_bytes);
6923 
6924   if (CONST_SCALAR_INT_P (x))
6925     {
6926       /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6927 	 and WORDS_BIG_ENDIAN.  Use the subreg machinery to get the lsb
6928 	 position of each byte.  */
6929       rtx_mode_t value (x, smode);
6930       wide_int_ref value_wi (value);
6931       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6932 	{
6933 	  /* Always constant because the inputs are.  */
6934 	  unsigned int lsb
6935 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6936 	  /* Operate directly on the encoding rather than using
6937 	     wi::extract_uhwi, so that we preserve the sign or zero
6938 	     extension for modes that are not a whole number of bits in
6939 	     size.  (Zero extension is only used for the combination of
6940 	     innermode == BImode && STORE_FLAG_VALUE == 1).  */
6941 	  unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6942 	  unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6943 	  unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6944 	  bytes.quick_push (uhwi >> shift);
6945 	}
6946       return true;
6947     }
6948 
6949   if (CONST_DOUBLE_P (x))
6950     {
6951       /* real_to_target produces an array of integers in target memory order.
6952 	 All integers before the last one have 32 bits; the last one may
6953 	 have 32 bits or fewer, depending on whether the mode bitsize
6954 	 is divisible by 32.  Each of these integers is then laid out
6955 	 in target memory as any other integer would be.  */
6956       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6957       real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6958 
6959       /* The (maximum) number of target bytes per element of el32.  */
6960       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6961       gcc_assert (bytes_per_el32 != 0);
6962 
6963       /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6964 	 handling above.  */
6965       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6966 	{
6967 	  unsigned int index = byte / bytes_per_el32;
6968 	  unsigned int subbyte = byte % bytes_per_el32;
6969 	  unsigned int int_bytes = MIN (bytes_per_el32,
6970 					mode_bytes - index * bytes_per_el32);
6971 	  /* Always constant because the inputs are.  */
6972 	  unsigned int lsb
6973 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6974 	  bytes.quick_push ((unsigned long) el32[index] >> lsb);
6975 	}
6976       return true;
6977     }
6978 
6979   if (GET_CODE (x) == CONST_FIXED)
6980     {
6981       for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6982 	{
6983 	  /* Always constant because the inputs are.  */
6984 	  unsigned int lsb
6985 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6986 	  unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6987 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
6988 	    {
6989 	      lsb -= HOST_BITS_PER_WIDE_INT;
6990 	      piece = CONST_FIXED_VALUE_HIGH (x);
6991 	    }
6992 	  bytes.quick_push (piece >> lsb);
6993 	}
6994       return true;
6995     }
6996 
6997   return false;
6998 }
6999 
7000 /* Read a vector of mode MODE from the target memory image given by BYTES,
7001    starting at byte FIRST_BYTE.  The vector is known to be encodable using
7002    NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
7003    and BYTES is known to have enough bytes to supply NPATTERNS *
7004    NELTS_PER_PATTERN vector elements.  Each element of BYTES contains
7005    BITS_PER_UNIT bits and the bytes are in target memory order.
7006 
7007    Return the vector on success, otherwise return NULL_RTX.  */
7008 
7009 rtx
7010 native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
7011 			  unsigned int first_byte, unsigned int npatterns,
7012 			  unsigned int nelts_per_pattern)
7013 {
7014   rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
7015 
7016   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7017 					       GET_MODE_NUNITS (mode));
7018   if (elt_bits < BITS_PER_UNIT)
7019     {
7020       /* This is the only case in which elements can be smaller than a byte.
7021 	 Element 0 is always in the lsb of the containing byte.  */
7022       gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
7023       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7024 	{
7025 	  unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
7026 	  unsigned int byte_index = bit_index / BITS_PER_UNIT;
7027 	  unsigned int lsb = bit_index % BITS_PER_UNIT;
7028 	  builder.quick_push (bytes[byte_index] & (1 << lsb)
7029 			      ? CONST1_RTX (BImode)
7030 			      : CONST0_RTX (BImode));
7031 	}
7032     }
7033   else
7034     {
7035       for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7036 	{
7037 	  rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
7038 	  if (!x)
7039 	    return NULL_RTX;
7040 	  builder.quick_push (x);
7041 	  first_byte += elt_bits / BITS_PER_UNIT;
7042 	}
7043     }
7044   return builder.build ();
7045 }
7046 
7047 /* Read an rtx of mode MODE from the target memory image given by BYTES,
7048    starting at byte FIRST_BYTE.  Each element of BYTES contains BITS_PER_UNIT
7049    bits and the bytes are in target memory order.  The image has enough
7050    values to specify all bytes of MODE.
7051 
7052    Return the rtx on success, otherwise return NULL_RTX.  */
7053 
7054 rtx
7055 native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
7056 		   unsigned int first_byte)
7057 {
7058   if (VECTOR_MODE_P (mode))
7059     {
7060       /* If we know at compile time how many elements there are,
7061 	 pull each element directly from BYTES.  */
7062       unsigned int nelts;
7063       if (GET_MODE_NUNITS (mode).is_constant (&nelts))
7064 	return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
7065       return NULL_RTX;
7066     }
7067 
7068   scalar_int_mode imode;
7069   if (is_a <scalar_int_mode> (mode, &imode)
7070       && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
7071     {
7072       /* Pull the bytes msb first, so that we can use simple
7073 	 shift-and-insert wide_int operations.  */
7074       unsigned int size = GET_MODE_SIZE (imode);
7075       wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
7076       for (unsigned int i = 0; i < size; ++i)
7077 	{
7078 	  unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
7079 	  /* Always constant because the inputs are.  */
7080 	  unsigned int subbyte
7081 	    = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
7082 	  result <<= BITS_PER_UNIT;
7083 	  result |= bytes[first_byte + subbyte];
7084 	}
7085       return immed_wide_int_const (result, imode);
7086     }
7087 
7088   scalar_float_mode fmode;
7089   if (is_a <scalar_float_mode> (mode, &fmode))
7090     {
7091       /* We need to build an array of integers in target memory order.
7092 	 All integers before the last one have 32 bits; the last one may
7093 	 have 32 bits or fewer, depending on whether the mode bitsize
7094 	 is divisible by 32.  */
7095       long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
7096       unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
7097       memset (el32, 0, num_el32 * sizeof (long));
7098 
7099       /* The (maximum) number of target bytes per element of el32.  */
7100       unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
7101       gcc_assert (bytes_per_el32 != 0);
7102 
7103       unsigned int mode_bytes = GET_MODE_SIZE (fmode);
7104       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7105 	{
7106 	  unsigned int index = byte / bytes_per_el32;
7107 	  unsigned int subbyte = byte % bytes_per_el32;
7108 	  unsigned int int_bytes = MIN (bytes_per_el32,
7109 					mode_bytes - index * bytes_per_el32);
7110 	  /* Always constant because the inputs are.  */
7111 	  unsigned int lsb
7112 	    = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
7113 	  el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
7114 	}
7115       REAL_VALUE_TYPE r;
7116       real_from_target (&r, el32, fmode);
7117       return const_double_from_real_value (r, fmode);
7118     }
7119 
7120   if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
7121     {
7122       scalar_mode smode = as_a <scalar_mode> (mode);
7123       FIXED_VALUE_TYPE f;
7124       f.data.low = 0;
7125       f.data.high = 0;
7126       f.mode = smode;
7127 
7128       unsigned int mode_bytes = GET_MODE_SIZE (smode);
7129       for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7130 	{
7131 	  /* Always constant because the inputs are.  */
7132 	  unsigned int lsb
7133 	    = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7134 	  unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
7135 	  if (lsb >= HOST_BITS_PER_WIDE_INT)
7136 	    f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
7137 	  else
7138 	    f.data.low |= unit << lsb;
7139 	}
7140       return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
7141     }
7142 
7143   return NULL_RTX;
7144 }
7145 
7146 /* Simplify a byte offset BYTE into CONST_VECTOR X.  The main purpose
7147    is to convert a runtime BYTE value into a constant one.  */
7148 
7149 static poly_uint64
7150 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
7151 {
7152   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
7153   machine_mode mode = GET_MODE (x);
7154   unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7155 					       GET_MODE_NUNITS (mode));
7156   /* The number of bits needed to encode one element from each pattern.  */
7157   unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
7158 
7159   /* Identify the start point in terms of a sequence number and a byte offset
7160      within that sequence.  */
7161   poly_uint64 first_sequence;
7162   unsigned HOST_WIDE_INT subbit;
7163   if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
7164 		       &first_sequence, &subbit))
7165     {
7166       unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7167       if (nelts_per_pattern == 1)
7168 	/* This is a duplicated vector, so the value of FIRST_SEQUENCE
7169 	   doesn't matter.  */
7170 	byte = subbit / BITS_PER_UNIT;
7171       else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
7172 	{
7173 	  /* The subreg drops the first element from each pattern and
7174 	     only uses the second element.  Find the first sequence
7175 	     that starts on a byte boundary.  */
7176 	  subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
7177 	  byte = subbit / BITS_PER_UNIT;
7178 	}
7179     }
7180   return byte;
7181 }
7182 
7183 /* Subroutine of simplify_subreg in which:
7184 
7185    - X is known to be a CONST_VECTOR
7186    - OUTERMODE is known to be a vector mode
7187 
7188    Try to handle the subreg by operating on the CONST_VECTOR encoding
7189    rather than on each individual element of the CONST_VECTOR.
7190 
7191    Return the simplified subreg on success, otherwise return NULL_RTX.  */
7192 
7193 static rtx
7194 simplify_const_vector_subreg (machine_mode outermode, rtx x,
7195 			      machine_mode innermode, unsigned int first_byte)
7196 {
7197   /* Paradoxical subregs of vectors have dubious semantics.  */
7198   if (paradoxical_subreg_p (outermode, innermode))
7199     return NULL_RTX;
7200 
7201   /* We can only preserve the semantics of a stepped pattern if the new
7202      vector element is the same as the original one.  */
7203   if (CONST_VECTOR_STEPPED_P (x)
7204       && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
7205     return NULL_RTX;
7206 
7207   /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes.  */
7208   unsigned int x_elt_bits
7209     = vector_element_size (GET_MODE_BITSIZE (innermode),
7210 			   GET_MODE_NUNITS (innermode));
7211   unsigned int out_elt_bits
7212     = vector_element_size (GET_MODE_BITSIZE (outermode),
7213 			   GET_MODE_NUNITS (outermode));
7214 
7215   /* The number of bits needed to encode one element from every pattern
7216      of the original vector.  */
7217   unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
7218 
7219   /* The number of bits needed to encode one element from every pattern
7220      of the result.  */
7221   unsigned int out_sequence_bits
7222     = least_common_multiple (x_sequence_bits, out_elt_bits);
7223 
7224   /* Work out the number of interleaved patterns in the output vector
7225      and the number of encoded elements per pattern.  */
7226   unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
7227   unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7228 
7229   /* The encoding scheme requires the number of elements to be a multiple
7230      of the number of patterns, so that each pattern appears at least once
7231      and so that the same number of elements appear from each pattern.  */
7232   bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
7233   unsigned int const_nunits;
7234   if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
7235       && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
7236     {
7237       /* Either the encoding is invalid, or applying it would give us
7238 	 more elements than we need.  Just encode each element directly.  */
7239       out_npatterns = const_nunits;
7240       nelts_per_pattern = 1;
7241     }
7242   else if (!ok_p)
7243     return NULL_RTX;
7244 
7245   /* Get enough bytes of X to form the new encoding.  */
7246   unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
7247   unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
7248   auto_vec<target_unit, 128> buffer (buffer_bytes);
7249   if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
7250     return NULL_RTX;
7251 
7252   /* Reencode the bytes as OUTERMODE.  */
7253   return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
7254 				   nelts_per_pattern);
7255 }
7256 
7257 /* Try to simplify a subreg of a constant by encoding the subreg region
7258    as a sequence of target bytes and reading them back in the new mode.
7259    Return the new value on success, otherwise return null.
7260 
7261    The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7262    and byte offset FIRST_BYTE.  */
7263 
7264 static rtx
7265 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
7266 		       machine_mode innermode, unsigned int first_byte)
7267 {
7268   unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
7269   auto_vec<target_unit, 128> buffer (buffer_bytes);
7270 
7271   /* Some ports misuse CCmode.  */
7272   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
7273     return x;
7274 
7275   /* Paradoxical subregs read undefined values for bytes outside of the
7276      inner value.  However, we have traditionally always sign-extended
7277      integer constants and zero-extended others.  */
7278   unsigned int inner_bytes = buffer_bytes;
7279   if (paradoxical_subreg_p (outermode, innermode))
7280     {
7281       if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
7282 	return NULL_RTX;
7283 
7284       target_unit filler = 0;
7285       if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
7286 	filler = -1;
7287 
7288       /* Add any leading bytes due to big-endian layout.  The number of
7289 	 bytes must be constant because both modes have constant size.  */
7290       unsigned int leading_bytes
7291 	= -byte_lowpart_offset (outermode, innermode).to_constant ();
7292       for (unsigned int i = 0; i < leading_bytes; ++i)
7293 	buffer.quick_push (filler);
7294 
7295       if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7296 	return NULL_RTX;
7297 
7298       /* Add any trailing bytes due to little-endian layout.  */
7299       while (buffer.length () < buffer_bytes)
7300 	buffer.quick_push (filler);
7301     }
7302   else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7303     return NULL_RTX;
7304   rtx ret = native_decode_rtx (outermode, buffer, 0);
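  /* For composite float modes (e.g. IBM extended double) the decode is
     not guaranteed to be reversible, so only accept the result if
     re-encoding it reproduces the original bytes.  */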
7305   if (ret && MODE_COMPOSITE_P (outermode))
7306     {
7307       auto_vec<target_unit, 128> buffer2 (buffer_bytes);
7308       if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
7309 	return NULL_RTX;
7310       for (unsigned int i = 0; i < buffer_bytes; ++i)
7311 	if (buffer[i] != buffer2[i])
7312 	  return NULL_RTX;
7313     }
7314   return ret;
7315 }
7316 
7317 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7318    Return 0 if no simplifications are possible.  */
7319 rtx
7320 simplify_context::simplify_subreg (machine_mode outermode, rtx op,
7321 				   machine_mode innermode, poly_uint64 byte)
7322 {
7323   /* Little bit of sanity checking.  */
7324   gcc_assert (innermode != VOIDmode);
7325   gcc_assert (outermode != VOIDmode);
7326   gcc_assert (innermode != BLKmode);
7327   gcc_assert (outermode != BLKmode);
7328 
7329   gcc_assert (GET_MODE (op) == innermode
7330 	      || GET_MODE (op) == VOIDmode);
7331 
7332   poly_uint64 outersize = GET_MODE_SIZE (outermode);
7333   if (!multiple_p (byte, outersize))
7334     return NULL_RTX;
7335 
7336   poly_uint64 innersize = GET_MODE_SIZE (innermode);
7337   if (maybe_ge (byte, innersize))
7338     return NULL_RTX;
7339 
7340   if (outermode == innermode && known_eq (byte, 0U))
7341     return op;
7342 
7343   if (GET_CODE (op) == CONST_VECTOR)
7344     byte = simplify_const_vector_byte_offset (op, byte);
7345 
7346   if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
7347     {
7348       rtx elt;
7349 
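      /* A subreg that starts on an element boundary of a vec_duplicate
	 is either a duplicate of the same scalar in the outer vector
	 mode or, if the outer mode is the element mode, the scalar
	 itself.  */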
7350       if (VECTOR_MODE_P (outermode)
7351 	  && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
7352 	  && vec_duplicate_p (op, &elt))
7353 	return gen_vec_duplicate (outermode, elt);
7354 
7355       if (outermode == GET_MODE_INNER (innermode)
7356 	  && vec_duplicate_p (op, &elt))
7357 	return elt;
7358     }
7359 
7360   if (CONST_SCALAR_INT_P (op)
7361       || CONST_DOUBLE_AS_FLOAT_P (op)
7362       || CONST_FIXED_P (op)
7363       || GET_CODE (op) == CONST_VECTOR)
7364     {
7365       unsigned HOST_WIDE_INT cbyte;
7366       if (byte.is_constant (&cbyte))
7367 	{
7368 	  if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
7369 	    {
7370 	      rtx tmp = simplify_const_vector_subreg (outermode, op,
7371 						      innermode, cbyte);
7372 	      if (tmp)
7373 		return tmp;
7374 	    }
7375 
7376 	  fixed_size_mode fs_outermode;
7377 	  if (is_a <fixed_size_mode> (outermode, &fs_outermode))
7378 	    return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
7379 	}
7380     }
7381 
7382   /* Changing mode twice with SUBREG => just change it once,
7383      or not at all if changing back to the starting mode.  */
7384   if (GET_CODE (op) == SUBREG)
7385     {
7386       machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7387       poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7388       rtx newx;
7389 
7390       if (outermode == innermostmode
7391 	  && known_eq (byte, 0U)
7392 	  && known_eq (SUBREG_BYTE (op), 0))
7393 	return SUBREG_REG (op);
7394 
7395       /* Work out the memory offset of the final OUTERMODE value relative
7396 	 to the inner value of OP.  */
7397       poly_int64 mem_offset = subreg_memory_offset (outermode,
7398 						    innermode, byte);
7399       poly_int64 op_mem_offset = subreg_memory_offset (op);
7400       poly_int64 final_offset = mem_offset + op_mem_offset;
7401 
7402       /* See whether resulting subreg will be paradoxical.  */
7403       if (!paradoxical_subreg_p (outermode, innermostmode))
7404 	{
7405 	  /* Bail out in case resulting subreg would be incorrect.  */
7406 	  if (maybe_lt (final_offset, 0)
7407 	      || maybe_ge (poly_uint64 (final_offset), innermostsize)
7408 	      || !multiple_p (final_offset, outersize))
7409 	    return NULL_RTX;
7410 	}
7411       else
7412 	{
7413 	  poly_int64 required_offset = subreg_memory_offset (outermode,
7414 							     innermostmode, 0);
7415 	  if (maybe_ne (final_offset, required_offset))
7416 	    return NULL_RTX;
7417 	  /* Paradoxical subregs always have byte offset 0.  */
7418 	  final_offset = 0;
7419 	}
7420 
7421       /* Recurse for further possible simplifications.  */
7422       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7423 			      final_offset);
7424       if (newx)
7425 	return newx;
7426       if (validate_subreg (outermode, innermostmode,
7427 			   SUBREG_REG (op), final_offset))
7428 	{
7429 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7430 	  if (SUBREG_PROMOTED_VAR_P (op)
7431 	      && SUBREG_PROMOTED_SIGN (op) >= 0
7432 	      && GET_MODE_CLASS (outermode) == MODE_INT
7433 	      && known_ge (outersize, innersize)
7434 	      && known_le (outersize, innermostsize)
7435 	      && subreg_lowpart_p (newx))
7436 	    {
7437 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
7438 	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7439 	    }
7440 	  return newx;
7441 	}
7442       return NULL_RTX;
7443     }
7444 
7445   /* SUBREG of a hard register => just change the register number
7446      and/or mode.  If the hard register is not valid in that mode,
7447      suppress this simplification.  If the hard register is the stack,
7448      frame, or argument pointer, leave this as a SUBREG.  */
7449 
7450   if (REG_P (op) && HARD_REGISTER_P (op))
7451     {
7452       unsigned int regno, final_regno;
7453 
7454       regno = REGNO (op);
7455       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7456       if (HARD_REGISTER_NUM_P (final_regno))
7457 	{
7458 	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7459 				      subreg_memory_offset (outermode,
7460 							    innermode, byte));
7461 
7462 	  /* Propagate original regno.  We don't have any way to specify
7463 	     the offset inside original regno, so do so only for lowpart.
7464 	     The information is used only by alias analysis that cannot
7465 	     grok partial registers anyway.  */
7466 
7467 	  if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7468 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7469 	  return x;
7470 	}
7471     }
7472 
7473   /* If we have a SUBREG of a register that we are replacing and we are
7474      replacing it with a MEM, make a new MEM and try replacing the
7475      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
7476      or if we would be widening it.  */
7477 
7478   if (MEM_P (op)
7479       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7480       /* Allow splitting of volatile memory references in case we don't
7481          have an instruction to move the whole thing.  */
7482       && (! MEM_VOLATILE_P (op)
7483 	  || ! have_insn_for (SET, innermode))
7484       && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
7485       && known_le (outersize, innersize))
7486     return adjust_address_nv (op, outermode, byte);
7487 
7488   /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7489      of two parts.  */
7490   if (GET_CODE (op) == CONCAT
7491       || GET_CODE (op) == VEC_CONCAT)
7492     {
7493       poly_uint64 final_offset;
7494       rtx part, res;
7495 
7496       machine_mode part_mode = GET_MODE (XEXP (op, 0));
7497       if (part_mode == VOIDmode)
7498 	part_mode = GET_MODE_INNER (GET_MODE (op));
7499       poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7500       if (known_lt (byte, part_size))
7501 	{
7502 	  part = XEXP (op, 0);
7503 	  final_offset = byte;
7504 	}
7505       else if (known_ge (byte, part_size))
7506 	{
7507 	  part = XEXP (op, 1);
7508 	  final_offset = byte - part_size;
7509 	}
7510       else
7511 	return NULL_RTX;
7512 
7513       if (maybe_gt (final_offset + outersize, part_size))
7514 	return NULL_RTX;
7515 
7516       part_mode = GET_MODE (part);
7517       if (part_mode == VOIDmode)
7518 	part_mode = GET_MODE_INNER (GET_MODE (op));
7519       res = simplify_subreg (outermode, part, part_mode, final_offset);
7520       if (res)
7521 	return res;
7522       if (validate_subreg (outermode, part_mode, part, final_offset))
7523 	return gen_rtx_SUBREG (outermode, part, final_offset);
7524       return NULL_RTX;
7525     }
7526 
7527   /* Simplify
7528 	(subreg (vec_merge (X)
7529 			   (vector)
7530 			   (const_int ((1 << N) | M)))
7531 		(N * sizeof (outermode)))
7532      to
7533 	(subreg (X) (N * sizeof (outermode)))
7534    */
7535   unsigned int idx;
7536   if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7537       && idx < HOST_BITS_PER_WIDE_INT
7538       && GET_CODE (op) == VEC_MERGE
7539       && GET_MODE_INNER (innermode) == outermode
7540       && CONST_INT_P (XEXP (op, 2))
7541       && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7542     return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
7543 
7544   /* A SUBREG resulting from a zero extension may fold to zero if
7545      it extracts higher bits than the ZERO_EXTEND's source provides.  */
7546   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7547     {
7548       poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7549       if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7550 	return CONST0_RTX (outermode);
7551     }
7552 
7553   scalar_int_mode int_outermode, int_innermode;
7554   if (is_a <scalar_int_mode> (outermode, &int_outermode)
7555       && is_a <scalar_int_mode> (innermode, &int_innermode)
7556       && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7557     {
7558       /* Handle polynomial integers.  The upper bits of a paradoxical
7559 	 subreg are undefined, so this is safe regardless of whether
7560 	 we're truncating or extending.  */
7561       if (CONST_POLY_INT_P (op))
7562 	{
7563 	  poly_wide_int val
7564 	    = poly_wide_int::from (const_poly_int_value (op),
7565 				   GET_MODE_PRECISION (int_outermode),
7566 				   SIGNED);
7567 	  return immed_wide_int_const (val, int_outermode);
7568 	}
7569 
7570       if (GET_MODE_PRECISION (int_outermode)
7571 	  < GET_MODE_PRECISION (int_innermode))
7572 	{
7573 	  rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7574 	  if (tem)
7575 	    return tem;
7576 	}
7577     }
7578 
7579   /* If OP is a vector comparison and the subreg is not changing the
7580      number of elements or the size of the elements, change the result
7581      of the comparison to the new mode.  */
7582   if (COMPARISON_P (op)
7583       && VECTOR_MODE_P (outermode)
7584       && VECTOR_MODE_P (innermode)
7585       && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7586       && known_eq (GET_MODE_UNIT_SIZE (outermode),
7587 		    GET_MODE_UNIT_SIZE (innermode)))
7588     return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7589 				    XEXP (op, 0), XEXP (op, 1));
7590   return NULL_RTX;
7591 }
7592 
7593 /* Make a SUBREG operation or equivalent if it folds.  */
7594 
7595 rtx
7596 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
7597 				       machine_mode innermode,
7598 				       poly_uint64 byte)
7599 {
7600   rtx newx;
7601 
7602   newx = simplify_subreg (outermode, op, innermode, byte);
7603   if (newx)
7604     return newx;
7605 
7606   if (GET_CODE (op) == SUBREG
7607       || GET_CODE (op) == CONCAT
7608       || GET_MODE (op) == VOIDmode)
7609     return NULL_RTX;
7610 
7611   if (MODE_COMPOSITE_P (outermode)
7612       && (CONST_SCALAR_INT_P (op)
7613 	  || CONST_DOUBLE_AS_FLOAT_P (op)
7614 	  || CONST_FIXED_P (op)
7615 	  || GET_CODE (op) == CONST_VECTOR))
7616     return NULL_RTX;
7617 
7618   if (validate_subreg (outermode, innermode, op, byte))
7619     return gen_rtx_SUBREG (outermode, op, byte);
7620 
7621   return NULL_RTX;
7622 }
7623 
7624 /* Generates a subreg to get the least significant part of EXPR (in mode
7625    INNER_MODE) to OUTER_MODE.  */
7626 
7627 rtx
7628 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
7629 				  machine_mode inner_mode)
7630 {
7631   return simplify_gen_subreg (outer_mode, expr, inner_mode,
7632 			      subreg_lowpart_offset (outer_mode, inner_mode));
7633 }
7634 
7635 /* Generate RTX to select element at INDEX out of vector OP.  */
7636 
7637 rtx
7638 simplify_context::simplify_gen_vec_select (rtx op, unsigned int index)
7639 {
7640   gcc_assert (VECTOR_MODE_P (GET_MODE (op)));
7641 
7642   scalar_mode imode = GET_MODE_INNER (GET_MODE (op));
7643 
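  /* If INDEX denotes the lowpart element, a simple lowpart subreg will
     do; otherwise fall back to an explicit VEC_SELECT.  */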
7644   if (known_eq (index * GET_MODE_SIZE (imode),
7645 		subreg_lowpart_offset (imode, GET_MODE (op))))
7646     {
7647       rtx res = lowpart_subreg (imode, op, GET_MODE (op));
7648       if (res)
7649 	return res;
7650     }
7651 
7652   rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (index)));
7653   return gen_rtx_VEC_SELECT (imode, op, tmp);
7654 }
7655 
7656 
7657 /* Simplify X, an rtx expression.
7658 
7659    Return the simplified expression or NULL if no simplifications
7660    were possible.
7661 
7662    This is the preferred entry point into the simplification routines;
7663    however, we still allow passes to call the more specific routines.
7664 
7665    Right now GCC has three (yes, three) major bodies of RTL simplification
7666    code that need to be unified.
7667 
7668 	1. fold_rtx in cse.c.  This code uses various CSE specific
7669 	   information to aid in RTL simplification.
7670 
7671 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
7672 	   it uses combine specific information to aid in RTL
7673 	   simplification.
7674 
7675 	3. The routines in this file.
7676 
7677 
7678    Long term we want to only have one body of simplification code; to
7679    get to that state I recommend the following steps:
7680 
7681 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
7682 	   which are not pass dependent state into these routines.
7683 
7684 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
7685 	   use this routine whenever possible.
7686 
7687 	3. Allow for pass dependent state to be provided to these
7688 	   routines and add simplifications based on the pass dependent
7689 	   state.  Remove code from cse.c & combine.c that becomes
7690 	   redundant/dead.
7691 
7692     It will take time, but ultimately the compiler will be easier to
7693     maintain and improve.  It's totally silly that when we add a
7694     simplification it needs to be added to 4 places (3 for RTL
7695     simplification and 1 for tree simplification).  */
7696 
7697 rtx
7698 simplify_rtx (const_rtx x)
7699 {
7700   const enum rtx_code code = GET_CODE (x);
7701   const machine_mode mode = GET_MODE (x);
7702 
7703   switch (GET_RTX_CLASS (code))
7704     {
7705     case RTX_UNARY:
7706       return simplify_unary_operation (code, mode,
7707 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7708     case RTX_COMM_ARITH:
7709       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7710 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7711 
7712       /* Fall through.  */
7713 
7714     case RTX_BIN_ARITH:
7715       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7716 
7717     case RTX_TERNARY:
7718     case RTX_BITFIELD_OPS:
7719       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7720 					 XEXP (x, 0), XEXP (x, 1),
7721 					 XEXP (x, 2));
7722 
7723     case RTX_COMPARE:
7724     case RTX_COMM_COMPARE:
7725       return simplify_relational_operation (code, mode,
7726                                             ((GET_MODE (XEXP (x, 0))
7727                                              != VOIDmode)
7728                                             ? GET_MODE (XEXP (x, 0))
7729                                             : GET_MODE (XEXP (x, 1))),
7730                                             XEXP (x, 0),
7731                                             XEXP (x, 1));
7732 
7733     case RTX_EXTRA:
7734       if (code == SUBREG)
7735 	return simplify_subreg (mode, SUBREG_REG (x),
7736 				GET_MODE (SUBREG_REG (x)),
7737 				SUBREG_BYTE (x));
7738       break;
7739 
7740     case RTX_OBJ:
7741       if (code == LO_SUM)
7742 	{
7743 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
7744 	  if (GET_CODE (XEXP (x, 0)) == HIGH
7745 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7746 	  return XEXP (x, 1);
7747 	}
7748       break;
7749 
7750     default:
7751       break;
7752     }
7753   return NULL;
7754 }
7755 
7756 #if CHECKING_P
7757 
7758 namespace selftest {
7759 
7760 /* Make a unique pseudo REG of mode MODE for use by selftests.  */
7761 
7762 static rtx
7763 make_test_reg (machine_mode mode)
7764 {
7765   static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7766 
7767   return gen_rtx_REG (mode, test_reg_num++);
7768 }
7769 
7770 static void
7771 test_scalar_int_ops (machine_mode mode)
7772 {
7773   rtx op0 = make_test_reg (mode);
7774   rtx op1 = make_test_reg (mode);
7775   rtx six = GEN_INT (6);
7776 
7777   rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7778   rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7779   rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7780 
7781   rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7782   rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7783   rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7784 
7785   rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7786   rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7787 
7788   /* Test some binary identities.  */
7789   ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7790   ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7791   ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7792   ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7793   ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7794   ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7795   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7796   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7797   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7798   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7799   ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7800   ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7801   ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7802   ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7803   ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7804   ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7805   ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7806 
7807   /* Test some self-inverse operations.  */
7808   ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7809   ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7810   ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7811 
7812   /* Test some idempotent operations.  */
7813   ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7814   ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7815   ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7816   ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7817   ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7818   ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7819 
7820   ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7821   ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7822 
7823   /* Test simplify_distributive_operation.  */
7824   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7825 		 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7826   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7827 		 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7828   ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7829 		 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7830 
7831   /* Test that useless extensions are eliminated.  */
7832   ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
7833   ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
7834   ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
7835   ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
7836 }
7837 
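/* For reference, a minimal sketch of the folds exercised above; the
   pseudo register numbers are arbitrary and purely illustrative:

     (plus:SI (reg:SI 105) (const_int 0))  -->  (reg:SI 105)
     (and:SI (reg:SI 105) (const_int -1))  -->  (reg:SI 105)
     (xor:SI (reg:SI 105) (reg:SI 105))    -->  (const_int 0)

   simplify_gen_binary applies such folds as the expressions are built,
   so the ASSERT_RTX_EQ calls above compare already-simplified rtxes.  */
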
7838 /* Verify some simplifications of integer extension/truncation.
7839    Machine mode BMODE is guaranteed to be wider than SMODE.  */
7840 
7841 static void
7842 test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
7843 {
7844   rtx sreg = make_test_reg (smode);
7845 
7846   /* Check truncation of extension.  */
7847   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7848 				     simplify_gen_unary (ZERO_EXTEND, bmode,
7849 							 sreg, smode),
7850 				     bmode),
7851 		 sreg);
7852   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7853 				     simplify_gen_unary (SIGN_EXTEND, bmode,
7854 							 sreg, smode),
7855 				     bmode),
7856 		 sreg);
7857   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7858 				     lowpart_subreg (bmode, sreg, smode),
7859 				     bmode),
7860 		 sreg);
7861 }
7862 
7863 /* Verify more simplifications of integer extension/truncation.
7864    BMODE is wider than MMODE which is wider than SMODE.  */
7865 
7866 static void
7867 test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
7868 			  machine_mode smode)
7869 {
7870   rtx breg = make_test_reg (bmode);
7871   rtx mreg = make_test_reg (mmode);
7872   rtx sreg = make_test_reg (smode);
7873 
7874   /* Check truncate of truncate.  */
7875   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7876 				     simplify_gen_unary (TRUNCATE, mmode,
7877 							 breg, bmode),
7878 				     mmode),
7879 		 simplify_gen_unary (TRUNCATE, smode, breg, bmode));
7880 
7881   /* Check extension of extension.  */
7882   ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
7883 				     simplify_gen_unary (ZERO_EXTEND, mmode,
7884 							 sreg, smode),
7885 				     mmode),
7886 		 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7887   ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7888 				     simplify_gen_unary (SIGN_EXTEND, mmode,
7889 							 sreg, smode),
7890 				     mmode),
7891 		 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
7892   ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7893 				     simplify_gen_unary (ZERO_EXTEND, mmode,
7894 							 sreg, smode),
7895 				     mmode),
7896 		 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7897 
7898   /* Check truncation of extension.  */
7899   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7900 				     simplify_gen_unary (ZERO_EXTEND, bmode,
7901 							 mreg, mmode),
7902 				     bmode),
7903 		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7904   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7905 				     simplify_gen_unary (SIGN_EXTEND, bmode,
7906 							 mreg, mmode),
7907 				     bmode),
7908 		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7909   ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7910 				     lowpart_subreg (bmode, mreg, mmode),
7911 				     bmode),
7912 		 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7913 }
7914 
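/* Illustrative examples of the composition rules checked above
   (register numbers are arbitrary):

     (truncate:QI (zero_extend:SI (reg:QI 110)))
       -->  (reg:QI 110)
     (zero_extend:DI (zero_extend:HI (reg:QI 110)))
       -->  (zero_extend:DI (reg:QI 110))
     (sign_extend:DI (zero_extend:HI (reg:QI 110)))
       -->  (zero_extend:DI (reg:QI 110))
     (truncate:QI (sign_extend:DI (reg:HI 111)))
       -->  (truncate:QI (reg:HI 111))  */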
7915 
7916 /* Verify some simplifications involving scalar expressions.  */
7917 
7918 static void
7919 test_scalar_ops ()
7920 {
7921   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7922     {
7923       machine_mode mode = (machine_mode) i;
7924       if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7925 	test_scalar_int_ops (mode);
7926     }
7927 
7928   test_scalar_int_ext_ops (HImode, QImode);
7929   test_scalar_int_ext_ops (SImode, QImode);
7930   test_scalar_int_ext_ops (SImode, HImode);
7931   test_scalar_int_ext_ops (DImode, QImode);
7932   test_scalar_int_ext_ops (DImode, HImode);
7933   test_scalar_int_ext_ops (DImode, SImode);
7934 
7935   test_scalar_int_ext_ops2 (SImode, HImode, QImode);
7936   test_scalar_int_ext_ops2 (DImode, HImode, QImode);
7937   test_scalar_int_ext_ops2 (DImode, SImode, QImode);
7938   test_scalar_int_ext_ops2 (DImode, SImode, HImode);
7939 }
7940 
7941 /* Test vector simplifications involving VEC_DUPLICATE in which the
7942    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
7943    register that holds one element of MODE.  */
7944 
7945 static void
7946 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7947 {
7948   scalar_mode inner_mode = GET_MODE_INNER (mode);
7949   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7950   poly_uint64 nunits = GET_MODE_NUNITS (mode);
7951   if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7952     {
7953       /* Test some simple unary cases with VEC_DUPLICATE arguments.  */
7954       rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7955       rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7956       ASSERT_RTX_EQ (duplicate,
7957 		     simplify_unary_operation (NOT, mode,
7958 					       duplicate_not, mode));
7959 
7960       rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7961       rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7962       ASSERT_RTX_EQ (duplicate,
7963 		     simplify_unary_operation (NEG, mode,
7964 					       duplicate_neg, mode));
7965 
7966       /* Test some simple binary cases with VEC_DUPLICATE arguments.  */
7967       ASSERT_RTX_EQ (duplicate,
7968 		     simplify_binary_operation (PLUS, mode, duplicate,
7969 						CONST0_RTX (mode)));
7970 
7971       ASSERT_RTX_EQ (duplicate,
7972 		     simplify_binary_operation (MINUS, mode, duplicate,
7973 						CONST0_RTX (mode)));
7974 
7975       ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7976 			 simplify_binary_operation (MINUS, mode, duplicate,
7977 						    duplicate));
7978     }
7979 
7980   /* Test a scalar VEC_SELECT of a VEC_DUPLICATE.  */
7981   rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7982   ASSERT_RTX_PTR_EQ (scalar_reg,
7983 		     simplify_binary_operation (VEC_SELECT, inner_mode,
7984 						duplicate, zero_par));
7985 
7986   unsigned HOST_WIDE_INT const_nunits;
7987   if (nunits.is_constant (&const_nunits))
7988     {
7989       /* And again with the final element.  */
7990       rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7991       rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7992       ASSERT_RTX_PTR_EQ (scalar_reg,
7993 			 simplify_binary_operation (VEC_SELECT, inner_mode,
7994 						    duplicate, last_par));
7995 
7996       /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE.  */
7997       rtx vector_reg = make_test_reg (mode);
7998       for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7999 	{
8000 	  if (i >= HOST_BITS_PER_WIDE_INT)
8001 	    break;
8002 	  rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
8003 	  rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
8004 	  poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
8005 	  ASSERT_RTX_EQ (scalar_reg,
8006 			 simplify_gen_subreg (inner_mode, vm,
8007 					      mode, offset));
8008 	}
8009     }
8010 
8011   /* Test a scalar subreg of a VEC_DUPLICATE.  */
8012   poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
8013   ASSERT_RTX_EQ (scalar_reg,
8014 		 simplify_gen_subreg (inner_mode, duplicate,
8015 				      mode, offset));
8016 
8017   machine_mode narrower_mode;
8018   if (maybe_ne (nunits, 2U)
8019       && multiple_p (nunits, 2)
8020       && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
8021       && VECTOR_MODE_P (narrower_mode))
8022     {
8023       /* Test VEC_DUPLICATE of a vector.  */
8024       rtx_vector_builder nbuilder (narrower_mode, 2, 1);
8025       nbuilder.quick_push (const0_rtx);
8026       nbuilder.quick_push (const1_rtx);
8027       rtx_vector_builder builder (mode, 2, 1);
8028       builder.quick_push (const0_rtx);
8029       builder.quick_push (const1_rtx);
8030       ASSERT_RTX_EQ (builder.build (),
8031 		     simplify_unary_operation (VEC_DUPLICATE, mode,
8032 					       nbuilder.build (),
8033 					       narrower_mode));
8034 
8035       /* Test VEC_SELECT of a vector.  */
8036       rtx vec_par
8037 	= gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
8038       rtx narrower_duplicate
8039 	= gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
8040       ASSERT_RTX_EQ (narrower_duplicate,
8041 		     simplify_binary_operation (VEC_SELECT, narrower_mode,
8042 						duplicate, vec_par));
8043 
8044       /* Test a vector subreg of a VEC_DUPLICATE.  */
8045       poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
8046       ASSERT_RTX_EQ (narrower_duplicate,
8047 		     simplify_gen_subreg (narrower_mode, duplicate,
8048 					  mode, offset));
8049     }
8050 }
8051 
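/* Sketch of the VEC_DUPLICATE folds above, using V4SI purely as an
   illustrative mode (the subreg offset 0 assumes a little-endian
   lowpart):

     (not:V4SI (vec_duplicate:V4SI (not:SI (reg:SI 120))))
       -->  (vec_duplicate:V4SI (reg:SI 120))
     (vec_select:SI (vec_duplicate:V4SI (reg:SI 120))
                    (parallel [(const_int 0)]))
       -->  (reg:SI 120)
     (subreg:SI (vec_duplicate:V4SI (reg:SI 120)) 0)
       -->  (reg:SI 120)  */
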
8052 /* Test vector simplifications involving VEC_SERIES in which the
8053    operands and result have vector mode MODE.  SCALAR_REG is a pseudo
8054    register that holds one element of MODE.  */
8055 
8056 static void
8057 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
8058 {
8059   /* Test unary cases with VEC_SERIES arguments.  */
8060   scalar_mode inner_mode = GET_MODE_INNER (mode);
8061   rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
8062   rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
8063   rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
8064   rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
8065   rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
8066   rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
8067   rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
8068   rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
8069 					 neg_scalar_reg);
8070   ASSERT_RTX_EQ (series_0_r,
8071 		 simplify_unary_operation (NEG, mode, series_0_nr, mode));
8072   ASSERT_RTX_EQ (series_r_m1,
8073 		 simplify_unary_operation (NEG, mode, series_nr_1, mode));
8074   ASSERT_RTX_EQ (series_r_r,
8075 		 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
8076 
8077   /* Test that a VEC_SERIES with a zero step is simplified away.  */
8078   ASSERT_RTX_EQ (duplicate,
8079 		 simplify_binary_operation (VEC_SERIES, mode,
8080 					    scalar_reg, const0_rtx));
8081 
8082   /* Test PLUS and MINUS with VEC_SERIES.  */
8083   rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
8084   rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
8085   rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
8086   ASSERT_RTX_EQ (series_r_r,
8087 		 simplify_binary_operation (PLUS, mode, series_0_r,
8088 					    duplicate));
8089   ASSERT_RTX_EQ (series_r_1,
8090 		 simplify_binary_operation (PLUS, mode, duplicate,
8091 					    series_0_1));
8092   ASSERT_RTX_EQ (series_r_m1,
8093 		 simplify_binary_operation (PLUS, mode, duplicate,
8094 					    series_0_m1));
8095   ASSERT_RTX_EQ (series_0_r,
8096 		 simplify_binary_operation (MINUS, mode, series_r_r,
8097 					    duplicate));
8098   ASSERT_RTX_EQ (series_r_m1,
8099 		 simplify_binary_operation (MINUS, mode, duplicate,
8100 					    series_0_1));
8101   ASSERT_RTX_EQ (series_r_1,
8102 		 simplify_binary_operation (MINUS, mode, duplicate,
8103 					    series_0_m1));
8104   ASSERT_RTX_EQ (series_0_m1,
8105 		 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
8106 					    constm1_rtx));
8107 
8108   /* Test NEG on constant vector series.  */
8109   ASSERT_RTX_EQ (series_0_m1,
8110 		 simplify_unary_operation (NEG, mode, series_0_1, mode));
8111   ASSERT_RTX_EQ (series_0_1,
8112 		 simplify_unary_operation (NEG, mode, series_0_m1, mode));
8113 
8114   /* Test PLUS and MINUS on constant vector series.  */
8115   rtx scalar2 = gen_int_mode (2, inner_mode);
8116   rtx scalar3 = gen_int_mode (3, inner_mode);
8117   rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
8118   rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
8119   rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
8120   ASSERT_RTX_EQ (series_1_1,
8121 		 simplify_binary_operation (PLUS, mode, series_0_1,
8122 					    CONST1_RTX (mode)));
8123   ASSERT_RTX_EQ (series_0_m1,
8124 		 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
8125 					    series_0_m1));
8126   ASSERT_RTX_EQ (series_1_3,
8127 		 simplify_binary_operation (PLUS, mode, series_1_1,
8128 					    series_0_2));
8129   ASSERT_RTX_EQ (series_0_1,
8130 		 simplify_binary_operation (MINUS, mode, series_1_1,
8131 					    CONST1_RTX (mode)));
8132   ASSERT_RTX_EQ (series_1_1,
8133 		 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
8134 					    series_0_m1));
8135   ASSERT_RTX_EQ (series_1_1,
8136 		 simplify_binary_operation (MINUS, mode, series_1_3,
8137 					    series_0_2));
8138 
8139   /* Test MULT between constant vectors.  */
8140   rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
8141   rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
8142   rtx scalar9 = gen_int_mode (9, inner_mode);
8143   rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
8144   ASSERT_RTX_EQ (series_0_2,
8145 		 simplify_binary_operation (MULT, mode, series_0_1, vec2));
8146   ASSERT_RTX_EQ (series_3_9,
8147 		 simplify_binary_operation (MULT, mode, vec3, series_1_3));
8148   if (!GET_MODE_NUNITS (mode).is_constant ())
8149     ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
8150 					     series_0_1));
8151 
8152   /* Test ASHIFT between constant vectors.  */
8153   ASSERT_RTX_EQ (series_0_2,
8154 		 simplify_binary_operation (ASHIFT, mode, series_0_1,
8155 					    CONST1_RTX (mode)));
8156   if (!GET_MODE_NUNITS (mode).is_constant ())
8157     ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
8158 					     series_0_1));
8159 }
8160 
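/* Sketch of the VEC_SERIES arithmetic checked above, writing a series
   with base B and step S as {B, B+S, B+2*S, ...}:

     neg {0, -R, -2*R, ...}            -->  {0, R, 2*R, ...}
     {R, R, R, ...} + {0, 1, 2, ...}   -->  {R, R+1, R+2, ...}
     {0, 1, 2, ...} * {2, 2, 2, ...}   -->  {0, 2, 4, ...}

   and a VEC_SERIES whose step is zero collapses to a VEC_DUPLICATE of
   its base.  */
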
8161 static rtx
8162 simplify_merge_mask (rtx x, rtx mask, int op)
8163 {
8164   return simplify_context ().simplify_merge_mask (x, mask, op);
8165 }
8166 
8167 /* Verify simplify_merge_mask works correctly.  */
8168 
8169 static void
8170 test_vec_merge (machine_mode mode)
8171 {
8172   rtx op0 = make_test_reg (mode);
8173   rtx op1 = make_test_reg (mode);
8174   rtx op2 = make_test_reg (mode);
8175   rtx op3 = make_test_reg (mode);
8176   rtx op4 = make_test_reg (mode);
8177   rtx op5 = make_test_reg (mode);
8178   rtx mask1 = make_test_reg (SImode);
8179   rtx mask2 = make_test_reg (SImode);
8180   rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
8181   rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
8182   rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
8183 
8184   /* Simple vec_merge.  */
8185   ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
8186   ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
8187   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
8188   ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
8189 
8190   /* Nested vec_merge.
8191      It's tempting to make this simplify right down to opN, but we don't
8192      because all the simplify_* functions assume that the operands have
8193      already been simplified.  */
8194   rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
8195   ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
8196   ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
8197 
8198   /* Intermediate unary op.  */
8199   rtx unop = gen_rtx_NOT (mode, vm1);
8200   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
8201 		 simplify_merge_mask (unop, mask1, 0));
8202   ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
8203 		 simplify_merge_mask (unop, mask1, 1));
8204 
8205   /* Intermediate binary op.  */
8206   rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
8207   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
8208 		 simplify_merge_mask (binop, mask1, 0));
8209   ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
8210 		 simplify_merge_mask (binop, mask1, 1));
8211 
8212   /* Intermediate ternary op.  */
8213   rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
8214   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
8215 		 simplify_merge_mask (tenop, mask1, 0));
8216   ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
8217 		 simplify_merge_mask (tenop, mask1, 1));
8218 
8219   /* Side effects.  */
8220   rtx badop0 = gen_rtx_PRE_INC (mode, op0);
8221   rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
8222   ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
8223   ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
8224 
8225   /* Called indirectly.  */
8226   ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
8227 		 simplify_rtx (nvm));
8228 }
8229 
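/* Sketch of simplify_merge_mask: when X is a VEC_MERGE (or an operation
   on VEC_MERGEs) keyed on the same MASK, operand OP of each merge is
   selected.  With the operands defined in test_vec_merge above:

     simplify_merge_mask (vm1, mask1, 0) == op0
     simplify_merge_mask (vm1, mask1, 1) == op1

   and simplify_rtx folds the nested merge
     (vec_merge (vec_merge op0 op1 mask1) (vec_merge op2 op3 mask1) mask1)
   to (vec_merge op0 op3 mask1), since lanes taken from the first operand
   come from op0 and lanes taken from the second come from op3.  */
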
8230 /* Test subregs of integer vector constant X, trying elements in
8231    the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
8232    where NELTS is the number of elements in X.  Subregs involving
8233    elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail.  */
8234 
8235 static void
8236 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
8237 			   unsigned int first_valid = 0)
8238 {
8239   machine_mode inner_mode = GET_MODE (x);
8240   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8241 
8242   for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
8243     {
8244       machine_mode outer_mode = (machine_mode) modei;
8245       if (!VECTOR_MODE_P (outer_mode))
8246 	continue;
8247 
8248       unsigned int outer_nunits;
8249       if (GET_MODE_INNER (outer_mode) == int_mode
8250 	  && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
8251 	  && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
8252 	{
8253 	  /* Test subregs in which the outer mode is a smaller,
8254 	     constant-sized vector of the same element type.  */
8255 	  unsigned int limit
8256 	    = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
8257 	  for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
8258 	    {
8259 	      rtx expected = NULL_RTX;
8260 	      if (elt >= first_valid)
8261 		{
8262 		  rtx_vector_builder builder (outer_mode, outer_nunits, 1);
8263 		  for (unsigned int i = 0; i < outer_nunits; ++i)
8264 		    builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
8265 		  expected = builder.build ();
8266 		}
8267 	      poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
8268 	      ASSERT_RTX_EQ (expected,
8269 			     simplify_subreg (outer_mode, x,
8270 					      inner_mode, byte));
8271 	    }
8272 	}
8273       else if (known_eq (GET_MODE_SIZE (outer_mode),
8274 			 GET_MODE_SIZE (inner_mode))
8275 	       && known_eq (elt_bias, 0U)
8276 	       && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
8277 		   || known_eq (GET_MODE_BITSIZE (outer_mode),
8278 				GET_MODE_NUNITS (outer_mode)))
8279 	       && (!FLOAT_MODE_P (outer_mode)
8280 		   || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
8281 		       == GET_MODE_UNIT_PRECISION (outer_mode)))
8282 	       && (GET_MODE_SIZE (inner_mode).is_constant ()
8283 		   || !CONST_VECTOR_STEPPED_P (x)))
8284 	{
8285 	  /* Try converting to OUTER_MODE and back.  */
8286 	  rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
8287 	  ASSERT_TRUE (outer_x != NULL_RTX);
8288 	  ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
8289 					     outer_mode, 0));
8290 	}
8291     }
8292 
8293   if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
8294     {
8295       /* Test each byte in the element range.  */
8296       unsigned int limit
8297 	= constant_lower_bound (GET_MODE_SIZE (inner_mode));
8298       for (unsigned int i = 0; i < limit; ++i)
8299 	{
8300 	  unsigned int elt = i / GET_MODE_SIZE (int_mode);
8301 	  rtx expected = NULL_RTX;
8302 	  if (elt >= first_valid)
8303 	    {
8304 	      unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
8305 	      if (BYTES_BIG_ENDIAN)
8306 		byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
8307 	      rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
8308 	      wide_int shifted_elt
8309 		= wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
8310 	      expected = immed_wide_int_const (shifted_elt, QImode);
8311 	    }
8312 	  poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
8313 	  ASSERT_RTX_EQ (expected,
8314 			 simplify_subreg (QImode, x, inner_mode, byte));
8315 	}
8316     }
8317 }
8318 
8319 /* Test constant subregs of integer vector mode INNER_MODE, using 1
8320    element per pattern.  */
8321 
8322 static void
8323 test_vector_subregs_repeating (machine_mode inner_mode)
8324 {
8325   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8326   unsigned int min_nunits = constant_lower_bound (nunits);
8327   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8328   unsigned int count = gcd (min_nunits, 8);
8329 
8330   rtx_vector_builder builder (inner_mode, count, 1);
8331   for (unsigned int i = 0; i < count; ++i)
8332     builder.quick_push (gen_int_mode (8 - i, int_mode));
8333   rtx x = builder.build ();
8334 
8335   test_vector_subregs_modes (x);
8336   if (!nunits.is_constant ())
8337     test_vector_subregs_modes (x, nunits - min_nunits);
8338 }
8339 
8340 /* Test constant subregs of integer vector mode INNER_MODE, using 2
8341    elements per pattern.  */
8342 
8343 static void
8344 test_vector_subregs_fore_back (machine_mode inner_mode)
8345 {
8346   poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8347   unsigned int min_nunits = constant_lower_bound (nunits);
8348   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8349   unsigned int count = gcd (min_nunits, 4);
8350 
8351   rtx_vector_builder builder (inner_mode, count, 2);
8352   for (unsigned int i = 0; i < count; ++i)
8353     builder.quick_push (gen_int_mode (i, int_mode));
8354   for (unsigned int i = 0; i < count; ++i)
8355     builder.quick_push (gen_int_mode (-(int) i, int_mode));
8356   rtx x = builder.build ();
8357 
8358   test_vector_subregs_modes (x);
8359   if (!nunits.is_constant ())
8360     test_vector_subregs_modes (x, nunits - min_nunits, count);
8361 }
8362 
8363 /* Test constant subregs of integer vector mode INNER_MODE, using 3
8364    elements per pattern.  */
8365 
8366 static void
8367 test_vector_subregs_stepped (machine_mode inner_mode)
8368 {
8369   /* Build { 0, 1, 2, 3, ... }.  */
8370   scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8371   rtx_vector_builder builder (inner_mode, 1, 3);
8372   for (unsigned int i = 0; i < 3; ++i)
8373     builder.quick_push (gen_int_mode (i, int_mode));
8374   rtx x = builder.build ();
8375 
8376   test_vector_subregs_modes (x);
8377 }
8378 
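/* Note on the rtx_vector_builder encodings used above: with one element
   per pattern the pushed values simply repeat, with two elements each
   pattern is a leading value followed by a repeated value, and with
   three elements each pattern continues as a linear series, so the
   single {0, 1, 2} pattern above expands to {0, 1, 2, 3, ...}.  */
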
8379 /* Test constant subregs of integer vector mode INNER_MODE.  */
8380 
8381 static void
8382 test_vector_subregs (machine_mode inner_mode)
8383 {
8384   test_vector_subregs_repeating (inner_mode);
8385   test_vector_subregs_fore_back (inner_mode);
8386   test_vector_subregs_stepped (inner_mode);
8387 }
8388 
8389 /* Verify some simplifications involving vectors.  */
8390 
8391 static void
8392 test_vector_ops ()
8393 {
8394   for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
8395     {
8396       machine_mode mode = (machine_mode) i;
8397       if (VECTOR_MODE_P (mode))
8398 	{
8399 	  rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
8400 	  test_vector_ops_duplicate (mode, scalar_reg);
8401 	  if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
8402 	      && maybe_gt (GET_MODE_NUNITS (mode), 2))
8403 	    {
8404 	      test_vector_ops_series (mode, scalar_reg);
8405 	      test_vector_subregs (mode);
8406 	    }
8407 	  test_vec_merge (mode);
8408 	}
8409     }
8410 }
8411 
8412 template<unsigned int N>
8413 struct simplify_const_poly_int_tests
8414 {
8415   static void run ();
8416 };
8417 
8418 template<>
8419 struct simplify_const_poly_int_tests<1>
8420 {
8421   static void run () {}
8422 };
8423 
8424 /* Test various CONST_POLY_INT properties.  */
8425 
8426 template<unsigned int N>
8427 void
8428 simplify_const_poly_int_tests<N>::run ()
8429 {
8430   rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
8431   rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
8432   rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
8433   rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
8434   rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
8435   rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
8436   rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
8437   rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
8438   rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
8439   rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
8440   rtx two = GEN_INT (2);
8441   rtx six = GEN_INT (6);
8442   poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
8443 
8444   /* These tests only try limited operation combinations.  Fuller arithmetic
8445      testing is done directly on poly_ints.  */
8446   ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
8447   ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
8448   ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
8449   ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
8450   ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
8451   ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
8452   ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
8453   ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
8454   ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
8455   ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
8456   ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
8457 }
8458 
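/* Worked example for the QImode poly_int arithmetic above, writing
   poly_int64 (A, B) as A + B*x with x the runtime indeterminate:

     (1 + 1*x) + (-80 + 127*x) = -79 + 128*x

   and because the coefficients are interpreted in QImode, 128 wraps to
   -128, giving poly_int64 (-79, -128), i.e. x3.  */
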
8459 /* Run all of the selftests within this file.  */
8460 
8461 void
8462 simplify_rtx_c_tests ()
8463 {
8464   test_scalar_ops ();
8465   test_vector_ops ();
8466   simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
8467 }
8468 
8469 } // namespace selftest
8470 
8471 #endif /* CHECKING_P */
8472