/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
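
/* For instance, the high word of the pair is all ones exactly when the
   sign bit of LOW is set.  A minimal sketch (illustrative only, not
   part of GCC):

     unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   // -1: low is negative
     low = 5;
     high = HWI_SIGN_EXTEND (low);                 // 0: low is non-negative
*/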

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
					    machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx.  */
static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  unsigned HOST_WIDE_INT val = -UINTVAL (i);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      && val == UINTVAL (i))
    return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
					   mode);
  return gen_int_mode (val, mode);
}
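
/* Usage sketch (illustrative only): negating (const_int 5) in SImode
   yields (const_int -5).  The wide-int fallback above handles the one
   value whose negation is itself as a HOST_WIDE_INT (the minimum value),
   which in a wider-than-HWI mode must carry into the upper bits:

     rtx neg = neg_const_int (SImode, GEN_INT (5));   // (const_int -5)
*/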

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
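
/* For example (illustrative only, assuming a 32-bit SImode): the
   sign-bit constant of SImode is 0x80000000, so

     mode_signbit_p (SImode, gen_int_mode (0x80000000, SImode))  // true
     mode_signbit_p (SImode, GEN_INT (0x40000000))               // false

   since any value with additional or different bits set is rejected.  */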

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
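
/* The three predicates on a 16-bit HImode value, whose sign bit is
   0x8000 (illustrative only):

     val_signbit_p (HImode, 0x8000)              // true: exactly the sign bit
     val_signbit_p (HImode, 0x8001)              // false: other bits set too
     val_signbit_known_set_p (HImode, 0x8001)    // true: bit 15 is set
     val_signbit_known_clear_p (HImode, 0x7fff)  // true: bit 15 is clear
*/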

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
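
/* Usage sketch (illustrative only): callers may pass operands in either
   order and rely on folding or canonicalization.

     rtx reg = gen_rtx_REG (SImode, 0);   // hypothetical register
     simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
     // folds to reg itself
     simplify_gen_binary (PLUS, SImode, const1_rtx, reg);
     // no fold; canonicalized to (plus:SI (reg:SI 0) (const_int 1))
*/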

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
					     GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset == 0 && cmode == GET_MODE (x))
	return c;
      else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
    }

  return x;
}
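
/* For example (illustrative only), an SImode pool reference of the form

     (mem/u/c:SI (symbol_ref ("*.LC0")))

   whose pool entry holds (const_int 42) in SImode comes back as the
   (const_int 42) itself, and an access at a nonzero offset into a wider
   pool entry is resolved through simplify_subreg when that succeeds.  */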

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, reversep, volatilep = 0;

	    decl
	      = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
				     &unsignedp, &reversep, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
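
/* A target that overrides the hook would typically fall back on this
   default first, e.g. (hypothetical overrider, illustrative only):

     static rtx
     example_delegitimize_address (rtx x)
     {
       x = delegitimize_mem_from_attrs (x);
       // ... then undo any target-specific PIC wrapping of X ...
       return x;
     }
*/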

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
		    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
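
/* Usage sketch (illustrative only):

     simplify_gen_unary (NOT, SImode, const0_rtx, SImode)
     // folds to (const_int -1) instead of building (not:SI (const_int 0))
*/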

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
		      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
			 machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call it on each X; if it
   returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		return op1;
	    }

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
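
/* Usage sketch (illustrative only): substitute one register for another
   and let the result re-simplify.

     rtx r1 = gen_rtx_REG (SImode, 1);   // hypothetical registers
     rtx r2 = gen_rtx_REG (SImode, 2);
     rtx e  = gen_rtx_PLUS (SImode, r1, const0_rtx);
     simplify_replace_rtx (e, r1, r2);   // (reg:SI 2): the PLUS folds away
*/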

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
		     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
	  || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
	  /* If doing this transform works for an X with all bits set,
	     it works for any X.  */
	  && ((GET_MODE_MASK (mode) >> shift) & mask)
	     == ((GET_MODE_MASK (op_mode) >> shift) & mask)
	  && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
	  && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
	{
	  mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
	  return simplify_gen_binary (AND, mode, op0, mask_op);
	}
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
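
/* As a concrete instance of the distribution rule above (illustrative
   only): (truncate:QI (plus:SI (reg:SI) (const_int 1))) becomes
   (plus:QI (truncate:QI (reg:SI)) (const_int 1)) on targets where the
   QImode operation is available, letting the narrower PLUS simplify
   further.  */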

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
			  rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
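
/* E.g. (illustrative only) simplify_unary_operation (NEG, SImode,
   GEN_INT (7), SImode) returns (const_int -7) via the constant folder,
   while NEG of a non-constant falls through to the pattern-based rules
   in simplify_unary_operation_1 below.  */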

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
	in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
	in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
	gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
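
/* For instance (illustrative only; IEEE single precision has a 24-bit
   significand): (float:SF (reg:SI)) is not known exact, since a 32-bit
   integer can need up to 31 significant bits.  But if nonzero_bits
   proves the operand fits in 24 bits, e.g. after
   (and:SI (reg:SI) (const_int 0xffff)), the conversion is exact and
   the predicate returns true.  */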

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  Only do this for
	 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
	 and MODE_VECTOR_INT.  */
      if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);


      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
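      /* E.g. (not (and X Y)) becomes (ior (not X) (not Y)) and
	 (not (ior X Y)) becomes (and (not X) (not Y)); if one operand was
	 already a NOT, only a single NOT survives in the result
	 (illustrative expansion of the code below).  */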
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    std::swap (in1, in2);

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
	 If the comparison is not reversible, use
	 x ? y : (neg y) instead.  */
      if (GET_CODE (op) == IF_THEN_ELSE)
	{
	  rtx cond = XEXP (op, 0);
	  rtx true_rtx = XEXP (op, 1);
	  rtx false_rtx = XEXP (op, 2);

	  if ((GET_CODE (true_rtx) == NEG
	       && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	       || (GET_CODE (false_rtx) == NEG
		   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
	    {
	      if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
		temp = reversed_comparison (cond, mode);
	      else
		{
		  temp = cond;
		  std::swap (true_rtx, false_rtx);
		}
	      return simplify_gen_ternary (IF_THEN_ELSE, mode,
					    mode, temp, true_rtx, false_rtx);
	    }
	}

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);
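
      /* Worked instance of the rule above: when A is known to be 0 or 1,
	 -(A ^ 1) equals A - 1, since A = 0 gives -1 and A = 1 gives 0
	 (illustrative only).  */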

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	  && (flag_unsafe_math_optimizations
	      || exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
	      && exact_int_to_float_conversion_p (op)))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
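
      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI a))
	 (sign_extend:SI (reg:HI b)))) needs at most 32 significant bits,
	 so it is rewritten as the wider widening multiply
	 (mult:DI (sign_extend:DI (reg:HI a)) (sign_extend:DI (reg:HI b)))
	 (illustrative only).  */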

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (op, 1) != const0_rtx)
	return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode))
	  && !targetm.have_ptr_extend ())
	{
	  temp
	    = convert_memory_address_addr_space_1 (Pmode, op,
						   ADDR_SPACE_GENERIC, false,
						   true);
	  if (temp)
	    return temp;
	}
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      machine_mode lmode = GET_MODE (lhs);
	      machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

1548 	      /* We can only widen multiplies if the result is mathematically
1549 		 equivalent, i.e. if overflow was impossible.  */
1550 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1551 		return simplify_gen_binary
1552 			 (MULT, mode,
1553 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1554 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1555 	    }
1556 	}
1557 
1558       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1559       if (GET_CODE (op) == ZERO_EXTEND)
1560 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1561 				   GET_MODE (XEXP (op, 0)));
1562 
1563       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1564 	 is (zero_extend:M (subreg:O <X>)) if there is mode with
1565 	 GET_MODE_PRECISION (N) - I bits.  */
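      /* E.g. with N == SImode and I == 24, the shift pair isolates the
	 low 8 bits of <X>, so the result is a zero extension of the
	 QImode lowpart of <X>, QImode being the mode with 32 - 24 == 8
	 bits.  */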
1566       if (GET_CODE (op) == LSHIFTRT
1567 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1568 	  && CONST_INT_P (XEXP (op, 1))
1569 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1570 	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1571 	{
1572 	  machine_mode tmode
1573 	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
1574 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1575 	  if (tmode != BLKmode)
1576 	    {
1577 	      rtx inner =
1578 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1579 	      if (inner)
1580 		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1581 	    }
1582 	}
1583 
1584       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1585 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1586 	 of mode N.  E.g.
1587 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1588 	 (and:SI (reg:SI) (const_int 63)).  */
1589       if (GET_CODE (op) == SUBREG
1590 	  && GET_MODE_PRECISION (GET_MODE (op))
1591 	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1592 	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1593 	     <= HOST_BITS_PER_WIDE_INT
1594 	  && GET_MODE_PRECISION (mode)
1595 	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1596 	  && subreg_lowpart_p (op)
1597 	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1598 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1599 	{
1600 	  if (GET_MODE_PRECISION (mode)
1601 	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1602 	    return SUBREG_REG (op);
1603 	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1604 				     GET_MODE (SUBREG_REG (op)));
1605 	}
1606 
1607 #if defined(POINTERS_EXTEND_UNSIGNED)
1608       /* As we do not know which address space the pointer is referring to,
1609 	 we can do this only if the target does not support different pointer
1610 	 or address modes depending on the address space.  */
1611       if (target_default_pointer_address_modes_p ()
1612 	  && POINTERS_EXTEND_UNSIGNED > 0
1613 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1614 	  && (CONSTANT_P (op)
1615 	      || (GET_CODE (op) == SUBREG
1616 		  && REG_P (SUBREG_REG (op))
1617 		  && REG_POINTER (SUBREG_REG (op))
1618 		  && GET_MODE (SUBREG_REG (op)) == Pmode))
1619 	  && !targetm.have_ptr_extend ())
1620 	{
1621 	  temp
1622 	    = convert_memory_address_addr_space_1 (Pmode, op,
1623 						   ADDR_SPACE_GENERIC, false,
1624 						   true);
1625 	  if (temp)
1626 	    return temp;
1627 	}
1628 #endif
1629       break;
1630 
1631     default:
1632       break;
1633     }
1634 
1635   return 0;
1636 }
1637 
1638 /* Try to compute the value of a unary operation CODE whose output mode is to
1639    be MODE with input operand OP whose mode was originally OP_MODE.
1640    Return zero if the value cannot be computed.  */
1641 rtx
1642 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1643 				rtx op, machine_mode op_mode)
1644 {
1645   unsigned int width = GET_MODE_PRECISION (mode);
1646 
1647   if (code == VEC_DUPLICATE)
1648     {
1649       gcc_assert (VECTOR_MODE_P (mode));
1650       if (GET_MODE (op) != VOIDmode)
1651       {
1652 	if (!VECTOR_MODE_P (GET_MODE (op)))
1653 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1654 	else
1655 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1656 						(GET_MODE (op)));
1657       }
1658       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1659 	  || GET_CODE (op) == CONST_VECTOR)
1660 	{
1661 	  int elt_size = GET_MODE_UNIT_SIZE (mode);
1662           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1663 	  rtvec v = rtvec_alloc (n_elts);
1664 	  unsigned int i;
1665 
1666 	  if (GET_CODE (op) != CONST_VECTOR)
1667 	    for (i = 0; i < n_elts; i++)
1668 	      RTVEC_ELT (v, i) = op;
1669 	  else
1670 	    {
1671 	      machine_mode inmode = GET_MODE (op);
1672 	      int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
1673               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1674 
1675 	      gcc_assert (in_n_elts < n_elts);
1676 	      gcc_assert ((n_elts % in_n_elts) == 0);
1677 	      for (i = 0; i < n_elts; i++)
1678 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1679 	    }
1680 	  return gen_rtx_CONST_VECTOR (mode, v);
1681 	}
1682     }
1683 
1684   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1685     {
1686       int elt_size = GET_MODE_UNIT_SIZE (mode);
1687       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1688       machine_mode opmode = GET_MODE (op);
1689       int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1690       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1691       rtvec v = rtvec_alloc (n_elts);
1692       unsigned int i;
1693 
1694       gcc_assert (op_n_elts == n_elts);
1695       for (i = 0; i < n_elts; i++)
1696 	{
1697 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1698 					    CONST_VECTOR_ELT (op, i),
1699 					    GET_MODE_INNER (opmode));
1700 	  if (!x)
1701 	    return 0;
1702 	  RTVEC_ELT (v, i) = x;
1703 	}
1704       return gen_rtx_CONST_VECTOR (mode, v);
1705     }
1706 
1707   /* The order of these tests is critical so that, for example, we don't
1708      check the wrong mode (input vs. output) for a conversion operation,
1709      such as FIX.  At some point, this should be simplified.  */
1710 
1711   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1712     {
1713       REAL_VALUE_TYPE d;
1714 
1715       if (op_mode == VOIDmode)
1716 	{
1717 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1718 	     the bits of the constant are significant, though this is a
1719 	     dangerous assumption: CONST_INTs are often created and used
1720 	     with garbage in the bits outside of the precision of their
1721 	     implied mode.  */
1722 	  op_mode = MAX_MODE_INT;
1723 	}
1724 
1725       real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
1726 
1727       /* Avoid the folding if flag_signaling_nans is on and
1728          operand is a signaling NaN.  */
1729       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1730         return 0;
1731 
1732       d = real_value_truncate (mode, d);
1733       return const_double_from_real_value (d, mode);
1734     }
1735   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1736     {
1737       REAL_VALUE_TYPE d;
1738 
1739       if (op_mode == VOIDmode)
1740 	{
1741 	  /* CONST_INTs have VOIDmode as their mode.  We assume that all
1742 	     the bits of the constant are significant, though this is a
1743 	     dangerous assumption: CONST_INTs are often created and used
1744 	     with garbage in the bits outside of the precision of their
1745 	     implied mode.  */
1746 	  op_mode = MAX_MODE_INT;
1747 	}
1748 
1749       real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
1750 
1751       /* Avoid the folding if flag_signaling_nans is on and
1752          operand is a signaling NaN.  */
1753       if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1754         return 0;
1755 
1756       d = real_value_truncate (mode, d);
1757       return const_double_from_real_value (d, mode);
1758     }
1759 
1760   if (CONST_SCALAR_INT_P (op) && width > 0)
1761     {
1762       wide_int result;
1763       machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
1764       rtx_mode_t op0 = std::make_pair (op, imode);
1765       int int_value;
1766 
1767 #if TARGET_SUPPORTS_WIDE_INT == 0
1768       /* This assert keeps the simplification from producing a result
1769 	 that cannot be represented in a CONST_DOUBLE.  A lot of
1770 	 upstream callers expect that this function never fails to
1771 	 simplify something, so if this were folded into the test
1772 	 above, the code would die later anyway.  If this assert
1773 	 triggers, you just need to make the port support wide int.  */
1774       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1775 #endif
1776 
1777       switch (code)
1778 	{
1779 	case NOT:
1780 	  result = wi::bit_not (op0);
1781 	  break;
1782 
1783 	case NEG:
1784 	  result = wi::neg (op0);
1785 	  break;
1786 
1787 	case ABS:
1788 	  result = wi::abs (op0);
1789 	  break;
1790 
1791 	case FFS:
1792 	  result = wi::shwi (wi::ffs (op0), mode);
1793 	  break;
1794 
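	/* For CLZ and CTZ of zero, CLZ_DEFINED_VALUE_AT_ZERO and
	   CTZ_DEFINED_VALUE_AT_ZERO set INT_VALUE and return nonzero
	   when the target defines a value; otherwise we fall back to
	   the mode precision.  */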
1795 	case CLZ:
1796 	  if (wi::ne_p (op0, 0))
1797 	    int_value = wi::clz (op0);
1798 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1799 	    int_value = GET_MODE_PRECISION (mode);
1800 	  result = wi::shwi (int_value, mode);
1801 	  break;
1802 
1803 	case CLRSB:
1804 	  result = wi::shwi (wi::clrsb (op0), mode);
1805 	  break;
1806 
1807 	case CTZ:
1808 	  if (wi::ne_p (op0, 0))
1809 	    int_value = wi::ctz (op0);
1810 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
1811 	    int_value = GET_MODE_PRECISION (mode);
1812 	  result = wi::shwi (int_value, mode);
1813 	  break;
1814 
1815 	case POPCOUNT:
1816 	  result = wi::shwi (wi::popcount (op0), mode);
1817 	  break;
1818 
1819 	case PARITY:
1820 	  result = wi::shwi (wi::parity (op0), mode);
1821 	  break;
1822 
1823 	case BSWAP:
1824 	  result = wide_int (op0).bswap ();
1825 	  break;
1826 
1827 	case TRUNCATE:
1828 	case ZERO_EXTEND:
1829 	  result = wide_int::from (op0, width, UNSIGNED);
1830 	  break;
1831 
1832 	case SIGN_EXTEND:
1833 	  result = wide_int::from (op0, width, SIGNED);
1834 	  break;
1835 
1836 	case SQRT:
1837 	default:
1838 	  return 0;
1839 	}
1840 
1841       return immed_wide_int_const (result, mode);
1842     }
1843 
1844   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1845 	   && SCALAR_FLOAT_MODE_P (mode)
1846 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1847     {
1848       REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1849       switch (code)
1850 	{
1851 	case SQRT:
1852 	  return 0;
1853 	case ABS:
1854 	  d = real_value_abs (&d);
1855 	  break;
1856 	case NEG:
1857 	  d = real_value_negate (&d);
1858 	  break;
1859 	case FLOAT_TRUNCATE:
1860 	  /* Don't perform the operation if flag_signaling_nans is on
1861 	     and the operand is a signaling NaN.  */
1862 	  if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1863 	    d = real_value_truncate (mode, d);
1864 	  break;
1865 	case FLOAT_EXTEND:
1866 	  /* All this does is change the mode, unless we are changing
1867 	     the mode class.  */
1868 	  /* Don't perform the operation if flag_signaling_nans is on
1869 	     and the operand is a signaling NaN.  */
1870 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
1871 	      && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1872 	    real_convert (&d, mode, &d);
1873 	  break;
1874 	case FIX:
1875 	  /* Don't perform the operation if flag_signaling_nans is on
1876 	     and the operand is a signaling NaN.  */
1877 	  if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
1878 	    real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1879 	  break;
1880 	case NOT:
1881 	  {
1882 	    long tmp[4];
1883 	    int i;
1884 
1885 	    real_to_target (tmp, &d, GET_MODE (op));
1886 	    for (i = 0; i < 4; i++)
1887 	      tmp[i] = ~tmp[i];
1888 	    real_from_target (&d, tmp, mode);
1889 	    break;
1890 	  }
1891 	default:
1892 	  gcc_unreachable ();
1893 	}
1894       return const_double_from_real_value (d, mode);
1895     }
1896   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1897 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1898 	   && GET_MODE_CLASS (mode) == MODE_INT
1899 	   && width > 0)
1900     {
1901       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1902 	 operators are intentionally left unspecified (to ease implementation
1903 	 by target backends), for consistency, this routine implements the
1904 	 same semantics for constant folding as used by the middle-end.  */
1905 
1906       /* This was formerly used only for non-IEEE float.
1907 	 eggert@twinsun.com says it is safe for IEEE also.  */
1908       REAL_VALUE_TYPE t;
1909       const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1910       wide_int wmax, wmin;
1911       /* This is part of the ABI of real_to_integer, but we check
1912 	 things before making this call.  */
1913       bool fail;
1914 
1915       switch (code)
1916 	{
1917 	case FIX:
1918 	  if (REAL_VALUE_ISNAN (*x))
1919 	    return const0_rtx;
1920 
1921 	  /* Test against the signed upper bound.  */
1922 	  wmax = wi::max_value (width, SIGNED);
1923 	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
1924 	  if (real_less (&t, x))
1925 	    return immed_wide_int_const (wmax, mode);
1926 
1927 	  /* Test against the signed lower bound.  */
1928 	  wmin = wi::min_value (width, SIGNED);
1929 	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
1930 	  if (real_less (x, &t))
1931 	    return immed_wide_int_const (wmin, mode);
1932 
1933 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
1934 				       mode);
1935 
1936 	case UNSIGNED_FIX:
1937 	  if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
1938 	    return const0_rtx;
1939 
1940 	  /* Test against the unsigned upper bound.  */
1941 	  wmax = wi::max_value (width, UNSIGNED);
1942 	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
1943 	  if (real_less (&t, x))
1944 	    return immed_wide_int_const (wmax, mode);
1945 
1946 	  return immed_wide_int_const (real_to_integer (x, &fail, width),
1947 				       mode);
1948 
1949 	default:
1950 	  gcc_unreachable ();
1951 	}
1952     }
1953 
1954   return NULL_RTX;
1955 }
1956 
1957 /* Subroutine of simplify_binary_operation to simplify a binary operation
1958    CODE that can commute with byte swapping, with result mode MODE and
1959    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
1960    Return zero if no simplification or canonicalization is possible.  */
1961 
1962 static rtx
1963 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
1964 				  rtx op0, rtx op1)
1965 {
1966   rtx tem;
1967 
1968   /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
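  /* E.g. in SImode, (and (bswap X) (const_int 0xff)) becomes
     (bswap (and X (const_int 0xff000000))).  */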
1969   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
1970     {
1971       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
1972 				 simplify_gen_unary (BSWAP, mode, op1, mode));
1973       return simplify_gen_unary (BSWAP, mode, tem, mode);
1974     }
1975 
1976   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
1977   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
1978     {
1979       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1980       return simplify_gen_unary (BSWAP, mode, tem, mode);
1981     }
1982 
1983   return NULL_RTX;
1984 }
1985 
1986 /* Subroutine of simplify_binary_operation to simplify a commutative,
1987    associative binary operation CODE with result mode MODE, operating
1988    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1989    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1990    canonicalization is possible.  */
1991 
1992 static rtx
1993 simplify_associative_operation (enum rtx_code code, machine_mode mode,
1994 				rtx op0, rtx op1)
1995 {
1996   rtx tem;
1997 
1998   /* Linearize the operator to the left.  */
1999   if (GET_CODE (op1) == code)
2000     {
2001       /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)".  */
2002       if (GET_CODE (op0) == code)
2003 	{
2004 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2005 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2006 	}
2007 
2008       /* "a op (b op c)" becomes "(b op c) op a".  */
2009       if (! swap_commutative_operands_p (op1, op0))
2010 	return simplify_gen_binary (code, mode, op1, op0);
2011 
2012       std::swap (op0, op1);
2013     }
2014 
2015   if (GET_CODE (op0) == code)
2016     {
2017       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2018       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2019 	{
2020 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2021 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2022 	}
2023 
2024       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2025       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2026       if (tem != 0)
2027         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2028 
2029       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2030       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2031       if (tem != 0)
2032         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2033     }
2034 
2035   return 0;
2036 }
2037 
2038 
2039 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2040    and OP1.  Return 0 if no simplification is possible.
2041 
2042    Don't use this for relational operations such as EQ or LT.
2043    Use simplify_relational_operation instead.  */
2044 rtx
2045 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2046 			   rtx op0, rtx op1)
2047 {
2048   rtx trueop0, trueop1;
2049   rtx tem;
2050 
2051   /* Relational operations don't work here.  We must know the mode
2052      of the operands in order to do the comparison correctly.
2053      Assuming a full word can give incorrect results.
2054      Consider comparing 128 with -128 in QImode.  */
2055   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2056   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2057 
2058   /* Make sure the constant is second.  */
2059   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2060       && swap_commutative_operands_p (op0, op1))
2061     std::swap (op0, op1);
2062 
2063   trueop0 = avoid_constant_pool_reference (op0);
2064   trueop1 = avoid_constant_pool_reference (op1);
2065 
2066   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2067   if (tem)
2068     return tem;
2069   tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2070 
2071   if (tem)
2072     return tem;
2073 
2074   /* If the above steps did not result in a simplification and op0 or op1
2075      were constant pool references, use the referenced constants directly.  */
2076   if (trueop0 != op0 || trueop1 != op1)
2077     return simplify_gen_binary (code, mode, trueop0, trueop1);
2078 
2079   return NULL_RTX;
2080 }
2081 
2082 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2083    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2084    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2085    actual constants.  */
2086 
2087 static rtx
2088 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2089 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2090 {
2091   rtx tem, reversed, opleft, opright;
2092   HOST_WIDE_INT val;
2093   unsigned int width = GET_MODE_PRECISION (mode);
2094 
2095   /* Even if we can't compute a constant result,
2096      there are some cases worth simplifying.  */
2097 
2098   switch (code)
2099     {
2100     case PLUS:
2101       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2102 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2103 	 when x is -0 and the rounding mode is not towards -infinity,
2104 	 since (-0) + 0 is then 0.  */
2105       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2106 	return op0;
2107 
2108       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2109 	 transformations are safe even for IEEE.  */
2110       if (GET_CODE (op0) == NEG)
2111 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2112       else if (GET_CODE (op1) == NEG)
2113 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2114 
2115       /* (~a) + 1 -> -a */
2116       if (INTEGRAL_MODE_P (mode)
2117 	  && GET_CODE (op0) == NOT
2118 	  && trueop1 == const1_rtx)
2119 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2120 
2121       /* Handle both-operands-constant cases.  We can only add
2122 	 CONST_INTs to constants since the sum of relocatable symbols
2123 	 can't be handled by most assemblers.  Don't add CONST_INT
2124 	 to CONST_INT since overflow won't be computed properly if wider
2125 	 than HOST_BITS_PER_WIDE_INT.  */
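      /* E.g. (plus (symbol_ref:SI "s") (const_int 4)) folds to
	 (const:SI (plus:SI (symbol_ref:SI "s") (const_int 4))) via
	 plus_constant.  */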
2126 
2127       if ((GET_CODE (op0) == CONST
2128 	   || GET_CODE (op0) == SYMBOL_REF
2129 	   || GET_CODE (op0) == LABEL_REF)
2130 	  && CONST_INT_P (op1))
2131 	return plus_constant (mode, op0, INTVAL (op1));
2132       else if ((GET_CODE (op1) == CONST
2133 		|| GET_CODE (op1) == SYMBOL_REF
2134 		|| GET_CODE (op1) == LABEL_REF)
2135 	       && CONST_INT_P (op0))
2136 	return plus_constant (mode, op1, INTVAL (op0));
2137 
2138       /* See if this is something like X * C - X or vice versa or
2139 	 if the multiplication is written as a shift.  If so, we can
2140 	 distribute and make a new multiply, shift, or maybe just
2141 	 have X (if C is 2 in the example above).  But don't make
2142 	 something more expensive than we had before.  */
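      /* E.g. (plus (mult X (const_int 3)) X) combines the coefficients
	 3 and 1 into (mult X (const_int 4)).  */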
2143 
2144       if (SCALAR_INT_MODE_P (mode))
2145 	{
2146 	  rtx lhs = op0, rhs = op1;
2147 
2148 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2149 	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
2150 
2151 	  if (GET_CODE (lhs) == NEG)
2152 	    {
2153 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2154 	      lhs = XEXP (lhs, 0);
2155 	    }
2156 	  else if (GET_CODE (lhs) == MULT
2157 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2158 	    {
2159 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2160 	      lhs = XEXP (lhs, 0);
2161 	    }
2162 	  else if (GET_CODE (lhs) == ASHIFT
2163 		   && CONST_INT_P (XEXP (lhs, 1))
2164                    && INTVAL (XEXP (lhs, 1)) >= 0
2165 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2166 	    {
2167 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2168 					    GET_MODE_PRECISION (mode));
2169 	      lhs = XEXP (lhs, 0);
2170 	    }
2171 
2172 	  if (GET_CODE (rhs) == NEG)
2173 	    {
2174 	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2175 	      rhs = XEXP (rhs, 0);
2176 	    }
2177 	  else if (GET_CODE (rhs) == MULT
2178 		   && CONST_INT_P (XEXP (rhs, 1)))
2179 	    {
2180 	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
2181 	      rhs = XEXP (rhs, 0);
2182 	    }
2183 	  else if (GET_CODE (rhs) == ASHIFT
2184 		   && CONST_INT_P (XEXP (rhs, 1))
2185 		   && INTVAL (XEXP (rhs, 1)) >= 0
2186 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2187 	    {
2188 	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2189 					    GET_MODE_PRECISION (mode));
2190 	      rhs = XEXP (rhs, 0);
2191 	    }
2192 
2193 	  if (rtx_equal_p (lhs, rhs))
2194 	    {
2195 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2196 	      rtx coeff;
2197 	      bool speed = optimize_function_for_speed_p (cfun);
2198 
2199 	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);
2200 
2201 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2202 	      return (set_src_cost (tem, mode, speed)
2203 		      <= set_src_cost (orig, mode, speed) ? tem : 0);
2204 	    }
2205 	}
2206 
2207       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
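      /* (Adding the sign-bit constant cannot carry into any other bit,
	 so it is equivalent to XORing it in.)  */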
2208       if (CONST_SCALAR_INT_P (op1)
2209 	  && GET_CODE (op0) == XOR
2210 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2211 	  && mode_signbit_p (mode, op1))
2212 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2213 				    simplify_gen_binary (XOR, mode, op1,
2214 							 XEXP (op0, 1)));
2215 
2216       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2217       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2218 	  && GET_CODE (op0) == MULT
2219 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2220 	{
2221 	  rtx in1, in2;
2222 
2223 	  in1 = XEXP (XEXP (op0, 0), 0);
2224 	  in2 = XEXP (op0, 1);
2225 	  return simplify_gen_binary (MINUS, mode, op1,
2226 				      simplify_gen_binary (MULT, mode,
2227 							   in1, in2));
2228 	}
2229 
2230       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2231 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2232 	 is 1.  */
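      /* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
	 is 0 or -1, i.e. (neg (ne A B)).  */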
2233       if (COMPARISON_P (op0)
2234 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2235 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2236 	  && (reversed = reversed_comparison (op0, mode)))
2237 	return
2238 	  simplify_gen_unary (NEG, mode, reversed, mode);
2239 
2240       /* If one of the operands is a PLUS or a MINUS, see if we can
2241 	 simplify this by the associative law.
2242 	 Don't use the associative law for floating point.
2243 	 The inaccuracy makes it nonassociative,
2244 	 and subtle programs can break if operations are associated.  */
2245 
2246       if (INTEGRAL_MODE_P (mode)
2247 	  && (plus_minus_operand_p (op0)
2248 	      || plus_minus_operand_p (op1))
2249 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2250 	return tem;
2251 
2252       /* Reassociate floating point addition only when the user
2253 	 specifies associative math operations.  */
2254       if (FLOAT_MODE_P (mode)
2255 	  && flag_associative_math)
2256 	{
2257 	  tem = simplify_associative_operation (code, mode, op0, op1);
2258 	  if (tem)
2259 	    return tem;
2260 	}
2261       break;
2262 
2263     case COMPARE:
2264       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2265       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2266 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2267 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2268 	{
2269 	  rtx xop00 = XEXP (op0, 0);
2270 	  rtx xop10 = XEXP (op1, 0);
2271 
2272 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2273 	    return xop00;
2274 
2275 	  if (REG_P (xop00) && REG_P (xop10)
2276 	      && REGNO (xop00) == REGNO (xop10)
2277 	      && GET_MODE (xop00) == mode
2278 	      && GET_MODE (xop10) == mode
2279 	      && GET_MODE_CLASS (mode) == MODE_CC)
2280 	    return xop00;
2281 	}
2282       break;
2283 
2284     case MINUS:
2285       /* We can't assume x-x is 0 even with non-IEEE floating point,
2286 	 but since it is zero except in very strange circumstances, we
2287 	 will treat it as zero with -ffinite-math-only.  */
2288       if (rtx_equal_p (trueop0, trueop1)
2289 	  && ! side_effects_p (op0)
2290 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2291 	return CONST0_RTX (mode);
2292 
2293       /* Change subtraction from zero into negation.  (0 - x) is the
2294 	 same as -x when x is NaN, infinite, or finite and nonzero.
2295 	 But if the mode has signed zeros, and does not round towards
2296 	 -infinity, then 0 - 0 is 0, not -0.  */
2297       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2298 	return simplify_gen_unary (NEG, mode, op1, mode);
2299 
2300       /* (-1 - a) is ~a, unless the expression contains symbolic
2301 	 constants, in which case not retaining additions and
2302 	 subtractions could cause invalid assembly to be produced.  */
2303       if (trueop0 == constm1_rtx
2304 	  && !contains_symbolic_reference_p (op1))
2305 	return simplify_gen_unary (NOT, mode, op1, mode);
2306 
2307       /* Subtracting 0 has no effect unless the mode has signed zeros
2308 	 and supports rounding towards -infinity.  In such a case,
2309 	 0 - 0 is -0.  */
2310       if (!(HONOR_SIGNED_ZEROS (mode)
2311 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2312 	  && trueop1 == CONST0_RTX (mode))
2313 	return op0;
2314 
2315       /* See if this is something like X * C - X or vice versa or
2316 	 if the multiplication is written as a shift.  If so, we can
2317 	 distribute and make a new multiply, shift, or maybe just
2318 	 have X (if C is 2 in the example above).  But don't make
2319 	 something more expensive than we had before.  */
2320 
2321       if (SCALAR_INT_MODE_P (mode))
2322 	{
2323 	  rtx lhs = op0, rhs = op1;
2324 
2325 	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
2326 	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
2327 
2328 	  if (GET_CODE (lhs) == NEG)
2329 	    {
2330 	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
2331 	      lhs = XEXP (lhs, 0);
2332 	    }
2333 	  else if (GET_CODE (lhs) == MULT
2334 		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2335 	    {
2336 	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
2337 	      lhs = XEXP (lhs, 0);
2338 	    }
2339 	  else if (GET_CODE (lhs) == ASHIFT
2340 		   && CONST_INT_P (XEXP (lhs, 1))
2341 		   && INTVAL (XEXP (lhs, 1)) >= 0
2342 		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
2343 	    {
2344 	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2345 					    GET_MODE_PRECISION (mode));
2346 	      lhs = XEXP (lhs, 0);
2347 	    }
2348 
2349 	  if (GET_CODE (rhs) == NEG)
2350 	    {
2351 	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
2352 	      rhs = XEXP (rhs, 0);
2353 	    }
2354 	  else if (GET_CODE (rhs) == MULT
2355 		   && CONST_INT_P (XEXP (rhs, 1)))
2356 	    {
2357 	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
2358 	      rhs = XEXP (rhs, 0);
2359 	    }
2360 	  else if (GET_CODE (rhs) == ASHIFT
2361 		   && CONST_INT_P (XEXP (rhs, 1))
2362 		   && INTVAL (XEXP (rhs, 1)) >= 0
2363 		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
2364 	    {
2365 	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2366 					       GET_MODE_PRECISION (mode));
2367 	      negcoeff1 = -negcoeff1;
2368 	      rhs = XEXP (rhs, 0);
2369 	    }
2370 
2371 	  if (rtx_equal_p (lhs, rhs))
2372 	    {
2373 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2374 	      rtx coeff;
2375 	      bool speed = optimize_function_for_speed_p (cfun);
2376 
2377 	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
2378 
2379 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2380 	      return (set_src_cost (tem, mode, speed)
2381 		      <= set_src_cost (orig, mode, speed) ? tem : 0);
2382 	    }
2383 	}
2384 
2385       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2386       if (GET_CODE (op1) == NEG)
2387 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2388 
2389       /* (-x - c) may be simplified as (-c - x).  */
2390       if (GET_CODE (op0) == NEG
2391 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2392 	{
2393 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2394 	  if (tem)
2395 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2396 	}
2397 
2398       /* Don't let a relocatable value get a negative coeff.  */
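      /* (Canonical RTL writes subtraction of a constant as addition of
	 its negation: (minus X (const_int C)) becomes
	 (plus X (const_int -C)).)  */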
2399       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2400 	return simplify_gen_binary (PLUS, mode,
2401 				    op0,
2402 				    neg_const_int (mode, op1));
2403 
2404       /* (x - (x & y)) -> (x & ~y) */
2405       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2406 	{
2407 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2408 	    {
2409 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2410 					GET_MODE (XEXP (op1, 1)));
2411 	      return simplify_gen_binary (AND, mode, op0, tem);
2412 	    }
2413 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2414 	    {
2415 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2416 					GET_MODE (XEXP (op1, 0)));
2417 	      return simplify_gen_binary (AND, mode, op0, tem);
2418 	    }
2419 	}
2420 
2421       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2422 	 by reversing the comparison code if valid.  */
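      /* E.g. (minus (const_int 1) (eq A B)) becomes (ne A B).  */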
2423       if (STORE_FLAG_VALUE == 1
2424 	  && trueop0 == const1_rtx
2425 	  && COMPARISON_P (op1)
2426 	  && (reversed = reversed_comparison (op1, mode)))
2427 	return reversed;
2428 
2429       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2430       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2431 	  && GET_CODE (op1) == MULT
2432 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2433 	{
2434 	  rtx in1, in2;
2435 
2436 	  in1 = XEXP (XEXP (op1, 0), 0);
2437 	  in2 = XEXP (op1, 1);
2438 	  return simplify_gen_binary (PLUS, mode,
2439 				      simplify_gen_binary (MULT, mode,
2440 							   in1, in2),
2441 				      op0);
2442 	}
2443 
2444       /* Canonicalize (minus (neg A) (mult B C)) to
2445 	 (minus (mult (neg B) C) A).  */
2446       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2447 	  && GET_CODE (op1) == MULT
2448 	  && GET_CODE (op0) == NEG)
2449 	{
2450 	  rtx in1, in2;
2451 
2452 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2453 	  in2 = XEXP (op1, 1);
2454 	  return simplify_gen_binary (MINUS, mode,
2455 				      simplify_gen_binary (MULT, mode,
2456 							   in1, in2),
2457 				      XEXP (op0, 0));
2458 	}
2459 
2460       /* If one of the operands is a PLUS or a MINUS, see if we can
2461 	 simplify this by the associative law.  This will, for example,
2462          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2463 	 Don't use the associative law for floating point.
2464 	 The inaccuracy makes it nonassociative,
2465 	 and subtle programs can break if operations are associated.  */
2466 
2467       if (INTEGRAL_MODE_P (mode)
2468 	  && (plus_minus_operand_p (op0)
2469 	      || plus_minus_operand_p (op1))
2470 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2471 	return tem;
2472       break;
2473 
2474     case MULT:
2475       if (trueop1 == constm1_rtx)
2476 	return simplify_gen_unary (NEG, mode, op0, mode);
2477 
2478       if (GET_CODE (op0) == NEG)
2479 	{
2480 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2481 	  /* If op1 is a MULT as well and simplify_unary_operation
2482 	     just moved the NEG to the second operand, simplify_gen_binary
2483 	     below could, through simplify_associative_operation, move
2484 	     the NEG around again and recurse endlessly.  */
2485 	  if (temp
2486 	      && GET_CODE (op1) == MULT
2487 	      && GET_CODE (temp) == MULT
2488 	      && XEXP (op1, 0) == XEXP (temp, 0)
2489 	      && GET_CODE (XEXP (temp, 1)) == NEG
2490 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2491 	    temp = NULL_RTX;
2492 	  if (temp)
2493 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2494 	}
2495       if (GET_CODE (op1) == NEG)
2496 	{
2497 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2498 	  /* If op0 is a MULT as well and simplify_unary_operation
2499 	     just moved the NEG to the second operand, simplify_gen_binary
2500 	     below could, through simplify_associative_operation, move
2501 	     the NEG around again and recurse endlessly.  */
2502 	  if (temp
2503 	      && GET_CODE (op0) == MULT
2504 	      && GET_CODE (temp) == MULT
2505 	      && XEXP (op0, 0) == XEXP (temp, 0)
2506 	      && GET_CODE (XEXP (temp, 1)) == NEG
2507 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2508 	    temp = NULL_RTX;
2509 	  if (temp)
2510 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2511 	}
2512 
2513       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2514 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2515 	 when the mode has signed zeros, since multiplying a negative
2516 	 number by 0 will give -0, not 0.  */
2517       if (!HONOR_NANS (mode)
2518 	  && !HONOR_SIGNED_ZEROS (mode)
2519 	  && trueop1 == CONST0_RTX (mode)
2520 	  && ! side_effects_p (op0))
2521 	return op1;
2522 
2523       /* In IEEE floating point, x*1 is not equivalent to x for
2524 	 signalling NaNs.  */
2525       if (!HONOR_SNANS (mode)
2526 	  && trueop1 == CONST1_RTX (mode))
2527 	return op0;
2528 
2529       /* Convert multiply by constant power of two into shift.  */
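      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */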
2530       if (CONST_SCALAR_INT_P (trueop1))
2531 	{
2532 	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
2533 	  if (val >= 0)
2534 	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2535 	}
2536 
2537       /* x*2 is x+x and x*(-1) is -x */
2538       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2539 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2540 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2541 	  && GET_MODE (op0) == mode)
2542 	{
2543 	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2544 
2545 	  if (real_equal (d1, &dconst2))
2546 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2547 
2548 	  if (!HONOR_SNANS (mode)
2549 	      && real_equal (d1, &dconstm1))
2550 	    return simplify_gen_unary (NEG, mode, op0, mode);
2551 	}
2552 
2553       /* Optimize -x * -x as x * x.  */
2554       if (FLOAT_MODE_P (mode)
2555 	  && GET_CODE (op0) == NEG
2556 	  && GET_CODE (op1) == NEG
2557 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2558 	  && !side_effects_p (XEXP (op0, 0)))
2559 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2560 
2561       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2562       if (SCALAR_FLOAT_MODE_P (mode)
2563 	  && GET_CODE (op0) == ABS
2564 	  && GET_CODE (op1) == ABS
2565 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2566 	  && !side_effects_p (XEXP (op0, 0)))
2567 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2568 
2569       /* Reassociate multiplication, but for floating point MULTs
2570 	 only when the user specifies unsafe math optimizations.  */
2571       if (! FLOAT_MODE_P (mode)
2572 	  || flag_unsafe_math_optimizations)
2573 	{
2574 	  tem = simplify_associative_operation (code, mode, op0, op1);
2575 	  if (tem)
2576 	    return tem;
2577 	}
2578       break;
2579 
2580     case IOR:
2581       if (trueop1 == CONST0_RTX (mode))
2582 	return op0;
2583       if (INTEGRAL_MODE_P (mode)
2584 	  && trueop1 == CONSTM1_RTX (mode)
2585 	  && !side_effects_p (op0))
2586 	return op1;
2587       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2588 	return op0;
2589       /* A | (~A) -> -1 */
2590       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2591 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2592 	  && ! side_effects_p (op0)
2593 	  && SCALAR_INT_MODE_P (mode))
2594 	return constm1_rtx;
2595 
2596       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2597       if (CONST_INT_P (op1)
2598 	  && HWI_COMPUTABLE_MODE_P (mode)
2599 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2600 	  && !side_effects_p (op0))
2601 	return op1;
2602 
2603       /* Canonicalize (X & C1) | C2.  */
2604       if (GET_CODE (op0) == AND
2605 	  && CONST_INT_P (trueop1)
2606 	  && CONST_INT_P (XEXP (op0, 1)))
2607 	{
2608 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2609 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2610 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2611 
2612 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2613 	  if ((c1 & c2) == c1
2614 	      && !side_effects_p (XEXP (op0, 0)))
2615 	    return trueop1;
2616 
2617 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2618 	  if (((c1|c2) & mask) == mask)
2619 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2620 
2621 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2622 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2623 	    {
2624 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2625 					 gen_int_mode (c1 & ~c2, mode));
2626 	      return simplify_gen_binary (IOR, mode, tem, op1);
2627 	    }
2628 	}
2629 
2630       /* Convert (A & B) | A to A.  */
2631       if (GET_CODE (op0) == AND
2632 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2633 	      || rtx_equal_p (XEXP (op0, 1), op1))
2634 	  && ! side_effects_p (XEXP (op0, 0))
2635 	  && ! side_effects_p (XEXP (op0, 1)))
2636 	return op1;
2637 
2638       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2639          mode size to (rotate A CX).  */
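      /* E.g. in SImode, (ior (ashift A (const_int 8))
	 (lshiftrt A (const_int 24))) is (rotate A (const_int 8)).  */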
2640 
2641       if (GET_CODE (op1) == ASHIFT
2642           || GET_CODE (op1) == SUBREG)
2643         {
2644 	  opleft = op1;
2645 	  opright = op0;
2646 	}
2647       else
2648         {
2649 	  opright = op1;
2650 	  opleft = op0;
2651 	}
2652 
2653       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2654           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2655           && CONST_INT_P (XEXP (opleft, 1))
2656           && CONST_INT_P (XEXP (opright, 1))
2657           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2658               == GET_MODE_PRECISION (mode)))
2659         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2660 
2661       /* Same, but for ashift that has been "simplified" to a wider mode
2662         by simplify_shift_const.  */
2663 
2664       if (GET_CODE (opleft) == SUBREG
2665           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2666           && GET_CODE (opright) == LSHIFTRT
2667           && GET_CODE (XEXP (opright, 0)) == SUBREG
2668           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2669           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2670           && (GET_MODE_SIZE (GET_MODE (opleft))
2671               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2672           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2673                           SUBREG_REG (XEXP (opright, 0)))
2674           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2675           && CONST_INT_P (XEXP (opright, 1))
2676           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2677               == GET_MODE_PRECISION (mode)))
2678         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2679                                XEXP (SUBREG_REG (opleft), 1));
2680 
2681       /* If we have (ior (and X C1) C2), simplify this by making
2682 	 C1 as small as possible if C1 actually changes.  */
2683       if (CONST_INT_P (op1)
2684 	  && (HWI_COMPUTABLE_MODE_P (mode)
2685 	      || INTVAL (op1) > 0)
2686 	  && GET_CODE (op0) == AND
2687 	  && CONST_INT_P (XEXP (op0, 1))
2688 	  && CONST_INT_P (op1)
2689 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2690 	{
2691 	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2692 					 gen_int_mode (UINTVAL (XEXP (op0, 1))
2693 						       & ~UINTVAL (op1),
2694 						       mode));
2695 	  return simplify_gen_binary (IOR, mode, tmp, op1);
2696 	}
2697 
2698       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2699          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2700 	 the PLUS does not affect any of the bits in OP1: then we can do
2701 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2702          can be safely shifted left C bits.  */
2703       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2704           && GET_CODE (XEXP (op0, 0)) == PLUS
2705           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2706           && CONST_INT_P (XEXP (op0, 1))
2707           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2708         {
2709           int count = INTVAL (XEXP (op0, 1));
2710           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2711 
2712           if (mask >> count == INTVAL (trueop1)
2713 	      && trunc_int_for_mode (mask, mode) == mask
2714               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2715 	    return simplify_gen_binary (ASHIFTRT, mode,
2716 					plus_constant (mode, XEXP (op0, 0),
2717 						       mask),
2718 					XEXP (op0, 1));
2719         }
2720 
2721       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2722       if (tem)
2723 	return tem;
2724 
2725       tem = simplify_associative_operation (code, mode, op0, op1);
2726       if (tem)
2727 	return tem;
2728       break;
2729 
2730     case XOR:
2731       if (trueop1 == CONST0_RTX (mode))
2732 	return op0;
2733       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2734 	return simplify_gen_unary (NOT, mode, op0, mode);
2735       if (rtx_equal_p (trueop0, trueop1)
2736 	  && ! side_effects_p (op0)
2737 	  && GET_MODE_CLASS (mode) != MODE_CC)
2738 	 return CONST0_RTX (mode);
2739 
2740       /* Canonicalize XOR of the most significant bit to PLUS.  */
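      /* (As with PLUS above, flipping the sign bit cannot interact with
	 any other bit, so XOR and PLUS of the sign-bit constant compute
	 the same value.)  */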
2741       if (CONST_SCALAR_INT_P (op1)
2742 	  && mode_signbit_p (mode, op1))
2743 	return simplify_gen_binary (PLUS, mode, op0, op1);
2744       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2745       if (CONST_SCALAR_INT_P (op1)
2746 	  && GET_CODE (op0) == PLUS
2747 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2748 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2749 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2750 				    simplify_gen_binary (XOR, mode, op1,
2751 							 XEXP (op0, 1)));
2752 
2753       /* If we are XORing two things that have no bits in common,
2754 	 convert them into an IOR.  This helps to detect rotation encoded
2755 	 using those methods and possibly other simplifications.  */
2756 
2757       if (HWI_COMPUTABLE_MODE_P (mode)
2758 	  && (nonzero_bits (op0, mode)
2759 	      & nonzero_bits (op1, mode)) == 0)
2760 	return (simplify_gen_binary (IOR, mode, op0, op1));
2761 
2762       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2763 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2764 	 (NOT y).  */
2765       {
2766 	int num_negated = 0;
2767 
2768 	if (GET_CODE (op0) == NOT)
2769 	  num_negated++, op0 = XEXP (op0, 0);
2770 	if (GET_CODE (op1) == NOT)
2771 	  num_negated++, op1 = XEXP (op1, 0);
2772 
2773 	if (num_negated == 2)
2774 	  return simplify_gen_binary (XOR, mode, op0, op1);
2775 	else if (num_negated == 1)
2776 	  return simplify_gen_unary (NOT, mode,
2777 				     simplify_gen_binary (XOR, mode, op0, op1),
2778 				     mode);
2779       }
2780 
2781       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2782 	 correspond to a machine insn or result in further simplifications
2783 	 if B is a constant.  */
2784 
2785       if (GET_CODE (op0) == AND
2786 	  && rtx_equal_p (XEXP (op0, 1), op1)
2787 	  && ! side_effects_p (op1))
2788 	return simplify_gen_binary (AND, mode,
2789 				    simplify_gen_unary (NOT, mode,
2790 							XEXP (op0, 0), mode),
2791 				    op1);
2792 
2793       else if (GET_CODE (op0) == AND
2794 	       && rtx_equal_p (XEXP (op0, 0), op1)
2795 	       && ! side_effects_p (op1))
2796 	return simplify_gen_binary (AND, mode,
2797 				    simplify_gen_unary (NOT, mode,
2798 							XEXP (op0, 1), mode),
2799 				    op1);
2800 
2801       /* Given (xor (ior (xor A B) C) D), where B, C and D are
2802 	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2803 	 out bits inverted twice and not set by C.  Similarly, given
2804 	 (xor (and (xor A B) C) D), simplify without inverting C in
2805 	 the xor operand: (xor (and A C) (B&C)^D).
2806       */
2807       else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2808 	       && GET_CODE (XEXP (op0, 0)) == XOR
2809 	       && CONST_INT_P (op1)
2810 	       && CONST_INT_P (XEXP (op0, 1))
2811 	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2812 	{
2813 	  enum rtx_code op = GET_CODE (op0);
2814 	  rtx a = XEXP (XEXP (op0, 0), 0);
2815 	  rtx b = XEXP (XEXP (op0, 0), 1);
2816 	  rtx c = XEXP (op0, 1);
2817 	  rtx d = op1;
2818 	  HOST_WIDE_INT bval = INTVAL (b);
2819 	  HOST_WIDE_INT cval = INTVAL (c);
2820 	  HOST_WIDE_INT dval = INTVAL (d);
2821 	  HOST_WIDE_INT xcval;
2822 
2823 	  if (op == IOR)
2824 	    xcval = ~cval;
2825 	  else
2826 	    xcval = cval;
2827 
2828 	  return simplify_gen_binary (XOR, mode,
2829 				      simplify_gen_binary (op, mode, a, c),
2830 				      gen_int_mode ((bval & xcval) ^ dval,
2831 						    mode));
2832 	}
2833 
2834       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2835 	 we can transform like this:
2836             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2837                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2838                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2839 	 Attempt a few simplifications when B and C are both constants.  */
2840       if (GET_CODE (op0) == AND
2841 	  && CONST_INT_P (op1)
2842 	  && CONST_INT_P (XEXP (op0, 1)))
2843 	{
2844 	  rtx a = XEXP (op0, 0);
2845 	  rtx b = XEXP (op0, 1);
2846 	  rtx c = op1;
2847 	  HOST_WIDE_INT bval = INTVAL (b);
2848 	  HOST_WIDE_INT cval = INTVAL (c);
2849 
2850 	  /* Instead of computing ~A&C, we compute its negated value,
2851 	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
2852 	     optimize for sure.  If it does not simplify, we still try
2853 	     to compute ~A&C below, but since that always allocates
2854 	     RTL, we don't try that before committing to returning a
2855 	     simplified expression.  */
2856 	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2857 						  GEN_INT (~cval));
2858 
2859 	  if ((~cval & bval) == 0)
2860 	    {
2861 	      rtx na_c = NULL_RTX;
2862 	      if (n_na_c)
2863 		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2864 	      else
2865 		{
2866 		  /* If ~A does not simplify, don't bother: we don't
2867 		     want to simplify 2 operations into 3, and if na_c
2868 		     were to simplify with na, n_na_c would have
2869 		     simplified as well.  */
2870 		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
2871 		  if (na)
2872 		    na_c = simplify_gen_binary (AND, mode, na, c);
2873 		}
2874 
2875 	      /* Try to simplify ~A&C | ~B&C.  */
2876 	      if (na_c != NULL_RTX)
2877 		return simplify_gen_binary (IOR, mode, na_c,
2878 					    gen_int_mode (~bval & cval, mode));
2879 	    }
2880 	  else
2881 	    {
2882 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2883 	      if (n_na_c == CONSTM1_RTX (mode))
2884 		{
2885 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2886 						    gen_int_mode (~cval & bval,
2887 								  mode));
2888 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2889 					      gen_int_mode (~bval & cval,
2890 							    mode));
2891 		}
2892 	    }
2893 	}
2894 
2895       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2896 	 comparison if STORE_FLAG_VALUE is 1.  */
2897       if (STORE_FLAG_VALUE == 1
2898 	  && trueop1 == const1_rtx
2899 	  && COMPARISON_P (op0)
2900 	  && (reversed = reversed_comparison (op0, mode)))
2901 	return reversed;
2902 
2903       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2904 	 is (lt foo (const_int 0)), so we can perform the above
2905 	 simplification if STORE_FLAG_VALUE is 1.  */
2906 
2907       if (STORE_FLAG_VALUE == 1
2908 	  && trueop1 == const1_rtx
2909 	  && GET_CODE (op0) == LSHIFTRT
2910 	  && CONST_INT_P (XEXP (op0, 1))
2911 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2912 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2913 
2914       /* (xor (comparison foo bar) (const_int sign-bit))
2915 	 when STORE_FLAG_VALUE is the sign bit.  */
2916       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2917 	  && trueop1 == const_true_rtx
2918 	  && COMPARISON_P (op0)
2919 	  && (reversed = reversed_comparison (op0, mode)))
2920 	return reversed;
2921 
2922       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2923       if (tem)
2924 	return tem;
2925 
2926       tem = simplify_associative_operation (code, mode, op0, op1);
2927       if (tem)
2928 	return tem;
2929       break;
2930 
2931     case AND:
2932       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2933 	return trueop1;
2934       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2935 	return op0;
2936       if (HWI_COMPUTABLE_MODE_P (mode))
2937 	{
2938 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2939 	  HOST_WIDE_INT nzop1;
2940 	  if (CONST_INT_P (trueop1))
2941 	    {
2942 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2943 	      /* If we are turning off bits already known off in OP0, we need
2944 		 not do an AND.  */
2945 	      if ((nzop0 & ~val1) == 0)
2946 		return op0;
2947 	    }
2948 	  nzop1 = nonzero_bits (trueop1, mode);
2949 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2950 	  if ((nzop1 & nzop0) == 0
2951 	      && !side_effects_p (op0) && !side_effects_p (op1))
2952 	    return CONST0_RTX (mode);
2953 	}
2954       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2955 	  && GET_MODE_CLASS (mode) != MODE_CC)
2956 	return op0;
2957       /* A & (~A) -> 0 */
2958       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2959 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2960 	  && ! side_effects_p (op0)
2961 	  && GET_MODE_CLASS (mode) != MODE_CC)
2962 	return CONST0_RTX (mode);
2963 
2964       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2965 	 there are no nonzero bits of C outside of X's mode.  */
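      /* E.g. (and (zero_extend:SI (reg:QI R)) (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI (reg:QI R) (const_int 0x7f))).  */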
2966       if ((GET_CODE (op0) == SIGN_EXTEND
2967 	   || GET_CODE (op0) == ZERO_EXTEND)
2968 	  && CONST_INT_P (trueop1)
2969 	  && HWI_COMPUTABLE_MODE_P (mode)
2970 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2971 	      & UINTVAL (trueop1)) == 0)
2972 	{
2973 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
2974 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2975 				     gen_int_mode (INTVAL (trueop1),
2976 						   imode));
2977 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2978 	}
2979 
2980       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2981 	 we might be able to further simplify the AND with X and potentially
2982 	 remove the truncation altogether.  */
2983       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2984 	{
2985 	  rtx x = XEXP (op0, 0);
2986 	  machine_mode xmode = GET_MODE (x);
2987 	  tem = simplify_gen_binary (AND, xmode, x,
2988 				     gen_int_mode (INTVAL (trueop1), xmode));
2989 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2990 	}
2991 
2992       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2993       if (GET_CODE (op0) == IOR
2994 	  && CONST_INT_P (trueop1)
2995 	  && CONST_INT_P (XEXP (op0, 1)))
2996 	{
2997 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2998 	  return simplify_gen_binary (IOR, mode,
2999 				      simplify_gen_binary (AND, mode,
3000 							   XEXP (op0, 0), op1),
3001 				      gen_int_mode (tmp, mode));
3002 	}
3003 
3004       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3005 	 insn (and may simplify more).  */
3006       if (GET_CODE (op0) == XOR
3007 	  && rtx_equal_p (XEXP (op0, 0), op1)
3008 	  && ! side_effects_p (op1))
3009 	return simplify_gen_binary (AND, mode,
3010 				    simplify_gen_unary (NOT, mode,
3011 							XEXP (op0, 1), mode),
3012 				    op1);
3013 
3014       if (GET_CODE (op0) == XOR
3015 	  && rtx_equal_p (XEXP (op0, 1), op1)
3016 	  && ! side_effects_p (op1))
3017 	return simplify_gen_binary (AND, mode,
3018 				    simplify_gen_unary (NOT, mode,
3019 							XEXP (op0, 0), mode),
3020 				    op1);
3021 
3022       /* Similarly for (~(A ^ B)) & A.  */
3023       if (GET_CODE (op0) == NOT
3024 	  && GET_CODE (XEXP (op0, 0)) == XOR
3025 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3026 	  && ! side_effects_p (op1))
3027 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3028 
3029       if (GET_CODE (op0) == NOT
3030 	  && GET_CODE (XEXP (op0, 0)) == XOR
3031 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3032 	  && ! side_effects_p (op1))
3033 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3034 
3035       /* Convert (A | B) & A to A.  */
3036       if (GET_CODE (op0) == IOR
3037 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3038 	      || rtx_equal_p (XEXP (op0, 1), op1))
3039 	  && ! side_effects_p (XEXP (op0, 0))
3040 	  && ! side_effects_p (XEXP (op0, 1)))
3041 	return op1;
3042 
3043       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3044 	 ((A & N) + B) & M -> (A + B) & M
3045 	 Similarly if (N & M) == 0,
3046 	 ((A | N) + B) & M -> (A + B) & M
3047 	 and for - instead of + and/or ^ instead of |.
3048          Also, if (N & M) == 0, then
3049 	 (A +- N) & M -> A & M.  */
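      /* A sketch of the first rule, with M == 0xff (cst == 8) and
	 N == 0xfff: ((A & 0xfff) + B) & 0xff -> (A + B) & 0xff, because
	 bits of A above bit 7 can only feed carries upward, never into
	 the masked low byte.  The guard below also requires that M not
	 be all ones and that M + 1 be a power of two.  */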
3050       if (CONST_INT_P (trueop1)
3051 	  && HWI_COMPUTABLE_MODE_P (mode)
3052 	  && ~UINTVAL (trueop1)
3053 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3054 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3055 	{
3056 	  rtx pmop[2];
3057 	  int which;
3058 
3059 	  pmop[0] = XEXP (op0, 0);
3060 	  pmop[1] = XEXP (op0, 1);
3061 
3062 	  if (CONST_INT_P (pmop[1])
3063 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3064 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3065 
3066 	  for (which = 0; which < 2; which++)
3067 	    {
3068 	      tem = pmop[which];
3069 	      switch (GET_CODE (tem))
3070 		{
3071 		case AND:
3072 		  if (CONST_INT_P (XEXP (tem, 1))
3073 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3074 		      == UINTVAL (trueop1))
3075 		    pmop[which] = XEXP (tem, 0);
3076 		  break;
3077 		case IOR:
3078 		case XOR:
3079 		  if (CONST_INT_P (XEXP (tem, 1))
3080 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3081 		    pmop[which] = XEXP (tem, 0);
3082 		  break;
3083 		default:
3084 		  break;
3085 		}
3086 	    }
3087 
3088 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3089 	    {
3090 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3091 					 pmop[0], pmop[1]);
3092 	      return simplify_gen_binary (code, mode, tem, op1);
3093 	    }
3094 	}
3095 
3096       /* (and X (ior (not X) Y)) -> (and X Y) */
3097       if (GET_CODE (op1) == IOR
3098 	  && GET_CODE (XEXP (op1, 0)) == NOT
3099 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3100        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3101 
3102       /* (and (ior (not X) Y) X) -> (and X Y) */
3103       if (GET_CODE (op0) == IOR
3104 	  && GET_CODE (XEXP (op0, 0)) == NOT
3105 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3106 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3107 
3108       /* (and X (ior Y (not X))) -> (and X Y) */
3109       if (GET_CODE (op1) == IOR
3110 	  && GET_CODE (XEXP (op1, 1)) == NOT
3111 	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3112        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3113 
3114       /* (and (ior Y (not X)) X) -> (and X Y) */
3115       if (GET_CODE (op0) == IOR
3116 	  && GET_CODE (XEXP (op0, 1)) == NOT
3117 	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3118 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3119 
3120       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3121       if (tem)
3122 	return tem;
3123 
3124       tem = simplify_associative_operation (code, mode, op0, op1);
3125       if (tem)
3126 	return tem;
3127       break;
3128 
3129     case UDIV:
3130       /* 0/x is 0 (or x&0 if x has side-effects).  */
3131       if (trueop0 == CONST0_RTX (mode))
3132 	{
3133 	  if (side_effects_p (op1))
3134 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3135 	  return trueop0;
3136 	}
3137       /* x/1 is x.  */
3138       if (trueop1 == CONST1_RTX (mode))
3139 	{
3140 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3141 	  if (tem)
3142 	    return tem;
3143 	}
3144       /* Convert divide by power of two into shift.  */
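      /* E.g. (udiv:SI x (const_int 8)) becomes (lshiftrt:SI x (const_int 3));
	 exact_log2 returns -1 for values that are not powers of two, so
	 only exact powers of two are converted.  */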
3145       if (CONST_INT_P (trueop1)
3146 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3147 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3148       break;
3149 
3150     case DIV:
3151       /* Handle floating point and integers separately.  */
3152       if (SCALAR_FLOAT_MODE_P (mode))
3153 	{
3154 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3155 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3156 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3157 	     zeros, since dividing 0 by a negative number gives -0.0.  */
3158 	  if (trueop0 == CONST0_RTX (mode)
3159 	      && !HONOR_NANS (mode)
3160 	      && !HONOR_SIGNED_ZEROS (mode)
3161 	      && ! side_effects_p (op1))
3162 	    return op0;
3163 	  /* x/1.0 is x.  */
3164 	  if (trueop1 == CONST1_RTX (mode)
3165 	      && !HONOR_SNANS (mode))
3166 	    return op0;
3167 
3168 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3169 	      && trueop1 != CONST0_RTX (mode))
3170 	    {
3171 	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3172 
3173 	      /* x/-1.0 is -x.  */
3174 	      if (real_equal (d1, &dconstm1)
3175 		  && !HONOR_SNANS (mode))
3176 		return simplify_gen_unary (NEG, mode, op0, mode);
3177 
3178 	      /* Change FP division by a constant into multiplication.
3179 		 Only do this with -freciprocal-math.  */
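	      /* E.g. x/2.0 becomes x*0.5.  For a divisor such as 3.0 the
		 reciprocal 1.0/3.0 is inexact, which is why this is only
		 done under -freciprocal-math.  */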
3180 	      if (flag_reciprocal_math
3181 		  && !real_equal (d1, &dconst0))
3182 		{
3183 		  REAL_VALUE_TYPE d;
3184 		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3185 		  tem = const_double_from_real_value (d, mode);
3186 		  return simplify_gen_binary (MULT, mode, op0, tem);
3187 		}
3188 	    }
3189 	}
3190       else if (SCALAR_INT_MODE_P (mode))
3191 	{
3192 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3193 	  if (trueop0 == CONST0_RTX (mode)
3194 	      && !cfun->can_throw_non_call_exceptions)
3195 	    {
3196 	      if (side_effects_p (op1))
3197 		return simplify_gen_binary (AND, mode, op1, trueop0);
3198 	      return trueop0;
3199 	    }
3200 	  /* x/1 is x.  */
3201 	  if (trueop1 == CONST1_RTX (mode))
3202 	    {
3203 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3204 	      if (tem)
3205 		return tem;
3206 	    }
3207 	  /* x/-1 is -x.  */
3208 	  if (trueop1 == constm1_rtx)
3209 	    {
3210 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3211 	      if (x)
3212 		return simplify_gen_unary (NEG, mode, x, mode);
3213 	    }
3214 	}
3215       break;
3216 
3217     case UMOD:
3218       /* 0%x is 0 (or x&0 if x has side-effects).  */
3219       if (trueop0 == CONST0_RTX (mode))
3220 	{
3221 	  if (side_effects_p (op1))
3222 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3223 	  return trueop0;
3224 	}
3225       /* x%1 is 0 (or x&0 if x has side-effects).  */
3226       if (trueop1 == CONST1_RTX (mode))
3227 	{
3228 	  if (side_effects_p (op0))
3229 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3230 	  return CONST0_RTX (mode);
3231 	}
3232       /* Implement modulus by power of two as AND.  */
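      /* E.g. (umod:SI x (const_int 16)) becomes
	 (and:SI x (const_int 15)).  */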
3233       if (CONST_INT_P (trueop1)
3234 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3235 	return simplify_gen_binary (AND, mode, op0,
3236 				    gen_int_mode (INTVAL (op1) - 1, mode));
3237       break;
3238 
3239     case MOD:
3240       /* 0%x is 0 (or x&0 if x has side-effects).  */
3241       if (trueop0 == CONST0_RTX (mode))
3242 	{
3243 	  if (side_effects_p (op1))
3244 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3245 	  return trueop0;
3246 	}
3247       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3248       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3249 	{
3250 	  if (side_effects_p (op0))
3251 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3252 	  return CONST0_RTX (mode);
3253 	}
3254       break;
3255 
3256     case ROTATERT:
3257     case ROTATE:
3258       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3259 	 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3260 	 bitsize - 1, rotate in the other direction by bitsize - op1, an
3261 	 amount in the range 1 .. bitsize / 2 - 1, instead.  */
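      /* E.g. in SImode (bitsize 32), (rotate x 20) becomes (rotatert x 12)
	 and (rotatert x 16) becomes (rotate x 16), while (rotate x 16) is
	 already canonical and is left alone.  */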
3262 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3263       if (CONST_INT_P (trueop1)
3264 	  && IN_RANGE (INTVAL (trueop1),
3265 		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
3266 		       GET_MODE_PRECISION (mode) - 1))
3267 	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3268 				    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
3269 							- INTVAL (trueop1)));
3270 #endif
3271       /* FALLTHRU */
3272     case ASHIFTRT:
3273       if (trueop1 == CONST0_RTX (mode))
3274 	return op0;
3275       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3276 	return op0;
3277       /* Rotating ~0, or shifting it right arithmetically, yields ~0.  */
3278       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3279 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3280 	  && ! side_effects_p (op1))
3281 	return op0;
3282       /* Given:
3283 	 scalar modes M1, M2
3284 	 scalar constants c1, c2
3285 	 size (M2) > size (M1)
3286 	 c1 == size (M2) - size (M1)
3287 	 optimize:
3288 	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3289 				 <low_part>)
3290 		      (const_int <c2>))
3291 	 to:
3292 	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3293 		    <low_part>).  */
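      /* A concrete instance, with M1 == SImode, M2 == DImode, c1 == 32,
	 c2 == 5, and <low_part> written as byte offset 0 (little-endian):
	 (ashiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
		      (const_int 5))
	 becomes
	 (subreg:SI (ashiftrt:DI (reg:DI r) (const_int 37)) 0).  */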
3294       if (code == ASHIFTRT
3295 	  && !VECTOR_MODE_P (mode)
3296 	  && SUBREG_P (op0)
3297 	  && CONST_INT_P (op1)
3298 	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3299 	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
3300 	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3301 	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3302 	      > GET_MODE_BITSIZE (mode))
3303 	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3304 	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
3305 		  - GET_MODE_BITSIZE (mode)))
3306 	  && subreg_lowpart_p (op0))
3307 	{
3308 	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3309 			     + INTVAL (op1));
3310 	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
3311 	  tmp = simplify_gen_binary (ASHIFTRT,
3312 				     GET_MODE (SUBREG_REG (op0)),
3313 				     XEXP (SUBREG_REG (op0), 0),
3314 				     tmp);
3315 	  return lowpart_subreg (mode, tmp, inner_mode);
3316 	}
3317     canonicalize_shift:
3318       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3319 	{
3320 	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
3321 	  if (val != INTVAL (op1))
3322 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3323 	}
3324       break;
3325 
3326     case ASHIFT:
3327     case SS_ASHIFT:
3328     case US_ASHIFT:
3329       if (trueop1 == CONST0_RTX (mode))
3330 	return op0;
3331       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3332 	return op0;
3333       goto canonicalize_shift;
3334 
3335     case LSHIFTRT:
3336       if (trueop1 == CONST0_RTX (mode))
3337 	return op0;
3338       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3339 	return op0;
3340       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
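      /* E.g. if CLZ of zero is defined to be the mode precision (say 32
	 in SImode), CLZ yields 0 .. 32, and only the value 32 has bit 5
	 set, so (lshiftrt:SI (clz:SI x) (const_int 5)) is 1 exactly
	 when x == 0.  */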
3341       if (GET_CODE (op0) == CLZ
3342 	  && CONST_INT_P (trueop1)
3343 	  && STORE_FLAG_VALUE == 1
3344 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3345 	{
3346 	  machine_mode imode = GET_MODE (XEXP (op0, 0));
3347 	  unsigned HOST_WIDE_INT zero_val = 0;
3348 
3349 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3350 	      && zero_val == GET_MODE_PRECISION (imode)
3351 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3352 	    return simplify_gen_relational (EQ, mode, imode,
3353 					    XEXP (op0, 0), const0_rtx);
3354 	}
3355       goto canonicalize_shift;
3356 
3357     case SMIN:
3358       if (width <= HOST_BITS_PER_WIDE_INT
3359 	  && mode_signbit_p (mode, trueop1)
3360 	  && ! side_effects_p (op0))
3361 	return op1;
3362       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3363 	return op0;
3364       tem = simplify_associative_operation (code, mode, op0, op1);
3365       if (tem)
3366 	return tem;
3367       break;
3368 
3369     case SMAX:
3370       if (width <= HOST_BITS_PER_WIDE_INT
3371 	  && CONST_INT_P (trueop1)
3372 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3373 	  && ! side_effects_p (op0))
3374 	return op1;
3375       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3376 	return op0;
3377       tem = simplify_associative_operation (code, mode, op0, op1);
3378       if (tem)
3379 	return tem;
3380       break;
3381 
3382     case UMIN:
3383       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3384 	return op1;
3385       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3386 	return op0;
3387       tem = simplify_associative_operation (code, mode, op0, op1);
3388       if (tem)
3389 	return tem;
3390       break;
3391 
3392     case UMAX:
3393       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3394 	return op1;
3395       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3396 	return op0;
3397       tem = simplify_associative_operation (code, mode, op0, op1);
3398       if (tem)
3399 	return tem;
3400       break;
3401 
3402     case SS_PLUS:
3403     case US_PLUS:
3404     case SS_MINUS:
3405     case US_MINUS:
3406     case SS_MULT:
3407     case US_MULT:
3408     case SS_DIV:
3409     case US_DIV:
3410       /* ??? There are simplifications that can be done.  */
3411       return 0;
3412 
3413     case VEC_SELECT:
3414       if (!VECTOR_MODE_P (mode))
3415 	{
3416 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3417 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3418 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3419 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3420 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3421 
3422 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3423 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3424 						      (trueop1, 0, 0)));
3425 
3426 	  /* Extract a scalar element from a nested VEC_SELECT expression
3427 	     (with an optional nested VEC_CONCAT expression).  Some targets
3428 	     (i386) extract a scalar element from a vector using a chain of
3429 	     nested VEC_SELECT expressions.  When the input operand is a
3430 	     memory operand, this operation can be simplified to a simple
3431 	     scalar load from an offset memory address.  */
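	  /* E.g. (vec_select:SI (vec_select:V4SI (reg:V8SI r)
						  (parallel [4 5 6 7]))
				 (parallel [1]))
	     picks element 5 of r, so it becomes
	     (vec_select:SI (reg:V8SI r) (parallel [5])).  */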
3432 	  if (GET_CODE (trueop0) == VEC_SELECT)
3433 	    {
3434 	      rtx op0 = XEXP (trueop0, 0);
3435 	      rtx op1 = XEXP (trueop0, 1);
3436 
3437 	      machine_mode opmode = GET_MODE (op0);
3438 	      int elt_size = GET_MODE_UNIT_SIZE (opmode);
3439 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3440 
3441 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3442 	      int elem;
3443 
3444 	      rtvec vec;
3445 	      rtx tmp_op, tmp;
3446 
3447 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3448 	      gcc_assert (i < n_elts);
3449 
3450 	      /* Select the element pointed to by the nested selector.  */
3451 	      elem = INTVAL (XVECEXP (op1, 0, i));
3452 
3453 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3454 	      if (GET_CODE (op0) == VEC_CONCAT)
3455 		{
3456 		  rtx op00 = XEXP (op0, 0);
3457 		  rtx op01 = XEXP (op0, 1);
3458 
3459 		  machine_mode mode00, mode01;
3460 		  int n_elts00, n_elts01;
3461 
3462 		  mode00 = GET_MODE (op00);
3463 		  mode01 = GET_MODE (op01);
3464 
3465 		  /* Find out number of elements of each operand.  */
3466 		  if (VECTOR_MODE_P (mode00))
3467 		    {
3468 		      elt_size = GET_MODE_UNIT_SIZE (mode00);
3469 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3470 		    }
3471 		  else
3472 		    n_elts00 = 1;
3473 
3474 		  if (VECTOR_MODE_P (mode01))
3475 		    {
3476 		      elt_size = GET_MODE_UNIT_SIZE (mode01);
3477 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3478 		    }
3479 		  else
3480 		    n_elts01 = 1;
3481 
3482 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3483 
3484 		  /* Select the correct operand of VEC_CONCAT and adjust
3485 		     the selector: elements 0 .. n_elts00 - 1 live in op00.  */
3486 		  if (elem < n_elts00)
3487 		    tmp_op = op00;
3488 		  else
3489 		    {
3490 		      tmp_op = op01;
3491 		      elem -= n_elts00;
3492 		    }
3493 		}
3494 	      else
3495 		tmp_op = op0;
3496 
3497 	      vec = rtvec_alloc (1);
3498 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3499 
3500 	      tmp = gen_rtx_fmt_ee (code, mode,
3501 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3502 	      return tmp;
3503 	    }
3504 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3505 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3506 	    return XEXP (trueop0, 0);
3507 	}
3508       else
3509 	{
3510 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3511 	  gcc_assert (GET_MODE_INNER (mode)
3512 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3513 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3514 
3515 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3516 	    {
3517 	      int elt_size = GET_MODE_UNIT_SIZE (mode);
3518 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3519 	      rtvec v = rtvec_alloc (n_elts);
3520 	      unsigned int i;
3521 
3522 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3523 	      for (i = 0; i < n_elts; i++)
3524 		{
3525 		  rtx x = XVECEXP (trueop1, 0, i);
3526 
3527 		  gcc_assert (CONST_INT_P (x));
3528 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3529 						       INTVAL (x));
3530 		}
3531 
3532 	      return gen_rtx_CONST_VECTOR (mode, v);
3533 	    }
3534 
3535 	  /* Recognize the identity.  */
3536 	  if (GET_MODE (trueop0) == mode)
3537 	    {
3538 	      bool maybe_ident = true;
3539 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3540 		{
3541 		  rtx j = XVECEXP (trueop1, 0, i);
3542 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3543 		    {
3544 		      maybe_ident = false;
3545 		      break;
3546 		    }
3547 		}
3548 	      if (maybe_ident)
3549 		return trueop0;
3550 	    }
3551 
3552 	  /* If we build {a,b} then permute it, build the result directly.  */
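	  /* E.g. with a, b, c and d of mode DF:
	     (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
					       (vec_concat:V2DF c d))
			      (parallel [3 0]))
	     becomes (vec_concat:V2DF d a).  */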
3553 	  if (XVECLEN (trueop1, 0) == 2
3554 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3555 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3556 	      && GET_CODE (trueop0) == VEC_CONCAT
3557 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3558 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3559 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3560 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3561 	    {
3562 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3563 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3564 	      rtx subop0, subop1;
3565 
3566 	      gcc_assert (i0 < 4 && i1 < 4);
3567 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3568 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3569 
3570 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3571 	    }
3572 
3573 	  if (XVECLEN (trueop1, 0) == 2
3574 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3575 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3576 	      && GET_CODE (trueop0) == VEC_CONCAT
3577 	      && GET_MODE (trueop0) == mode)
3578 	    {
3579 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3580 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3581 	      rtx subop0, subop1;
3582 
3583 	      gcc_assert (i0 < 2 && i1 < 2);
3584 	      subop0 = XEXP (trueop0, i0);
3585 	      subop1 = XEXP (trueop0, i1);
3586 
3587 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3588 	    }
3589 
3590 	  /* If we select one half of a vec_concat, return that.  */
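	  /* E.g. (vec_select:V2SI (vec_concat:V4SI (reg:V2SI x) (reg:V2SI y))
				   (parallel [2 3]))
	     selects exactly the second half and becomes just y.  */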
3591 	  if (GET_CODE (trueop0) == VEC_CONCAT
3592 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3593 	    {
3594 	      rtx subop0 = XEXP (trueop0, 0);
3595 	      rtx subop1 = XEXP (trueop0, 1);
3596 	      machine_mode mode0 = GET_MODE (subop0);
3597 	      machine_mode mode1 = GET_MODE (subop1);
3598 	      int li = GET_MODE_UNIT_SIZE (mode0);
3599 	      int l0 = GET_MODE_SIZE (mode0) / li;
3600 	      int l1 = GET_MODE_SIZE (mode1) / li;
3601 	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3602 	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3603 		{
3604 		  bool success = true;
3605 		  for (int i = 1; i < l0; ++i)
3606 		    {
3607 		      rtx j = XVECEXP (trueop1, 0, i);
3608 		      if (!CONST_INT_P (j) || INTVAL (j) != i)
3609 			{
3610 			  success = false;
3611 			  break;
3612 			}
3613 		    }
3614 		  if (success)
3615 		    return subop0;
3616 		}
3617 	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3618 		{
3619 		  bool success = true;
3620 		  for (int i = 1; i < l1; ++i)
3621 		    {
3622 		      rtx j = XVECEXP (trueop1, 0, i);
3623 		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3624 			{
3625 			  success = false;
3626 			  break;
3627 			}
3628 		    }
3629 		  if (success)
3630 		    return subop1;
3631 		}
3632 	    }
3633 	}
3634 
3635       if (XVECLEN (trueop1, 0) == 1
3636 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3637 	  && GET_CODE (trueop0) == VEC_CONCAT)
3638 	{
3639 	  rtx vec = trueop0;
3640 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3641 
3642 	  /* Try to find the element in the VEC_CONCAT.  */
3643 	  while (GET_MODE (vec) != mode
3644 		 && GET_CODE (vec) == VEC_CONCAT)
3645 	    {
3646 	      HOST_WIDE_INT vec_size;
3647 
3648 	      if (CONST_INT_P (XEXP (vec, 0)))
3649 	        {
3650 	          /* vec_concat of two const_ints doesn't make sense with
3651 	             respect to modes.  */
3652 	          if (CONST_INT_P (XEXP (vec, 1)))
3653 	            return 0;
3654 
3655 	          vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3656 	                     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3657 	        }
3658 	      else
3659 	        vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3660 
3661 	      if (offset < vec_size)
3662 		vec = XEXP (vec, 0);
3663 	      else
3664 		{
3665 		  offset -= vec_size;
3666 		  vec = XEXP (vec, 1);
3667 		}
3668 	      vec = avoid_constant_pool_reference (vec);
3669 	    }
3670 
3671 	  if (GET_MODE (vec) == mode)
3672 	    return vec;
3673 	}
3674 
3675       /* If we select elements in a vec_merge that all come from the same
3676 	 operand, select from that operand directly.  */
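      /* E.g. (vec_select:V2SI (vec_merge:V4SI a b (const_int 5))
			       (parallel [0 2]))
	 reads only elements 0 and 2, and mask bits 0 and 2 are both set,
	 so every selected element comes from a; the result is
	 (vec_select:V2SI a (parallel [0 2])).  */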
3677       if (GET_CODE (op0) == VEC_MERGE)
3678 	{
3679 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3680 	  if (CONST_INT_P (trueop02))
3681 	    {
3682 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3683 	      bool all_operand0 = true;
3684 	      bool all_operand1 = true;
3685 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3686 		{
3687 		  rtx j = XVECEXP (trueop1, 0, i);
3688 		  if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3689 		    all_operand1 = false;
3690 		  else
3691 		    all_operand0 = false;
3692 		}
3693 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3694 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3695 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3696 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3697 	    }
3698 	}
3699 
3700       /* If we have two nested selects that are inverses of each
3701 	 other, replace them with the source operand.  */
3702       if (GET_CODE (trueop0) == VEC_SELECT
3703 	  && GET_MODE (XEXP (trueop0, 0)) == mode)
3704 	{
3705 	  rtx op0_subop1 = XEXP (trueop0, 1);
3706 	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3707 	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3708 
3709 	  /* Apply the outer ordering vector to the inner one.  (The inner
3710 	     ordering vector is expressly permitted to be of a different
3711 	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
3712 	     then the two VEC_SELECTs cancel.  */
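	  /* E.g. two selectors of (parallel [2 3 0 1]) compose to
	     { 0, 1, 2, 3 }, so
	     (vec_select:V4SI (vec_select:V4SI x (parallel [2 3 0 1]))
			      (parallel [2 3 0 1]))
	     collapses to x.  */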
3713 	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3714 	    {
3715 	      rtx x = XVECEXP (trueop1, 0, i);
3716 	      if (!CONST_INT_P (x))
3717 		return 0;
3718 	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3719 	      if (!CONST_INT_P (y) || i != INTVAL (y))
3720 		return 0;
3721 	    }
3722 	  return XEXP (trueop0, 0);
3723 	}
3724 
3725       return 0;
3726     case VEC_CONCAT:
3727       {
3728 	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3729 				      ? GET_MODE (trueop0)
3730 				      : GET_MODE_INNER (mode));
3731 	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3732 				      ? GET_MODE (trueop1)
3733 				      : GET_MODE_INNER (mode));
3734 
3735 	gcc_assert (VECTOR_MODE_P (mode));
3736 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3737 		    == GET_MODE_SIZE (mode));
3738 
3739 	if (VECTOR_MODE_P (op0_mode))
3740 	  gcc_assert (GET_MODE_INNER (mode)
3741 		      == GET_MODE_INNER (op0_mode));
3742 	else
3743 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3744 
3745 	if (VECTOR_MODE_P (op1_mode))
3746 	  gcc_assert (GET_MODE_INNER (mode)
3747 		      == GET_MODE_INNER (op1_mode));
3748 	else
3749 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3750 
3751 	if ((GET_CODE (trueop0) == CONST_VECTOR
3752 	     || CONST_SCALAR_INT_P (trueop0)
3753 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3754 	    && (GET_CODE (trueop1) == CONST_VECTOR
3755 		|| CONST_SCALAR_INT_P (trueop1)
3756 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3757 	  {
3758 	    int elt_size = GET_MODE_UNIT_SIZE (mode);
3759 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3760 	    rtvec v = rtvec_alloc (n_elts);
3761 	    unsigned int i;
3762 	    unsigned in_n_elts = 1;
3763 
3764 	    if (VECTOR_MODE_P (op0_mode))
3765 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3766 	    for (i = 0; i < n_elts; i++)
3767 	      {
3768 		if (i < in_n_elts)
3769 		  {
3770 		    if (!VECTOR_MODE_P (op0_mode))
3771 		      RTVEC_ELT (v, i) = trueop0;
3772 		    else
3773 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3774 		  }
3775 		else
3776 		  {
3777 		    if (!VECTOR_MODE_P (op1_mode))
3778 		      RTVEC_ELT (v, i) = trueop1;
3779 		    else
3780 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3781 							   i - in_n_elts);
3782 		  }
3783 	      }
3784 
3785 	    return gen_rtx_CONST_VECTOR (mode, v);
3786 	  }
3787 
3788 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
3789 	   Restrict the transformation to avoid generating a VEC_SELECT with a
3790 	   mode unrelated to its operand.  */
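	/* E.g. (vec_concat:V4SI (vec_select:V2SI x (parallel [0 2]))
				 (vec_select:V2SI x (parallel [1 3])))
	   with x of mode V4SI becomes
	   (vec_select:V4SI x (parallel [0 2 1 3])).  */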
3791 	if (GET_CODE (trueop0) == VEC_SELECT
3792 	    && GET_CODE (trueop1) == VEC_SELECT
3793 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3794 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
3795 	  {
3796 	    rtx par0 = XEXP (trueop0, 1);
3797 	    rtx par1 = XEXP (trueop1, 1);
3798 	    int len0 = XVECLEN (par0, 0);
3799 	    int len1 = XVECLEN (par1, 0);
3800 	    rtvec vec = rtvec_alloc (len0 + len1);
3801 	    for (int i = 0; i < len0; i++)
3802 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3803 	    for (int i = 0; i < len1; i++)
3804 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3805 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3806 					gen_rtx_PARALLEL (VOIDmode, vec));
3807 	  }
3808       }
3809       return 0;
3810 
3811     default:
3812       gcc_unreachable ();
3813     }
3814 
3815   return 0;
3816 }
3817 
3818 rtx
3819 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
3820 				 rtx op0, rtx op1)
3821 {
3822   unsigned int width = GET_MODE_PRECISION (mode);
3823 
3824   if (VECTOR_MODE_P (mode)
3825       && code != VEC_CONCAT
3826       && GET_CODE (op0) == CONST_VECTOR
3827       && GET_CODE (op1) == CONST_VECTOR)
3828     {
3829       unsigned n_elts = GET_MODE_NUNITS (mode);
3830       machine_mode op0mode = GET_MODE (op0);
3831       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3832       machine_mode op1mode = GET_MODE (op1);
3833       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3834       rtvec v = rtvec_alloc (n_elts);
3835       unsigned int i;
3836 
3837       gcc_assert (op0_n_elts == n_elts);
3838       gcc_assert (op1_n_elts == n_elts);
3839       for (i = 0; i < n_elts; i++)
3840 	{
3841 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3842 					     CONST_VECTOR_ELT (op0, i),
3843 					     CONST_VECTOR_ELT (op1, i));
3844 	  if (!x)
3845 	    return 0;
3846 	  RTVEC_ELT (v, i) = x;
3847 	}
3848 
3849       return gen_rtx_CONST_VECTOR (mode, v);
3850     }
3851 
3852   if (VECTOR_MODE_P (mode)
3853       && code == VEC_CONCAT
3854       && (CONST_SCALAR_INT_P (op0)
3855 	  || GET_CODE (op0) == CONST_FIXED
3856 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3857       && (CONST_SCALAR_INT_P (op1)
3858 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3859 	  || GET_CODE (op1) == CONST_FIXED))
3860     {
3861       unsigned n_elts = GET_MODE_NUNITS (mode);
3862       rtvec v = rtvec_alloc (n_elts);
3863 
3864       gcc_assert (n_elts >= 2);
3865       if (n_elts == 2)
3866 	{
3867 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3868 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3869 
3870 	  RTVEC_ELT (v, 0) = op0;
3871 	  RTVEC_ELT (v, 1) = op1;
3872 	}
3873       else
3874 	{
3875 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3876 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3877 	  unsigned i;
3878 
3879 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3880 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3881 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3882 
3883 	  for (i = 0; i < op0_n_elts; ++i)
3884 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3885 	  for (i = 0; i < op1_n_elts; ++i)
3886 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3887 	}
3888 
3889       return gen_rtx_CONST_VECTOR (mode, v);
3890     }
3891 
3892   if (SCALAR_FLOAT_MODE_P (mode)
3893       && CONST_DOUBLE_AS_FLOAT_P (op0)
3894       && CONST_DOUBLE_AS_FLOAT_P (op1)
3895       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3896     {
3897       if (code == AND
3898 	  || code == IOR
3899 	  || code == XOR)
3900 	{
3901 	  long tmp0[4];
3902 	  long tmp1[4];
3903 	  REAL_VALUE_TYPE r;
3904 	  int i;
3905 
3906 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3907 			  GET_MODE (op0));
3908 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3909 			  GET_MODE (op1));
3910 	  for (i = 0; i < 4; i++)
3911 	    {
3912 	      switch (code)
3913 	      {
3914 	      case AND:
3915 		tmp0[i] &= tmp1[i];
3916 		break;
3917 	      case IOR:
3918 		tmp0[i] |= tmp1[i];
3919 		break;
3920 	      case XOR:
3921 		tmp0[i] ^= tmp1[i];
3922 		break;
3923 	      default:
3924 		gcc_unreachable ();
3925 	      }
3926 	    }
3927 	   real_from_target (&r, tmp0, mode);
3928 	   return const_double_from_real_value (r, mode);
3929 	}
3930       else
3931 	{
3932 	  REAL_VALUE_TYPE f0, f1, value, result;
3933 	  const REAL_VALUE_TYPE *opr0, *opr1;
3934 	  bool inexact;
3935 
3936 	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
3937 	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);
3938 
3939 	  if (HONOR_SNANS (mode)
3940 	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
3941 	          || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
3942 	    return 0;
3943 
3944 	  real_convert (&f0, mode, opr0);
3945 	  real_convert (&f1, mode, opr1);
3946 
3947 	  if (code == DIV
3948 	      && real_equal (&f1, &dconst0)
3949 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3950 	    return 0;
3951 
3952 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3953 	      && flag_trapping_math
3954 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3955 	    {
3956 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3957 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3958 
3959 	      switch (code)
3960 		{
3961 		case PLUS:
3962 		  /* Inf + -Inf = NaN plus exception.  */
3963 		  if (s0 != s1)
3964 		    return 0;
3965 		  break;
3966 		case MINUS:
3967 		  /* Inf - Inf = NaN plus exception.  */
3968 		  if (s0 == s1)
3969 		    return 0;
3970 		  break;
3971 		case DIV:
3972 		  /* Inf / Inf = NaN plus exception.  */
3973 		  return 0;
3974 		default:
3975 		  break;
3976 		}
3977 	    }
3978 
3979 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3980 	      && flag_trapping_math
3981 	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
3982 		  || (REAL_VALUE_ISINF (f1)
3983 		      && real_equal (&f0, &dconst0))))
3984 	    /* Inf * 0 = NaN plus exception.  */
3985 	    return 0;
3986 
3987 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3988 				     &f0, &f1);
3989 	  real_convert (&result, mode, &value);
3990 
3991 	  /* Don't constant fold this floating point operation if
3992 	     the result has overflowed and flag_trapping_math.  */
3993 
3994 	  if (flag_trapping_math
3995 	      && MODE_HAS_INFINITIES (mode)
3996 	      && REAL_VALUE_ISINF (result)
3997 	      && !REAL_VALUE_ISINF (f0)
3998 	      && !REAL_VALUE_ISINF (f1))
3999 	    /* Overflow plus exception.  */
4000 	    return 0;
4001 
4002 	  /* Don't constant fold this floating point operation if the
4003 	     result may depend upon the run-time rounding mode and
4004 	     flag_rounding_math is set, or if GCC's software emulation
4005 	     is unable to accurately represent the result.  */
4006 
4007 	  if ((flag_rounding_math
4008 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4009 	      && (inexact || !real_identical (&result, &value)))
4010 	    return NULL_RTX;
4011 
4012 	  return const_double_from_real_value (result, mode);
4013 	}
4014     }
4015 
4016   /* We can fold some multi-word operations.  */
4017   if ((GET_MODE_CLASS (mode) == MODE_INT
4018        || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
4019       && CONST_SCALAR_INT_P (op0)
4020       && CONST_SCALAR_INT_P (op1))
4021     {
4022       wide_int result;
4023       bool overflow;
4024       rtx_mode_t pop0 = std::make_pair (op0, mode);
4025       rtx_mode_t pop1 = std::make_pair (op1, mode);
4026 
4027 #if TARGET_SUPPORTS_WIDE_INT == 0
4028       /* This assert keeps the simplification from producing a result
4029 	 that cannot be represented in a CONST_DOUBLE.  A lot of upstream
4030 	 callers expect that this function never fails to simplify
4031 	 something, so if you added this to the test above, the code
4032 	 would die later anyway.  If this assert triggers, you just need
4033 	 to make the port support wide int.  */
4034       gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
4035 #endif
4036       switch (code)
4037 	{
4038 	case MINUS:
4039 	  result = wi::sub (pop0, pop1);
4040 	  break;
4041 
4042 	case PLUS:
4043 	  result = wi::add (pop0, pop1);
4044 	  break;
4045 
4046 	case MULT:
4047 	  result = wi::mul (pop0, pop1);
4048 	  break;
4049 
4050 	case DIV:
4051 	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4052 	  if (overflow)
4053 	    return NULL_RTX;
4054 	  break;
4055 
4056 	case MOD:
4057 	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4058 	  if (overflow)
4059 	    return NULL_RTX;
4060 	  break;
4061 
4062 	case UDIV:
4063 	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4064 	  if (overflow)
4065 	    return NULL_RTX;
4066 	  break;
4067 
4068 	case UMOD:
4069 	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4070 	  if (overflow)
4071 	    return NULL_RTX;
4072 	  break;
4073 
4074 	case AND:
4075 	  result = wi::bit_and (pop0, pop1);
4076 	  break;
4077 
4078 	case IOR:
4079 	  result = wi::bit_or (pop0, pop1);
4080 	  break;
4081 
4082 	case XOR:
4083 	  result = wi::bit_xor (pop0, pop1);
4084 	  break;
4085 
4086 	case SMIN:
4087 	  result = wi::smin (pop0, pop1);
4088 	  break;
4089 
4090 	case SMAX:
4091 	  result = wi::smax (pop0, pop1);
4092 	  break;
4093 
4094 	case UMIN:
4095 	  result = wi::umin (pop0, pop1);
4096 	  break;
4097 
4098 	case UMAX:
4099 	  result = wi::umax (pop0, pop1);
4100 	  break;
4101 
4102 	case LSHIFTRT:
4103 	case ASHIFTRT:
4104 	case ASHIFT:
4105 	  {
4106 	    wide_int wop1 = pop1;
4107 	    if (SHIFT_COUNT_TRUNCATED)
4108 	      wop1 = wi::umod_trunc (wop1, width);
4109 	    else if (wi::geu_p (wop1, width))
4110 	      return NULL_RTX;
4111 
4112 	    switch (code)
4113 	      {
4114 	      case LSHIFTRT:
4115 		result = wi::lrshift (pop0, wop1);
4116 		break;
4117 
4118 	      case ASHIFTRT:
4119 		result = wi::arshift (pop0, wop1);
4120 		break;
4121 
4122 	      case ASHIFT:
4123 		result = wi::lshift (pop0, wop1);
4124 		break;
4125 
4126 	      default:
4127 		gcc_unreachable ();
4128 	      }
4129 	    break;
4130 	  }
4131 	case ROTATE:
4132 	case ROTATERT:
4133 	  {
4134 	    if (wi::neg_p (pop1))
4135 	      return NULL_RTX;
4136 
4137 	    switch (code)
4138 	      {
4139 	      case ROTATE:
4140 		result = wi::lrotate (pop0, pop1);
4141 		break;
4142 
4143 	      case ROTATERT:
4144 		result = wi::rrotate (pop0, pop1);
4145 		break;
4146 
4147 	      default:
4148 		gcc_unreachable ();
4149 	      }
4150 	    break;
4151 	  }
4152 	default:
4153 	  return NULL_RTX;
4154 	}
4155       return immed_wide_int_const (result, mode);
4156     }
4157 
4158   return NULL_RTX;
4159 }
4160 
4161 
4162 
4163 /* Return a positive integer if X should sort after Y.  The value
4164    returned is 1 if and only if X and Y are both regs.  */
4165 
4166 static int
4167 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4168 {
4169   int result;
4170 
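  /* The doubling below keeps any nonzero precedence difference even, so
     the only way this function can return exactly 1 is the REG/REG case
     at the bottom; the caller relies on that to treat a plain register
     reordering as not being a canonicalization.  */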
4171   result = (commutative_operand_precedence (y)
4172 	    - commutative_operand_precedence (x));
4173   if (result)
4174     return result + result;
4175 
4176   /* Group together equal REGs to do more simplification.  */
4177   if (REG_P (x) && REG_P (y))
4178     return REGNO (x) > REGNO (y);
4179 
4180   return 0;
4181 }
4182 
4183 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4184    operands may be another PLUS or MINUS.
4185 
4186    Rather than testing for specific cases, we do this by a brute-force method
4187    and do all possible simplifications until no more changes occur.  Then
4188    we rebuild the operation.
4189 
4190    May return NULL_RTX when no changes were made.  */
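/* E.g. (plus (minus a b) (plus b c)) is flattened into the operand list
   { +a, -b, +b, +c }; the -b/+b pair cancels, and the result is rebuilt
   as (plus a c).  */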
4191 
4192 static rtx
4193 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4194 		     rtx op1)
4195 {
4196   struct simplify_plus_minus_op_data
4197   {
4198     rtx op;
4199     short neg;
4200   } ops[16];
4201   rtx result, tem;
4202   int n_ops = 2;
4203   int changed, n_constants, canonicalized = 0;
4204   int i, j;
4205 
4206   memset (ops, 0, sizeof ops);
4207 
4208   /* Set up the two operands and then expand them until nothing has been
4209      changed.  If we run out of room in our array, give up; this should
4210      almost never happen.  */
4211 
4212   ops[0].op = op0;
4213   ops[0].neg = 0;
4214   ops[1].op = op1;
4215   ops[1].neg = (code == MINUS);
4216 
4217   do
4218     {
4219       changed = 0;
4220       n_constants = 0;
4221 
4222       for (i = 0; i < n_ops; i++)
4223 	{
4224 	  rtx this_op = ops[i].op;
4225 	  int this_neg = ops[i].neg;
4226 	  enum rtx_code this_code = GET_CODE (this_op);
4227 
4228 	  switch (this_code)
4229 	    {
4230 	    case PLUS:
4231 	    case MINUS:
4232 	      if (n_ops == ARRAY_SIZE (ops))
4233 		return NULL_RTX;
4234 
4235 	      ops[n_ops].op = XEXP (this_op, 1);
4236 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4237 	      n_ops++;
4238 
4239 	      ops[i].op = XEXP (this_op, 0);
4240 	      changed = 1;
4241 	      /* If this operand was negated then we will potentially
4242 		 canonicalize the expression.  Similarly if we don't
4243 		 place the operands adjacent we're re-ordering the
4244 		 expression and thus might be performing a
4245 		 canonicalization.  Ignore register re-ordering.
4246 		 ??? It might be better to shuffle the ops array here,
4247 		 but then (plus (plus (A, B), plus (C, D))) wouldn't
4248 		 be seen as non-canonical.  */
4249 	      if (this_neg
4250 		  || (i != n_ops - 2
4251 		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4252 		canonicalized = 1;
4253 	      break;
4254 
4255 	    case NEG:
4256 	      ops[i].op = XEXP (this_op, 0);
4257 	      ops[i].neg = ! this_neg;
4258 	      changed = 1;
4259 	      canonicalized = 1;
4260 	      break;
4261 
4262 	    case CONST:
4263 	      if (n_ops != ARRAY_SIZE (ops)
4264 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4265 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4266 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4267 		{
4268 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4269 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4270 		  ops[n_ops].neg = this_neg;
4271 		  n_ops++;
4272 		  changed = 1;
4273 		  canonicalized = 1;
4274 		}
4275 	      break;
4276 
4277 	    case NOT:
4278 	      /* ~a -> (-a - 1) */
4279 	      if (n_ops != ARRAY_SIZE (ops))
4280 		{
4281 		  ops[n_ops].op = CONSTM1_RTX (mode);
4282 		  ops[n_ops++].neg = this_neg;
4283 		  ops[i].op = XEXP (this_op, 0);
4284 		  ops[i].neg = !this_neg;
4285 		  changed = 1;
4286 		  canonicalized = 1;
4287 		}
4288 	      break;
4289 
4290 	    case CONST_INT:
4291 	      n_constants++;
4292 	      if (this_neg)
4293 		{
4294 		  ops[i].op = neg_const_int (mode, this_op);
4295 		  ops[i].neg = 0;
4296 		  changed = 1;
4297 		  canonicalized = 1;
4298 		}
4299 	      break;
4300 
4301 	    default:
4302 	      break;
4303 	    }
4304 	}
4305     }
4306   while (changed);
4307 
4308   if (n_constants > 1)
4309     canonicalized = 1;
4310 
4311   gcc_assert (n_ops >= 2);
4312 
4313   /* If we only have two operands, we can avoid the loops.  */
4314   if (n_ops == 2)
4315     {
4316       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4317       rtx lhs, rhs;
4318 
4319       /* Get the two operands.  Be careful with the order, especially for
4320 	 the cases where code == MINUS.  */
4321       if (ops[0].neg && ops[1].neg)
4322 	{
4323 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4324 	  rhs = ops[1].op;
4325 	}
4326       else if (ops[0].neg)
4327 	{
4328 	  lhs = ops[1].op;
4329 	  rhs = ops[0].op;
4330 	}
4331       else
4332 	{
4333 	  lhs = ops[0].op;
4334 	  rhs = ops[1].op;
4335 	}
4336 
4337       return simplify_const_binary_operation (code, mode, lhs, rhs);
4338     }
4339 
4340   /* Now simplify each pair of operands until nothing changes.  */
4341   while (1)
4342     {
4343       /* Insertion sort is good enough for a small array.  */
4344       for (i = 1; i < n_ops; i++)
4345 	{
4346 	  struct simplify_plus_minus_op_data save;
4347 	  int cmp;
4348 
4349 	  j = i - 1;
4350 	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4351 	  if (cmp <= 0)
4352 	    continue;
4353 	  /* Just swapping registers doesn't count as canonicalization.  */
4354 	  if (cmp != 1)
4355 	    canonicalized = 1;
4356 
4357 	  save = ops[i];
4358 	  do
4359 	    ops[j + 1] = ops[j];
4360 	  while (j--
4361 		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4362 	  ops[j + 1] = save;
4363 	}
4364 
4365       changed = 0;
4366       for (i = n_ops - 1; i > 0; i--)
4367 	for (j = i - 1; j >= 0; j--)
4368 	  {
4369 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4370 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4371 
4372 	    if (lhs != 0 && rhs != 0)
4373 	      {
4374 		enum rtx_code ncode = PLUS;
4375 
4376 		if (lneg != rneg)
4377 		  {
4378 		    ncode = MINUS;
4379 		    if (lneg)
4380 		      std::swap (lhs, rhs);
4381 		  }
4382 		else if (swap_commutative_operands_p (lhs, rhs))
4383 		  std::swap (lhs, rhs);
4384 
4385 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4386 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4387 		  {
4388 		    rtx tem_lhs, tem_rhs;
4389 
4390 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4391 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4392 		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
4393 						     tem_rhs);
4394 
4395 		    if (tem && !CONSTANT_P (tem))
4396 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4397 		  }
4398 		else
4399 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4400 
4401 		if (tem)
4402 		  {
4403 		    /* Reject "simplifications" that just wrap the two
4404 		       arguments in a CONST.  Failure to do so can result
4405 		       in infinite recursion with simplify_binary_operation
4406 		       when it calls us to simplify CONST operations.
4407 		       Also, if we find such a simplification, don't try
4408 		       any more combinations with this rhs:  We must have
4409 		       something like symbol+offset, i.e. one of the
4410 		       trivial CONST expressions we handle later.  */
4411 		    if (GET_CODE (tem) == CONST
4412 			&& GET_CODE (XEXP (tem, 0)) == ncode
4413 			&& XEXP (XEXP (tem, 0), 0) == lhs
4414 			&& XEXP (XEXP (tem, 0), 1) == rhs)
4415 		      break;
4416 		    lneg &= rneg;
4417 		    if (GET_CODE (tem) == NEG)
4418 		      tem = XEXP (tem, 0), lneg = !lneg;
4419 		    if (CONST_INT_P (tem) && lneg)
4420 		      tem = neg_const_int (mode, tem), lneg = 0;
4421 
4422 		    ops[i].op = tem;
4423 		    ops[i].neg = lneg;
4424 		    ops[j].op = NULL_RTX;
4425 		    changed = 1;
4426 		    canonicalized = 1;
4427 		  }
4428 	      }
4429 	  }
4430 
4431       if (!changed)
4432 	break;
4433 
4434       /* Pack all the operands to the lower-numbered entries.  */
4435       for (i = 0, j = 0; j < n_ops; j++)
4436 	if (ops[j].op)
4437 	  {
4438 	    ops[i] = ops[j];
4439 	    i++;
4440 	  }
4441       n_ops = i;
4442     }
4443 
4444   /* If nothing changed, check whether the expression is still one that
4445      is worth returning for rematerialization of rtl instructions.  */
4446   if (!canonicalized)
4447     {
4448       /* Perform rematerialization only if all operands are registers and
4449 	 all operations are PLUS.  */
4450       /* ??? Also disallow (non-global, non-frame) fixed registers to work
4451 	 around rs6000 and how it uses the CA register.  See PR67145.  */
4452       for (i = 0; i < n_ops; i++)
4453 	if (ops[i].neg
4454 	    || !REG_P (ops[i].op)
4455 	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4456 		&& fixed_regs[REGNO (ops[i].op)]
4457 		&& !global_regs[REGNO (ops[i].op)]
4458 		&& ops[i].op != frame_pointer_rtx
4459 		&& ops[i].op != arg_pointer_rtx
4460 		&& ops[i].op != stack_pointer_rtx))
4461 	  return NULL_RTX;
4462       goto gen_result;
4463     }
4464 
4465   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4466   if (n_ops == 2
4467       && CONST_INT_P (ops[1].op)
4468       && CONSTANT_P (ops[0].op)
4469       && ops[0].neg)
4470     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4471 
4472   /* We suppressed creation of trivial CONST expressions in the
4473      combination loop to avoid recursion.  Create one manually now.
4474      The combination loop should have ensured that there is exactly
4475      one CONST_INT, and the sort will have ensured that it is last
4476      in the array and that any other constant will be next-to-last.  */
4477 
4478   if (n_ops > 1
4479       && CONST_INT_P (ops[n_ops - 1].op)
4480       && CONSTANT_P (ops[n_ops - 2].op))
4481     {
4482       rtx value = ops[n_ops - 1].op;
4483       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4484 	value = neg_const_int (mode, value);
4485       if (CONST_INT_P (value))
4486 	{
4487 	  ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4488 					     INTVAL (value));
4489 	  n_ops--;
4490 	}
4491     }
4492 
4493   /* Put a non-negated operand first, if possible.  */
4494 
4495   for (i = 0; i < n_ops && ops[i].neg; i++)
4496     continue;
4497   if (i == n_ops)
4498     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4499   else if (i != 0)
4500     {
4501       tem = ops[0].op;
4502       ops[0] = ops[i];
4503       ops[i].op = tem;
4504       ops[i].neg = 1;
4505     }
4506 
4507   /* Now make the result by performing the requested operations.  */
4508  gen_result:
4509   result = ops[0].op;
4510   for (i = 1; i < n_ops; i++)
4511     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4512 			     mode, result, ops[i].op);
4513 
4514   return result;
4515 }
4516 
4517 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4518 static bool
4519 plus_minus_operand_p (const_rtx x)
4520 {
4521   return GET_CODE (x) == PLUS
4522          || GET_CODE (x) == MINUS
4523 	 || (GET_CODE (x) == CONST
4524 	     && GET_CODE (XEXP (x, 0)) == PLUS
4525 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4526 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4527 }
4528 
4529 /* Like simplify_binary_operation except used for relational operators.
4530    MODE is the mode of the result.  If MODE is VOIDmode, the operands
4531    must not both be VOIDmode.
4532 
4533    CMP_MODE specifies the mode in which the comparison is done, so it is
4534    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4535    the operands or, if both are VOIDmode, the operands are compared in
4536    "infinite precision".  */
4537 rtx
4538 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4539 			       machine_mode cmp_mode, rtx op0, rtx op1)
4540 {
4541   rtx tem, trueop0, trueop1;
4542 
4543   if (cmp_mode == VOIDmode)
4544     cmp_mode = GET_MODE (op0);
4545   if (cmp_mode == VOIDmode)
4546     cmp_mode = GET_MODE (op1);
4547 
4548   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4549   if (tem)
4550     {
4551       if (SCALAR_FLOAT_MODE_P (mode))
4552 	{
4553           if (tem == const0_rtx)
4554             return CONST0_RTX (mode);
4555 #ifdef FLOAT_STORE_FLAG_VALUE
4556 	  {
4557 	    REAL_VALUE_TYPE val;
4558 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4559 	    return const_double_from_real_value (val, mode);
4560 	  }
4561 #else
4562 	  return NULL_RTX;
4563 #endif
4564 	}
4565       if (VECTOR_MODE_P (mode))
4566 	{
4567 	  if (tem == const0_rtx)
4568 	    return CONST0_RTX (mode);
4569 #ifdef VECTOR_STORE_FLAG_VALUE
4570 	  {
4571 	    int i, units;
4572 	    rtvec v;
4573 
4574 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4575 	    if (val == NULL_RTX)
4576 	      return NULL_RTX;
4577 	    if (val == const1_rtx)
4578 	      return CONST1_RTX (mode);
4579 
4580 	    units = GET_MODE_NUNITS (mode);
4581 	    v = rtvec_alloc (units);
4582 	    for (i = 0; i < units; i++)
4583 	      RTVEC_ELT (v, i) = val;
4584 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4585 	  }
4586 #else
4587 	  return NULL_RTX;
4588 #endif
4589 	}
4590 
4591       return tem;
4592     }
4593 
4594   /* For the following tests, ensure const0_rtx is op1.  */
4595   if (swap_commutative_operands_p (op0, op1)
4596       || (op0 == const0_rtx && op1 != const0_rtx))
4597     std::swap (op0, op1), code = swap_condition (code);
4598 
4599   /* If op0 is a compare, extract the comparison arguments from it.  */
4600   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4601     return simplify_gen_relational (code, mode, VOIDmode,
4602 				    XEXP (op0, 0), XEXP (op0, 1));
4603 
4604   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4605       || CC0_P (op0))
4606     return NULL_RTX;
4607 
4608   trueop0 = avoid_constant_pool_reference (op0);
4609   trueop1 = avoid_constant_pool_reference (op1);
4610   return simplify_relational_operation_1 (code, mode, cmp_mode,
4611 		  			  trueop0, trueop1);
4612 }
4613 
4614 /* This part of simplify_relational_operation is only used when CMP_MODE
4615    is not in class MODE_CC (i.e. it is a real comparison).
4616 
4617    MODE is the mode of the result, while CMP_MODE specifies the mode in
4618    which the comparison is done, so it is the mode of the operands.  */
4619 
4620 static rtx
4621 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4622 				 machine_mode cmp_mode, rtx op0, rtx op1)
4623 {
4624   enum rtx_code op0code = GET_CODE (op0);
4625 
4626   if (op1 == const0_rtx && COMPARISON_P (op0))
4627     {
4628       /* If op0 is a comparison, extract the comparison arguments
4629          from it.  */
4630       if (code == NE)
4631 	{
4632 	  if (GET_MODE (op0) == mode)
4633 	    return simplify_rtx (op0);
4634 	  else
4635 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4636 					    XEXP (op0, 0), XEXP (op0, 1));
4637 	}
4638       else if (code == EQ)
4639 	{
4640 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4641 	  if (new_code != UNKNOWN)
4642 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4643 					    XEXP (op0, 0), XEXP (op0, 1));
4644 	}
4645     }
4646 
4647   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4648      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
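  /* E.g. in SImode, (ltu (plus a (const_int 1)) (const_int 1)) becomes
     (geu a (const_int -1)): a + 1 is unsigned-less-than 1 only when the
     addition wrapped, i.e. when a == 0xffffffff.  */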
4649   if ((code == LTU || code == GEU)
4650       && GET_CODE (op0) == PLUS
4651       && CONST_INT_P (XEXP (op0, 1))
4652       && (rtx_equal_p (op1, XEXP (op0, 0))
4653 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4654       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4655       && XEXP (op0, 1) != const0_rtx)
4656     {
4657       rtx new_cmp
4658 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4659       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4660 				      cmp_mode, XEXP (op0, 0), new_cmp);
4661     }
4662 
4663   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4664   if ((code == LTU || code == GEU)
4665       && GET_CODE (op0) == PLUS
4666       && rtx_equal_p (op1, XEXP (op0, 1))
4667       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4668       && !rtx_equal_p (op1, XEXP (op0, 0)))
4669     return simplify_gen_relational (code, mode, cmp_mode, op0,
4670 				    copy_rtx (XEXP (op0, 0)));
4671 
4672   if (op1 == const0_rtx)
4673     {
4674       /* Canonicalize (GTU x 0) as (NE x 0).  */
4675       if (code == GTU)
4676         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4677       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4678       if (code == LEU)
4679         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4680     }
4681   else if (op1 == const1_rtx)
4682     {
4683       switch (code)
4684         {
4685         case GE:
4686 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4687 	  return simplify_gen_relational (GT, mode, cmp_mode,
4688 					  op0, const0_rtx);
4689 	case GEU:
4690 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4691 	  return simplify_gen_relational (NE, mode, cmp_mode,
4692 					  op0, const0_rtx);
4693 	case LT:
4694 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4695 	  return simplify_gen_relational (LE, mode, cmp_mode,
4696 					  op0, const0_rtx);
4697 	case LTU:
4698 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4699 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4700 					  op0, const0_rtx);
4701 	default:
4702 	  break;
4703 	}
4704     }
4705   else if (op1 == constm1_rtx)
4706     {
4707       /* Canonicalize (LE x -1) as (LT x 0).  */
4708       if (code == LE)
4709         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4710       /* Canonicalize (GT x -1) as (GE x 0).  */
4711       if (code == GT)
4712         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4713     }
4714 
4715   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
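  /* E.g. (eq (plus x (const_int 3)) (const_int 10)) becomes
     (eq x (const_int 7)).  */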
4716   if ((code == EQ || code == NE)
4717       && (op0code == PLUS || op0code == MINUS)
4718       && CONSTANT_P (op1)
4719       && CONSTANT_P (XEXP (op0, 1))
4720       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4721     {
4722       rtx x = XEXP (op0, 0);
4723       rtx c = XEXP (op0, 1);
4724       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4725       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4726 
4727       /* Detect an infinite recursion condition, in which we would
4728 	 oscillate at this simplification case between:
4729 	    A + B == C  <--->  C - B == A,
4730 	 where A, B, and C are all non-simplifiable constant expressions,
4731 	 usually SYMBOL_REFs.  */
4732       if (GET_CODE (tem) == invcode
4733 	  && CONSTANT_P (x)
4734 	  && rtx_equal_p (c, XEXP (tem, 1)))
4735 	return NULL_RTX;
4736 
4737       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4738     }
4739 
4740   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4741      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
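  /* E.g. with y = (const_int 12), (eq (and x (const_int 12)) x) tests
     whether x has any bits outside the mask and, after constant
     folding of the NOT, becomes (eq (and x (const_int -13)) 0).  */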
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));
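
  /* E.g. in SImode, (eq (bswap x) (const_int 0x12345678)) byte-swaps
     the constant at compile time and becomes
     (eq x (const_int 0x78563412)).  */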

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
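
/* For example, the known comparison of 2 against 5 is encoded as
   CMP_LT | CMP_LTU: 2 is below 5 both signed and unsigned, and the
   values are unequal, so CMP_EQ is absent.  */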


/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
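
  /* For instance, comparing x with (plus x (const_int 4)) lets the
     MINUS fold to (const_int -4); the recursive call then reduces EQ
     to const0_rtx and NE to const_true_rtx.  */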

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
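
  /* E.g. with ordered constants d0 = 1.0 and d1 = 2.0 the known result
     is CMP_LT, so (lt 1.0 2.0) folds to const_true_rtx and
     (ge 1.0 2.0) to const0_rtx.  */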

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
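
  /* E.g. (const_int -1) against (const_int 1) in SImode yields
     CMP_LT | CMP_GTU: -1 is less than 1 as a signed value, but as the
     unsigned value 0xffffffff it is greater, so LT and GTU both fold
     to const_true_rtx.  */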

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}
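
      /* E.g. in SImode, if num_sign_bit_copies reports 25 sign-bit
	 copies, the value fits in a byte and the [mmin, mmax] range
	 shrinks from [-2^31, 2^31 - 1] to [-128, 127].  */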

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
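
  /* E.g. if nonzero_bits shows only the low four bits can be set, mmax
     is 15, so (gtu x (const_int 15)) folds to const0_rtx and
     (leu x (const_int 15)) to const_true_rtx.  */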

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
	    return const0_rtx;
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
	    return const_true_rtx;
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
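	  /* E.g. on a !BITS_BIG_ENDIAN target, extracting 4 bits at bit
	     position 2 from (const_int 52) (binary 110100) yields 13
	     for ZERO_EXTRACT, while SIGN_EXTRACT sees the top extracted
	     bit set and yields -3.  */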
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
         c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
	        code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;
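
	  /* Each bit of SEL selects the corresponding element from op0
	     when set and from op1 when clear; e.g. with four elements,
	     sel == 0b0101 takes elements 0 and 2 from op0 and elements
	     1 and 3 from op1.  */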

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
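
/* For instance, reading the low HImode half of a 32-bit CONST_INT
   unpacks the constant into four little-endian bytes, picks two of
   them according to BYTE and the target's endianness, and repacks
   them as a new HImode CONST_INT.  */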

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);
	    int prec = wi::get_precision (val);

	    /* Extract bits up to the constant's precision, then pad
	       the remainder with the sign mask.  */
	    for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			     << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}

/* Simplify SUBREG:OUTERMODE (OP:INNERMODE, BYTE).
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big-endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
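
      /* E.g. on a 32-bit big-endian target, a paradoxical
	 (subreg:DI (reg:SI) 0) stores its SImode value at logical byte
	 offset -4; the adjustments above re-create that negative
	 offset before the two offsets are combined.  */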

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
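
  /* E.g. on a little-endian target, (subreg:SI (zero_extend:DI (reg:HI)) 4)
     reads bits 32..63, which the ZERO_EXTEND guarantees are zero, so it
     folds to (const_int 0).  */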

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generate a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
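
/* E.g. lowpart_subreg (QImode, x, SImode) yields a QImode view of the
   least significant byte of X, at byte offset 0 on little-endian
   targets and 3 on big-endian ones, per subreg_lowpart_offset.  */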

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:
	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}