1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987-2014 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "varasm.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "ggc.h"
38 #include "target.h"
39 
40 /* Simplification and canonicalization of RTL.  */
41 
42 /* Much code operates on (low, high) pairs; the low value is an
43    unsigned wide int, the high value a signed wide int.  We
44    occasionally need to sign extend from low to high as if low were a
45    signed wide int.  */
46 #define HWI_SIGN_EXTEND(low) \
47  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
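/* A minimal illustration (an editorial sketch, not part of the original
   file): the macro simply replicates the sign bit of LOW into a full
   HOST_WIDE_INT that can serve as the HIGH half of the pair.  */
#if 0  /* Illustration only; never compiled.  */
static void
hwi_sign_extend_example (void)
{
  gcc_assert (HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) -1) == -1);
  gcc_assert (HWI_SIGN_EXTEND (42) == 0);
}
#endif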
48 
49 static rtx neg_const_int (enum machine_mode, const_rtx);
50 static bool plus_minus_operand_p (const_rtx);
51 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
52 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
53 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
54 				  unsigned int);
55 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
56 					   rtx, rtx);
57 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
58 					    enum machine_mode, rtx, rtx);
59 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
60 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
61 					rtx, rtx, rtx, rtx);
62 
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64    maximally negative number can overflow).  */
65 static rtx
66 neg_const_int (enum machine_mode mode, const_rtx i)
67 {
68   return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
69 }
70 
71 /* Test whether expression, X, is an immediate constant that represents
72    the most significant bit of machine mode MODE.  */
73 
74 bool
75 mode_signbit_p (enum machine_mode mode, const_rtx x)
76 {
77   unsigned HOST_WIDE_INT val;
78   unsigned int width;
79 
80   if (GET_MODE_CLASS (mode) != MODE_INT)
81     return false;
82 
83   width = GET_MODE_PRECISION (mode);
84   if (width == 0)
85     return false;
86 
87   if (width <= HOST_BITS_PER_WIDE_INT
88       && CONST_INT_P (x))
89     val = INTVAL (x);
90   else if (width <= HOST_BITS_PER_DOUBLE_INT
91 	   && CONST_DOUBLE_AS_INT_P (x)
92 	   && CONST_DOUBLE_LOW (x) == 0)
93     {
94       val = CONST_DOUBLE_HIGH (x);
95       width -= HOST_BITS_PER_WIDE_INT;
96     }
97   else
98     /* FIXME: We don't yet have a representation for wider modes.  */
99     return false;
100 
101   if (width < HOST_BITS_PER_WIDE_INT)
102     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
104 }
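/* A hedged illustration (not part of the original source): in SImode the
   sign bit is bit 31, so only an immediate whose masked value is 1 << 31
   satisfies the predicate.  */
#if 0  /* Illustration only; never compiled.  */
static void
mode_signbit_p_example (void)
{
  rtx msb = gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31, SImode);
  gcc_assert (mode_signbit_p (SImode, msb));
  gcc_assert (!mode_signbit_p (SImode, GEN_INT (1 << 30)));
}
#endif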
105 
106 /* Test whether VAL is equal to the most significant bit of mode MODE
107    (after masking with the mode mask of MODE).  Returns false if the
108    precision of MODE is too large to handle.  */
109 
110 bool
111 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
112 {
113   unsigned int width;
114 
115   if (GET_MODE_CLASS (mode) != MODE_INT)
116     return false;
117 
118   width = GET_MODE_PRECISION (mode);
119   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
120     return false;
121 
122   val &= GET_MODE_MASK (mode);
123   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
124 }
125 
126 /* Test whether the most significant bit of mode MODE is set in VAL.
127    Returns false if the precision of MODE is too large to handle.  */
128 bool
129 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
130 {
131   unsigned int width;
132 
133   if (GET_MODE_CLASS (mode) != MODE_INT)
134     return false;
135 
136   width = GET_MODE_PRECISION (mode);
137   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
138     return false;
139 
140   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
141   return val != 0;
142 }
143 
144 /* Test whether the most significant bit of mode MODE is clear in VAL.
145    Returns false if the precision of MODE is too large to handle.  */
146 bool
147 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
148 {
149   unsigned int width;
150 
151   if (GET_MODE_CLASS (mode) != MODE_INT)
152     return false;
153 
154   width = GET_MODE_PRECISION (mode);
155   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
156     return false;
157 
158   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
159   return val == 0;
160 }
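/* A hedged illustration (not part of the original source) of the three
   predicates above, using QImode, whose sign bit is bit 7.  */
#if 0  /* Illustration only; never compiled.  */
static void
val_signbit_examples (void)
{
  gcc_assert (val_signbit_p (QImode, 0x80));
  gcc_assert (val_signbit_known_set_p (QImode, 0xff));
  gcc_assert (val_signbit_known_clear_p (QImode, 0x7f));
}
#endif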
161 
162 /* Make a binary operation by properly ordering the operands and
163    seeing if the expression folds.  */
164 
165 rtx
166 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
167 		     rtx op1)
168 {
169   rtx tem;
170 
171   /* If this simplifies, do it.  */
172   tem = simplify_binary_operation (code, mode, op0, op1);
173   if (tem)
174     return tem;
175 
176   /* Put complex operands first and constants second if commutative.  */
177   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
178       && swap_commutative_operands_p (op0, op1))
179     tem = op0, op0 = op1, op1 = tem;
180 
181   return gen_rtx_fmt_ee (code, mode, op0, op1);
182 }
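/* A hedged usage sketch (not part of the original source): when the
   operation folds, the folded rtx comes back; otherwise the operands are
   reordered so that a constant ends up second for commutative codes.  The
   pseudo register number below is purely hypothetical.  */
#if 0  /* Illustration only; never compiled.  */
static void
simplify_gen_binary_example (void)
{
  rtx reg = gen_rtx_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  /* (plus:SI X (const_int 0)) folds straight back to X.  */
  gcc_assert (simplify_gen_binary (PLUS, SImode, reg, const0_rtx) == reg);
  /* (plus:SI (const_int 4) X) is canonicalized to (plus:SI X (const_int 4)).  */
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);
  gcc_assert (GET_CODE (sum) == PLUS && CONST_INT_P (XEXP (sum, 1)));
}
#endif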
183 
184 /* If X is a MEM referencing the constant pool, return the real value.
185    Otherwise return X.  */
186 rtx
187 avoid_constant_pool_reference (rtx x)
188 {
189   rtx c, tmp, addr;
190   enum machine_mode cmode;
191   HOST_WIDE_INT offset = 0;
192 
193   switch (GET_CODE (x))
194     {
195     case MEM:
196       break;
197 
198     case FLOAT_EXTEND:
199       /* Handle float extensions of constant pool references.  */
200       tmp = XEXP (x, 0);
201       c = avoid_constant_pool_reference (tmp);
202       if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
203 	{
204 	  REAL_VALUE_TYPE d;
205 
206 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
207 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
208 	}
209       return x;
210 
211     default:
212       return x;
213     }
214 
215   if (GET_MODE (x) == BLKmode)
216     return x;
217 
218   addr = XEXP (x, 0);
219 
220   /* Call target hook to avoid the effects of -fpic etc....  */
221   addr = targetm.delegitimize_address (addr);
222 
223   /* Split the address into a base and integer offset.  */
224   if (GET_CODE (addr) == CONST
225       && GET_CODE (XEXP (addr, 0)) == PLUS
226       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
227     {
228       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
229       addr = XEXP (XEXP (addr, 0), 0);
230     }
231 
232   if (GET_CODE (addr) == LO_SUM)
233     addr = XEXP (addr, 1);
234 
235   /* If this is a constant pool reference, we can turn it into its
236      constant and hope that simplifications happen.  */
237   if (GET_CODE (addr) == SYMBOL_REF
238       && CONSTANT_POOL_ADDRESS_P (addr))
239     {
240       c = get_pool_constant (addr);
241       cmode = get_pool_mode (addr);
242 
243       /* If we're accessing the constant in a different mode than it was
244          originally stored, attempt to fix that up via subreg simplifications.
245          If that fails we have no choice but to return the original memory.  */
246       if ((offset != 0 || cmode != GET_MODE (x))
247 	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
248         {
249           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
250           if (tem && CONSTANT_P (tem))
251             return tem;
252         }
253       else
254         return c;
255     }
256 
257   return x;
258 }
259 
260 /* Simplify a MEM based on its attributes.  This is the default
261    delegitimize_address target hook, and it's recommended that every
262    overrider call it.  */
263 
264 rtx
265 delegitimize_mem_from_attrs (rtx x)
266 {
267   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268      use their base addresses as equivalent.  */
269   if (MEM_P (x)
270       && MEM_EXPR (x)
271       && MEM_OFFSET_KNOWN_P (x))
272     {
273       tree decl = MEM_EXPR (x);
274       enum machine_mode mode = GET_MODE (x);
275       HOST_WIDE_INT offset = 0;
276 
277       switch (TREE_CODE (decl))
278 	{
279 	default:
280 	  decl = NULL;
281 	  break;
282 
283 	case VAR_DECL:
284 	  break;
285 
286 	case ARRAY_REF:
287 	case ARRAY_RANGE_REF:
288 	case COMPONENT_REF:
289 	case BIT_FIELD_REF:
290 	case REALPART_EXPR:
291 	case IMAGPART_EXPR:
292 	case VIEW_CONVERT_EXPR:
293 	  {
294 	    HOST_WIDE_INT bitsize, bitpos;
295 	    tree toffset;
296 	    int unsignedp, volatilep = 0;
297 
298 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
299 					&mode, &unsignedp, &volatilep, false);
300 	    if (bitsize != GET_MODE_BITSIZE (mode)
301 		|| (bitpos % BITS_PER_UNIT)
302 		|| (toffset && !tree_fits_shwi_p (toffset)))
303 	      decl = NULL;
304 	    else
305 	      {
306 		offset += bitpos / BITS_PER_UNIT;
307 		if (toffset)
308 		  offset += tree_to_shwi (toffset);
309 	      }
310 	    break;
311 	  }
312 	}
313 
314       if (decl
315 	  && mode == GET_MODE (x)
316 	  && TREE_CODE (decl) == VAR_DECL
317 	  && (TREE_STATIC (decl)
318 	      || DECL_THREAD_LOCAL_P (decl))
319 	  && DECL_RTL_SET_P (decl)
320 	  && MEM_P (DECL_RTL (decl)))
321 	{
322 	  rtx newx;
323 
324 	  offset += MEM_OFFSET (x);
325 
326 	  newx = DECL_RTL (decl);
327 
328 	  if (MEM_P (newx))
329 	    {
330 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
331 
332 	      /* Avoid creating a new MEM needlessly if we already had
333 		 the same address.  We do so if there's no OFFSET and the
334 		 old address X is identical to NEWX, or if X is of the
335 		 form (plus NEWX OFFSET), or the NEWX is of the form
336 		 (plus Y (const_int Z)) and X is that with the offset
337 		 added: (plus Y (const_int Z+OFFSET)).  */
338 	      if (!((offset == 0
339 		     || (GET_CODE (o) == PLUS
340 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
341 			 && (offset == INTVAL (XEXP (o, 1))
342 			     || (GET_CODE (n) == PLUS
343 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
344 				 && (INTVAL (XEXP (n, 1)) + offset
345 				     == INTVAL (XEXP (o, 1)))
346 				 && (n = XEXP (n, 0))))
347 			 && (o = XEXP (o, 0))))
348 		    && rtx_equal_p (o, n)))
349 		x = adjust_address_nv (newx, mode, offset);
350 	    }
351 	  else if (GET_MODE (x) == GET_MODE (newx)
352 		   && offset == 0)
353 	    x = newx;
354 	}
355     }
356 
357   return x;
358 }
359 
360 /* Make a unary operation by first seeing if it folds and otherwise making
361    the specified operation.  */
362 
363 rtx
364 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
365 		    enum machine_mode op_mode)
366 {
367   rtx tem;
368 
369   /* If this simplifies, use it.  */
370   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
371     return tem;
372 
373   return gen_rtx_fmt_e (code, mode, op);
374 }
375 
376 /* Likewise for ternary operations.  */
377 
378 rtx
379 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
380 		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
381 {
382   rtx tem;
383 
384   /* If this simplifies, use it.  */
385   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
386 					      op0, op1, op2)))
387     return tem;
388 
389   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
390 }
391 
392 /* Likewise, for relational operations.
393    CMP_MODE specifies mode comparison is done in.  */
394 
395 rtx
396 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
397 			 enum machine_mode cmp_mode, rtx op0, rtx op1)
398 {
399   rtx tem;
400 
401   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
402 						 op0, op1)))
403     return tem;
404 
405   return gen_rtx_fmt_ee (code, mode, op0, op1);
406 }
407 
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409    and simplify the result.  If FN is non-NULL, call it on each subexpression
410    of X; if it returns non-NULL, replace that subexpression with the returned
411    value and simplify the result.  */
412 
413 rtx
414 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
415 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
416 {
417   enum rtx_code code = GET_CODE (x);
418   enum machine_mode mode = GET_MODE (x);
419   enum machine_mode op_mode;
420   const char *fmt;
421   rtx op0, op1, op2, newx, op;
422   rtvec vec, newvec;
423   int i, j;
424 
425   if (__builtin_expect (fn != NULL, 0))
426     {
427       newx = fn (x, old_rtx, data);
428       if (newx)
429 	return newx;
430     }
431   else if (rtx_equal_p (x, old_rtx))
432     return copy_rtx ((rtx) data);
433 
434   switch (GET_RTX_CLASS (code))
435     {
436     case RTX_UNARY:
437       op0 = XEXP (x, 0);
438       op_mode = GET_MODE (op0);
439       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
440       if (op0 == XEXP (x, 0))
441 	return x;
442       return simplify_gen_unary (code, mode, op0, op_mode);
443 
444     case RTX_BIN_ARITH:
445     case RTX_COMM_ARITH:
446       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
447       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
448       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
449 	return x;
450       return simplify_gen_binary (code, mode, op0, op1);
451 
452     case RTX_COMPARE:
453     case RTX_COMM_COMPARE:
454       op0 = XEXP (x, 0);
455       op1 = XEXP (x, 1);
456       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
457       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
459       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
460 	return x;
461       return simplify_gen_relational (code, mode, op_mode, op0, op1);
462 
463     case RTX_TERNARY:
464     case RTX_BITFIELD_OPS:
465       op0 = XEXP (x, 0);
466       op_mode = GET_MODE (op0);
467       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
468       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
469       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
470       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
471 	return x;
472       if (op_mode == VOIDmode)
473 	op_mode = GET_MODE (op0);
474       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
475 
476     case RTX_EXTRA:
477       if (code == SUBREG)
478 	{
479 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
480 	  if (op0 == SUBREG_REG (x))
481 	    return x;
482 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
483 				     GET_MODE (SUBREG_REG (x)),
484 				     SUBREG_BYTE (x));
485 	  return op0 ? op0 : x;
486 	}
487       break;
488 
489     case RTX_OBJ:
490       if (code == MEM)
491 	{
492 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
493 	  if (op0 == XEXP (x, 0))
494 	    return x;
495 	  return replace_equiv_address_nv (x, op0);
496 	}
497       else if (code == LO_SUM)
498 	{
499 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
500 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
501 
502 	  /* (lo_sum (high x) x) -> x  */
503 	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
504 	    return op1;
505 
506 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
507 	    return x;
508 	  return gen_rtx_LO_SUM (mode, op0, op1);
509 	}
510       break;
511 
512     default:
513       break;
514     }
515 
516   newx = x;
517   fmt = GET_RTX_FORMAT (code);
518   for (i = 0; fmt[i]; i++)
519     switch (fmt[i])
520       {
521       case 'E':
522 	vec = XVEC (x, i);
523 	newvec = XVEC (newx, i);
524 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
525 	  {
526 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
527 					  old_rtx, fn, data);
528 	    if (op != RTVEC_ELT (vec, j))
529 	      {
530 		if (newvec == vec)
531 		  {
532 		    newvec = shallow_copy_rtvec (vec);
533 		    if (x == newx)
534 		      newx = shallow_copy_rtx (x);
535 		    XVEC (newx, i) = newvec;
536 		  }
537 		RTVEC_ELT (newvec, j) = op;
538 	      }
539 	  }
540 	break;
541 
542       case 'e':
543 	if (XEXP (x, i))
544 	  {
545 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
546 	    if (op != XEXP (x, i))
547 	      {
548 		if (x == newx)
549 		  newx = shallow_copy_rtx (x);
550 		XEXP (newx, i) = op;
551 	      }
552 	  }
553 	break;
554       }
555   return newx;
556 }
557 
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559    resulting RTX.  Return a new RTX which is as simplified as possible.  */
560 
561 rtx
562 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
563 {
564   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
565 }
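/* A hedged usage sketch (not part of the original source): substituting a
   constant for a register and letting the result re-simplify.  The pseudo
   register number is purely hypothetical.  */
#if 0  /* Illustration only; never compiled.  */
static void
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  rtx x = gen_rtx_PLUS (SImode, reg, GEN_INT (3));
  /* Replacing REG with (const_int 5) folds (plus 5 3) into (const_int 8).  */
  rtx y = simplify_replace_rtx (x, reg, GEN_INT (5));
  gcc_assert (CONST_INT_P (y) && INTVAL (y) == 8);
}
#endif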
566 
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568    Only handle cases where the truncated value is inherently an rvalue.
569 
570    RTL provides two ways of truncating a value:
571 
572    1. a lowpart subreg.  This form is only a truncation when both
573       the outer and inner modes (here MODE and OP_MODE respectively)
574       are scalar integers, and only then when the subreg is used as
575       an rvalue.
576 
577       It is only valid to form such truncating subregs if the
578       truncation requires no action by the target.  The onus for
579       proving this is on the creator of the subreg -- e.g. the
580       caller to simplify_subreg or simplify_gen_subreg -- and typically
581       involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 
583    2. a TRUNCATE.  This form handles both scalar and compound integers.
584 
585    The first form is preferred where valid.  However, the TRUNCATE
586    handling in simplify_unary_operation turns the second form into the
587    first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588    so it is generally safe to form rvalue truncations using:
589 
590       simplify_gen_unary (TRUNCATE, ...)
591 
592    and leave simplify_unary_operation to work out which representation
593    should be used.
594 
595    Because of the proof requirements on (1), simplify_truncation must
596    also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597    regardless of whether the outer truncation came from a SUBREG or a
598    TRUNCATE.  For example, if the caller has proven that an SImode
599    truncation of:
600 
601       (and:DI X Y)
602 
603    is a no-op and can be represented as a subreg, it does not follow
604    that SImode truncations of X and Y are also no-ops.  On a target
605    like 64-bit MIPS that requires SImode values to be stored in
606    sign-extended form, an SImode truncation of:
607 
608       (and:DI (reg:DI X) (const_int 63))
609 
610    is trivially a no-op because only the lower 6 bits can be set.
611    However, X is still an arbitrary 64-bit number and so we cannot
612    assume that truncating it too is a no-op.  */
613 
614 static rtx
615 simplify_truncation (enum machine_mode mode, rtx op,
616 		     enum machine_mode op_mode)
617 {
618   unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
619   unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
620   gcc_assert (precision <= op_precision);
621 
622   /* Optimize truncations of zero and sign extended values.  */
623   if (GET_CODE (op) == ZERO_EXTEND
624       || GET_CODE (op) == SIGN_EXTEND)
625     {
626       /* There are three possibilities.  If MODE is the same as the
627 	 origmode, we can omit both the extension and the subreg.
628 	 If MODE is not larger than the origmode, we can apply the
629 	 truncation without the extension.  Finally, if the outermode
630 	 is larger than the origmode, we can just extend to the appropriate
631 	 mode.  */
632       enum machine_mode origmode = GET_MODE (XEXP (op, 0));
633       if (mode == origmode)
634 	return XEXP (op, 0);
635       else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 	return simplify_gen_unary (TRUNCATE, mode,
637 				   XEXP (op, 0), origmode);
638       else
639 	return simplify_gen_unary (GET_CODE (op), mode,
640 				   XEXP (op, 0), origmode);
641     }
642 
643   /* If the machine can perform operations in the truncated mode, distribute
644      the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645      (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
646   if (1
647 #ifdef WORD_REGISTER_OPERATIONS
648       && precision >= BITS_PER_WORD
649 #endif
650       && (GET_CODE (op) == PLUS
651 	  || GET_CODE (op) == MINUS
652 	  || GET_CODE (op) == MULT))
653     {
654       rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
655       if (op0)
656 	{
657 	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
658 	  if (op1)
659 	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
660 	}
661     }
662 
663   /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
664      (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
665      the outer subreg is effectively a truncation to the original mode.  */
666   if ((GET_CODE (op) == LSHIFTRT
667        || GET_CODE (op) == ASHIFTRT)
668       /* Ensure that OP_MODE is at least twice as wide as MODE
669 	 to avoid the possibility that an outer LSHIFTRT shifts by more
670 	 than the sign extension's sign_bit_copies and introduces zeros
671 	 into the high bits of the result.  */
672       && 2 * precision <= op_precision
673       && CONST_INT_P (XEXP (op, 1))
674       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
675       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
676       && UINTVAL (XEXP (op, 1)) < precision)
677     return simplify_gen_binary (ASHIFTRT, mode,
678 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
679 
680   /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
681      (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
682      the outer subreg is effectively a truncation to the original mode.  */
683   if ((GET_CODE (op) == LSHIFTRT
684        || GET_CODE (op) == ASHIFTRT)
685       && CONST_INT_P (XEXP (op, 1))
686       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
687       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
688       && UINTVAL (XEXP (op, 1)) < precision)
689     return simplify_gen_binary (LSHIFTRT, mode,
690 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
691 
692   /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
693      (ashift:QI (x:QI) C), where C is a suitable small constant and
694      the outer subreg is effectively a truncation to the original mode.  */
695   if (GET_CODE (op) == ASHIFT
696       && CONST_INT_P (XEXP (op, 1))
697       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
698 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
699       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700       && UINTVAL (XEXP (op, 1)) < precision)
701     return simplify_gen_binary (ASHIFT, mode,
702 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
703 
704   /* Recognize a word extraction from a multi-word subreg.  */
705   if ((GET_CODE (op) == LSHIFTRT
706        || GET_CODE (op) == ASHIFTRT)
707       && SCALAR_INT_MODE_P (mode)
708       && SCALAR_INT_MODE_P (op_mode)
709       && precision >= BITS_PER_WORD
710       && 2 * precision <= op_precision
711       && CONST_INT_P (XEXP (op, 1))
712       && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
713       && UINTVAL (XEXP (op, 1)) < op_precision)
714     {
715       int byte = subreg_lowpart_offset (mode, op_mode);
716       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
717       return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
718 				  (WORDS_BIG_ENDIAN
719 				   ? byte - shifted_bytes
720 				   : byte + shifted_bytes));
721     }
722 
723   /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
724      and try replacing the TRUNCATE and shift with it.  Don't do this
725      if the MEM has a mode-dependent address.  */
726   if ((GET_CODE (op) == LSHIFTRT
727        || GET_CODE (op) == ASHIFTRT)
728       && SCALAR_INT_MODE_P (op_mode)
729       && MEM_P (XEXP (op, 0))
730       && CONST_INT_P (XEXP (op, 1))
731       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
732       && INTVAL (XEXP (op, 1)) > 0
733       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
734       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
735 				     MEM_ADDR_SPACE (XEXP (op, 0)))
736       && ! MEM_VOLATILE_P (XEXP (op, 0))
737       && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
738 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
739     {
740       int byte = subreg_lowpart_offset (mode, op_mode);
741       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
742       return adjust_address_nv (XEXP (op, 0), mode,
743 				(WORDS_BIG_ENDIAN
744 				 ? byte - shifted_bytes
745 				 : byte + shifted_bytes));
746     }
747 
748   /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
749      (OP:SI foo:SI) if OP is NEG or ABS.  */
750   if ((GET_CODE (op) == ABS
751        || GET_CODE (op) == NEG)
752       && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
753 	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
754       && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
755     return simplify_gen_unary (GET_CODE (op), mode,
756 			       XEXP (XEXP (op, 0), 0), mode);
757 
758   /* (truncate:A (subreg:B (truncate:C X) 0)) is
759      (truncate:A X).  */
760   if (GET_CODE (op) == SUBREG
761       && SCALAR_INT_MODE_P (mode)
762       && SCALAR_INT_MODE_P (op_mode)
763       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
764       && GET_CODE (SUBREG_REG (op)) == TRUNCATE
765       && subreg_lowpart_p (op))
766     {
767       rtx inner = XEXP (SUBREG_REG (op), 0);
768       if (GET_MODE_PRECISION (mode)
769 	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
770 	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
771       else
772 	/* If subreg above is paradoxical and C is narrower
773 	   than A, return (subreg:A (truncate:C X) 0).  */
774 	return simplify_gen_subreg (mode, SUBREG_REG (op),
775 				    GET_MODE (SUBREG_REG (op)), 0);
776     }
777 
778   /* (truncate:A (truncate:B X)) is (truncate:A X).  */
779   if (GET_CODE (op) == TRUNCATE)
780     return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
781 			       GET_MODE (XEXP (op, 0)));
782 
783   return NULL_RTX;
784 }
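/* A hedged sketch (not part of the original source): forming an rvalue
   truncation through simplify_gen_unary (TRUNCATE, ...), as the comment
   before simplify_truncation recommends.  Truncating a ZERO_EXTEND back to
   its original mode makes both operations disappear.  The pseudo register
   number is purely hypothetical.  */
#if 0  /* Illustration only; never compiled.  */
static void
rvalue_truncation_example (void)
{
  rtx reg = gen_rtx_REG (QImode, LAST_VIRTUAL_REGISTER + 1);
  rtx ext = gen_rtx_ZERO_EXTEND (SImode, reg);
  rtx trunc = simplify_gen_unary (TRUNCATE, QImode, ext, SImode);
  gcc_assert (trunc == reg);
}
#endif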
785 
786 /* Try to simplify a unary operation CODE whose output mode is to be
787    MODE with input operand OP whose mode was originally OP_MODE.
788    Return zero if no simplification can be made.  */
789 rtx
790 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
791 			  rtx op, enum machine_mode op_mode)
792 {
793   rtx trueop, tem;
794 
795   trueop = avoid_constant_pool_reference (op);
796 
797   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
798   if (tem)
799     return tem;
800 
801   return simplify_unary_operation_1 (code, mode, op);
802 }
803 
804 /* Perform some simplifications we can do even if the operands
805    aren't constant.  */
806 static rtx
807 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
808 {
809   enum rtx_code reversed;
810   rtx temp;
811 
812   switch (code)
813     {
814     case NOT:
815       /* (not (not X)) == X.  */
816       if (GET_CODE (op) == NOT)
817 	return XEXP (op, 0);
818 
819       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
820 	 comparison is all ones.   */
821       if (COMPARISON_P (op)
822 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
823 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
824 	return simplify_gen_relational (reversed, mode, VOIDmode,
825 					XEXP (op, 0), XEXP (op, 1));
826 
827       /* (not (plus X -1)) can become (neg X).  */
828       if (GET_CODE (op) == PLUS
829 	  && XEXP (op, 1) == constm1_rtx)
830 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
831 
832       /* Similarly, (not (neg X)) is (plus X -1).  */
833       if (GET_CODE (op) == NEG)
834 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
835 				    CONSTM1_RTX (mode));
836 
837       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
838       if (GET_CODE (op) == XOR
839 	  && CONST_INT_P (XEXP (op, 1))
840 	  && (temp = simplify_unary_operation (NOT, mode,
841 					       XEXP (op, 1), mode)) != 0)
842 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
843 
844       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
845       if (GET_CODE (op) == PLUS
846 	  && CONST_INT_P (XEXP (op, 1))
847 	  && mode_signbit_p (mode, XEXP (op, 1))
848 	  && (temp = simplify_unary_operation (NOT, mode,
849 					       XEXP (op, 1), mode)) != 0)
850 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
851 
852 
853       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
854 	 operands other than 1, but that is not valid.  We could do a
855 	 similar simplification for (not (lshiftrt C X)) where C is
856 	 just the sign bit, but this doesn't seem common enough to
857 	 bother with.  */
858       if (GET_CODE (op) == ASHIFT
859 	  && XEXP (op, 0) == const1_rtx)
860 	{
861 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
862 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
863 	}
864 
865       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
866 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
867 	 so we can perform the above simplification.  */
868       if (STORE_FLAG_VALUE == -1
869 	  && GET_CODE (op) == ASHIFTRT
870 	  && CONST_INT_P (XEXP (op, 1))
871 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
872 	return simplify_gen_relational (GE, mode, VOIDmode,
873 					XEXP (op, 0), const0_rtx);
874 
875 
876       if (GET_CODE (op) == SUBREG
877 	  && subreg_lowpart_p (op)
878 	  && (GET_MODE_SIZE (GET_MODE (op))
879 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
880 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
881 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
882 	{
883 	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
884 	  rtx x;
885 
886 	  x = gen_rtx_ROTATE (inner_mode,
887 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
888 						  inner_mode),
889 			      XEXP (SUBREG_REG (op), 1));
890 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
891 	  if (temp)
892 	    return temp;
893 	}
894 
895       /* Apply De Morgan's laws to reduce number of patterns for machines
896 	 with negating logical insns (and-not, nand, etc.).  If result has
897 	 only one NOT, put it first, since that is how the patterns are
898 	 coded.  */
899       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
900 	{
901 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
902 	  enum machine_mode op_mode;
903 
904 	  op_mode = GET_MODE (in1);
905 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
906 
907 	  op_mode = GET_MODE (in2);
908 	  if (op_mode == VOIDmode)
909 	    op_mode = mode;
910 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
911 
912 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
913 	    {
914 	      rtx tem = in2;
915 	      in2 = in1; in1 = tem;
916 	    }
917 
918 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
919 				 mode, in1, in2);
920 	}
921 
922       /* (not (bswap x)) -> (bswap (not x)).  */
923       if (GET_CODE (op) == BSWAP)
924 	{
925 	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
926 	  return simplify_gen_unary (BSWAP, mode, x, mode);
927 	}
928       break;
929 
930     case NEG:
931       /* (neg (neg X)) == X.  */
932       if (GET_CODE (op) == NEG)
933 	return XEXP (op, 0);
934 
935       /* (neg (plus X 1)) can become (not X).  */
936       if (GET_CODE (op) == PLUS
937 	  && XEXP (op, 1) == const1_rtx)
938 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
939 
940       /* Similarly, (neg (not X)) is (plus X 1).  */
941       if (GET_CODE (op) == NOT)
942 	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
943 				    CONST1_RTX (mode));
944 
945       /* (neg (minus X Y)) can become (minus Y X).  This transformation
946 	 isn't safe for modes with signed zeros, since if X and Y are
947 	 both +0, (minus Y X) is the same as (minus X Y).  If the
948 	 rounding mode is towards +infinity (or -infinity) then the two
949 	 expressions will be rounded differently.  */
950       if (GET_CODE (op) == MINUS
951 	  && !HONOR_SIGNED_ZEROS (mode)
952 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
953 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
954 
955       if (GET_CODE (op) == PLUS
956 	  && !HONOR_SIGNED_ZEROS (mode)
957 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
958 	{
959 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
960 	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
961 	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
962 	    {
963 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
964 	      if (temp)
965 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
966 	    }
967 
968 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
969 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
970 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
971 	}
972 
973       /* (neg (mult A B)) becomes (mult A (neg B)).
974 	 This works even for floating-point values.  */
975       if (GET_CODE (op) == MULT
976 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
977 	{
978 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
979 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
980 	}
981 
982       /* NEG commutes with ASHIFT since it is multiplication.  Only do
983 	 this if we can then eliminate the NEG (e.g., if the operand
984 	 is a constant).  */
985       if (GET_CODE (op) == ASHIFT)
986 	{
987 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
988 	  if (temp)
989 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
990 	}
991 
992       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
993 	 C is equal to the width of MODE minus 1.  */
994       if (GET_CODE (op) == ASHIFTRT
995 	  && CONST_INT_P (XEXP (op, 1))
996 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
997 	return simplify_gen_binary (LSHIFTRT, mode,
998 				    XEXP (op, 0), XEXP (op, 1));
999 
1000       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1001 	 C is equal to the width of MODE minus 1.  */
1002       if (GET_CODE (op) == LSHIFTRT
1003 	  && CONST_INT_P (XEXP (op, 1))
1004 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
1005 	return simplify_gen_binary (ASHIFTRT, mode,
1006 				    XEXP (op, 0), XEXP (op, 1));
1007 
1008       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
1009       if (GET_CODE (op) == XOR
1010 	  && XEXP (op, 1) == const1_rtx
1011 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
1012 	return plus_constant (mode, XEXP (op, 0), -1);
1013 
1014       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
1015       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
1016       if (GET_CODE (op) == LT
1017 	  && XEXP (op, 1) == const0_rtx
1018 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
1019 	{
1020 	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
1021 	  int isize = GET_MODE_PRECISION (inner);
1022 	  if (STORE_FLAG_VALUE == 1)
1023 	    {
1024 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1025 					  GEN_INT (isize - 1));
1026 	      if (mode == inner)
1027 		return temp;
1028 	      if (GET_MODE_PRECISION (mode) > isize)
1029 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
1030 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1031 	    }
1032 	  else if (STORE_FLAG_VALUE == -1)
1033 	    {
1034 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1035 					  GEN_INT (isize - 1));
1036 	      if (mode == inner)
1037 		return temp;
1038 	      if (GET_MODE_PRECISION (mode) > isize)
1039 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
1040 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
1041 	    }
1042 	}
1043       break;
1044 
1045     case TRUNCATE:
1046       /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1047 	 with the umulXi3_highpart patterns.  */
1048       if (GET_CODE (op) == LSHIFTRT
1049 	  && GET_CODE (XEXP (op, 0)) == MULT)
1050 	break;
1051 
1052       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1053 	{
1054 	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1055 	    {
1056 	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1057 	      if (temp)
1058 		return temp;
1059 	    }
1060 	  /* We can't handle truncation to a partial integer mode here
1061 	     because we don't know the real bitsize of the partial
1062 	     integer mode.  */
1063 	  break;
1064 	}
1065 
1066       if (GET_MODE (op) != VOIDmode)
1067 	{
1068 	  temp = simplify_truncation (mode, op, GET_MODE (op));
1069 	  if (temp)
1070 	    return temp;
1071 	}
1072 
1073       /* If we know that the value is already truncated, we can
1074 	 replace the TRUNCATE with a SUBREG.  */
1075       if (GET_MODE_NUNITS (mode) == 1
1076 	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1077 	      || truncated_to_mode (mode, op)))
1078 	{
1079 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1080 	  if (temp)
1081 	    return temp;
1082 	}
1083 
1084       /* A truncate of a comparison can be replaced with a subreg if
1085          STORE_FLAG_VALUE permits.  This is like the previous test,
1086          but it works even if the comparison is done in a mode larger
1087          than HOST_BITS_PER_WIDE_INT.  */
1088       if (HWI_COMPUTABLE_MODE_P (mode)
1089 	  && COMPARISON_P (op)
1090 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1091 	{
1092 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1093 	  if (temp)
1094 	    return temp;
1095 	}
1096 
1097       /* A truncate of a memory is just loading the low part of the memory
1098 	 if we are not changing the meaning of the address. */
1099       if (GET_CODE (op) == MEM
1100 	  && !VECTOR_MODE_P (mode)
1101 	  && !MEM_VOLATILE_P (op)
1102 	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1103 	{
1104 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 	  if (temp)
1106 	    return temp;
1107 	}
1108 
1109       break;
1110 
1111     case FLOAT_TRUNCATE:
1112       if (DECIMAL_FLOAT_MODE_P (mode))
1113 	break;
1114 
1115       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
1116       if (GET_CODE (op) == FLOAT_EXTEND
1117 	  && GET_MODE (XEXP (op, 0)) == mode)
1118 	return XEXP (op, 0);
1119 
1120       /* (float_truncate:SF (float_truncate:DF foo:XF))
1121          = (float_truncate:SF foo:XF).
1122 	 This may eliminate double rounding, so it is unsafe.
1123 
1124          (float_truncate:SF (float_extend:XF foo:DF))
1125          = (float_truncate:SF foo:DF).
1126 
1127          (float_truncate:DF (float_extend:XF foo:SF))
1128          = (float_extend:DF foo:SF).  */
1129       if ((GET_CODE (op) == FLOAT_TRUNCATE
1130 	   && flag_unsafe_math_optimizations)
1131 	  || GET_CODE (op) == FLOAT_EXTEND)
1132 	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
1133 							    0)))
1134 				   > GET_MODE_SIZE (mode)
1135 				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1136 				   mode,
1137 				   XEXP (op, 0), mode);
1138 
1139       /*  (float_truncate (float x)) is (float x)  */
1140       if (GET_CODE (op) == FLOAT
1141 	  && (flag_unsafe_math_optimizations
1142 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
1143 		  && ((unsigned)significand_size (GET_MODE (op))
1144 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1145 			  - num_sign_bit_copies (XEXP (op, 0),
1146 						 GET_MODE (XEXP (op, 0))))))))
1147 	return simplify_gen_unary (FLOAT, mode,
1148 				   XEXP (op, 0),
1149 				   GET_MODE (XEXP (op, 0)));
1150 
1151       /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
1152 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
1153       if ((GET_CODE (op) == ABS
1154 	   || GET_CODE (op) == NEG)
1155 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1156 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1157 	return simplify_gen_unary (GET_CODE (op), mode,
1158 				   XEXP (XEXP (op, 0), 0), mode);
1159 
1160       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1161 	 is (float_truncate:SF x).  */
1162       if (GET_CODE (op) == SUBREG
1163 	  && subreg_lowpart_p (op)
1164 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1165 	return SUBREG_REG (op);
1166       break;
1167 
1168     case FLOAT_EXTEND:
1169       if (DECIMAL_FLOAT_MODE_P (mode))
1170 	break;
1171 
1172       /*  (float_extend (float_extend x)) is (float_extend x)
1173 
1174 	  (float_extend (float x)) is (float x) assuming that double
1175 	  rounding can't happen.
1176           */
1177       if (GET_CODE (op) == FLOAT_EXTEND
1178 	  || (GET_CODE (op) == FLOAT
1179 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1180 	      && ((unsigned)significand_size (GET_MODE (op))
1181 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
1182 		      - num_sign_bit_copies (XEXP (op, 0),
1183 					     GET_MODE (XEXP (op, 0)))))))
1184 	return simplify_gen_unary (GET_CODE (op), mode,
1185 				   XEXP (op, 0),
1186 				   GET_MODE (XEXP (op, 0)));
1187 
1188       break;
1189 
1190     case ABS:
1191       /* (abs (neg <foo>)) -> (abs <foo>) */
1192       if (GET_CODE (op) == NEG)
1193 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1194 				   GET_MODE (XEXP (op, 0)));
1195 
1196       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1197          do nothing.  */
1198       if (GET_MODE (op) == VOIDmode)
1199 	break;
1200 
1201       /* If operand is something known to be positive, ignore the ABS.  */
1202       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1203 	  || val_signbit_known_clear_p (GET_MODE (op),
1204 					nonzero_bits (op, GET_MODE (op))))
1205 	return op;
1206 
1207       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
1208       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
1209 	return gen_rtx_NEG (mode, op);
1210 
1211       break;
1212 
1213     case FFS:
1214       /* (ffs (*_extend <X>)) = (ffs <X>) */
1215       if (GET_CODE (op) == SIGN_EXTEND
1216 	  || GET_CODE (op) == ZERO_EXTEND)
1217 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1218 				   GET_MODE (XEXP (op, 0)));
1219       break;
1220 
1221     case POPCOUNT:
1222       switch (GET_CODE (op))
1223 	{
1224 	case BSWAP:
1225 	case ZERO_EXTEND:
1226 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
1227 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1228 				     GET_MODE (XEXP (op, 0)));
1229 
1230 	case ROTATE:
1231 	case ROTATERT:
1232 	  /* Rotations don't affect popcount.  */
1233 	  if (!side_effects_p (XEXP (op, 1)))
1234 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1235 				       GET_MODE (XEXP (op, 0)));
1236 	  break;
1237 
1238 	default:
1239 	  break;
1240 	}
1241       break;
1242 
1243     case PARITY:
1244       switch (GET_CODE (op))
1245 	{
1246 	case NOT:
1247 	case BSWAP:
1248 	case ZERO_EXTEND:
1249 	case SIGN_EXTEND:
1250 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1251 				     GET_MODE (XEXP (op, 0)));
1252 
1253 	case ROTATE:
1254 	case ROTATERT:
1255 	  /* Rotations don't affect parity.  */
1256 	  if (!side_effects_p (XEXP (op, 1)))
1257 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1258 				       GET_MODE (XEXP (op, 0)));
1259 	  break;
1260 
1261 	default:
1262 	  break;
1263 	}
1264       break;
1265 
1266     case BSWAP:
1267       /* (bswap (bswap x)) -> x.  */
1268       if (GET_CODE (op) == BSWAP)
1269 	return XEXP (op, 0);
1270       break;
1271 
1272     case FLOAT:
1273       /* (float (sign_extend <X>)) = (float <X>).  */
1274       if (GET_CODE (op) == SIGN_EXTEND)
1275 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1276 				   GET_MODE (XEXP (op, 0)));
1277       break;
1278 
1279     case SIGN_EXTEND:
1280       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1281 	 becomes just the MINUS if its mode is MODE.  This allows
1282 	 folding switch statements on machines using casesi (such as
1283 	 the VAX).  */
1284       if (GET_CODE (op) == TRUNCATE
1285 	  && GET_MODE (XEXP (op, 0)) == mode
1286 	  && GET_CODE (XEXP (op, 0)) == MINUS
1287 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1288 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1289 	return XEXP (op, 0);
1290 
1291       /* Extending a widening multiplication should be canonicalized to
1292 	 a wider widening multiplication.  */
1293       if (GET_CODE (op) == MULT)
1294 	{
1295 	  rtx lhs = XEXP (op, 0);
1296 	  rtx rhs = XEXP (op, 1);
1297 	  enum rtx_code lcode = GET_CODE (lhs);
1298 	  enum rtx_code rcode = GET_CODE (rhs);
1299 
1300 	  /* Widening multiplies usually extend both operands, but sometimes
1301 	     they use a shift to extract a portion of a register.  */
1302 	  if ((lcode == SIGN_EXTEND
1303 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1304 	      && (rcode == SIGN_EXTEND
1305 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1306 	    {
1307 	      enum machine_mode lmode = GET_MODE (lhs);
1308 	      enum machine_mode rmode = GET_MODE (rhs);
1309 	      int bits;
1310 
1311 	      if (lcode == ASHIFTRT)
1312 		/* Number of bits not shifted off the end.  */
1313 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1314 	      else /* lcode == SIGN_EXTEND */
1315 		/* Size of inner mode.  */
1316 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1317 
1318 	      if (rcode == ASHIFTRT)
1319 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1320 	      else /* rcode == SIGN_EXTEND */
1321 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1322 
1323 	      /* We can only widen multiplies if the result is mathematically
1324 		 equivalent.  I.e. if overflow was impossible.  */
1325 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1326 		return simplify_gen_binary
1327 			 (MULT, mode,
1328 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1329 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1330 	    }
1331 	}
1332 
1333       /* Check for a sign extension of a subreg of a promoted
1334 	 variable, where the promotion is sign-extended, and the
1335 	 target mode is the same as the variable's promotion.  */
1336       if (GET_CODE (op) == SUBREG
1337 	  && SUBREG_PROMOTED_VAR_P (op)
1338 	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1339 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1340 	{
1341 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1342 	  if (temp)
1343 	    return temp;
1344 	}
1345 
1346       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1347 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1348       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1349 	{
1350 	  gcc_assert (GET_MODE_BITSIZE (mode)
1351 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1352 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1353 				     GET_MODE (XEXP (op, 0)));
1354 	}
1355 
1356       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1357 	 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1358 	 GET_MODE_BITSIZE (N) - I bits.
1359 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1360 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1361       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1362 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1363 	  && CONST_INT_P (XEXP (op, 1))
1364 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1365 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1366 	{
1367 	  enum machine_mode tmode
1368 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1369 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1370 	  gcc_assert (GET_MODE_BITSIZE (mode)
1371 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1372 	  if (tmode != BLKmode)
1373 	    {
1374 	      rtx inner =
1375 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1376 	      if (inner)
1377 		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1378 					   ? SIGN_EXTEND : ZERO_EXTEND,
1379 					   mode, inner, tmode);
1380 	    }
1381 	}
1382 
1383 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1384       /* As we do not know which address space the pointer is referring to,
1385 	 we can do this only if the target does not support different pointer
1386 	 or address modes depending on the address space.  */
1387       if (target_default_pointer_address_modes_p ()
1388 	  && ! POINTERS_EXTEND_UNSIGNED
1389 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1390 	  && (CONSTANT_P (op)
1391 	      || (GET_CODE (op) == SUBREG
1392 		  && REG_P (SUBREG_REG (op))
1393 		  && REG_POINTER (SUBREG_REG (op))
1394 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1395 	return convert_memory_address (Pmode, op);
1396 #endif
1397       break;
1398 
1399     case ZERO_EXTEND:
1400       /* Check for a zero extension of a subreg of a promoted
1401 	 variable, where the promotion is zero-extended, and the
1402 	 target mode is the same as the variable's promotion.  */
1403       if (GET_CODE (op) == SUBREG
1404 	  && SUBREG_PROMOTED_VAR_P (op)
1405 	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1406 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1407 	{
1408 	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1409 	  if (temp)
1410 	    return temp;
1411 	}
1412 
1413       /* Extending a widening multiplication should be canonicalized to
1414 	 a wider widening multiplication.  */
1415       if (GET_CODE (op) == MULT)
1416 	{
1417 	  rtx lhs = XEXP (op, 0);
1418 	  rtx rhs = XEXP (op, 1);
1419 	  enum rtx_code lcode = GET_CODE (lhs);
1420 	  enum rtx_code rcode = GET_CODE (rhs);
1421 
1422 	  /* Widening multiplies usually extend both operands, but sometimes
1423 	     they use a shift to extract a portion of a register.  */
1424 	  if ((lcode == ZERO_EXTEND
1425 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1426 	      && (rcode == ZERO_EXTEND
1427 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1428 	    {
1429 	      enum machine_mode lmode = GET_MODE (lhs);
1430 	      enum machine_mode rmode = GET_MODE (rhs);
1431 	      int bits;
1432 
1433 	      if (lcode == LSHIFTRT)
1434 		/* Number of bits not shifted off the end.  */
1435 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1436 	      else /* lcode == ZERO_EXTEND */
1437 		/* Size of inner mode.  */
1438 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1439 
1440 	      if (rcode == LSHIFTRT)
1441 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1442 	      else /* rcode == ZERO_EXTEND */
1443 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1444 
1445 	      /* We can only widen multiplies if the result is mathematically
1446 		 equivalent.  I.e. if overflow was impossible.  */
1447 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1448 		return simplify_gen_binary
1449 			 (MULT, mode,
1450 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1451 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1452 	    }
1453 	}
1454 
1455       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1456       if (GET_CODE (op) == ZERO_EXTEND)
1457 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1458 				   GET_MODE (XEXP (op, 0)));
1459 
1460       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1461 	 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1462 	 GET_MODE_BITSIZE (N) - I bits.  */
1463       if (GET_CODE (op) == LSHIFTRT
1464 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1465 	  && CONST_INT_P (XEXP (op, 1))
1466 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1467 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1468 	{
1469 	  enum machine_mode tmode
1470 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1471 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1472 	  if (tmode != BLKmode)
1473 	    {
1474 	      rtx inner =
1475 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1476 	      if (inner)
1477 		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1478 	    }
1479 	}
1480 
1481       /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1482 	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1483 	 of mode N.  E.g.
1484 	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1485 	 (and:SI (reg:SI) (const_int 63)).  */
1486       if (GET_CODE (op) == SUBREG
1487 	  && GET_MODE_PRECISION (GET_MODE (op))
1488 	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1489 	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1490 	     <= HOST_BITS_PER_WIDE_INT
1491 	  && GET_MODE_PRECISION (mode)
1492 	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
1493 	  && subreg_lowpart_p (op)
1494 	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
1495 	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1496 	{
1497 	  if (GET_MODE_PRECISION (mode)
1498 	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
1499 	    return SUBREG_REG (op);
1500 	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
1501 				     GET_MODE (SUBREG_REG (op)));
1502 	}
1503 
1504 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1505       /* As we do not know which address space the pointer is referring to,
1506 	 we can do this only if the target does not support different pointer
1507 	 or address modes depending on the address space.  */
1508       if (target_default_pointer_address_modes_p ()
1509 	  && POINTERS_EXTEND_UNSIGNED > 0
1510 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1511 	  && (CONSTANT_P (op)
1512 	      || (GET_CODE (op) == SUBREG
1513 		  && REG_P (SUBREG_REG (op))
1514 		  && REG_POINTER (SUBREG_REG (op))
1515 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1516 	return convert_memory_address (Pmode, op);
1517 #endif
1518       break;
1519 
1520     default:
1521       break;
1522     }
1523 
1524   return 0;
1525 }
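/* A hedged sketch (not part of the original source): two of the
   non-constant folds above, exercised through the public entry point.
   The pseudo register number is purely hypothetical.  */
#if 0  /* Illustration only; never compiled.  */
static void
unary_simplification_examples (void)
{
  rtx reg = gen_rtx_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  /* (not (not X)) folds back to X.  */
  gcc_assert (simplify_unary_operation (NOT, SImode,
					gen_rtx_NOT (SImode, reg),
					SImode) == reg);
  /* (neg (neg X)) likewise folds back to X.  */
  gcc_assert (simplify_unary_operation (NEG, SImode,
					gen_rtx_NEG (SImode, reg),
					SImode) == reg);
}
#endif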
1526 
1527 /* Try to compute the value of a unary operation CODE whose output mode is to
1528    be MODE with input operand OP whose mode was originally OP_MODE.
1529    Return zero if the value cannot be computed.  */
1530 rtx
1531 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1532 				rtx op, enum machine_mode op_mode)
1533 {
1534   unsigned int width = GET_MODE_PRECISION (mode);
1535   unsigned int op_width = GET_MODE_PRECISION (op_mode);
1536 
1537   if (code == VEC_DUPLICATE)
1538     {
1539       gcc_assert (VECTOR_MODE_P (mode));
1540       if (GET_MODE (op) != VOIDmode)
1541       {
1542 	if (!VECTOR_MODE_P (GET_MODE (op)))
1543 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1544 	else
1545 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1546 						(GET_MODE (op)));
1547       }
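      /* For a constant scalar operand the result is a CONST_VECTOR with
	 every element equal to OP; a constant vector operand is repeated to
	 fill the wider vector.  E.g. (vec_duplicate:V4SI (const_int 5))
	 becomes the constant vector {5, 5, 5, 5}.  */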
1548       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1549 	  || GET_CODE (op) == CONST_VECTOR)
1550 	{
1551           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1552           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1553 	  rtvec v = rtvec_alloc (n_elts);
1554 	  unsigned int i;
1555 
1556 	  if (GET_CODE (op) != CONST_VECTOR)
1557 	    for (i = 0; i < n_elts; i++)
1558 	      RTVEC_ELT (v, i) = op;
1559 	  else
1560 	    {
1561 	      enum machine_mode inmode = GET_MODE (op);
1562               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1563               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1564 
1565 	      gcc_assert (in_n_elts < n_elts);
1566 	      gcc_assert ((n_elts % in_n_elts) == 0);
1567 	      for (i = 0; i < n_elts; i++)
1568 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1569 	    }
1570 	  return gen_rtx_CONST_VECTOR (mode, v);
1571 	}
1572     }
1573 
1574   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1575     {
1576       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1577       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1578       enum machine_mode opmode = GET_MODE (op);
1579       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1580       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1581       rtvec v = rtvec_alloc (n_elts);
1582       unsigned int i;
1583 
1584       gcc_assert (op_n_elts == n_elts);
1585       for (i = 0; i < n_elts; i++)
1586 	{
1587 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1588 					    CONST_VECTOR_ELT (op, i),
1589 					    GET_MODE_INNER (opmode));
1590 	  if (!x)
1591 	    return 0;
1592 	  RTVEC_ELT (v, i) = x;
1593 	}
1594       return gen_rtx_CONST_VECTOR (mode, v);
1595     }
1596 
1597   /* The order of these tests is critical so that, for example, we don't
1598      check the wrong mode (input vs. output) for a conversion operation,
1599      such as FIX.  At some point, this should be simplified.  */
1600 
1601   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1602     {
1603       HOST_WIDE_INT hv, lv;
1604       REAL_VALUE_TYPE d;
1605 
1606       if (CONST_INT_P (op))
1607 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1608       else
1609 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1610 
1611       REAL_VALUE_FROM_INT (d, lv, hv, mode);
1612       d = real_value_truncate (mode, d);
1613       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1614     }
1615   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1616     {
1617       HOST_WIDE_INT hv, lv;
1618       REAL_VALUE_TYPE d;
1619 
1620       if (CONST_INT_P (op))
1621 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1622       else
1623 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1624 
1625       if (op_mode == VOIDmode
1626 	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1627 	/* We should never get a negative number.  */
1628 	gcc_assert (hv >= 0);
1629       else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1630 	hv = 0, lv &= GET_MODE_MASK (op_mode);
1631 
1632       REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1633       d = real_value_truncate (mode, d);
1634       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1635     }
1636 
1637   if (CONST_INT_P (op)
1638       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1639     {
1640       HOST_WIDE_INT arg0 = INTVAL (op);
1641       HOST_WIDE_INT val;
1642 
1643       switch (code)
1644 	{
1645 	case NOT:
1646 	  val = ~ arg0;
1647 	  break;
1648 
1649 	case NEG:
1650 	  val = - (unsigned HOST_WIDE_INT) arg0;
1651 	  break;
1652 
1653 	case ABS:
1654 	  val = (arg0 >= 0 ? arg0 : - arg0);
1655 	  break;
1656 
1657 	case FFS:
1658 	  arg0 &= GET_MODE_MASK (mode);
1659 	  val = ffs_hwi (arg0);
1660 	  break;
1661 
1662 	case CLZ:
1663 	  arg0 &= GET_MODE_MASK (mode);
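	  /* E.g. SImode arg0 == 0x00ff has floor_log2 == 7, so the formula
	     below gives 32 - 7 - 1 == 24 leading zero bits.  */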
1664 	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1665 	    ;
1666 	  else
1667 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1668 	  break;
1669 
1670 	case CLRSB:
1671 	  arg0 &= GET_MODE_MASK (mode);
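	  /* E.g. SImode arg0 == 1 has 30 redundant copies of its (clear)
	     sign bit: 32 - floor_log2 (1) - 2.  */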
1672 	  if (arg0 == 0)
1673 	    val = GET_MODE_PRECISION (mode) - 1;
1674 	  else if (arg0 >= 0)
1675 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1676 	  else if (arg0 < 0)
1677 	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1678 	  break;
1679 
1680 	case CTZ:
1681 	  arg0 &= GET_MODE_MASK (mode);
1682 	  if (arg0 == 0)
1683 	    {
1684 	      /* Even if the value at zero is undefined, we have to come
1685 		 up with some replacement.  Seems good enough.  */
1686 	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1687 		val = GET_MODE_PRECISION (mode);
1688 	    }
1689 	  else
1690 	    val = ctz_hwi (arg0);
1691 	  break;
1692 
1693 	case POPCOUNT:
1694 	  arg0 &= GET_MODE_MASK (mode);
1695 	  val = 0;
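	  /* Each arg0 &= arg0 - 1 step clears the lowest set bit, so the
	     loop runs once per set bit.  */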
1696 	  while (arg0)
1697 	    val++, arg0 &= arg0 - 1;
1698 	  break;
1699 
1700 	case PARITY:
1701 	  arg0 &= GET_MODE_MASK (mode);
1702 	  val = 0;
1703 	  while (arg0)
1704 	    val++, arg0 &= arg0 - 1;
1705 	  val &= 1;
1706 	  break;
1707 
1708 	case BSWAP:
1709 	  {
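	    /* Reverse the byte order, e.g. SImode 0x12345678 becomes
	       0x78563412.  */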
1710 	    unsigned int s;
1711 
1712 	    val = 0;
1713 	    for (s = 0; s < width; s += 8)
1714 	      {
1715 		unsigned int d = width - s - 8;
1716 		unsigned HOST_WIDE_INT byte;
1717 		byte = (arg0 >> s) & 0xff;
1718 		val |= byte << d;
1719 	      }
1720 	  }
1721 	  break;
1722 
1723 	case TRUNCATE:
1724 	  val = arg0;
1725 	  break;
1726 
1727 	case ZERO_EXTEND:
1728 	  /* When zero-extending a CONST_INT, we need to know its
1729              original mode.  */
1730 	  gcc_assert (op_mode != VOIDmode);
1731 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1732 	    {
1733 	      /* If we were really extending the mode,
1734 		 we would have to distinguish between zero-extension
1735 		 and sign-extension.  */
1736 	      gcc_assert (width == op_width);
1737 	      val = arg0;
1738 	    }
1739 	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1740 	    val = arg0 & GET_MODE_MASK (op_mode);
1741 	  else
1742 	    return 0;
1743 	  break;
1744 
1745 	case SIGN_EXTEND:
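	  /* Mask to the operand's width and copy its sign bit into all
	     higher bits, e.g. QImode 0xff sign-extends to -1.  */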
1746 	  if (op_mode == VOIDmode)
1747 	    op_mode = mode;
1748 	  op_width = GET_MODE_PRECISION (op_mode);
1749 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1750 	    {
1751 	      /* If we were really extending the mode,
1752 		 we would have to distinguish between zero-extension
1753 		 and sign-extension.  */
1754 	      gcc_assert (width == op_width);
1755 	      val = arg0;
1756 	    }
1757 	  else if (op_width < HOST_BITS_PER_WIDE_INT)
1758 	    {
1759 	      val = arg0 & GET_MODE_MASK (op_mode);
1760 	      if (val_signbit_known_set_p (op_mode, val))
1761 		val |= ~GET_MODE_MASK (op_mode);
1762 	    }
1763 	  else
1764 	    return 0;
1765 	  break;
1766 
1767 	case SQRT:
1768 	case FLOAT_EXTEND:
1769 	case FLOAT_TRUNCATE:
1770 	case SS_TRUNCATE:
1771 	case US_TRUNCATE:
1772 	case SS_NEG:
1773 	case US_NEG:
1774 	case SS_ABS:
1775 	  return 0;
1776 
1777 	default:
1778 	  gcc_unreachable ();
1779 	}
1780 
1781       return gen_int_mode (val, mode);
1782     }
1783 
1784   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1785      for a DImode operation on a CONST_INT.  */
1786   else if (width <= HOST_BITS_PER_DOUBLE_INT
1787 	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1788     {
1789       double_int first, value;
1790 
1791       if (CONST_DOUBLE_AS_INT_P (op))
1792 	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1793 				       CONST_DOUBLE_LOW (op));
1794       else
1795 	first = double_int::from_shwi (INTVAL (op));
1796 
1797       switch (code)
1798 	{
1799 	case NOT:
1800 	  value = ~first;
1801 	  break;
1802 
1803 	case NEG:
1804 	  value = -first;
1805 	  break;
1806 
1807 	case ABS:
1808 	  if (first.is_negative ())
1809 	    value = -first;
1810 	  else
1811 	    value = first;
1812 	  break;
1813 
1814 	case FFS:
1815 	  value.high = 0;
1816 	  if (first.low != 0)
1817 	    value.low = ffs_hwi (first.low);
1818 	  else if (first.high != 0)
1819 	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1820 	  else
1821 	    value.low = 0;
1822 	  break;
1823 
1824 	case CLZ:
1825 	  value.high = 0;
1826 	  if (first.high != 0)
1827 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1828 	              - HOST_BITS_PER_WIDE_INT;
1829 	  else if (first.low != 0)
1830 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1831 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1832 	    value.low = GET_MODE_PRECISION (mode);
1833 	  break;
1834 
1835 	case CTZ:
1836 	  value.high = 0;
1837 	  if (first.low != 0)
1838 	    value.low = ctz_hwi (first.low);
1839 	  else if (first.high != 0)
1840 	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1841 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1842 	    value.low = GET_MODE_PRECISION (mode);
1843 	  break;
1844 
1845 	case POPCOUNT:
1846 	  value = double_int_zero;
1847 	  while (first.low)
1848 	    {
1849 	      value.low++;
1850 	      first.low &= first.low - 1;
1851 	    }
1852 	  while (first.high)
1853 	    {
1854 	      value.low++;
1855 	      first.high &= first.high - 1;
1856 	    }
1857 	  break;
1858 
1859 	case PARITY:
1860 	  value = double_int_zero;
1861 	  while (first.low)
1862 	    {
1863 	      value.low++;
1864 	      first.low &= first.low - 1;
1865 	    }
1866 	  while (first.high)
1867 	    {
1868 	      value.low++;
1869 	      first.high &= first.high - 1;
1870 	    }
1871 	  value.low &= 1;
1872 	  break;
1873 
1874 	case BSWAP:
1875 	  {
1876 	    unsigned int s;
1877 
1878 	    value = double_int_zero;
1879 	    for (s = 0; s < width; s += 8)
1880 	      {
1881 		unsigned int d = width - s - 8;
1882 		unsigned HOST_WIDE_INT byte;
1883 
1884 		if (s < HOST_BITS_PER_WIDE_INT)
1885 		  byte = (first.low >> s) & 0xff;
1886 		else
1887 		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1888 
1889 		if (d < HOST_BITS_PER_WIDE_INT)
1890 		  value.low |= byte << d;
1891 		else
1892 		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1893 	      }
1894 	  }
1895 	  break;
1896 
1897 	case TRUNCATE:
1898 	  /* This is just a change-of-mode, so do nothing.  */
1899 	  value = first;
1900 	  break;
1901 
1902 	case ZERO_EXTEND:
1903 	  gcc_assert (op_mode != VOIDmode);
1904 
1905 	  if (op_width > HOST_BITS_PER_WIDE_INT)
1906 	    return 0;
1907 
1908 	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1909 	  break;
1910 
1911 	case SIGN_EXTEND:
1912 	  if (op_mode == VOIDmode
1913 	      || op_width > HOST_BITS_PER_WIDE_INT)
1914 	    return 0;
1915 	  else
1916 	    {
1917 	      value.low = first.low & GET_MODE_MASK (op_mode);
1918 	      if (val_signbit_known_set_p (op_mode, value.low))
1919 		value.low |= ~GET_MODE_MASK (op_mode);
1920 
1921 	      value.high = HWI_SIGN_EXTEND (value.low);
1922 	    }
1923 	  break;
1924 
1925 	case SQRT:
1926 	  return 0;
1927 
1928 	default:
1929 	  return 0;
1930 	}
1931 
1932       return immed_double_int_const (value, mode);
1933     }
1934 
1935   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1936 	   && SCALAR_FLOAT_MODE_P (mode)
1937 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1938     {
1939       REAL_VALUE_TYPE d;
1940       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1941 
1942       switch (code)
1943 	{
1944 	case SQRT:
1945 	  return 0;
1946 	case ABS:
1947 	  d = real_value_abs (&d);
1948 	  break;
1949 	case NEG:
1950 	  d = real_value_negate (&d);
1951 	  break;
1952 	case FLOAT_TRUNCATE:
1953 	  d = real_value_truncate (mode, d);
1954 	  break;
1955 	case FLOAT_EXTEND:
1956 	  /* All this does is change the mode, unless we are also changing
1957 	     the mode class.  */
1958 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1959 	    real_convert (&d, mode, &d);
1960 	  break;
1961 	case FIX:
1962 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1963 	  break;
1964 	case NOT:
1965 	  {
1966 	    long tmp[4];
1967 	    int i;
1968 
1969 	    real_to_target (tmp, &d, GET_MODE (op));
1970 	    for (i = 0; i < 4; i++)
1971 	      tmp[i] = ~tmp[i];
1972 	    real_from_target (&d, tmp, mode);
1973 	    break;
1974 	  }
1975 	default:
1976 	  gcc_unreachable ();
1977 	}
1978       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1979     }
1980 
1981   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1982 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1983 	   && GET_MODE_CLASS (mode) == MODE_INT
1984 	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1985     {
1986       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1987 	 operators are intentionally left unspecified (to ease implementation
1988 	 by target backends), for consistency, this routine implements the
1989 	 same semantics for constant folding as used by the middle-end.  */
1990 
1991       /* This was formerly used only for non-IEEE float.
1992 	 eggert@twinsun.com says it is safe for IEEE also.  */
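      /* Values outside the target range saturate: e.g. folding (fix:SI X)
	 for an X larger than 2**31 - 1 yields 0x7fffffff, and a NaN operand
	 folds to zero.  */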
1993       HOST_WIDE_INT xh, xl, th, tl;
1994       REAL_VALUE_TYPE x, t;
1995       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1996       switch (code)
1997 	{
1998 	case FIX:
1999 	  if (REAL_VALUE_ISNAN (x))
2000 	    return const0_rtx;
2001 
2002 	  /* Test against the signed upper bound.  */
2003 	  if (width > HOST_BITS_PER_WIDE_INT)
2004 	    {
2005 	      th = ((unsigned HOST_WIDE_INT) 1
2006 		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
2007 	      tl = -1;
2008 	    }
2009 	  else
2010 	    {
2011 	      th = 0;
2012 	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
2013 	    }
2014 	  real_from_integer (&t, VOIDmode, tl, th, 0);
2015 	  if (REAL_VALUES_LESS (t, x))
2016 	    {
2017 	      xh = th;
2018 	      xl = tl;
2019 	      break;
2020 	    }
2021 
2022 	  /* Test against the signed lower bound.  */
2023 	  if (width > HOST_BITS_PER_WIDE_INT)
2024 	    {
2025 	      th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
2026 	      tl = 0;
2027 	    }
2028 	  else
2029 	    {
2030 	      th = -1;
2031 	      tl = HOST_WIDE_INT_M1U << (width - 1);
2032 	    }
2033 	  real_from_integer (&t, VOIDmode, tl, th, 0);
2034 	  if (REAL_VALUES_LESS (x, t))
2035 	    {
2036 	      xh = th;
2037 	      xl = tl;
2038 	      break;
2039 	    }
2040 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2041 	  break;
2042 
2043 	case UNSIGNED_FIX:
2044 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2045 	    return const0_rtx;
2046 
2047 	  /* Test against the unsigned upper bound.  */
2048 	  if (width == HOST_BITS_PER_DOUBLE_INT)
2049 	    {
2050 	      th = -1;
2051 	      tl = -1;
2052 	    }
2053 	  else if (width >= HOST_BITS_PER_WIDE_INT)
2054 	    {
2055 	      th = ((unsigned HOST_WIDE_INT) 1
2056 		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2057 	      tl = -1;
2058 	    }
2059 	  else
2060 	    {
2061 	      th = 0;
2062 	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2063 	    }
2064 	  real_from_integer (&t, VOIDmode, tl, th, 1);
2065 	  if (REAL_VALUES_LESS (t, x))
2066 	    {
2067 	      xh = th;
2068 	      xl = tl;
2069 	      break;
2070 	    }
2071 
2072 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2073 	  break;
2074 
2075 	default:
2076 	  gcc_unreachable ();
2077 	}
2078       return immed_double_const (xl, xh, mode);
2079     }
2080 
2081   return NULL_RTX;
2082 }
2083 
2084 /* Subroutine of simplify_binary_operation to simplify a binary operation
2085    CODE that can commute with byte swapping, with result mode MODE and
2086    operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
2087    Return zero if no simplification or canonicalization is possible.  */
2088 
2089 static rtx
2090 simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
2091 				  rtx op0, rtx op1)
2092 {
2093   rtx tem;
2094 
2095   /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
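  /* E.g. (and (bswap:SI X) (const_int 0xff)) becomes
     (bswap:SI (and X (const_int 0xff000000))).  */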
2096   if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2097     {
2098       tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2099 				 simplify_gen_unary (BSWAP, mode, op1, mode));
2100       return simplify_gen_unary (BSWAP, mode, tem, mode);
2101     }
2102 
2103   /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
2104   if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2105     {
2106       tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2107       return simplify_gen_unary (BSWAP, mode, tem, mode);
2108     }
2109 
2110   return NULL_RTX;
2111 }
2112 
2113 /* Subroutine of simplify_binary_operation to simplify a commutative,
2114    associative binary operation CODE with result mode MODE, operating
2115    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2116    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2117    canonicalization is possible.  */
2118 
2119 static rtx
2120 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2121 				rtx op0, rtx op1)
2122 {
2123   rtx tem;
2124 
2125   /* Linearize the operator to the left.  */
2126   if (GET_CODE (op1) == code)
2127     {
2128       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
2129       if (GET_CODE (op0) == code)
2130 	{
2131 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2132 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2133 	}
2134 
2135       /* "a op (b op c)" becomes "(b op c) op a".  */
2136       if (! swap_commutative_operands_p (op1, op0))
2137 	return simplify_gen_binary (code, mode, op1, op0);
2138 
2139       tem = op0;
2140       op0 = op1;
2141       op1 = tem;
2142     }
2143 
2144   if (GET_CODE (op0) == code)
2145     {
2146       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2147       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2148 	{
2149 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2150 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2151 	}
2152 
2153       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2154       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2155       if (tem != 0)
2156         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2157 
2158       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2159       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2160       if (tem != 0)
2161         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2162     }
2163 
2164   return 0;
2165 }
2166 
2167 
2168 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2169    and OP1.  Return 0 if no simplification is possible.
2170 
2171    Don't use this for relational operations such as EQ or LT.
2172    Use simplify_relational_operation instead.  */
2173 rtx
2174 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2175 			   rtx op0, rtx op1)
2176 {
2177   rtx trueop0, trueop1;
2178   rtx tem;
2179 
2180   /* Relational operations don't work here.  We must know the mode
2181      of the operands in order to do the comparison correctly.
2182      Assuming a full word can give incorrect results.
2183      Consider comparing 128 with -128 in QImode.  */
2184   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2185   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2186 
2187   /* Make sure the constant is second.  */
2188   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2189       && swap_commutative_operands_p (op0, op1))
2190     {
2191       tem = op0, op0 = op1, op1 = tem;
2192     }
2193 
2194   trueop0 = avoid_constant_pool_reference (op0);
2195   trueop1 = avoid_constant_pool_reference (op1);
2196 
2197   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2198   if (tem)
2199     return tem;
2200   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2201 }
2202 
2203 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2204    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2205    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2206    actual constants.  */
2207 
2208 static rtx
2209 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2210 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2211 {
2212   rtx tem, reversed, opleft, opright;
2213   HOST_WIDE_INT val;
2214   unsigned int width = GET_MODE_PRECISION (mode);
2215 
2216   /* Even if we can't compute a constant result,
2217      there are some cases worth simplifying.  */
2218 
2219   switch (code)
2220     {
2221     case PLUS:
2222       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2223 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2224 	 when x is -0 and the rounding mode is not towards -infinity,
2225 	 since (-0) + 0 is then 0.  */
2226       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2227 	return op0;
2228 
2229       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2230 	 transformations are safe even for IEEE.  */
2231       if (GET_CODE (op0) == NEG)
2232 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2233       else if (GET_CODE (op1) == NEG)
2234 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2235 
2236       /* (~a) + 1 -> -a */
2237       if (INTEGRAL_MODE_P (mode)
2238 	  && GET_CODE (op0) == NOT
2239 	  && trueop1 == const1_rtx)
2240 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2241 
2242       /* Handle both-operands-constant cases.  We can only add
2243 	 CONST_INTs to constants since the sum of relocatable symbols
2244 	 can't be handled by most assemblers.  Don't add CONST_INT
2245 	 to CONST_INT since overflow won't be computed properly if wider
2246 	 than HOST_BITS_PER_WIDE_INT.  */
2247 
2248       if ((GET_CODE (op0) == CONST
2249 	   || GET_CODE (op0) == SYMBOL_REF
2250 	   || GET_CODE (op0) == LABEL_REF)
2251 	  && CONST_INT_P (op1))
2252 	return plus_constant (mode, op0, INTVAL (op1));
2253       else if ((GET_CODE (op1) == CONST
2254 		|| GET_CODE (op1) == SYMBOL_REF
2255 		|| GET_CODE (op1) == LABEL_REF)
2256 	       && CONST_INT_P (op0))
2257 	return plus_constant (mode, op1, INTVAL (op0));
2258 
2259       /* See if this is something like X * C + X or vice versa or
2260 	 if the multiplication is written as a shift.  If so, we can
2261 	 distribute and make a new multiply or shift with the
2262 	 combined coefficient.  But don't make
2263 	 something more expensive than we had before.  */
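      /* E.g. (plus (mult X 3) X) can become (mult X 4), and
	 (plus (ashift X 2) X) can become (mult X 5), subject to the
	 cost check below.  */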
2264 
2265       if (SCALAR_INT_MODE_P (mode))
2266 	{
2267 	  double_int coeff0, coeff1;
2268 	  rtx lhs = op0, rhs = op1;
2269 
2270 	  coeff0 = double_int_one;
2271 	  coeff1 = double_int_one;
2272 
2273 	  if (GET_CODE (lhs) == NEG)
2274 	    {
2275 	      coeff0 = double_int_minus_one;
2276 	      lhs = XEXP (lhs, 0);
2277 	    }
2278 	  else if (GET_CODE (lhs) == MULT
2279 		   && CONST_INT_P (XEXP (lhs, 1)))
2280 	    {
2281 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2282 	      lhs = XEXP (lhs, 0);
2283 	    }
2284 	  else if (GET_CODE (lhs) == ASHIFT
2285 		   && CONST_INT_P (XEXP (lhs, 1))
2286                    && INTVAL (XEXP (lhs, 1)) >= 0
2287 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2288 	    {
2289 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2290 	      lhs = XEXP (lhs, 0);
2291 	    }
2292 
2293 	  if (GET_CODE (rhs) == NEG)
2294 	    {
2295 	      coeff1 = double_int_minus_one;
2296 	      rhs = XEXP (rhs, 0);
2297 	    }
2298 	  else if (GET_CODE (rhs) == MULT
2299 		   && CONST_INT_P (XEXP (rhs, 1)))
2300 	    {
2301 	      coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2302 	      rhs = XEXP (rhs, 0);
2303 	    }
2304 	  else if (GET_CODE (rhs) == ASHIFT
2305 		   && CONST_INT_P (XEXP (rhs, 1))
2306 		   && INTVAL (XEXP (rhs, 1)) >= 0
2307 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2308 	    {
2309 	      coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2310 	      rhs = XEXP (rhs, 0);
2311 	    }
2312 
2313 	  if (rtx_equal_p (lhs, rhs))
2314 	    {
2315 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2316 	      rtx coeff;
2317 	      double_int val;
2318 	      bool speed = optimize_function_for_speed_p (cfun);
2319 
2320 	      val = coeff0 + coeff1;
2321 	      coeff = immed_double_int_const (val, mode);
2322 
2323 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2324 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2325 		? tem : 0;
2326 	    }
2327 	}
2328 
2329       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2330       if (CONST_SCALAR_INT_P (op1)
2331 	  && GET_CODE (op0) == XOR
2332 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2333 	  && mode_signbit_p (mode, op1))
2334 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2335 				    simplify_gen_binary (XOR, mode, op1,
2336 							 XEXP (op0, 1)));
2337 
2338       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2339       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2340 	  && GET_CODE (op0) == MULT
2341 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2342 	{
2343 	  rtx in1, in2;
2344 
2345 	  in1 = XEXP (XEXP (op0, 0), 0);
2346 	  in2 = XEXP (op0, 1);
2347 	  return simplify_gen_binary (MINUS, mode, op1,
2348 				      simplify_gen_binary (MULT, mode,
2349 							   in1, in2));
2350 	}
2351 
2352       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2353 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2354 	 is 1.  */
2355       if (COMPARISON_P (op0)
2356 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2357 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2358 	  && (reversed = reversed_comparison (op0, mode)))
2359 	return
2360 	  simplify_gen_unary (NEG, mode, reversed, mode);
2361 
2362       /* If one of the operands is a PLUS or a MINUS, see if we can
2363 	 simplify this by the associative law.
2364 	 Don't use the associative law for floating point.
2365 	 The inaccuracy makes it nonassociative,
2366 	 and subtle programs can break if operations are associated.  */
2367 
2368       if (INTEGRAL_MODE_P (mode)
2369 	  && (plus_minus_operand_p (op0)
2370 	      || plus_minus_operand_p (op1))
2371 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2372 	return tem;
2373 
2374       /* Reassociate floating point addition only when the user
2375 	 specifies associative math operations.  */
2376       if (FLOAT_MODE_P (mode)
2377 	  && flag_associative_math)
2378 	{
2379 	  tem = simplify_associative_operation (code, mode, op0, op1);
2380 	  if (tem)
2381 	    return tem;
2382 	}
2383       break;
2384 
2385     case COMPARE:
2386       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2387       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2388 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2389 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2390 	{
2391 	  rtx xop00 = XEXP (op0, 0);
2392 	  rtx xop10 = XEXP (op1, 0);
2393 
2394 #ifdef HAVE_cc0
2395 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2396 #else
2397 	    if (REG_P (xop00) && REG_P (xop10)
2398 		&& GET_MODE (xop00) == GET_MODE (xop10)
2399 		&& REGNO (xop00) == REGNO (xop10)
2400 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2401 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2402 #endif
2403 	      return xop00;
2404 	}
2405       break;
2406 
2407     case MINUS:
2408       /* We can't assume x-x is 0 even with non-IEEE floating point,
2409 	 but since it is zero except in very strange circumstances, we
2410 	 will treat it as zero with -ffinite-math-only.  */
2411       if (rtx_equal_p (trueop0, trueop1)
2412 	  && ! side_effects_p (op0)
2413 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2414 	return CONST0_RTX (mode);
2415 
2416       /* Change subtraction from zero into negation.  (0 - x) is the
2417 	 same as -x when x is NaN, infinite, or finite and nonzero.
2418 	 But if the mode has signed zeros, and does not round towards
2419 	 -infinity, then 0 - 0 is 0, not -0.  */
2420       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2421 	return simplify_gen_unary (NEG, mode, op1, mode);
2422 
2423       /* (-1 - a) is ~a.  */
2424       if (trueop0 == constm1_rtx)
2425 	return simplify_gen_unary (NOT, mode, op1, mode);
2426 
2427       /* Subtracting 0 has no effect unless the mode has signed zeros
2428 	 and supports rounding towards -infinity.  In such a case,
2429 	 0 - 0 is -0.  */
2430       if (!(HONOR_SIGNED_ZEROS (mode)
2431 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2432 	  && trueop1 == CONST0_RTX (mode))
2433 	return op0;
2434 
2435       /* See if this is something like X * C - X or vice versa or
2436 	 if the multiplication is written as a shift.  If so, we can
2437 	 distribute and make a new multiply, shift, or maybe just
2438 	 have X (if C is 2 in the example above).  But don't make
2439 	 something more expensive than we had before.  */
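      /* E.g. (minus (mult X 3) X) can become (mult X 2), and
	 (minus (mult X 2) X) reduces to just X, subject to the same
	 cost check.  */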
2440 
2441       if (SCALAR_INT_MODE_P (mode))
2442 	{
2443 	  double_int coeff0, negcoeff1;
2444 	  rtx lhs = op0, rhs = op1;
2445 
2446 	  coeff0 = double_int_one;
2447 	  negcoeff1 = double_int_minus_one;
2448 
2449 	  if (GET_CODE (lhs) == NEG)
2450 	    {
2451 	      coeff0 = double_int_minus_one;
2452 	      lhs = XEXP (lhs, 0);
2453 	    }
2454 	  else if (GET_CODE (lhs) == MULT
2455 		   && CONST_INT_P (XEXP (lhs, 1)))
2456 	    {
2457 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2458 	      lhs = XEXP (lhs, 0);
2459 	    }
2460 	  else if (GET_CODE (lhs) == ASHIFT
2461 		   && CONST_INT_P (XEXP (lhs, 1))
2462 		   && INTVAL (XEXP (lhs, 1)) >= 0
2463 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2464 	    {
2465 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2466 	      lhs = XEXP (lhs, 0);
2467 	    }
2468 
2469 	  if (GET_CODE (rhs) == NEG)
2470 	    {
2471 	      negcoeff1 = double_int_one;
2472 	      rhs = XEXP (rhs, 0);
2473 	    }
2474 	  else if (GET_CODE (rhs) == MULT
2475 		   && CONST_INT_P (XEXP (rhs, 1)))
2476 	    {
2477 	      negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2478 	      rhs = XEXP (rhs, 0);
2479 	    }
2480 	  else if (GET_CODE (rhs) == ASHIFT
2481 		   && CONST_INT_P (XEXP (rhs, 1))
2482 		   && INTVAL (XEXP (rhs, 1)) >= 0
2483 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2484 	    {
2485 	      negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2486 	      negcoeff1 = -negcoeff1;
2487 	      rhs = XEXP (rhs, 0);
2488 	    }
2489 
2490 	  if (rtx_equal_p (lhs, rhs))
2491 	    {
2492 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2493 	      rtx coeff;
2494 	      double_int val;
2495 	      bool speed = optimize_function_for_speed_p (cfun);
2496 
2497 	      val = coeff0 + negcoeff1;
2498 	      coeff = immed_double_int_const (val, mode);
2499 
2500 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2501 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2502 		? tem : 0;
2503 	    }
2504 	}
2505 
2506       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2507       if (GET_CODE (op1) == NEG)
2508 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2509 
2510       /* (-x - c) may be simplified as (-c - x).  */
2511       if (GET_CODE (op0) == NEG
2512 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2513 	{
2514 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2515 	  if (tem)
2516 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2517 	}
2518 
2519       /* Don't let a relocatable value get a negative coeff.  */
2520       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2521 	return simplify_gen_binary (PLUS, mode,
2522 				    op0,
2523 				    neg_const_int (mode, op1));
2524 
2525       /* (x - (x & y)) -> (x & ~y) */
2526       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2527 	{
2528 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2529 	    {
2530 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2531 					GET_MODE (XEXP (op1, 1)));
2532 	      return simplify_gen_binary (AND, mode, op0, tem);
2533 	    }
2534 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2535 	    {
2536 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2537 					GET_MODE (XEXP (op1, 0)));
2538 	      return simplify_gen_binary (AND, mode, op0, tem);
2539 	    }
2540 	}
2541 
2542       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2543 	 by reversing the comparison code if valid.  */
2544       if (STORE_FLAG_VALUE == 1
2545 	  && trueop0 == const1_rtx
2546 	  && COMPARISON_P (op1)
2547 	  && (reversed = reversed_comparison (op1, mode)))
2548 	return reversed;
2549 
2550       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2551       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2552 	  && GET_CODE (op1) == MULT
2553 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2554 	{
2555 	  rtx in1, in2;
2556 
2557 	  in1 = XEXP (XEXP (op1, 0), 0);
2558 	  in2 = XEXP (op1, 1);
2559 	  return simplify_gen_binary (PLUS, mode,
2560 				      simplify_gen_binary (MULT, mode,
2561 							   in1, in2),
2562 				      op0);
2563 	}
2564 
2565       /* Canonicalize (minus (neg A) (mult B C)) to
2566 	 (minus (mult (neg B) C) A).  */
2567       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2568 	  && GET_CODE (op1) == MULT
2569 	  && GET_CODE (op0) == NEG)
2570 	{
2571 	  rtx in1, in2;
2572 
2573 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2574 	  in2 = XEXP (op1, 1);
2575 	  return simplify_gen_binary (MINUS, mode,
2576 				      simplify_gen_binary (MULT, mode,
2577 							   in1, in2),
2578 				      XEXP (op0, 0));
2579 	}
2580 
2581       /* If one of the operands is a PLUS or a MINUS, see if we can
2582 	 simplify this by the associative law.  This will, for example,
2583          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2584 	 Don't use the associative law for floating point.
2585 	 The inaccuracy makes it nonassociative,
2586 	 and subtle programs can break if operations are associated.  */
2587 
2588       if (INTEGRAL_MODE_P (mode)
2589 	  && (plus_minus_operand_p (op0)
2590 	      || plus_minus_operand_p (op1))
2591 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2592 	return tem;
2593       break;
2594 
2595     case MULT:
2596       if (trueop1 == constm1_rtx)
2597 	return simplify_gen_unary (NEG, mode, op0, mode);
2598 
2599       if (GET_CODE (op0) == NEG)
2600 	{
2601 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2602 	  /* If op1 is a MULT as well and simplify_unary_operation
2603 	     just moved the NEG to the second operand, simplify_gen_binary
2604 	     below could, through simplify_associative_operation, move
2605 	     the NEG around again and recurse endlessly.  */
2606 	  if (temp
2607 	      && GET_CODE (op1) == MULT
2608 	      && GET_CODE (temp) == MULT
2609 	      && XEXP (op1, 0) == XEXP (temp, 0)
2610 	      && GET_CODE (XEXP (temp, 1)) == NEG
2611 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2612 	    temp = NULL_RTX;
2613 	  if (temp)
2614 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2615 	}
2616       if (GET_CODE (op1) == NEG)
2617 	{
2618 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2619 	  /* If op0 is a MULT as well and simplify_unary_operation
2620 	     just moved the NEG to the second operand, simplify_gen_binary
2621 	     below could, through simplify_associative_operation, move
2622 	     the NEG around again and recurse endlessly.  */
2623 	  if (temp
2624 	      && GET_CODE (op0) == MULT
2625 	      && GET_CODE (temp) == MULT
2626 	      && XEXP (op0, 0) == XEXP (temp, 0)
2627 	      && GET_CODE (XEXP (temp, 1)) == NEG
2628 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2629 	    temp = NULL_RTX;
2630 	  if (temp)
2631 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2632 	}
2633 
2634       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2635 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2636 	 when the mode has signed zeros, since multiplying a negative
2637 	 number by 0 will give -0, not 0.  */
2638       if (!HONOR_NANS (mode)
2639 	  && !HONOR_SIGNED_ZEROS (mode)
2640 	  && trueop1 == CONST0_RTX (mode)
2641 	  && ! side_effects_p (op0))
2642 	return op1;
2643 
2644       /* In IEEE floating point, x*1 is not equivalent to x for
2645 	 signalling NaNs.  */
2646       if (!HONOR_SNANS (mode)
2647 	  && trueop1 == CONST1_RTX (mode))
2648 	return op0;
2649 
2650       /* Convert multiply by constant power of two into shift unless
2651 	 we are still generating RTL.  This test is a kludge.  */
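      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)).  */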
2652       if (CONST_INT_P (trueop1)
2653 	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2654 	  /* If the mode is larger than the host word size, and the
2655 	     uppermost bit is set, then this isn't a power of two due
2656 	     to implicit sign extension.  */
2657 	  && (width <= HOST_BITS_PER_WIDE_INT
2658 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2659 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2660 
2661       /* Likewise for multipliers wider than a word.  */
2662       if (CONST_DOUBLE_AS_INT_P (trueop1)
2663 	  && GET_MODE (op0) == mode
2664 	  && CONST_DOUBLE_LOW (trueop1) == 0
2665 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2666 	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
2667 	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2668 	return simplify_gen_binary (ASHIFT, mode, op0,
2669 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2670 
2671       /* x*2 is x+x and x*(-1) is -x */
2672       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2673 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2674 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2675 	  && GET_MODE (op0) == mode)
2676 	{
2677 	  REAL_VALUE_TYPE d;
2678 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2679 
2680 	  if (REAL_VALUES_EQUAL (d, dconst2))
2681 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2682 
2683 	  if (!HONOR_SNANS (mode)
2684 	      && REAL_VALUES_EQUAL (d, dconstm1))
2685 	    return simplify_gen_unary (NEG, mode, op0, mode);
2686 	}
2687 
2688       /* Optimize -x * -x as x * x.  */
2689       if (FLOAT_MODE_P (mode)
2690 	  && GET_CODE (op0) == NEG
2691 	  && GET_CODE (op1) == NEG
2692 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2693 	  && !side_effects_p (XEXP (op0, 0)))
2694 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2695 
2696       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2697       if (SCALAR_FLOAT_MODE_P (mode)
2698 	  && GET_CODE (op0) == ABS
2699 	  && GET_CODE (op1) == ABS
2700 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2701 	  && !side_effects_p (XEXP (op0, 0)))
2702 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2703 
2704       /* Reassociate multiplication, but for floating point MULTs
2705 	 only when the user specifies unsafe math optimizations.  */
2706       if (! FLOAT_MODE_P (mode)
2707 	  || flag_unsafe_math_optimizations)
2708 	{
2709 	  tem = simplify_associative_operation (code, mode, op0, op1);
2710 	  if (tem)
2711 	    return tem;
2712 	}
2713       break;
2714 
2715     case IOR:
2716       if (trueop1 == CONST0_RTX (mode))
2717 	return op0;
2718       if (INTEGRAL_MODE_P (mode)
2719 	  && trueop1 == CONSTM1_RTX (mode)
2720 	  && !side_effects_p (op0))
2721 	return op1;
2722       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2723 	return op0;
2724       /* A | (~A) -> -1 */
2725       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2726 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2727 	  && ! side_effects_p (op0)
2728 	  && SCALAR_INT_MODE_P (mode))
2729 	return constm1_rtx;
2730 
2731       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2732       if (CONST_INT_P (op1)
2733 	  && HWI_COMPUTABLE_MODE_P (mode)
2734 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2735 	  && !side_effects_p (op0))
2736 	return op1;
2737 
2738       /* Canonicalize (X & C1) | C2.  */
2739       if (GET_CODE (op0) == AND
2740 	  && CONST_INT_P (trueop1)
2741 	  && CONST_INT_P (XEXP (op0, 1)))
2742 	{
2743 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2744 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2745 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2746 
2747 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2748 	  if ((c1 & c2) == c1
2749 	      && !side_effects_p (XEXP (op0, 0)))
2750 	    return trueop1;
2751 
2752 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2753 	  if (((c1|c2) & mask) == mask)
2754 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2755 
2756 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2757 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2758 	    {
2759 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2760 					 gen_int_mode (c1 & ~c2, mode));
2761 	      return simplify_gen_binary (IOR, mode, tem, op1);
2762 	    }
2763 	}
2764 
2765       /* Convert (A & B) | A to A.  */
2766       if (GET_CODE (op0) == AND
2767 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2768 	      || rtx_equal_p (XEXP (op0, 1), op1))
2769 	  && ! side_effects_p (XEXP (op0, 0))
2770 	  && ! side_effects_p (XEXP (op0, 1)))
2771 	return op1;
2772 
2773       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2774          mode size to (rotate A CX).  */
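      /* E.g. in SImode, (ior (ashift X (const_int 8))
	 (lshiftrt X (const_int 24))) is (rotate X (const_int 8)).  */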
2775 
2776       if (GET_CODE (op1) == ASHIFT
2777           || GET_CODE (op1) == SUBREG)
2778         {
2779 	  opleft = op1;
2780 	  opright = op0;
2781 	}
2782       else
2783         {
2784 	  opright = op1;
2785 	  opleft = op0;
2786 	}
2787 
2788       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2789           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2790           && CONST_INT_P (XEXP (opleft, 1))
2791           && CONST_INT_P (XEXP (opright, 1))
2792           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2793               == GET_MODE_PRECISION (mode)))
2794         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2795 
2796       /* Same, but for ashift that has been "simplified" to a wider mode
2797         by simplify_shift_const.  */
2798 
2799       if (GET_CODE (opleft) == SUBREG
2800           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2801           && GET_CODE (opright) == LSHIFTRT
2802           && GET_CODE (XEXP (opright, 0)) == SUBREG
2803           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2804           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2805           && (GET_MODE_SIZE (GET_MODE (opleft))
2806               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2807           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2808                           SUBREG_REG (XEXP (opright, 0)))
2809           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2810           && CONST_INT_P (XEXP (opright, 1))
2811           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2812               == GET_MODE_PRECISION (mode)))
2813         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2814                                XEXP (SUBREG_REG (opleft), 1));
2815 
2816       /* If we have (ior (and X C1) C2), simplify this by making
2817 	 C1 as small as possible if C1 actually changes.  */
2818       if (CONST_INT_P (op1)
2819 	  && (HWI_COMPUTABLE_MODE_P (mode)
2820 	      || INTVAL (op1) > 0)
2821 	  && GET_CODE (op0) == AND
2822 	  && CONST_INT_P (XEXP (op0, 1))
2823 	  && CONST_INT_P (op1)
2824 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2825 	{
2826 	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2827 					 gen_int_mode (UINTVAL (XEXP (op0, 1))
2828 						       & ~UINTVAL (op1),
2829 						       mode));
2830 	  return simplify_gen_binary (IOR, mode, tmp, op1);
2831 	}
2832 
2833       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2834          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2835 	 the PLUS does not affect any of the bits in OP1: then we can do
2836 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2837          can be safely shifted left C bits.  */
2838       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2839           && GET_CODE (XEXP (op0, 0)) == PLUS
2840           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2841           && CONST_INT_P (XEXP (op0, 1))
2842           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2843         {
2844           int count = INTVAL (XEXP (op0, 1));
2845           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2846 
2847           if (mask >> count == INTVAL (trueop1)
2848 	      && trunc_int_for_mode (mask, mode) == mask
2849               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2850 	    return simplify_gen_binary (ASHIFTRT, mode,
2851 					plus_constant (mode, XEXP (op0, 0),
2852 						       mask),
2853 					XEXP (op0, 1));
2854         }
2855 
2856       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2857       if (tem)
2858 	return tem;
2859 
2860       tem = simplify_associative_operation (code, mode, op0, op1);
2861       if (tem)
2862 	return tem;
2863       break;
2864 
2865     case XOR:
2866       if (trueop1 == CONST0_RTX (mode))
2867 	return op0;
2868       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2869 	return simplify_gen_unary (NOT, mode, op0, mode);
2870       if (rtx_equal_p (trueop0, trueop1)
2871 	  && ! side_effects_p (op0)
2872 	  && GET_MODE_CLASS (mode) != MODE_CC)
2873 	 return CONST0_RTX (mode);
2874 
2875       /* Canonicalize XOR of the most significant bit to PLUS.  */
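      /* Adding the sign bit flips it just as XOR does, since the carry out
	 of the top bit is discarded; e.g. for an 8-bit X, X ^ 0x80 equals
	 (X + 0x80) modulo 256.  */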
2876       if (CONST_SCALAR_INT_P (op1)
2877 	  && mode_signbit_p (mode, op1))
2878 	return simplify_gen_binary (PLUS, mode, op0, op1);
2879       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2880       if (CONST_SCALAR_INT_P (op1)
2881 	  && GET_CODE (op0) == PLUS
2882 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2883 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2884 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2885 				    simplify_gen_binary (XOR, mode, op1,
2886 							 XEXP (op0, 1)));
2887 
2888       /* If we are XORing two things that have no bits in common,
2889 	 convert them into an IOR.  This helps to detect rotation encoded
2890 	 using those methods and possibly other simplifications.  */
2891 
2892       if (HWI_COMPUTABLE_MODE_P (mode)
2893 	  && (nonzero_bits (op0, mode)
2894 	      & nonzero_bits (op1, mode)) == 0)
2895 	return (simplify_gen_binary (IOR, mode, op0, op1));
2896 
2897       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2898 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2899 	 (NOT y).  */
2900       {
2901 	int num_negated = 0;
2902 
2903 	if (GET_CODE (op0) == NOT)
2904 	  num_negated++, op0 = XEXP (op0, 0);
2905 	if (GET_CODE (op1) == NOT)
2906 	  num_negated++, op1 = XEXP (op1, 0);
2907 
2908 	if (num_negated == 2)
2909 	  return simplify_gen_binary (XOR, mode, op0, op1);
2910 	else if (num_negated == 1)
2911 	  return simplify_gen_unary (NOT, mode,
2912 				     simplify_gen_binary (XOR, mode, op0, op1),
2913 				     mode);
2914       }
2915 
2916       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2917 	 correspond to a machine insn or result in further simplifications
2918 	 if B is a constant.  */
2919 
2920       if (GET_CODE (op0) == AND
2921 	  && rtx_equal_p (XEXP (op0, 1), op1)
2922 	  && ! side_effects_p (op1))
2923 	return simplify_gen_binary (AND, mode,
2924 				    simplify_gen_unary (NOT, mode,
2925 							XEXP (op0, 0), mode),
2926 				    op1);
2927 
2928       else if (GET_CODE (op0) == AND
2929 	       && rtx_equal_p (XEXP (op0, 0), op1)
2930 	       && ! side_effects_p (op1))
2931 	return simplify_gen_binary (AND, mode,
2932 				    simplify_gen_unary (NOT, mode,
2933 							XEXP (op0, 1), mode),
2934 				    op1);
2935 
2936       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2937 	 we can transform like this:
2938             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2939                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2940                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2941 	 Attempt a few simplifications when B and C are both constants.  */
2942       if (GET_CODE (op0) == AND
2943 	  && CONST_INT_P (op1)
2944 	  && CONST_INT_P (XEXP (op0, 1)))
2945 	{
2946 	  rtx a = XEXP (op0, 0);
2947 	  rtx b = XEXP (op0, 1);
2948 	  rtx c = op1;
2949 	  HOST_WIDE_INT bval = INTVAL (b);
2950 	  HOST_WIDE_INT cval = INTVAL (c);
2951 
2952 	  rtx na_c
2953 	    = simplify_binary_operation (AND, mode,
2954 					 simplify_gen_unary (NOT, mode, a, mode),
2955 					 c);
2956 	  if ((~cval & bval) == 0)
2957 	    {
2958 	      /* Try to simplify ~A&C | ~B&C.  */
2959 	      if (na_c != NULL_RTX)
2960 		return simplify_gen_binary (IOR, mode, na_c,
2961 					    gen_int_mode (~bval & cval, mode));
2962 	    }
2963 	  else
2964 	    {
2965 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2966 	      if (na_c == const0_rtx)
2967 		{
2968 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2969 						    gen_int_mode (~cval & bval,
2970 								  mode));
2971 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2972 					      gen_int_mode (~bval & cval,
2973 							    mode));
2974 		}
2975 	    }
2976 	}
2977 
2978       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2979 	 comparison if STORE_FLAG_VALUE is 1.  */
2980       if (STORE_FLAG_VALUE == 1
2981 	  && trueop1 == const1_rtx
2982 	  && COMPARISON_P (op0)
2983 	  && (reversed = reversed_comparison (op0, mode)))
2984 	return reversed;
2985 
2986       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2987 	 is (lt foo (const_int 0)), so we can perform the above
2988 	 simplification if STORE_FLAG_VALUE is 1.  */
2989 
2990       if (STORE_FLAG_VALUE == 1
2991 	  && trueop1 == const1_rtx
2992 	  && GET_CODE (op0) == LSHIFTRT
2993 	  && CONST_INT_P (XEXP (op0, 1))
2994 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2995 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2996 
2997       /* (xor (comparison foo bar) (const_int sign-bit))
2998 	 when STORE_FLAG_VALUE is the sign bit.  */
2999       if (val_signbit_p (mode, STORE_FLAG_VALUE)
3000 	  && trueop1 == const_true_rtx
3001 	  && COMPARISON_P (op0)
3002 	  && (reversed = reversed_comparison (op0, mode)))
3003 	return reversed;
3004 
3005       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3006       if (tem)
3007 	return tem;
3008 
3009       tem = simplify_associative_operation (code, mode, op0, op1);
3010       if (tem)
3011 	return tem;
3012       break;
3013 
3014     case AND:
3015       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3016 	return trueop1;
3017       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3018 	return op0;
3019       if (HWI_COMPUTABLE_MODE_P (mode))
3020 	{
3021 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3022 	  HOST_WIDE_INT nzop1;
3023 	  if (CONST_INT_P (trueop1))
3024 	    {
3025 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
3026 	      /* If we are turning off bits already known off in OP0, we need
3027 		 not do an AND.  */
3028 	      if ((nzop0 & ~val1) == 0)
3029 		return op0;
3030 	    }
3031 	  nzop1 = nonzero_bits (trueop1, mode);
3032 	  /* If we are clearing all the nonzero bits, the result is zero.  */
3033 	  if ((nzop1 & nzop0) == 0
3034 	      && !side_effects_p (op0) && !side_effects_p (op1))
3035 	    return CONST0_RTX (mode);
3036 	}
3037       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3038 	  && GET_MODE_CLASS (mode) != MODE_CC)
3039 	return op0;
3040       /* A & (~A) -> 0 */
3041       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3042 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3043 	  && ! side_effects_p (op0)
3044 	  && GET_MODE_CLASS (mode) != MODE_CC)
3045 	return CONST0_RTX (mode);
3046 
3047       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3048 	 there are no nonzero bits of C outside of X's mode.  */
3049       if ((GET_CODE (op0) == SIGN_EXTEND
3050 	   || GET_CODE (op0) == ZERO_EXTEND)
3051 	  && CONST_INT_P (trueop1)
3052 	  && HWI_COMPUTABLE_MODE_P (mode)
3053 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3054 	      & UINTVAL (trueop1)) == 0)
3055 	{
3056 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3057 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3058 				     gen_int_mode (INTVAL (trueop1),
3059 						   imode));
3060 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3061 	}
3062 
3063       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
3064 	 we might be able to further simplify the AND with X and potentially
3065 	 remove the truncation altogether.  */
3066       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3067 	{
3068 	  rtx x = XEXP (op0, 0);
3069 	  enum machine_mode xmode = GET_MODE (x);
3070 	  tem = simplify_gen_binary (AND, xmode, x,
3071 				     gen_int_mode (INTVAL (trueop1), xmode));
3072 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3073 	}
3074 
3075       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3076       if (GET_CODE (op0) == IOR
3077 	  && CONST_INT_P (trueop1)
3078 	  && CONST_INT_P (XEXP (op0, 1)))
3079 	{
3080 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3081 	  return simplify_gen_binary (IOR, mode,
3082 				      simplify_gen_binary (AND, mode,
3083 							   XEXP (op0, 0), op1),
3084 				      gen_int_mode (tmp, mode));
3085 	}
3086 
3087       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3088 	 insn (and may simplify more).  */
3089       if (GET_CODE (op0) == XOR
3090 	  && rtx_equal_p (XEXP (op0, 0), op1)
3091 	  && ! side_effects_p (op1))
3092 	return simplify_gen_binary (AND, mode,
3093 				    simplify_gen_unary (NOT, mode,
3094 							XEXP (op0, 1), mode),
3095 				    op1);
3096 
3097       if (GET_CODE (op0) == XOR
3098 	  && rtx_equal_p (XEXP (op0, 1), op1)
3099 	  && ! side_effects_p (op1))
3100 	return simplify_gen_binary (AND, mode,
3101 				    simplify_gen_unary (NOT, mode,
3102 							XEXP (op0, 0), mode),
3103 				    op1);
3104 
3105       /* Similarly for (~(A ^ B)) & A.  */
3106       if (GET_CODE (op0) == NOT
3107 	  && GET_CODE (XEXP (op0, 0)) == XOR
3108 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3109 	  && ! side_effects_p (op1))
3110 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3111 
3112       if (GET_CODE (op0) == NOT
3113 	  && GET_CODE (XEXP (op0, 0)) == XOR
3114 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3115 	  && ! side_effects_p (op1))
3116 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3117 
3118       /* Convert (A | B) & A to A.  */
3119       if (GET_CODE (op0) == IOR
3120 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3121 	      || rtx_equal_p (XEXP (op0, 1), op1))
3122 	  && ! side_effects_p (XEXP (op0, 0))
3123 	  && ! side_effects_p (XEXP (op0, 1)))
3124 	return op1;
3125 
3126       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3127 	 ((A & N) + B) & M -> (A + B) & M
3128 	 Similarly if (N & M) == 0,
3129 	 ((A | N) + B) & M -> (A + B) & M
3130 	 and for - instead of + and/or ^ instead of |.
3131          Also, if (N & M) == 0, then
3132 	 (A +- N) & M -> A & M.  */
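      /* A worked example of the rule above (values chosen for illustration):
	 with M == 0x0f and N == 0xf0, (N & M) == 0, so
	 ((A | 0xf0) + B) & 0x0f becomes (A + B) & 0x0f -- the bits ORed in
	 lie above the mask and carries only propagate upward, so the low
	 nibble of the sum is unchanged.  */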
3133       if (CONST_INT_P (trueop1)
3134 	  && HWI_COMPUTABLE_MODE_P (mode)
3135 	  && ~UINTVAL (trueop1)
3136 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3137 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3138 	{
3139 	  rtx pmop[2];
3140 	  int which;
3141 
3142 	  pmop[0] = XEXP (op0, 0);
3143 	  pmop[1] = XEXP (op0, 1);
3144 
3145 	  if (CONST_INT_P (pmop[1])
3146 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3147 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3148 
3149 	  for (which = 0; which < 2; which++)
3150 	    {
3151 	      tem = pmop[which];
3152 	      switch (GET_CODE (tem))
3153 		{
3154 		case AND:
3155 		  if (CONST_INT_P (XEXP (tem, 1))
3156 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3157 		      == UINTVAL (trueop1))
3158 		    pmop[which] = XEXP (tem, 0);
3159 		  break;
3160 		case IOR:
3161 		case XOR:
3162 		  if (CONST_INT_P (XEXP (tem, 1))
3163 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3164 		    pmop[which] = XEXP (tem, 0);
3165 		  break;
3166 		default:
3167 		  break;
3168 		}
3169 	    }
3170 
3171 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3172 	    {
3173 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3174 					 pmop[0], pmop[1]);
3175 	      return simplify_gen_binary (code, mode, tem, op1);
3176 	    }
3177 	}
3178 
3179       /* (and X (ior (not X) Y)) -> (and X Y) */
3180       if (GET_CODE (op1) == IOR
3181 	  && GET_CODE (XEXP (op1, 0)) == NOT
3182 	  && op0 == XEXP (XEXP (op1, 0), 0))
3183        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3184 
3185       /* (and (ior (not X) Y) X) -> (and X Y) */
3186       if (GET_CODE (op0) == IOR
3187 	  && GET_CODE (XEXP (op0, 0)) == NOT
3188 	  && op1 == XEXP (XEXP (op0, 0), 0))
3189 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3190 
3191       tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3192       if (tem)
3193 	return tem;
3194 
3195       tem = simplify_associative_operation (code, mode, op0, op1);
3196       if (tem)
3197 	return tem;
3198       break;
3199 
3200     case UDIV:
3201       /* 0/x is 0 (or x&0 if x has side-effects).  */
3202       if (trueop0 == CONST0_RTX (mode))
3203 	{
3204 	  if (side_effects_p (op1))
3205 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3206 	  return trueop0;
3207 	}
3208       /* x/1 is x.  */
3209       if (trueop1 == CONST1_RTX (mode))
3210 	{
3211 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3212 	  if (tem)
3213 	    return tem;
3214 	}
3215       /* Convert divide by power of two into shift.  */
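      /* For instance, (udiv X (const_int 16)) becomes
	 (lshiftrt X (const_int 4)), since exact_log2 (16) == 4.  */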
3216       if (CONST_INT_P (trueop1)
3217 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3218 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3219       break;
3220 
3221     case DIV:
3222       /* Handle floating point and integers separately.  */
3223       if (SCALAR_FLOAT_MODE_P (mode))
3224 	{
3225 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3226 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3227 	     NaN rather than 0.0.  Nor is it safe for modes with signed
3228 	     zeros, since dividing 0 by a negative number gives -0.0.  */
3229 	  if (trueop0 == CONST0_RTX (mode)
3230 	      && !HONOR_NANS (mode)
3231 	      && !HONOR_SIGNED_ZEROS (mode)
3232 	      && ! side_effects_p (op1))
3233 	    return op0;
3234 	  /* x/1.0 is x.  */
3235 	  if (trueop1 == CONST1_RTX (mode)
3236 	      && !HONOR_SNANS (mode))
3237 	    return op0;
3238 
3239 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3240 	      && trueop1 != CONST0_RTX (mode))
3241 	    {
3242 	      REAL_VALUE_TYPE d;
3243 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3244 
3245 	      /* x/-1.0 is -x.  */
3246 	      if (REAL_VALUES_EQUAL (d, dconstm1)
3247 		  && !HONOR_SNANS (mode))
3248 		return simplify_gen_unary (NEG, mode, op0, mode);
3249 
3250 	      /* Change FP division by a constant into multiplication.
3251 		 Only do this with -freciprocal-math.  */
3252 	      if (flag_reciprocal_math
3253 		  && !REAL_VALUES_EQUAL (d, dconst0))
3254 		{
3255 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3256 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3257 		  return simplify_gen_binary (MULT, mode, op0, tem);
3258 		}
3259 	    }
3260 	}
3261       else if (SCALAR_INT_MODE_P (mode))
3262 	{
3263 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3264 	  if (trueop0 == CONST0_RTX (mode)
3265 	      && !cfun->can_throw_non_call_exceptions)
3266 	    {
3267 	      if (side_effects_p (op1))
3268 		return simplify_gen_binary (AND, mode, op1, trueop0);
3269 	      return trueop0;
3270 	    }
3271 	  /* x/1 is x.  */
3272 	  if (trueop1 == CONST1_RTX (mode))
3273 	    {
3274 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3275 	      if (tem)
3276 		return tem;
3277 	    }
3278 	  /* x/-1 is -x.  */
3279 	  if (trueop1 == constm1_rtx)
3280 	    {
3281 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3282 	      if (x)
3283 		return simplify_gen_unary (NEG, mode, x, mode);
3284 	    }
3285 	}
3286       break;
3287 
3288     case UMOD:
3289       /* 0%x is 0 (or x&0 if x has side-effects).  */
3290       if (trueop0 == CONST0_RTX (mode))
3291 	{
3292 	  if (side_effects_p (op1))
3293 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3294 	  return trueop0;
3295 	}
3296       /* x%1 is 0 (or x&0 if x has side-effects).  */
3297       if (trueop1 == CONST1_RTX (mode))
3298 	{
3299 	  if (side_effects_p (op0))
3300 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3301 	  return CONST0_RTX (mode);
3302 	}
3303       /* Implement modulus by power of two as AND.  */
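      /* E.g. (umod X (const_int 8)) becomes (and X (const_int 7)),
	 since the remainder modulo a power of two is just the low bits.  */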
3304       if (CONST_INT_P (trueop1)
3305 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3306 	return simplify_gen_binary (AND, mode, op0,
3307 				    gen_int_mode (INTVAL (op1) - 1, mode));
3308       break;
3309 
3310     case MOD:
3311       /* 0%x is 0 (or x&0 if x has side-effects).  */
3312       if (trueop0 == CONST0_RTX (mode))
3313 	{
3314 	  if (side_effects_p (op1))
3315 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3316 	  return trueop0;
3317 	}
3318       /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
3319       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3320 	{
3321 	  if (side_effects_p (op0))
3322 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3323 	  return CONST0_RTX (mode);
3324 	}
3325       break;
3326 
3327     case ROTATERT:
3328     case ROTATE:
3329       /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
3330 	 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3331 	 bitsize - 1, use the other direction of rotate with an amount of
3332 	 1 .. bitsize / 2 - 1 instead.  */
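      /* For example, in a 32-bit mode (rotate X (const_int 24)) is
	 canonicalized to (rotatert X (const_int 8)), and
	 (rotatert X (const_int 16)) to (rotate X (const_int 16)).  */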
3333       if (CONST_INT_P (trueop1)
3334 	  && IN_RANGE (INTVAL (trueop1),
3335 		       GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
3336 		       GET_MODE_BITSIZE (mode) - 1))
3337 	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3338 				    mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
3339 							- INTVAL (trueop1)));
3340       /* FALLTHRU */
3341     case ASHIFTRT:
3342       if (trueop1 == CONST0_RTX (mode))
3343 	return op0;
3344       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3345 	return op0;
3346       /* Rotating ~0 always results in ~0.  */
3347       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3348 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3349 	  && ! side_effects_p (op1))
3350 	return op0;
3351     canonicalize_shift:
3352       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3353 	{
3354 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3355 	  if (val != INTVAL (op1))
3356 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3357 	}
3358       break;
3359 
3360     case ASHIFT:
3361     case SS_ASHIFT:
3362     case US_ASHIFT:
3363       if (trueop1 == CONST0_RTX (mode))
3364 	return op0;
3365       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3366 	return op0;
3367       goto canonicalize_shift;
3368 
3369     case LSHIFTRT:
3370       if (trueop1 == CONST0_RTX (mode))
3371 	return op0;
3372       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3373 	return op0;
3374       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
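      /* For instance, with a 32-bit operand on a target where CLZ of zero
	 is defined to be 32, (lshiftrt (clz X) (const_int 5)) is nonzero
	 only when the CLZ result is 32, i.e. when X is zero, hence
	 (eq X 0).  */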
3375       if (GET_CODE (op0) == CLZ
3376 	  && CONST_INT_P (trueop1)
3377 	  && STORE_FLAG_VALUE == 1
3378 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3379 	{
3380 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3381 	  unsigned HOST_WIDE_INT zero_val = 0;
3382 
3383 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3384 	      && zero_val == GET_MODE_PRECISION (imode)
3385 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3386 	    return simplify_gen_relational (EQ, mode, imode,
3387 					    XEXP (op0, 0), const0_rtx);
3388 	}
3389       goto canonicalize_shift;
3390 
3391     case SMIN:
3392       if (width <= HOST_BITS_PER_WIDE_INT
3393 	  && mode_signbit_p (mode, trueop1)
3394 	  && ! side_effects_p (op0))
3395 	return op1;
3396       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3397 	return op0;
3398       tem = simplify_associative_operation (code, mode, op0, op1);
3399       if (tem)
3400 	return tem;
3401       break;
3402 
3403     case SMAX:
3404       if (width <= HOST_BITS_PER_WIDE_INT
3405 	  && CONST_INT_P (trueop1)
3406 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3407 	  && ! side_effects_p (op0))
3408 	return op1;
3409       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3410 	return op0;
3411       tem = simplify_associative_operation (code, mode, op0, op1);
3412       if (tem)
3413 	return tem;
3414       break;
3415 
3416     case UMIN:
3417       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3418 	return op1;
3419       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3420 	return op0;
3421       tem = simplify_associative_operation (code, mode, op0, op1);
3422       if (tem)
3423 	return tem;
3424       break;
3425 
3426     case UMAX:
3427       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3428 	return op1;
3429       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3430 	return op0;
3431       tem = simplify_associative_operation (code, mode, op0, op1);
3432       if (tem)
3433 	return tem;
3434       break;
3435 
3436     case SS_PLUS:
3437     case US_PLUS:
3438     case SS_MINUS:
3439     case US_MINUS:
3440     case SS_MULT:
3441     case US_MULT:
3442     case SS_DIV:
3443     case US_DIV:
3444       /* ??? There are simplifications that can be done.  */
3445       return 0;
3446 
3447     case VEC_SELECT:
3448       if (!VECTOR_MODE_P (mode))
3449 	{
3450 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3451 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3452 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3453 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3454 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3455 
3456 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3457 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3458 						      (trueop1, 0, 0)));
3459 
3460 	  /* Extract a scalar element from a nested VEC_SELECT expression
3461 	     (with an optional nested VEC_CONCAT expression).  Some targets
3462 	     (i386) extract a scalar element from a vector using a chain of
3463 	     nested VEC_SELECT expressions.  When the input operand is a
3464 	     memory operand, this operation can be simplified to a simple
3465 	     scalar load from an offset memory address.  */
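	  /* For instance (a hypothetical V4SF example),
	     (vec_select:SF (vec_select:V4SF X (parallel [3 2 1 0]))
			    (parallel [0]))
	     selects element 3 of X and can be rewritten as
	     (vec_select:SF X (parallel [3])).  */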
3466 	  if (GET_CODE (trueop0) == VEC_SELECT)
3467 	    {
3468 	      rtx op0 = XEXP (trueop0, 0);
3469 	      rtx op1 = XEXP (trueop0, 1);
3470 
3471 	      enum machine_mode opmode = GET_MODE (op0);
3472 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3473 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3474 
3475 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3476 	      int elem;
3477 
3478 	      rtvec vec;
3479 	      rtx tmp_op, tmp;
3480 
3481 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3482 	      gcc_assert (i < n_elts);
3483 
3484 	      /* Select the element pointed to by the nested selector.  */
3485 	      elem = INTVAL (XVECEXP (op1, 0, i));
3486 
3487 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3488 	      if (GET_CODE (op0) == VEC_CONCAT)
3489 		{
3490 		  rtx op00 = XEXP (op0, 0);
3491 		  rtx op01 = XEXP (op0, 1);
3492 
3493 		  enum machine_mode mode00, mode01;
3494 		  int n_elts00, n_elts01;
3495 
3496 		  mode00 = GET_MODE (op00);
3497 		  mode01 = GET_MODE (op01);
3498 
3499 		  /* Find out the number of elements of each operand.  */
3500 		  if (VECTOR_MODE_P (mode00))
3501 		    {
3502 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3503 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3504 		    }
3505 		  else
3506 		    n_elts00 = 1;
3507 
3508 		  if (VECTOR_MODE_P (mode01))
3509 		    {
3510 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3511 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3512 		    }
3513 		  else
3514 		    n_elts01 = 1;
3515 
3516 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3517 
3518 		  /* Select the correct operand of VEC_CONCAT
3519 		     and adjust the selector.  */
3520 		  if (elem < n_elts01)
3521 		    tmp_op = op00;
3522 		  else
3523 		    {
3524 		      tmp_op = op01;
3525 		      elem -= n_elts00;
3526 		    }
3527 		}
3528 	      else
3529 		tmp_op = op0;
3530 
3531 	      vec = rtvec_alloc (1);
3532 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3533 
3534 	      tmp = gen_rtx_fmt_ee (code, mode,
3535 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3536 	      return tmp;
3537 	    }
3538 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3539 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3540 	    return XEXP (trueop0, 0);
3541 	}
3542       else
3543 	{
3544 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3545 	  gcc_assert (GET_MODE_INNER (mode)
3546 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3547 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3548 
3549 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3550 	    {
3551 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3552 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3553 	      rtvec v = rtvec_alloc (n_elts);
3554 	      unsigned int i;
3555 
3556 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3557 	      for (i = 0; i < n_elts; i++)
3558 		{
3559 		  rtx x = XVECEXP (trueop1, 0, i);
3560 
3561 		  gcc_assert (CONST_INT_P (x));
3562 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3563 						       INTVAL (x));
3564 		}
3565 
3566 	      return gen_rtx_CONST_VECTOR (mode, v);
3567 	    }
3568 
3569 	  /* Recognize the identity.  */
3570 	  if (GET_MODE (trueop0) == mode)
3571 	    {
3572 	      bool maybe_ident = true;
3573 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3574 		{
3575 		  rtx j = XVECEXP (trueop1, 0, i);
3576 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3577 		    {
3578 		      maybe_ident = false;
3579 		      break;
3580 		    }
3581 		}
3582 	      if (maybe_ident)
3583 		return trueop0;
3584 	    }
3585 
3586 	  /* If we build {a,b} then permute it, build the result directly.  */
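	  /* E.g. selecting (parallel [3 0]) out of
	     (vec_concat (vec_concat A B) (vec_concat C D)) yields
	     (vec_concat D A) directly.  */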
3587 	  if (XVECLEN (trueop1, 0) == 2
3588 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3589 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3590 	      && GET_CODE (trueop0) == VEC_CONCAT
3591 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3592 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3593 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3594 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3595 	    {
3596 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3597 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3598 	      rtx subop0, subop1;
3599 
3600 	      gcc_assert (i0 < 4 && i1 < 4);
3601 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3602 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3603 
3604 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3605 	    }
3606 
3607 	  if (XVECLEN (trueop1, 0) == 2
3608 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3609 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3610 	      && GET_CODE (trueop0) == VEC_CONCAT
3611 	      && GET_MODE (trueop0) == mode)
3612 	    {
3613 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3614 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3615 	      rtx subop0, subop1;
3616 
3617 	      gcc_assert (i0 < 2 && i1 < 2);
3618 	      subop0 = XEXP (trueop0, i0);
3619 	      subop1 = XEXP (trueop0, i1);
3620 
3621 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3622 	    }
3623 	}
3624 
3625       if (XVECLEN (trueop1, 0) == 1
3626 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3627 	  && GET_CODE (trueop0) == VEC_CONCAT)
3628 	{
3629 	  rtx vec = trueop0;
3630 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3631 
3632 	  /* Try to find the element in the VEC_CONCAT.  */
3633 	  while (GET_MODE (vec) != mode
3634 		 && GET_CODE (vec) == VEC_CONCAT)
3635 	    {
3636 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3637 	      if (offset < vec_size)
3638 		vec = XEXP (vec, 0);
3639 	      else
3640 		{
3641 		  offset -= vec_size;
3642 		  vec = XEXP (vec, 1);
3643 		}
3644 	      vec = avoid_constant_pool_reference (vec);
3645 	    }
3646 
3647 	  if (GET_MODE (vec) == mode)
3648 	    return vec;
3649 	}
3650 
3651       /* If we select elements in a vec_merge that all come from the same
3652 	 operand, select from that operand directly.  */
3653       if (GET_CODE (op0) == VEC_MERGE)
3654 	{
3655 	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3656 	  if (CONST_INT_P (trueop02))
3657 	    {
3658 	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3659 	      bool all_operand0 = true;
3660 	      bool all_operand1 = true;
3661 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3662 		{
3663 		  rtx j = XVECEXP (trueop1, 0, i);
3664 		  if (sel & (1 << UINTVAL (j)))
3665 		    all_operand1 = false;
3666 		  else
3667 		    all_operand0 = false;
3668 		}
3669 	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3670 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3671 	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3672 		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3673 	    }
3674 	}
3675 
3676       return 0;
3677     case VEC_CONCAT:
3678       {
3679 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3680 				      ? GET_MODE (trueop0)
3681 				      : GET_MODE_INNER (mode));
3682 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3683 				      ? GET_MODE (trueop1)
3684 				      : GET_MODE_INNER (mode));
3685 
3686 	gcc_assert (VECTOR_MODE_P (mode));
3687 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3688 		    == GET_MODE_SIZE (mode));
3689 
3690 	if (VECTOR_MODE_P (op0_mode))
3691 	  gcc_assert (GET_MODE_INNER (mode)
3692 		      == GET_MODE_INNER (op0_mode));
3693 	else
3694 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3695 
3696 	if (VECTOR_MODE_P (op1_mode))
3697 	  gcc_assert (GET_MODE_INNER (mode)
3698 		      == GET_MODE_INNER (op1_mode));
3699 	else
3700 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3701 
3702 	if ((GET_CODE (trueop0) == CONST_VECTOR
3703 	     || CONST_SCALAR_INT_P (trueop0)
3704 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3705 	    && (GET_CODE (trueop1) == CONST_VECTOR
3706 		|| CONST_SCALAR_INT_P (trueop1)
3707 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3708 	  {
3709 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3710 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3711 	    rtvec v = rtvec_alloc (n_elts);
3712 	    unsigned int i;
3713 	    unsigned in_n_elts = 1;
3714 
3715 	    if (VECTOR_MODE_P (op0_mode))
3716 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3717 	    for (i = 0; i < n_elts; i++)
3718 	      {
3719 		if (i < in_n_elts)
3720 		  {
3721 		    if (!VECTOR_MODE_P (op0_mode))
3722 		      RTVEC_ELT (v, i) = trueop0;
3723 		    else
3724 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3725 		  }
3726 		else
3727 		  {
3728 		    if (!VECTOR_MODE_P (op1_mode))
3729 		      RTVEC_ELT (v, i) = trueop1;
3730 		    else
3731 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3732 							   i - in_n_elts);
3733 		  }
3734 	      }
3735 
3736 	    return gen_rtx_CONST_VECTOR (mode, v);
3737 	  }
3738 
3739 	/* Try to merge two VEC_SELECTs from the same vector into a single one.
3740 	   Restrict the transformation to avoid generating a VEC_SELECT with a
3741 	   mode unrelated to its operand.  */
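	/* For example, (vec_concat (vec_select X (parallel [0 1]))
				    (vec_select X (parallel [2 3])))
	   can become (vec_select X (parallel [0 1 2 3])) when X already
	   has the mode of the result.  */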
3742 	if (GET_CODE (trueop0) == VEC_SELECT
3743 	    && GET_CODE (trueop1) == VEC_SELECT
3744 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3745 	    && GET_MODE (XEXP (trueop0, 0)) == mode)
3746 	  {
3747 	    rtx par0 = XEXP (trueop0, 1);
3748 	    rtx par1 = XEXP (trueop1, 1);
3749 	    int len0 = XVECLEN (par0, 0);
3750 	    int len1 = XVECLEN (par1, 0);
3751 	    rtvec vec = rtvec_alloc (len0 + len1);
3752 	    for (int i = 0; i < len0; i++)
3753 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3754 	    for (int i = 0; i < len1; i++)
3755 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3756 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3757 					gen_rtx_PARALLEL (VOIDmode, vec));
3758 	  }
3759       }
3760       return 0;
3761 
3762     default:
3763       gcc_unreachable ();
3764     }
3765 
3766   return 0;
3767 }
3768 
3769 rtx
3770 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3771 				 rtx op0, rtx op1)
3772 {
3773   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3774   HOST_WIDE_INT val;
3775   unsigned int width = GET_MODE_PRECISION (mode);
3776 
3777   if (VECTOR_MODE_P (mode)
3778       && code != VEC_CONCAT
3779       && GET_CODE (op0) == CONST_VECTOR
3780       && GET_CODE (op1) == CONST_VECTOR)
3781     {
3782       unsigned n_elts = GET_MODE_NUNITS (mode);
3783       enum machine_mode op0mode = GET_MODE (op0);
3784       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3785       enum machine_mode op1mode = GET_MODE (op1);
3786       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3787       rtvec v = rtvec_alloc (n_elts);
3788       unsigned int i;
3789 
3790       gcc_assert (op0_n_elts == n_elts);
3791       gcc_assert (op1_n_elts == n_elts);
3792       for (i = 0; i < n_elts; i++)
3793 	{
3794 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3795 					     CONST_VECTOR_ELT (op0, i),
3796 					     CONST_VECTOR_ELT (op1, i));
3797 	  if (!x)
3798 	    return 0;
3799 	  RTVEC_ELT (v, i) = x;
3800 	}
3801 
3802       return gen_rtx_CONST_VECTOR (mode, v);
3803     }
3804 
3805   if (VECTOR_MODE_P (mode)
3806       && code == VEC_CONCAT
3807       && (CONST_SCALAR_INT_P (op0)
3808 	  || GET_CODE (op0) == CONST_FIXED
3809 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3810       && (CONST_SCALAR_INT_P (op1)
3811 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3812 	  || GET_CODE (op1) == CONST_FIXED))
3813     {
3814       unsigned n_elts = GET_MODE_NUNITS (mode);
3815       rtvec v = rtvec_alloc (n_elts);
3816 
3817       gcc_assert (n_elts >= 2);
3818       if (n_elts == 2)
3819 	{
3820 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3821 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3822 
3823 	  RTVEC_ELT (v, 0) = op0;
3824 	  RTVEC_ELT (v, 1) = op1;
3825 	}
3826       else
3827 	{
3828 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3829 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3830 	  unsigned i;
3831 
3832 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3833 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3834 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3835 
3836 	  for (i = 0; i < op0_n_elts; ++i)
3837 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3838 	  for (i = 0; i < op1_n_elts; ++i)
3839 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3840 	}
3841 
3842       return gen_rtx_CONST_VECTOR (mode, v);
3843     }
3844 
3845   if (SCALAR_FLOAT_MODE_P (mode)
3846       && CONST_DOUBLE_AS_FLOAT_P (op0)
3847       && CONST_DOUBLE_AS_FLOAT_P (op1)
3848       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3849     {
3850       if (code == AND
3851 	  || code == IOR
3852 	  || code == XOR)
3853 	{
3854 	  long tmp0[4];
3855 	  long tmp1[4];
3856 	  REAL_VALUE_TYPE r;
3857 	  int i;
3858 
3859 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3860 			  GET_MODE (op0));
3861 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3862 			  GET_MODE (op1));
3863 	  for (i = 0; i < 4; i++)
3864 	    {
3865 	      switch (code)
3866 	      {
3867 	      case AND:
3868 		tmp0[i] &= tmp1[i];
3869 		break;
3870 	      case IOR:
3871 		tmp0[i] |= tmp1[i];
3872 		break;
3873 	      case XOR:
3874 		tmp0[i] ^= tmp1[i];
3875 		break;
3876 	      default:
3877 		gcc_unreachable ();
3878 	      }
3879 	    }
3880 	   real_from_target (&r, tmp0, mode);
3881 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3882 	}
3883       else
3884 	{
3885 	  REAL_VALUE_TYPE f0, f1, value, result;
3886 	  bool inexact;
3887 
3888 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3889 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3890 	  real_convert (&f0, mode, &f0);
3891 	  real_convert (&f1, mode, &f1);
3892 
3893 	  if (HONOR_SNANS (mode)
3894 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3895 	    return 0;
3896 
3897 	  if (code == DIV
3898 	      && REAL_VALUES_EQUAL (f1, dconst0)
3899 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3900 	    return 0;
3901 
3902 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3903 	      && flag_trapping_math
3904 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3905 	    {
3906 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3907 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3908 
3909 	      switch (code)
3910 		{
3911 		case PLUS:
3912 		  /* Inf + -Inf = NaN plus exception.  */
3913 		  if (s0 != s1)
3914 		    return 0;
3915 		  break;
3916 		case MINUS:
3917 		  /* Inf - Inf = NaN plus exception.  */
3918 		  if (s0 == s1)
3919 		    return 0;
3920 		  break;
3921 		case DIV:
3922 		  /* Inf / Inf = NaN plus exception.  */
3923 		  return 0;
3924 		default:
3925 		  break;
3926 		}
3927 	    }
3928 
3929 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3930 	      && flag_trapping_math
3931 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3932 		  || (REAL_VALUE_ISINF (f1)
3933 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3934 	    /* Inf * 0 = NaN plus exception.  */
3935 	    return 0;
3936 
3937 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3938 				     &f0, &f1);
3939 	  real_convert (&result, mode, &value);
3940 
3941 	  /* Don't constant fold this floating point operation if
3942 	     the result has overflowed and flag_trapping_math is set.  */
3943 
3944 	  if (flag_trapping_math
3945 	      && MODE_HAS_INFINITIES (mode)
3946 	      && REAL_VALUE_ISINF (result)
3947 	      && !REAL_VALUE_ISINF (f0)
3948 	      && !REAL_VALUE_ISINF (f1))
3949 	    /* Overflow plus exception.  */
3950 	    return 0;
3951 
3952 	  /* Don't constant fold this floating point operation if the
3953 	     result may depend upon the run-time rounding mode and
3954 	     flag_rounding_math is set, or if GCC's software emulation
3955 	     is unable to accurately represent the result.  */
3956 
3957 	  if ((flag_rounding_math
3958 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3959 	      && (inexact || !real_identical (&result, &value)))
3960 	    return NULL_RTX;
3961 
3962 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3963 	}
3964     }
3965 
3966   /* We can fold some multi-word operations.  */
3967   if (GET_MODE_CLASS (mode) == MODE_INT
3968       && width == HOST_BITS_PER_DOUBLE_INT
3969       && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3970       && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3971     {
3972       double_int o0, o1, res, tmp;
3973       bool overflow;
3974 
3975       o0 = rtx_to_double_int (op0);
3976       o1 = rtx_to_double_int (op1);
3977 
3978       switch (code)
3979 	{
3980 	case MINUS:
3981 	  /* A - B == A + (-B).  */
3982 	  o1 = -o1;
3983 
3984 	  /* Fall through....  */
3985 
3986 	case PLUS:
3987 	  res = o0 + o1;
3988 	  break;
3989 
3990 	case MULT:
3991 	  res = o0 * o1;
3992 	  break;
3993 
3994 	case DIV:
3995           res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3996 					 &tmp, &overflow);
3997 	  if (overflow)
3998 	    return 0;
3999 	  break;
4000 
4001 	case MOD:
4002           tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
4003 					 &res, &overflow);
4004 	  if (overflow)
4005 	    return 0;
4006 	  break;
4007 
4008 	case UDIV:
4009           res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4010 					 &tmp, &overflow);
4011 	  if (overflow)
4012 	    return 0;
4013 	  break;
4014 
4015 	case UMOD:
4016           tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
4017 					 &res, &overflow);
4018 	  if (overflow)
4019 	    return 0;
4020 	  break;
4021 
4022 	case AND:
4023 	  res = o0 & o1;
4024 	  break;
4025 
4026 	case IOR:
4027 	  res = o0 | o1;
4028 	  break;
4029 
4030 	case XOR:
4031 	  res = o0 ^ o1;
4032 	  break;
4033 
4034 	case SMIN:
4035 	  res = o0.smin (o1);
4036 	  break;
4037 
4038 	case SMAX:
4039 	  res = o0.smax (o1);
4040 	  break;
4041 
4042 	case UMIN:
4043 	  res = o0.umin (o1);
4044 	  break;
4045 
4046 	case UMAX:
4047 	  res = o0.umax (o1);
4048 	  break;
4049 
4050 	case LSHIFTRT:   case ASHIFTRT:
4051 	case ASHIFT:
4052 	case ROTATE:     case ROTATERT:
4053 	  {
4054 	    unsigned HOST_WIDE_INT cnt;
4055 
4056 	    if (SHIFT_COUNT_TRUNCATED)
4057 	      {
4058 		o1.high = 0;
4059 		o1.low &= GET_MODE_PRECISION (mode) - 1;
4060 	      }
4061 
4062 	    if (!o1.fits_uhwi ()
4063 	        || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
4064 	      return 0;
4065 
4066 	    cnt = o1.to_uhwi ();
4067 	    unsigned short prec = GET_MODE_PRECISION (mode);
4068 
4069 	    if (code == LSHIFTRT || code == ASHIFTRT)
4070 	      res = o0.rshift (cnt, prec, code == ASHIFTRT);
4071 	    else if (code == ASHIFT)
4072 	      res = o0.alshift (cnt, prec);
4073 	    else if (code == ROTATE)
4074 	      res = o0.lrotate (cnt, prec);
4075 	    else /* code == ROTATERT */
4076 	      res = o0.rrotate (cnt, prec);
4077 	  }
4078 	  break;
4079 
4080 	default:
4081 	  return 0;
4082 	}
4083 
4084       return immed_double_int_const (res, mode);
4085     }
4086 
4087   if (CONST_INT_P (op0) && CONST_INT_P (op1)
4088       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
4089     {
4090       /* Get the integer argument values in two forms:
4091          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
4092 
4093       arg0 = INTVAL (op0);
4094       arg1 = INTVAL (op1);
4095 
4096       if (width < HOST_BITS_PER_WIDE_INT)
4097         {
4098           arg0 &= GET_MODE_MASK (mode);
4099           arg1 &= GET_MODE_MASK (mode);
4100 
4101           arg0s = arg0;
4102 	  if (val_signbit_known_set_p (mode, arg0s))
4103 	    arg0s |= ~GET_MODE_MASK (mode);
4104 
4105           arg1s = arg1;
4106 	  if (val_signbit_known_set_p (mode, arg1s))
4107 	    arg1s |= ~GET_MODE_MASK (mode);
4108 	}
4109       else
4110 	{
4111 	  arg0s = arg0;
4112 	  arg1s = arg1;
4113 	}
4114 
4115       /* Compute the value of the arithmetic.  */
4116 
4117       switch (code)
4118 	{
4119 	case PLUS:
4120 	  val = (unsigned HOST_WIDE_INT) arg0s + arg1s;
4121 	  break;
4122 
4123 	case MINUS:
4124 	  val = (unsigned HOST_WIDE_INT) arg0s - arg1s;
4125 	  break;
4126 
4127 	case MULT:
4128 	  val = (unsigned HOST_WIDE_INT) arg0s * arg1s;
4129 	  break;
4130 
4131 	case DIV:
4132 	  if (arg1s == 0
4133 	      || ((unsigned HOST_WIDE_INT) arg0s
4134 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4135 		  && arg1s == -1))
4136 	    return 0;
4137 	  val = arg0s / arg1s;
4138 	  break;
4139 
4140 	case MOD:
4141 	  if (arg1s == 0
4142 	      || ((unsigned HOST_WIDE_INT) arg0s
4143 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4144 		  && arg1s == -1))
4145 	    return 0;
4146 	  val = arg0s % arg1s;
4147 	  break;
4148 
4149 	case UDIV:
4150 	  if (arg1 == 0
4151 	      || ((unsigned HOST_WIDE_INT) arg0s
4152 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4153 		  && arg1s == -1))
4154 	    return 0;
4155 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4156 	  break;
4157 
4158 	case UMOD:
4159 	  if (arg1 == 0
4160 	      || ((unsigned HOST_WIDE_INT) arg0s
4161 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4162 		  && arg1s == -1))
4163 	    return 0;
4164 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4165 	  break;
4166 
4167 	case AND:
4168 	  val = arg0 & arg1;
4169 	  break;
4170 
4171 	case IOR:
4172 	  val = arg0 | arg1;
4173 	  break;
4174 
4175 	case XOR:
4176 	  val = arg0 ^ arg1;
4177 	  break;
4178 
4179 	case LSHIFTRT:
4180 	case ASHIFT:
4181 	case ASHIFTRT:
4182 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4183 	     the value is in range.  We can't return any old value for
4184 	     out-of-range arguments because either the middle-end (via
4185 	     shift_truncation_mask) or the back-end might be relying on
4186 	     target-specific knowledge.  Nor can we rely on
4187 	     shift_truncation_mask, since the shift might not be part of an
4188 	     ashlM3, lshrM3 or ashrM3 instruction.  */
4189 	  if (SHIFT_COUNT_TRUNCATED)
4190 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4191 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4192 	    return 0;
4193 
4194 	  val = (code == ASHIFT
4195 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4196 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4197 
4198 	  /* Sign-extend the result for arithmetic right shifts.  */
4199 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4200 	    val |= HOST_WIDE_INT_M1U << (width - arg1);
4201 	  break;
4202 
4203 	case ROTATERT:
4204 	  if (arg1 < 0)
4205 	    return 0;
4206 
4207 	  arg1 %= width;
4208 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4209 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4210 	  break;
4211 
4212 	case ROTATE:
4213 	  if (arg1 < 0)
4214 	    return 0;
4215 
4216 	  arg1 %= width;
4217 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4218 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4219 	  break;
4220 
4221 	case COMPARE:
4222 	  /* Do nothing here.  */
4223 	  return 0;
4224 
4225 	case SMIN:
4226 	  val = arg0s <= arg1s ? arg0s : arg1s;
4227 	  break;
4228 
4229 	case UMIN:
4230 	  val = ((unsigned HOST_WIDE_INT) arg0
4231 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4232 	  break;
4233 
4234 	case SMAX:
4235 	  val = arg0s > arg1s ? arg0s : arg1s;
4236 	  break;
4237 
4238 	case UMAX:
4239 	  val = ((unsigned HOST_WIDE_INT) arg0
4240 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4241 	  break;
4242 
4243 	case SS_PLUS:
4244 	case US_PLUS:
4245 	case SS_MINUS:
4246 	case US_MINUS:
4247 	case SS_MULT:
4248 	case US_MULT:
4249 	case SS_DIV:
4250 	case US_DIV:
4251 	case SS_ASHIFT:
4252 	case US_ASHIFT:
4253 	  /* ??? There are simplifications that can be done.  */
4254 	  return 0;
4255 
4256 	default:
4257 	  gcc_unreachable ();
4258 	}
4259 
4260       return gen_int_mode (val, mode);
4261     }
4262 
4263   return NULL_RTX;
4264 }
4265 
4266 
4267 
4268 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4269    PLUS or MINUS.
4270 
4271    Rather than test for specific cases, we do this by a brute-force method
4272    and do all possible simplifications until no more changes occur.  Then
4273    we rebuild the operation.  */
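/* For instance, simplifying (minus (plus A B) B) proceeds by flattening
   the expression into the operand list {A, +B, -B}; the B terms cancel
   in the pairwise combination loop and the result is rebuilt as just A.  */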
4274 
4275 struct simplify_plus_minus_op_data
4276 {
4277   rtx op;
4278   short neg;
4279 };
4280 
4281 static bool
4282 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4283 {
4284   int result;
4285 
4286   result = (commutative_operand_precedence (y)
4287 	    - commutative_operand_precedence (x));
4288   if (result)
4289     return result > 0;
4290 
4291   /* Group together equal REGs to do more simplification.  */
4292   if (REG_P (x) && REG_P (y))
4293     return REGNO (x) > REGNO (y);
4294   else
4295     return false;
4296 }
4297 
4298 static rtx
4299 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4300 		     rtx op1)
4301 {
4302   struct simplify_plus_minus_op_data ops[8];
4303   rtx result, tem;
4304   int n_ops = 2, input_ops = 2;
4305   int changed, n_constants = 0, canonicalized = 0;
4306   int i, j;
4307 
4308   memset (ops, 0, sizeof ops);
4309 
4310   /* Set up the two operands and then expand them until nothing has been
4311      changed.  If we run out of room in our array, give up; this should
4312      almost never happen.  */
4313 
4314   ops[0].op = op0;
4315   ops[0].neg = 0;
4316   ops[1].op = op1;
4317   ops[1].neg = (code == MINUS);
4318 
4319   do
4320     {
4321       changed = 0;
4322 
4323       for (i = 0; i < n_ops; i++)
4324 	{
4325 	  rtx this_op = ops[i].op;
4326 	  int this_neg = ops[i].neg;
4327 	  enum rtx_code this_code = GET_CODE (this_op);
4328 
4329 	  switch (this_code)
4330 	    {
4331 	    case PLUS:
4332 	    case MINUS:
4333 	      if (n_ops == 7)
4334 		return NULL_RTX;
4335 
4336 	      ops[n_ops].op = XEXP (this_op, 1);
4337 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4338 	      n_ops++;
4339 
4340 	      ops[i].op = XEXP (this_op, 0);
4341 	      input_ops++;
4342 	      changed = 1;
4343 	      canonicalized |= this_neg;
4344 	      break;
4345 
4346 	    case NEG:
4347 	      ops[i].op = XEXP (this_op, 0);
4348 	      ops[i].neg = ! this_neg;
4349 	      changed = 1;
4350 	      canonicalized = 1;
4351 	      break;
4352 
4353 	    case CONST:
4354 	      if (n_ops < 7
4355 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4356 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4357 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4358 		{
4359 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4360 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4361 		  ops[n_ops].neg = this_neg;
4362 		  n_ops++;
4363 		  changed = 1;
4364 	          canonicalized = 1;
4365 		}
4366 	      break;
4367 
4368 	    case NOT:
4369 	      /* ~a -> (-a - 1) */
4370 	      if (n_ops != 7)
4371 		{
4372 		  ops[n_ops].op = CONSTM1_RTX (mode);
4373 		  ops[n_ops++].neg = this_neg;
4374 		  ops[i].op = XEXP (this_op, 0);
4375 		  ops[i].neg = !this_neg;
4376 		  changed = 1;
4377 	          canonicalized = 1;
4378 		}
4379 	      break;
4380 
4381 	    case CONST_INT:
4382 	      n_constants++;
4383 	      if (this_neg)
4384 		{
4385 		  ops[i].op = neg_const_int (mode, this_op);
4386 		  ops[i].neg = 0;
4387 		  changed = 1;
4388 	          canonicalized = 1;
4389 		}
4390 	      break;
4391 
4392 	    default:
4393 	      break;
4394 	    }
4395 	}
4396     }
4397   while (changed);
4398 
4399   if (n_constants > 1)
4400     canonicalized = 1;
4401 
4402   gcc_assert (n_ops >= 2);
4403 
4404   /* If we only have two operands, we can avoid the loops.  */
4405   if (n_ops == 2)
4406     {
4407       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4408       rtx lhs, rhs;
4409 
4410       /* Get the two operands.  Be careful with the order, especially for
4411 	 the cases where code == MINUS.  */
4412       if (ops[0].neg && ops[1].neg)
4413 	{
4414 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4415 	  rhs = ops[1].op;
4416 	}
4417       else if (ops[0].neg)
4418 	{
4419 	  lhs = ops[1].op;
4420 	  rhs = ops[0].op;
4421 	}
4422       else
4423 	{
4424 	  lhs = ops[0].op;
4425 	  rhs = ops[1].op;
4426 	}
4427 
4428       return simplify_const_binary_operation (code, mode, lhs, rhs);
4429     }
4430 
4431   /* Now simplify each pair of operands until nothing changes.  */
4432   do
4433     {
4434       /* Insertion sort is good enough for an eight-element array.  */
4435       for (i = 1; i < n_ops; i++)
4436         {
4437           struct simplify_plus_minus_op_data save;
4438           j = i - 1;
4439           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4440 	    continue;
4441 
4442           canonicalized = 1;
4443           save = ops[i];
4444           do
4445 	    ops[j + 1] = ops[j];
4446           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4447           ops[j + 1] = save;
4448         }
4449 
4450       changed = 0;
4451       for (i = n_ops - 1; i > 0; i--)
4452 	for (j = i - 1; j >= 0; j--)
4453 	  {
4454 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4455 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4456 
4457 	    if (lhs != 0 && rhs != 0)
4458 	      {
4459 		enum rtx_code ncode = PLUS;
4460 
4461 		if (lneg != rneg)
4462 		  {
4463 		    ncode = MINUS;
4464 		    if (lneg)
4465 		      tem = lhs, lhs = rhs, rhs = tem;
4466 		  }
4467 		else if (swap_commutative_operands_p (lhs, rhs))
4468 		  tem = lhs, lhs = rhs, rhs = tem;
4469 
4470 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4471 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4472 		  {
4473 		    rtx tem_lhs, tem_rhs;
4474 
4475 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4476 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4477 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4478 
4479 		    if (tem && !CONSTANT_P (tem))
4480 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4481 		  }
4482 		else
4483 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4484 
4485 		/* Reject "simplifications" that just wrap the two
4486 		   arguments in a CONST.  Failure to do so can result
4487 		   in infinite recursion with simplify_binary_operation
4488 		   when it calls us to simplify CONST operations.  */
4489 		if (tem
4490 		    && ! (GET_CODE (tem) == CONST
4491 			  && GET_CODE (XEXP (tem, 0)) == ncode
4492 			  && XEXP (XEXP (tem, 0), 0) == lhs
4493 			  && XEXP (XEXP (tem, 0), 1) == rhs))
4494 		  {
4495 		    lneg &= rneg;
4496 		    if (GET_CODE (tem) == NEG)
4497 		      tem = XEXP (tem, 0), lneg = !lneg;
4498 		    if (CONST_INT_P (tem) && lneg)
4499 		      tem = neg_const_int (mode, tem), lneg = 0;
4500 
4501 		    ops[i].op = tem;
4502 		    ops[i].neg = lneg;
4503 		    ops[j].op = NULL_RTX;
4504 		    changed = 1;
4505 		    canonicalized = 1;
4506 		  }
4507 	      }
4508 	  }
4509 
4510       /* If nothing changed, fail.  */
4511       if (!canonicalized)
4512         return NULL_RTX;
4513 
4514       /* Pack all the operands to the lower-numbered entries.  */
4515       for (i = 0, j = 0; j < n_ops; j++)
4516         if (ops[j].op)
4517           {
4518 	    ops[i] = ops[j];
4519 	    i++;
4520           }
4521       n_ops = i;
4522     }
4523   while (changed);
4524 
4525   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4526   if (n_ops == 2
4527       && CONST_INT_P (ops[1].op)
4528       && CONSTANT_P (ops[0].op)
4529       && ops[0].neg)
4530     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4531 
4532   /* We suppressed creation of trivial CONST expressions in the
4533      combination loop to avoid recursion.  Create one manually now.
4534      The combination loop should have ensured that there is exactly
4535      one CONST_INT, and the sort will have ensured that it is last
4536      in the array and that any other constant will be next-to-last.  */
4537 
4538   if (n_ops > 1
4539       && CONST_INT_P (ops[n_ops - 1].op)
4540       && CONSTANT_P (ops[n_ops - 2].op))
4541     {
4542       rtx value = ops[n_ops - 1].op;
4543       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4544 	value = neg_const_int (mode, value);
4545       ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4546 					 INTVAL (value));
4547       n_ops--;
4548     }
4549 
4550   /* Put a non-negated operand first, if possible.  */
4551 
4552   for (i = 0; i < n_ops && ops[i].neg; i++)
4553     continue;
4554   if (i == n_ops)
4555     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4556   else if (i != 0)
4557     {
4558       tem = ops[0].op;
4559       ops[0] = ops[i];
4560       ops[i].op = tem;
4561       ops[i].neg = 1;
4562     }
4563 
4564   /* Now make the result by performing the requested operations.  */
4565   result = ops[0].op;
4566   for (i = 1; i < n_ops; i++)
4567     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4568 			     mode, result, ops[i].op);
4569 
4570   return result;
4571 }
4572 
4573 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4574 static bool
4575 plus_minus_operand_p (const_rtx x)
4576 {
4577   return GET_CODE (x) == PLUS
4578          || GET_CODE (x) == MINUS
4579 	 || (GET_CODE (x) == CONST
4580 	     && GET_CODE (XEXP (x, 0)) == PLUS
4581 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4582 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4583 }
4584 
4585 /* Like simplify_binary_operation except used for relational operators.
4586    MODE is the mode of the result.  If MODE is VOIDmode, the two operands
4587    must not both be VOIDmode as well.
4588 
4589    CMP_MODE specifies the mode in which the comparison is done, so it is
4590    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4591    the operands or, if both are VOIDmode, the operands are compared in
4592    "infinite precision".  */
4593 rtx
4594 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4595 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
4596 {
4597   rtx tem, trueop0, trueop1;
4598 
4599   if (cmp_mode == VOIDmode)
4600     cmp_mode = GET_MODE (op0);
4601   if (cmp_mode == VOIDmode)
4602     cmp_mode = GET_MODE (op1);
4603 
4604   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4605   if (tem)
4606     {
4607       if (SCALAR_FLOAT_MODE_P (mode))
4608 	{
4609           if (tem == const0_rtx)
4610             return CONST0_RTX (mode);
4611 #ifdef FLOAT_STORE_FLAG_VALUE
4612 	  {
4613 	    REAL_VALUE_TYPE val;
4614 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4615 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4616 	  }
4617 #else
4618 	  return NULL_RTX;
4619 #endif
4620 	}
4621       if (VECTOR_MODE_P (mode))
4622 	{
4623 	  if (tem == const0_rtx)
4624 	    return CONST0_RTX (mode);
4625 #ifdef VECTOR_STORE_FLAG_VALUE
4626 	  {
4627 	    int i, units;
4628 	    rtvec v;
4629 
4630 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4631 	    if (val == NULL_RTX)
4632 	      return NULL_RTX;
4633 	    if (val == const1_rtx)
4634 	      return CONST1_RTX (mode);
4635 
4636 	    units = GET_MODE_NUNITS (mode);
4637 	    v = rtvec_alloc (units);
4638 	    for (i = 0; i < units; i++)
4639 	      RTVEC_ELT (v, i) = val;
4640 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4641 	  }
4642 #else
4643 	  return NULL_RTX;
4644 #endif
4645 	}
4646 
4647       return tem;
4648     }
4649 
4650   /* For the following tests, ensure const0_rtx is op1.  */
4651   if (swap_commutative_operands_p (op0, op1)
4652       || (op0 == const0_rtx && op1 != const0_rtx))
4653     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4654 
4655   /* If op0 is a compare, extract the comparison arguments from it.  */
4656   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4657     return simplify_gen_relational (code, mode, VOIDmode,
4658 				    XEXP (op0, 0), XEXP (op0, 1));
4659 
4660   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4661       || CC0_P (op0))
4662     return NULL_RTX;
4663 
4664   trueop0 = avoid_constant_pool_reference (op0);
4665   trueop1 = avoid_constant_pool_reference (op1);
4666   return simplify_relational_operation_1 (code, mode, cmp_mode,
4667 		  			  trueop0, trueop1);
4668 }
4669 
4670 /* This part of simplify_relational_operation is only used when CMP_MODE
4671    is not in class MODE_CC (i.e. it is a real comparison).
4672 
4673    MODE is the mode of the result, while CMP_MODE specifies in which
4674    mode the comparison is done, so it is the mode of the operands.  */
4675 
4676 static rtx
4677 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4678 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4679 {
4680   enum rtx_code op0code = GET_CODE (op0);
4681 
4682   if (op1 == const0_rtx && COMPARISON_P (op0))
4683     {
4684       /* If op0 is a comparison, extract the comparison arguments
4685          from it.  */
4686       if (code == NE)
4687 	{
4688 	  if (GET_MODE (op0) == mode)
4689 	    return simplify_rtx (op0);
4690 	  else
4691 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4692 					    XEXP (op0, 0), XEXP (op0, 1));
4693 	}
4694       else if (code == EQ)
4695 	{
4696 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4697 	  if (new_code != UNKNOWN)
4698 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4699 					    XEXP (op0, 0), XEXP (op0, 1));
4700 	}
4701     }
4702 
4703   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4704      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
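  /* E.g. (ltu (plus a (const_int 4)) (const_int 4)) becomes
     (geu a (const_int -4)), i.e. a check on whether the addition
     wrapped around.  */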
4705   if ((code == LTU || code == GEU)
4706       && GET_CODE (op0) == PLUS
4707       && CONST_INT_P (XEXP (op0, 1))
4708       && (rtx_equal_p (op1, XEXP (op0, 0))
4709 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4710       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4711       && XEXP (op0, 1) != const0_rtx)
4712     {
4713       rtx new_cmp
4714 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4715       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4716 				      cmp_mode, XEXP (op0, 0), new_cmp);
4717     }
4718 
4719   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4720   if ((code == LTU || code == GEU)
4721       && GET_CODE (op0) == PLUS
4722       && rtx_equal_p (op1, XEXP (op0, 1))
4723       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4724       && !rtx_equal_p (op1, XEXP (op0, 0)))
4725     return simplify_gen_relational (code, mode, cmp_mode, op0,
4726 				    copy_rtx (XEXP (op0, 0)));
4727 
4728   if (op1 == const0_rtx)
4729     {
4730       /* Canonicalize (GTU x 0) as (NE x 0).  */
4731       if (code == GTU)
4732         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4733       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4734       if (code == LEU)
4735         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4736     }
4737   else if (op1 == const1_rtx)
4738     {
4739       switch (code)
4740         {
4741         case GE:
4742 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4743 	  return simplify_gen_relational (GT, mode, cmp_mode,
4744 					  op0, const0_rtx);
4745 	case GEU:
4746 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4747 	  return simplify_gen_relational (NE, mode, cmp_mode,
4748 					  op0, const0_rtx);
4749 	case LT:
4750 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4751 	  return simplify_gen_relational (LE, mode, cmp_mode,
4752 					  op0, const0_rtx);
4753 	case LTU:
4754 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4755 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4756 					  op0, const0_rtx);
4757 	default:
4758 	  break;
4759 	}
4760     }
4761   else if (op1 == constm1_rtx)
4762     {
4763       /* Canonicalize (LE x -1) as (LT x 0).  */
4764       if (code == LE)
4765         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4766       /* Canonicalize (GT x -1) as (GE x 0).  */
4767       if (code == GT)
4768         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4769     }
4770 
4771   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
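  /* E.g. (eq (plus x (const_int 3)) (const_int 10)) simplifies to
     (eq x (const_int 7)).  */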
4772   if ((code == EQ || code == NE)
4773       && (op0code == PLUS || op0code == MINUS)
4774       && CONSTANT_P (op1)
4775       && CONSTANT_P (XEXP (op0, 1))
4776       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4777     {
4778       rtx x = XEXP (op0, 0);
4779       rtx c = XEXP (op0, 1);
4780       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4781       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4782 
4783       /* Detect an infinitely recursive condition, where we oscillate at this
4784 	 simplification case between:
4785 	    A + B == C  <--->  C - B == A,
4786 	 where A, B, and C are all constants with non-simplifiable expressions,
4787 	 usually SYMBOL_REFs.  */
4788       if (GET_CODE (tem) == invcode
4789 	  && CONSTANT_P (x)
4790 	  && rtx_equal_p (c, XEXP (tem, 1)))
4791 	return NULL_RTX;
4792 
4793       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4794     }
4795 
4796   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4797      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4798   if (code == NE
4799       && op1 == const0_rtx
4800       && GET_MODE_CLASS (mode) == MODE_INT
4801       && cmp_mode != VOIDmode
4802       /* ??? Work-around BImode bugs in the ia64 backend.  */
4803       && mode != BImode
4804       && cmp_mode != BImode
4805       && nonzero_bits (op0, cmp_mode) == 1
4806       && STORE_FLAG_VALUE == 1)
4807     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4808 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4809 	   : lowpart_subreg (mode, op0, cmp_mode);
4810 
4811   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4812   if ((code == EQ || code == NE)
4813       && op1 == const0_rtx
4814       && op0code == XOR)
4815     return simplify_gen_relational (code, mode, cmp_mode,
4816 				    XEXP (op0, 0), XEXP (op0, 1));
4817 
4818   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4819   if ((code == EQ || code == NE)
4820       && op0code == XOR
4821       && rtx_equal_p (XEXP (op0, 0), op1)
4822       && !side_effects_p (XEXP (op0, 0)))
4823     return simplify_gen_relational (code, mode, cmp_mode,
4824 				    XEXP (op0, 1), const0_rtx);
4825 
4826   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4827   if ((code == EQ || code == NE)
4828       && op0code == XOR
4829       && rtx_equal_p (XEXP (op0, 1), op1)
4830       && !side_effects_p (XEXP (op0, 1)))
4831     return simplify_gen_relational (code, mode, cmp_mode,
4832 				    XEXP (op0, 0), const0_rtx);
4833 
4834   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4835   if ((code == EQ || code == NE)
4836       && op0code == XOR
4837       && CONST_SCALAR_INT_P (op1)
4838       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4839     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4840 				    simplify_gen_binary (XOR, cmp_mode,
4841 							 XEXP (op0, 1), op1));
4842 
4843   /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
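  /* For a 32-bit comparison, e.g., (eq (bswap x) (const_int 0x12345678))
     becomes (eq x (const_int 0x78563412)).  */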
4844   if ((code == EQ || code == NE)
4845       && GET_CODE (op0) == BSWAP
4846       && CONST_SCALAR_INT_P (op1))
4847     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4848 				    simplify_gen_unary (BSWAP, cmp_mode,
4849 							op1, cmp_mode));
4850 
4851   /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
4852   if ((code == EQ || code == NE)
4853       && GET_CODE (op0) == BSWAP
4854       && GET_CODE (op1) == BSWAP)
4855     return simplify_gen_relational (code, mode, cmp_mode,
4856 				    XEXP (op0, 0), XEXP (op1, 0));
4857 
4858   if (op0code == POPCOUNT && op1 == const0_rtx)
4859     switch (code)
4860       {
4861       case EQ:
4862       case LE:
4863       case LEU:
4864 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4865 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4866 					XEXP (op0, 0), const0_rtx);
4867 
4868       case NE:
4869       case GT:
4870       case GTU:
4871 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4872 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4873 					XEXP (op0, 0), const0_rtx);
4874 
4875       default:
4876 	break;
4877       }
4878 
4879   return NULL_RTX;
4880 }
4881 
4882 enum
4883 {
4884   CMP_EQ = 1,
4885   CMP_LT = 2,
4886   CMP_GT = 4,
4887   CMP_LTU = 8,
4888   CMP_GTU = 16
4889 };
4890 
4891 
4892 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4893    KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4894    For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4895    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4896    For floating-point comparisons, assume that the operands were ordered.  */
4897 
4898 static rtx
4899 comparison_result (enum rtx_code code, int known_results)
4900 {
4901   switch (code)
4902     {
4903     case EQ:
4904     case UNEQ:
4905       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4906     case NE:
4907     case LTGT:
4908       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4909 
4910     case LT:
4911     case UNLT:
4912       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4913     case GE:
4914     case UNGE:
4915       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4916 
4917     case GT:
4918     case UNGT:
4919       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4920     case LE:
4921     case UNLE:
4922       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4923 
4924     case LTU:
4925       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4926     case GEU:
4927       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4928 
4929     case GTU:
4930       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4931     case LEU:
4932       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4933 
4934     case ORDERED:
4935       return const_true_rtx;
4936     case UNORDERED:
4937       return const0_rtx;
4938     default:
4939       gcc_unreachable ();
4940     }
4941 }
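/* Added commentary (not in the original source): as an example of the
   encoding above, comparison_result (GEU, CMP_GT | CMP_LTU) returns
   const0_rtx, because the CMP_LTU bit says the first operand is known to
   be below the second when both are viewed as unsigned.  */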
4942 
4943 /* Check if the given comparison (done in the given MODE) is actually a
4944    tautology or a contradiction.
4945    If no simplification is possible, this function returns zero.
4946    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4947 
4948 rtx
4949 simplify_const_relational_operation (enum rtx_code code,
4950 				     enum machine_mode mode,
4951 				     rtx op0, rtx op1)
4952 {
4953   rtx tem;
4954   rtx trueop0;
4955   rtx trueop1;
4956 
4957   gcc_assert (mode != VOIDmode
4958 	      || (GET_MODE (op0) == VOIDmode
4959 		  && GET_MODE (op1) == VOIDmode));
4960 
4961   /* If op0 is a compare, extract the comparison arguments from it.  */
4962   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4963     {
4964       op1 = XEXP (op0, 1);
4965       op0 = XEXP (op0, 0);
4966 
4967       if (GET_MODE (op0) != VOIDmode)
4968 	mode = GET_MODE (op0);
4969       else if (GET_MODE (op1) != VOIDmode)
4970 	mode = GET_MODE (op1);
4971       else
4972 	return 0;
4973     }
4974 
4975   /* We can't simplify MODE_CC values since we don't know what the
4976      actual comparison is.  */
4977   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4978     return 0;
4979 
4980   /* Make sure the constant is second.  */
4981   if (swap_commutative_operands_p (op0, op1))
4982     {
4983       tem = op0, op0 = op1, op1 = tem;
4984       code = swap_condition (code);
4985     }
4986 
4987   trueop0 = avoid_constant_pool_reference (op0);
4988   trueop1 = avoid_constant_pool_reference (op1);
4989 
4990   /* For integer comparisons of A and B maybe we can simplify A - B and can
4991      then simplify a comparison of that with zero.  If A and B are both either
4992      a register or a CONST_INT, this can't help; testing for these cases will
4993      prevent infinite recursion here and speed things up.
4994 
4995      We can only do this for EQ and NE comparisons as otherwise we may
4996      lose or introduce overflow which we cannot disregard as undefined as
4997      we do not know the signedness of the operation on either the left or
4998      the right hand side of the comparison.  */
4999 
5000   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5001       && (code == EQ || code == NE)
5002       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5003 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
5004       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
5005       /* We cannot do this if tem is a nonzero address.  */
5006       && ! nonzero_address_p (tem))
5007     return simplify_const_relational_operation (signed_condition (code),
5008 						mode, tem, const0_rtx);
5009 
5010   if (! HONOR_NANS (mode) && code == ORDERED)
5011     return const_true_rtx;
5012 
5013   if (! HONOR_NANS (mode) && code == UNORDERED)
5014     return const0_rtx;
5015 
5016   /* For modes without NaNs, if the two operands are equal, we know the
5017      result except if they have side-effects.  Even with NaNs we know
5018      the result of unordered comparisons and, if signaling NaNs are
5019      irrelevant, also the result of LT/GT/LTGT.  */
5020   if ((! HONOR_NANS (GET_MODE (trueop0))
5021        || code == UNEQ || code == UNLE || code == UNGE
5022        || ((code == LT || code == GT || code == LTGT)
5023 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
5024       && rtx_equal_p (trueop0, trueop1)
5025       && ! side_effects_p (trueop0))
5026     return comparison_result (code, CMP_EQ);
5027 
5028   /* If the operands are floating-point constants, see if we can fold
5029      the result.  */
5030   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5031       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5032       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5033     {
5034       REAL_VALUE_TYPE d0, d1;
5035 
5036       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
5037       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
5038 
5039       /* Comparisons are unordered iff at least one of the values is NaN.  */
5040       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
5041 	switch (code)
5042 	  {
5043 	  case UNEQ:
5044 	  case UNLT:
5045 	  case UNGT:
5046 	  case UNLE:
5047 	  case UNGE:
5048 	  case NE:
5049 	  case UNORDERED:
5050 	    return const_true_rtx;
5051 	  case EQ:
5052 	  case LT:
5053 	  case GT:
5054 	  case LE:
5055 	  case GE:
5056 	  case LTGT:
5057 	  case ORDERED:
5058 	    return const0_rtx;
5059 	  default:
5060 	    return 0;
5061 	  }
5062 
5063       return comparison_result (code,
5064 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
5065 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
5066     }
5067 
5068   /* Otherwise, see if the operands are both integers.  */
5069   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5070        && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
5071        && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
5072     {
5073       int width = GET_MODE_PRECISION (mode);
5074       HOST_WIDE_INT l0s, h0s, l1s, h1s;
5075       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
5076 
5077       /* Get the two words comprising each integer constant.  */
5078       if (CONST_DOUBLE_AS_INT_P (trueop0))
5079 	{
5080 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
5081 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
5082 	}
5083       else
5084 	{
5085 	  l0u = l0s = INTVAL (trueop0);
5086 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
5087 	}
5088 
5089       if (CONST_DOUBLE_AS_INT_P (trueop1))
5090 	{
5091 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
5092 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
5093 	}
5094       else
5095 	{
5096 	  l1u = l1s = INTVAL (trueop1);
5097 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
5098 	}
5099 
5100       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
5101 	 we have to sign or zero-extend the values.  */
5102       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
5103 	{
5104 	  l0u &= GET_MODE_MASK (mode);
5105 	  l1u &= GET_MODE_MASK (mode);
5106 
5107 	  if (val_signbit_known_set_p (mode, l0s))
5108 	    l0s |= ~GET_MODE_MASK (mode);
5109 
5110 	  if (val_signbit_known_set_p (mode, l1s))
5111 	    l1s |= ~GET_MODE_MASK (mode);
5112 	}
5113       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
5114 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
5115 
5116       if (h0u == h1u && l0u == l1u)
5117 	return comparison_result (code, CMP_EQ);
5118       else
5119 	{
5120 	  int cr;
5121 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
5122 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
5123 	  return comparison_result (code, cr);
5124 	}
5125     }
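  /* Worked example (added commentary, not in the original source):
     comparing (const_int -1) with (const_int 1) in SImode yields
     CMP_LT | CMP_GTU above, so (lt ...) folds to const_true_rtx while
     (ltu ...) folds to const0_rtx, reflecting that 0xffffffff is
     unsigned-greater than 1.  */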
5126 
5127   /* Optimize comparisons with upper and lower bounds.  */
5128   if (HWI_COMPUTABLE_MODE_P (mode)
5129       && CONST_INT_P (trueop1))
5130     {
5131       int sign;
5132       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5133       HOST_WIDE_INT val = INTVAL (trueop1);
5134       HOST_WIDE_INT mmin, mmax;
5135 
5136       if (code == GEU
5137 	  || code == LEU
5138 	  || code == GTU
5139 	  || code == LTU)
5140 	sign = 0;
5141       else
5142 	sign = 1;
5143 
5144       /* Get a reduced range if the sign bit is zero.  */
5145       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5146 	{
5147 	  mmin = 0;
5148 	  mmax = nonzero;
5149 	}
5150       else
5151 	{
5152 	  rtx mmin_rtx, mmax_rtx;
5153 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5154 
5155 	  mmin = INTVAL (mmin_rtx);
5156 	  mmax = INTVAL (mmax_rtx);
5157 	  if (sign)
5158 	    {
5159 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5160 
5161 	      mmin >>= (sign_copies - 1);
5162 	      mmax >>= (sign_copies - 1);
5163 	    }
5164 	}
5165 
5166       switch (code)
5167 	{
5168 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5169 	case GEU:
5170 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5171 	    return const_true_rtx;
5172 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5173 	    return const0_rtx;
5174 	  break;
5175 	case GE:
5176 	  if (val <= mmin)
5177 	    return const_true_rtx;
5178 	  if (val > mmax)
5179 	    return const0_rtx;
5180 	  break;
5181 
5182 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5183 	case LEU:
5184 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5185 	    return const_true_rtx;
5186 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5187 	    return const0_rtx;
5188 	  break;
5189 	case LE:
5190 	  if (val >= mmax)
5191 	    return const_true_rtx;
5192 	  if (val < mmin)
5193 	    return const0_rtx;
5194 	  break;
5195 
5196 	case EQ:
5197 	  /* x == y is always false for y out of range.  */
5198 	  if (val < mmin || val > mmax)
5199 	    return const0_rtx;
5200 	  break;
5201 
5202 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5203 	case GTU:
5204 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5205 	    return const0_rtx;
5206 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5207 	    return const_true_rtx;
5208 	  break;
5209 	case GT:
5210 	  if (val >= mmax)
5211 	    return const0_rtx;
5212 	  if (val < mmin)
5213 	    return const_true_rtx;
5214 	  break;
5215 
5216 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5217 	case LTU:
5218 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5219 	    return const0_rtx;
5220 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5221 	    return const_true_rtx;
5222 	  break;
5223 	case LT:
5224 	  if (val <= mmin)
5225 	    return const0_rtx;
5226 	  if (val > mmax)
5227 	    return const_true_rtx;
5228 	  break;
5229 
5230 	case NE:
5231 	  /* x != y is always true for y out of range.  */
5232 	  if (val < mmin || val > mmax)
5233 	    return const_true_rtx;
5234 	  break;
5235 
5236 	default:
5237 	  break;
5238 	}
5239     }
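  /* Illustrative example (added commentary, not in the original source):
     if TRUEOP0 is (zero_extend:SI (reg:QI r)), nonzero_bits gives 0xff,
     so mmin = 0 and mmax = 255; then (gtu op0 (const_int 300)) folds to
     const0_rtx and (leu op0 (const_int 255)) folds to const_true_rtx.  */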
5240 
5241   /* Optimize integer comparisons with zero.  */
5242   if (trueop1 == const0_rtx)
5243     {
5244       /* Some addresses are known to be nonzero.  We don't know
5245 	 their sign, but equality comparisons are known.  */
5246       if (nonzero_address_p (trueop0))
5247 	{
5248 	  if (code == EQ || code == LEU)
5249 	    return const0_rtx;
5250 	  if (code == NE || code == GTU)
5251 	    return const_true_rtx;
5252 	}
5253 
5254       /* See if the first operand is an IOR with a constant.  If so, we
5255 	 may be able to determine the result of this comparison.  */
5256       if (GET_CODE (op0) == IOR)
5257 	{
5258 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5259 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5260 	    {
5261 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5262 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5263 			      && (UINTVAL (inner_const)
5264 				  & ((unsigned HOST_WIDE_INT) 1
5265 				     << sign_bitnum)));
5266 
5267 	      switch (code)
5268 		{
5269 		case EQ:
5270 		case LEU:
5271 		  return const0_rtx;
5272 		case NE:
5273 		case GTU:
5274 		  return const_true_rtx;
5275 		case LT:
5276 		case LE:
5277 		  if (has_sign)
5278 		    return const_true_rtx;
5279 		  break;
5280 		case GT:
5281 		case GE:
5282 		  if (has_sign)
5283 		    return const0_rtx;
5284 		  break;
5285 		default:
5286 		  break;
5287 		}
5288 	    }
5289 	}
5290     }
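  /* Illustrative example (added commentary, not in the original source):
     with OP0 = (ior:SI (reg:SI x) (const_int 4)), (eq op0 (const_int 0))
     folds to const0_rtx since the IOR is known to be nonzero, and with
     (const_int 0x80000000) in place of 4, (lt op0 (const_int 0)) folds to
     const_true_rtx because the sign bit is known to be set.  */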
5291 
5292   /* Optimize comparison of ABS with zero.  */
5293   if (trueop1 == CONST0_RTX (mode)
5294       && (GET_CODE (trueop0) == ABS
5295 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5296 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5297     {
5298       switch (code)
5299 	{
5300 	case LT:
5301 	  /* Optimize abs(x) < 0.0.  */
5302 	  if (!HONOR_SNANS (mode)
5303 	      && (!INTEGRAL_MODE_P (mode)
5304 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5305 	    {
5306 	      if (INTEGRAL_MODE_P (mode)
5307 		  && (issue_strict_overflow_warning
5308 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5309 		warning (OPT_Wstrict_overflow,
5310 			 ("assuming signed overflow does not occur when "
5311 			  "assuming abs (x) < 0 is false"));
5312 	       return const0_rtx;
5313 	    }
5314 	  break;
5315 
5316 	case GE:
5317 	  /* Optimize abs(x) >= 0.0.  */
5318 	  if (!HONOR_NANS (mode)
5319 	      && (!INTEGRAL_MODE_P (mode)
5320 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5321 	    {
5322 	      if (INTEGRAL_MODE_P (mode)
5323 		  && (issue_strict_overflow_warning
5324 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5325 		warning (OPT_Wstrict_overflow,
5326 			 ("assuming signed overflow does not occur when "
5327 			  "assuming abs (x) >= 0 is true"));
5328 	      return const_true_rtx;
5329 	    }
5330 	  break;
5331 
5332 	case UNGE:
5333 	  /* Optimize ! (abs(x) < 0.0).  */
5334 	  return const_true_rtx;
5335 
5336 	default:
5337 	  break;
5338 	}
5339     }
5340 
5341   return 0;
5342 }
5343 
5344 /* Simplify CODE, an operation with result mode MODE and three operands,
5345    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5346    a constant.  Return 0 if no simplification is possible.  */
5347 
5348 rtx
5349 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5350 			    enum machine_mode op0_mode, rtx op0, rtx op1,
5351 			    rtx op2)
5352 {
5353   unsigned int width = GET_MODE_PRECISION (mode);
5354   bool any_change = false;
5355   rtx tem, trueop2;
5356 
5357   /* VOIDmode means "infinite" precision.  */
5358   if (width == 0)
5359     width = HOST_BITS_PER_WIDE_INT;
5360 
5361   switch (code)
5362     {
5363     case FMA:
5364       /* Simplify negations around the multiplication.  */
5365       /* -a * -b + c  =>  a * b + c.  */
5366       if (GET_CODE (op0) == NEG)
5367 	{
5368 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5369 	  if (tem)
5370 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5371 	}
5372       else if (GET_CODE (op1) == NEG)
5373 	{
5374 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5375 	  if (tem)
5376 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5377 	}
5378 
5379       /* Canonicalize the two multiplication operands.  */
5380       /* a * -b + c  =>  -b * a + c.  */
5381       if (swap_commutative_operands_p (op0, op1))
5382 	tem = op0, op0 = op1, op1 = tem, any_change = true;
5383 
5384       if (any_change)
5385 	return gen_rtx_FMA (mode, op0, op1, op2);
5386       return NULL_RTX;
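      /* Illustrative example (added commentary, not in the original source):
	 (fma:SF (neg:SF a) (neg:SF b) c) becomes (fma:SF a b c), and
	 (fma:SF a (neg:SF b) c) is canonicalized to (fma:SF (neg:SF b) a c).  */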
5387 
5388     case SIGN_EXTRACT:
5389     case ZERO_EXTRACT:
5390       if (CONST_INT_P (op0)
5391 	  && CONST_INT_P (op1)
5392 	  && CONST_INT_P (op2)
5393 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5394 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5395 	{
5396 	  /* Extracting a bit-field from a constant */
5397 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5398 	  HOST_WIDE_INT op1val = INTVAL (op1);
5399 	  HOST_WIDE_INT op2val = INTVAL (op2);
5400 	  if (BITS_BIG_ENDIAN)
5401 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5402 	  else
5403 	    val >>= op2val;
5404 
5405 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5406 	    {
5407 	      /* First zero-extend.  */
5408 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5409 	      /* If desired, propagate sign bit.  */
5410 	      if (code == SIGN_EXTRACT
5411 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5412 		     != 0)
5413 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5414 	    }
5415 
5416 	  return gen_int_mode (val, mode);
5417 	}
5418       break;
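      /* Worked example (added commentary, not in the original source),
	 assuming !BITS_BIG_ENDIAN:
	   (zero_extract:SI (const_int 0x5a) (const_int 4) (const_int 1))
	 extracts bits 1..4 of 0x5a and yields (const_int 13), while the
	 corresponding SIGN_EXTRACT yields (const_int -3) because the top
	 extracted bit is set.  */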
5419 
5420     case IF_THEN_ELSE:
5421       if (CONST_INT_P (op0))
5422 	return op0 != const0_rtx ? op1 : op2;
5423 
5424       /* Convert c ? a : a into "a".  */
5425       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5426 	return op1;
5427 
5428       /* Convert a != b ? a : b into "a".  */
5429       if (GET_CODE (op0) == NE
5430 	  && ! side_effects_p (op0)
5431 	  && ! HONOR_NANS (mode)
5432 	  && ! HONOR_SIGNED_ZEROS (mode)
5433 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5434 	       && rtx_equal_p (XEXP (op0, 1), op2))
5435 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5436 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5437 	return op1;
5438 
5439       /* Convert a == b ? a : b into "b".  */
5440       if (GET_CODE (op0) == EQ
5441 	  && ! side_effects_p (op0)
5442 	  && ! HONOR_NANS (mode)
5443 	  && ! HONOR_SIGNED_ZEROS (mode)
5444 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5445 	       && rtx_equal_p (XEXP (op0, 1), op2))
5446 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5447 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5448 	return op2;
5449 
5450       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5451 	{
5452 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5453 					? GET_MODE (XEXP (op0, 1))
5454 					: GET_MODE (XEXP (op0, 0)));
5455 	  rtx temp;
5456 
5457 	  /* Look for happy constants in op1 and op2.  */
5458 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5459 	    {
5460 	      HOST_WIDE_INT t = INTVAL (op1);
5461 	      HOST_WIDE_INT f = INTVAL (op2);
5462 
5463 	      if (t == STORE_FLAG_VALUE && f == 0)
5464 	        code = GET_CODE (op0);
5465 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5466 		{
5467 		  enum rtx_code tmp;
5468 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5469 		  if (tmp == UNKNOWN)
5470 		    break;
5471 		  code = tmp;
5472 		}
5473 	      else
5474 		break;
5475 
5476 	      return simplify_gen_relational (code, mode, cmp_mode,
5477 					      XEXP (op0, 0), XEXP (op0, 1));
5478 	    }
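	  /* Illustrative example (added commentary, not in the original
	     source): with STORE_FLAG_VALUE == 1,
	     (if_then_else (lt x y) (const_int 1) (const_int 0)) becomes
	     (lt x y), and with the arms swapped it becomes the reversed
	     comparison (ge x y), provided the reversal is known.  */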
5479 
5480 	  if (cmp_mode == VOIDmode)
5481 	    cmp_mode = op0_mode;
5482 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5483 			  			cmp_mode, XEXP (op0, 0),
5484 						XEXP (op0, 1));
5485 
5486 	  /* See if any simplifications were possible.  */
5487 	  if (temp)
5488 	    {
5489 	      if (CONST_INT_P (temp))
5490 		return temp == const0_rtx ? op2 : op1;
5491 	      else if (temp)
5492 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5493 	    }
5494 	}
5495       break;
5496 
5497     case VEC_MERGE:
5498       gcc_assert (GET_MODE (op0) == mode);
5499       gcc_assert (GET_MODE (op1) == mode);
5500       gcc_assert (VECTOR_MODE_P (mode));
5501       trueop2 = avoid_constant_pool_reference (op2);
5502       if (CONST_INT_P (trueop2))
5503 	{
5504 	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5505 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5506 	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5507 	  unsigned HOST_WIDE_INT mask;
5508 	  if (n_elts == HOST_BITS_PER_WIDE_INT)
5509 	    mask = -1;
5510 	  else
5511 	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;
5512 
5513 	  if (!(sel & mask) && !side_effects_p (op0))
5514 	    return op1;
5515 	  if ((sel & mask) == mask && !side_effects_p (op1))
5516 	    return op0;
5517 
5518 	  rtx trueop0 = avoid_constant_pool_reference (op0);
5519 	  rtx trueop1 = avoid_constant_pool_reference (op1);
5520 	  if (GET_CODE (trueop0) == CONST_VECTOR
5521 	      && GET_CODE (trueop1) == CONST_VECTOR)
5522 	    {
5523 	      rtvec v = rtvec_alloc (n_elts);
5524 	      unsigned int i;
5525 
5526 	      for (i = 0; i < n_elts; i++)
5527 		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
5528 				    ? CONST_VECTOR_ELT (trueop0, i)
5529 				    : CONST_VECTOR_ELT (trueop1, i));
5530 	      return gen_rtx_CONST_VECTOR (mode, v);
5531 	    }
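	  /* Illustrative example (added commentary, not in the original
	     source): (vec_merge:V2SI (const_vector [1 2]) (const_vector [3 4])
	     (const_int 1)) yields (const_vector [1 4]): the set bit 0 of the
	     selector picks element 0 from the first operand, the clear bit 1
	     picks element 1 from the second.  */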
5532 
5533 	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5534 	     if no element from a appears in the result.  */
5535 	  if (GET_CODE (op0) == VEC_MERGE)
5536 	    {
5537 	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
5538 	      if (CONST_INT_P (tem))
5539 		{
5540 		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5541 		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5542 		    return simplify_gen_ternary (code, mode, mode,
5543 						 XEXP (op0, 1), op1, op2);
5544 		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5545 		    return simplify_gen_ternary (code, mode, mode,
5546 						 XEXP (op0, 0), op1, op2);
5547 		}
5548 	    }
5549 	  if (GET_CODE (op1) == VEC_MERGE)
5550 	    {
5551 	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
5552 	      if (CONST_INT_P (tem))
5553 		{
5554 		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5555 		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5556 		    return simplify_gen_ternary (code, mode, mode,
5557 						 op0, XEXP (op1, 1), op2);
5558 		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5559 		    return simplify_gen_ternary (code, mode, mode,
5560 						 op0, XEXP (op1, 0), op2);
5561 		}
5562 	    }
5563 	}
5564 
5565       if (rtx_equal_p (op0, op1)
5566 	  && !side_effects_p (op2) && !side_effects_p (op1))
5567 	return op0;
5568 
5569       break;
5570 
5571     default:
5572       gcc_unreachable ();
5573     }
5574 
5575   return 0;
5576 }
5577 
5578 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5579    or CONST_VECTOR,
5580    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5581 
5582    Works by unpacking OP into a collection of 8-bit values
5583    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5584    and then repacking them again for OUTERMODE.  */
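/* Added commentary (not in the original source): for instance, on a
   little-endian target with a 64-bit HOST_WIDE_INT, taking the SImode
   subreg at byte 4 of (const_int 0x123456789abcdef0) in DImode unpacks
   the eight constant bytes, selects the upper four, and repacks them as
   (const_int 0x12345678).  */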
5585 
5586 static rtx
5587 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5588 		       enum machine_mode innermode, unsigned int byte)
5589 {
5590   /* We support up to 512-bit values (for V8DFmode).  */
5591   enum {
5592     max_bitsize = 512,
5593     value_bit = 8,
5594     value_mask = (1 << value_bit) - 1
5595   };
5596   unsigned char value[max_bitsize / value_bit];
5597   int value_start;
5598   int i;
5599   int elem;
5600 
5601   int num_elem;
5602   rtx * elems;
5603   int elem_bitsize;
5604   rtx result_s;
5605   rtvec result_v = NULL;
5606   enum mode_class outer_class;
5607   enum machine_mode outer_submode;
5608 
5609   /* Some ports misuse CCmode.  */
5610   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5611     return op;
5612 
5613   /* We have no way to represent a complex constant at the rtl level.  */
5614   if (COMPLEX_MODE_P (outermode))
5615     return NULL_RTX;
5616 
5617   /* Unpack the value.  */
5618 
5619   if (GET_CODE (op) == CONST_VECTOR)
5620     {
5621       num_elem = CONST_VECTOR_NUNITS (op);
5622       elems = &CONST_VECTOR_ELT (op, 0);
5623       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5624     }
5625   else
5626     {
5627       num_elem = 1;
5628       elems = &op;
5629       elem_bitsize = max_bitsize;
5630     }
5631   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5632   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5633   /* I don't know how to handle endianness of sub-units.  */
5634   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5635 
5636   for (elem = 0; elem < num_elem; elem++)
5637     {
5638       unsigned char * vp;
5639       rtx el = elems[elem];
5640 
5641       /* Vectors are kept in target memory order.  (This is probably
5642 	 a mistake.)  */
5643       {
5644 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5645 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5646 			  / BITS_PER_UNIT);
5647 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5648 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5649 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5650 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5651 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5652       }
5653 
5654       switch (GET_CODE (el))
5655 	{
5656 	case CONST_INT:
5657 	  for (i = 0;
5658 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5659 	       i += value_bit)
5660 	    *vp++ = INTVAL (el) >> i;
5661 	  /* CONST_INTs are always logically sign-extended.  */
5662 	  for (; i < elem_bitsize; i += value_bit)
5663 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5664 	  break;
5665 
5666 	case CONST_DOUBLE:
5667 	  if (GET_MODE (el) == VOIDmode)
5668 	    {
5669 	      unsigned char extend = 0;
5670 	      /* If this triggers, someone should have generated a
5671 		 CONST_INT instead.  */
5672 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5673 
5674 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5675 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5676 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5677 		{
5678 		  *vp++
5679 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5680 		  i += value_bit;
5681 		}
5682 
5683 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5684 		extend = -1;
5685 	      for (; i < elem_bitsize; i += value_bit)
5686 		*vp++ = extend;
5687 	    }
5688 	  else
5689 	    {
5690 	      long tmp[max_bitsize / 32];
5691 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5692 
5693 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5694 	      gcc_assert (bitsize <= elem_bitsize);
5695 	      gcc_assert (bitsize % value_bit == 0);
5696 
5697 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5698 			      GET_MODE (el));
5699 
5700 	      /* real_to_target produces its result in words affected by
5701 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5702 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5703 	         of SUBREG in rtl.texi.  */
5704 	      for (i = 0; i < bitsize; i += value_bit)
5705 		{
5706 		  int ibase;
5707 		  if (WORDS_BIG_ENDIAN)
5708 		    ibase = bitsize - 1 - i;
5709 		  else
5710 		    ibase = i;
5711 		  *vp++ = tmp[ibase / 32] >> i % 32;
5712 		}
5713 
5714 	      /* It shouldn't matter what's done here, so fill it with
5715 		 zero.  */
5716 	      for (; i < elem_bitsize; i += value_bit)
5717 		*vp++ = 0;
5718 	    }
5719 	  break;
5720 
5721         case CONST_FIXED:
5722 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5723 	    {
5724 	      for (i = 0; i < elem_bitsize; i += value_bit)
5725 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5726 	    }
5727 	  else
5728 	    {
5729 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5730 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5731               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5732 		   i += value_bit)
5733 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5734 			>> (i - HOST_BITS_PER_WIDE_INT);
5735 	      for (; i < elem_bitsize; i += value_bit)
5736 		*vp++ = 0;
5737 	    }
5738           break;
5739 
5740 	default:
5741 	  gcc_unreachable ();
5742 	}
5743     }
5744 
5745   /* Now, pick the right byte to start with.  */
5746   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5747      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5748      will already have offset 0.  */
5749   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5750     {
5751       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5752 			- byte);
5753       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5754       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5755       byte = (subword_byte % UNITS_PER_WORD
5756 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5757     }
5758 
5759   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5760      so if it's become negative it will instead be very large.)  */
5761   gcc_assert (byte < GET_MODE_SIZE (innermode));
5762 
5763   /* Convert from bytes to chunks of size value_bit.  */
5764   value_start = byte * (BITS_PER_UNIT / value_bit);
5765 
5766   /* Re-pack the value.  */
5767 
5768   if (VECTOR_MODE_P (outermode))
5769     {
5770       num_elem = GET_MODE_NUNITS (outermode);
5771       result_v = rtvec_alloc (num_elem);
5772       elems = &RTVEC_ELT (result_v, 0);
5773       outer_submode = GET_MODE_INNER (outermode);
5774     }
5775   else
5776     {
5777       num_elem = 1;
5778       elems = &result_s;
5779       outer_submode = outermode;
5780     }
5781 
5782   outer_class = GET_MODE_CLASS (outer_submode);
5783   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5784 
5785   gcc_assert (elem_bitsize % value_bit == 0);
5786   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5787 
5788   for (elem = 0; elem < num_elem; elem++)
5789     {
5790       unsigned char *vp;
5791 
5792       /* Vectors are stored in target memory order.  (This is probably
5793 	 a mistake.)  */
5794       {
5795 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5796 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5797 			  / BITS_PER_UNIT);
5798 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5799 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5800 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5801 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5802 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5803       }
5804 
5805       switch (outer_class)
5806 	{
5807 	case MODE_INT:
5808 	case MODE_PARTIAL_INT:
5809 	  {
5810 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5811 
5812 	    for (i = 0;
5813 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5814 		 i += value_bit)
5815 	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5816 	    for (; i < elem_bitsize; i += value_bit)
5817 	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5818 		     << (i - HOST_BITS_PER_WIDE_INT);
5819 
5820 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5821 	       know why.  */
5822 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5823 	      elems[elem] = gen_int_mode (lo, outer_submode);
5824 	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5825 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5826 	    else
5827 	      return NULL_RTX;
5828 	  }
5829 	  break;
5830 
5831 	case MODE_FLOAT:
5832 	case MODE_DECIMAL_FLOAT:
5833 	  {
5834 	    REAL_VALUE_TYPE r;
5835 	    long tmp[max_bitsize / 32];
5836 
5837 	    /* real_from_target wants its input in words affected by
5838 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5839 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5840 	       of SUBREG in rtl.texi.  */
5841 	    for (i = 0; i < max_bitsize / 32; i++)
5842 	      tmp[i] = 0;
5843 	    for (i = 0; i < elem_bitsize; i += value_bit)
5844 	      {
5845 		int ibase;
5846 		if (WORDS_BIG_ENDIAN)
5847 		  ibase = elem_bitsize - 1 - i;
5848 		else
5849 		  ibase = i;
5850 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5851 	      }
5852 
5853 	    real_from_target (&r, tmp, outer_submode);
5854 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5855 	  }
5856 	  break;
5857 
5858 	case MODE_FRACT:
5859 	case MODE_UFRACT:
5860 	case MODE_ACCUM:
5861 	case MODE_UACCUM:
5862 	  {
5863 	    FIXED_VALUE_TYPE f;
5864 	    f.data.low = 0;
5865 	    f.data.high = 0;
5866 	    f.mode = outer_submode;
5867 
5868 	    for (i = 0;
5869 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5870 		 i += value_bit)
5871 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5872 	    for (; i < elem_bitsize; i += value_bit)
5873 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5874 			     << (i - HOST_BITS_PER_WIDE_INT));
5875 
5876 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5877           }
5878           break;
5879 
5880 	default:
5881 	  gcc_unreachable ();
5882 	}
5883     }
5884   if (VECTOR_MODE_P (outermode))
5885     return gen_rtx_CONST_VECTOR (outermode, result_v);
5886   else
5887     return result_s;
5888 }
5889 
5890 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5891    Return 0 if no simplifications are possible.  */
5892 rtx
5893 simplify_subreg (enum machine_mode outermode, rtx op,
5894 		 enum machine_mode innermode, unsigned int byte)
5895 {
5896   /* Little bit of sanity checking.  */
5897   gcc_assert (innermode != VOIDmode);
5898   gcc_assert (outermode != VOIDmode);
5899   gcc_assert (innermode != BLKmode);
5900   gcc_assert (outermode != BLKmode);
5901 
5902   gcc_assert (GET_MODE (op) == innermode
5903 	      || GET_MODE (op) == VOIDmode);
5904 
5905   if ((byte % GET_MODE_SIZE (outermode)) != 0)
5906     return NULL_RTX;
5907 
5908   if (byte >= GET_MODE_SIZE (innermode))
5909     return NULL_RTX;
5910 
5911   if (outermode == innermode && !byte)
5912     return op;
5913 
5914   if (CONST_SCALAR_INT_P (op)
5915       || CONST_DOUBLE_AS_FLOAT_P (op)
5916       || GET_CODE (op) == CONST_FIXED
5917       || GET_CODE (op) == CONST_VECTOR)
5918     return simplify_immed_subreg (outermode, op, innermode, byte);
5919 
5920   /* Changing mode twice with SUBREG => just change it once,
5921      or not at all if changing back to the starting mode.  */
5922   if (GET_CODE (op) == SUBREG)
5923     {
5924       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5925       int final_offset = byte + SUBREG_BYTE (op);
5926       rtx newx;
5927 
5928       if (outermode == innermostmode
5929 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5930 	return SUBREG_REG (op);
5931 
5932       /* The SUBREG_BYTE represents the offset, as if the value were stored
5933 	 in memory.  The irritating exception is a paradoxical subreg, where
5934 	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
5935 	 value should be negative.  For a moment, undo this exception.  */
5936       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5937 	{
5938 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5939 	  if (WORDS_BIG_ENDIAN)
5940 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5941 	  if (BYTES_BIG_ENDIAN)
5942 	    final_offset += difference % UNITS_PER_WORD;
5943 	}
5944       if (SUBREG_BYTE (op) == 0
5945 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5946 	{
5947 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5948 	  if (WORDS_BIG_ENDIAN)
5949 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5950 	  if (BYTES_BIG_ENDIAN)
5951 	    final_offset += difference % UNITS_PER_WORD;
5952 	}
5953 
5954       /* See whether resulting subreg will be paradoxical.  */
5955       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5956 	{
5957 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5958 	  if (final_offset < 0)
5959 	    return NULL_RTX;
5960 	  /* Bail out in case resulting subreg would be incorrect.  */
5961 	  if (final_offset % GET_MODE_SIZE (outermode)
5962 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5963 	    return NULL_RTX;
5964 	}
5965       else
5966 	{
5967 	  int offset = 0;
5968 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5969 
5970 	  /* In a paradoxical subreg, see if we are still looking at the
5971 	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5972 	  if (WORDS_BIG_ENDIAN)
5973 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5974 	  if (BYTES_BIG_ENDIAN)
5975 	    offset += difference % UNITS_PER_WORD;
5976 	  if (offset == final_offset)
5977 	    final_offset = 0;
5978 	  else
5979 	    return NULL_RTX;
5980 	}
5981 
5982       /* Recurse for further possible simplifications.  */
5983       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5984 			      final_offset);
5985       if (newx)
5986 	return newx;
5987       if (validate_subreg (outermode, innermostmode,
5988 			   SUBREG_REG (op), final_offset))
5989 	{
5990 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5991 	  if (SUBREG_PROMOTED_VAR_P (op)
5992 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5993 	      && GET_MODE_CLASS (outermode) == MODE_INT
5994 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5995 			   GET_MODE_SIZE (innermode),
5996 			   GET_MODE_SIZE (innermostmode))
5997 	      && subreg_lowpart_p (newx))
5998 	    {
5999 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
6000 	      SUBREG_PROMOTED_UNSIGNED_SET
6001 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
6002 	    }
6003 	  return newx;
6004 	}
6005       return NULL_RTX;
6006     }
6007 
6008   /* SUBREG of a hard register => just change the register number
6009      and/or mode.  If the hard register is not valid in that mode,
6010      suppress this simplification.  If the hard register is the stack,
6011      frame, or argument pointer, leave this as a SUBREG.  */
6012 
6013   if (REG_P (op) && HARD_REGISTER_P (op))
6014     {
6015       unsigned int regno, final_regno;
6016 
6017       regno = REGNO (op);
6018       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6019       if (HARD_REGISTER_NUM_P (final_regno))
6020 	{
6021 	  rtx x;
6022 	  int final_offset = byte;
6023 
6024 	  /* Adjust offset for paradoxical subregs.  */
6025 	  if (byte == 0
6026 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
6027 	    {
6028 	      int difference = (GET_MODE_SIZE (innermode)
6029 				- GET_MODE_SIZE (outermode));
6030 	      if (WORDS_BIG_ENDIAN)
6031 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
6032 	      if (BYTES_BIG_ENDIAN)
6033 		final_offset += difference % UNITS_PER_WORD;
6034 	    }
6035 
6036 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
6037 
6038 	  /* Propagate the original regno.  We don't have any way to specify
6039 	     the offset inside the original regno, so do so only for the
6040 	     lowpart.  The information is used only by alias analysis, which
6041 	     cannot grok partial registers anyway.  */
6042 
6043 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
6044 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6045 	  return x;
6046 	}
6047     }
6048 
6049   /* If we have a SUBREG of a register that we are replacing and we are
6050      replacing it with a MEM, make a new MEM and try replacing the
6051      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
6052      or if we would be widening it.  */
6053 
6054   if (MEM_P (op)
6055       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6056       /* Allow splitting of volatile memory references in case we don't
6057          have an instruction to move the whole thing.  */
6058       && (! MEM_VOLATILE_P (op)
6059 	  || ! have_insn_for (SET, innermode))
6060       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6061     return adjust_address_nv (op, outermode, byte);
6062 
6063   /* Handle complex values represented as CONCAT
6064      of real and imaginary part.  */
6065   if (GET_CODE (op) == CONCAT)
6066     {
6067       unsigned int part_size, final_offset;
6068       rtx part, res;
6069 
6070       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
6071       if (byte < part_size)
6072 	{
6073 	  part = XEXP (op, 0);
6074 	  final_offset = byte;
6075 	}
6076       else
6077 	{
6078 	  part = XEXP (op, 1);
6079 	  final_offset = byte - part_size;
6080 	}
6081 
6082       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
6083 	return NULL_RTX;
6084 
6085       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
6086       if (res)
6087 	return res;
6088       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
6089 	return gen_rtx_SUBREG (outermode, part, final_offset);
6090       return NULL_RTX;
6091     }
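  /* Illustrative example (added commentary, not in the original source),
     assuming 4-byte SFmode: (subreg:SF (concat:SC a b) 4) simplifies to b,
     the imaginary part, since byte 4 falls entirely within the second
     operand of the CONCAT.  */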
6092 
6093   /* A SUBREG resulting from a zero extension may fold to zero if
6094      it extracts higher bits than the ZERO_EXTEND's source bits.  */
6095   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6096     {
6097       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
6098       if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
6099 	return CONST0_RTX (outermode);
6100     }
6101 
6102   if (SCALAR_INT_MODE_P (outermode)
6103       && SCALAR_INT_MODE_P (innermode)
6104       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
6105       && byte == subreg_lowpart_offset (outermode, innermode))
6106     {
6107       rtx tem = simplify_truncation (outermode, op, innermode);
6108       if (tem)
6109 	return tem;
6110     }
6111 
6112   return NULL_RTX;
6113 }
6114 
6115 /* Make a SUBREG operation or equivalent if it folds.  */
6116 
6117 rtx
6118 simplify_gen_subreg (enum machine_mode outermode, rtx op,
6119 		     enum machine_mode innermode, unsigned int byte)
6120 {
6121   rtx newx;
6122 
6123   newx = simplify_subreg (outermode, op, innermode, byte);
6124   if (newx)
6125     return newx;
6126 
6127   if (GET_CODE (op) == SUBREG
6128       || GET_CODE (op) == CONCAT
6129       || GET_MODE (op) == VOIDmode)
6130     return NULL_RTX;
6131 
6132   if (validate_subreg (outermode, innermode, op, byte))
6133     return gen_rtx_SUBREG (outermode, op, byte);
6134 
6135   return NULL_RTX;
6136 }
6137 
6138 /* Simplify X, an rtx expression.
6139 
6140    Return the simplified expression or NULL if no simplifications
6141    were possible.
6142 
6143    This is the preferred entry point into the simplification routines;
6144    however, we still allow passes to call the more specific routines.
6145 
6146    Right now GCC has three (yes, three) major bodies of RTL simplification
6147    code that need to be unified.
6148 
6149 	1. fold_rtx in cse.c.  This code uses various CSE specific
6150 	   information to aid in RTL simplification.
6151 
6152 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
6153 	   it uses combine specific information to aid in RTL
6154 	   simplification.
6155 
6156 	3. The routines in this file.
6157 
6158 
6159    Long term we want to only have one body of simplification code; to
6160    get to that state I recommend the following steps:
6161 
6162 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
6163 	   which do not depend on pass-specific state into these routines.
6164 
6165 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
6166 	   use this routine whenever possible.
6167 
6168 	3. Allow for pass dependent state to be provided to these
6169 	   routines and add simplifications based on the pass dependent
6170 	   state.  Remove code from cse.c & combine.c that becomes
6171 	   redundant/dead.
6172 
6173     It will take time, but ultimately the compiler will be easier to
6174     maintain and improve.  It's totally silly that when we add a
6175     simplification it needs to be added to 4 places (3 for RTL
6176     simplification and 1 for tree simplification).  */
6177 
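/* Added commentary (not in the original source): as a usage sketch,
   calling simplify_rtx on (plus:SI (reg:SI 1) (const_int 0)) returns
   (reg:SI 1), while it returns NULL for an expression it cannot improve,
   in which case callers keep the original rtx.  */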
6178 rtx
6179 simplify_rtx (const_rtx x)
6180 {
6181   const enum rtx_code code = GET_CODE (x);
6182   const enum machine_mode mode = GET_MODE (x);
6183 
6184   switch (GET_RTX_CLASS (code))
6185     {
6186     case RTX_UNARY:
6187       return simplify_unary_operation (code, mode,
6188 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6189     case RTX_COMM_ARITH:
6190       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6191 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6192 
6193       /* Fall through....  */
6194 
6195     case RTX_BIN_ARITH:
6196       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6197 
6198     case RTX_TERNARY:
6199     case RTX_BITFIELD_OPS:
6200       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6201 					 XEXP (x, 0), XEXP (x, 1),
6202 					 XEXP (x, 2));
6203 
6204     case RTX_COMPARE:
6205     case RTX_COMM_COMPARE:
6206       return simplify_relational_operation (code, mode,
6207                                             ((GET_MODE (XEXP (x, 0))
6208                                              != VOIDmode)
6209                                             ? GET_MODE (XEXP (x, 0))
6210                                             : GET_MODE (XEXP (x, 1))),
6211                                             XEXP (x, 0),
6212                                             XEXP (x, 1));
6213 
6214     case RTX_EXTRA:
6215       if (code == SUBREG)
6216 	return simplify_subreg (mode, SUBREG_REG (x),
6217 				GET_MODE (SUBREG_REG (x)),
6218 				SUBREG_BYTE (x));
6219       break;
6220 
6221     case RTX_OBJ:
6222       if (code == LO_SUM)
6223 	{
6224 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6225 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6226 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6227 	    return XEXP (x, 1);
6228 	}
6229       break;
6230 
6231     default:
6232       break;
6233     }
6234   return NULL;
6235 }
6236