1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3    1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4    2011, 2012  Free Software Foundation, Inc.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
41 
42 /* Simplification and canonicalization of RTL.  */
43 
44 /* Much code operates on (low, high) pairs; the low value is an
45    unsigned wide int, the high value a signed wide int.  We
46    occasionally need to sign extend from low to high as if low were a
47    signed wide int.  */
48 #define HWI_SIGN_EXTEND(low) \
49  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
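/* For example, HWI_SIGN_EXTEND (-1) is -1 and HWI_SIGN_EXTEND (1) is 0,
   so the (low, high) pair describing a CONST_INT with value N is
   (N, HWI_SIGN_EXTEND (N)).  */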
50 
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 				  unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 					   rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 					    enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 					rtx, rtx, rtx, rtx);
64 
/* Negate a CONST_INT rtx, truncating (because negating the most
   negative number can overflow).  */
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70   return gen_int_mode (- INTVAL (i), mode);
71 }
72 
73 /* Test whether expression, X, is an immediate constant that represents
74    the most significant bit of machine mode MODE.  */
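/* For example, for a 32-bit integer mode only the constant with just the
   top bit (bit 31) set satisfies this test.  */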
75 
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79   unsigned HOST_WIDE_INT val;
80   unsigned int width;
81 
82   if (GET_MODE_CLASS (mode) != MODE_INT)
83     return false;
84 
85   width = GET_MODE_PRECISION (mode);
86   if (width == 0)
87     return false;
88 
89   if (width <= HOST_BITS_PER_WIDE_INT
90       && CONST_INT_P (x))
91     val = INTVAL (x);
92   else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 	   && GET_CODE (x) == CONST_DOUBLE
94 	   && CONST_DOUBLE_LOW (x) == 0)
95     {
96       val = CONST_DOUBLE_HIGH (x);
97       width -= HOST_BITS_PER_WIDE_INT;
98     }
99   else
100     return false;
101 
102   if (width < HOST_BITS_PER_WIDE_INT)
103     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
106 
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108    (after masking with the mode mask of MODE).  Returns false if the
109    precision of MODE is too large to handle.  */
110 
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114   unsigned int width;
115 
116   if (GET_MODE_CLASS (mode) != MODE_INT)
117     return false;
118 
119   width = GET_MODE_PRECISION (mode);
120   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121     return false;
122 
123   val &= GET_MODE_MASK (mode);
124   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126 
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128    Returns false if the precision of MODE is too large to handle.  */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132   unsigned int width;
133 
134   if (GET_MODE_CLASS (mode) != MODE_INT)
135     return false;
136 
137   width = GET_MODE_PRECISION (mode);
138   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139     return false;
140 
141   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142   return val != 0;
143 }
144 
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146    Returns false if the precision of MODE is too large to handle.  */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150   unsigned int width;
151 
152   if (GET_MODE_CLASS (mode) != MODE_INT)
153     return false;
154 
155   width = GET_MODE_PRECISION (mode);
156   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157     return false;
158 
159   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160   return val == 0;
161 }
162 
163 /* Make a binary operation by properly ordering the operands and
164    seeing if the expression folds.  */
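/* For example, simplify_gen_binary (PLUS, SImode, (const_int 4), (reg:SI 1))
   does not fold, so it is returned in canonical order as
   (plus:SI (reg:SI 1) (const_int 4)).  */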
165 
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 		     rtx op1)
169 {
170   rtx tem;
171 
172   /* If this simplifies, do it.  */
173   tem = simplify_binary_operation (code, mode, op0, op1);
174   if (tem)
175     return tem;
176 
177   /* Put complex operands first and constants second if commutative.  */
178   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179       && swap_commutative_operands_p (op0, op1))
180     tem = op0, op0 = op1, op1 = tem;
181 
182   return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
184 
185 /* If X is a MEM referencing the constant pool, return the real value.
186    Otherwise return X.  */
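/* For example, a (mem:DF (symbol_ref ...)) whose address points at a
   constant-pool entry holding a DFmode constant is returned as the
   corresponding CONST_DOUBLE.  */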
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190   rtx c, tmp, addr;
191   enum machine_mode cmode;
192   HOST_WIDE_INT offset = 0;
193 
194   switch (GET_CODE (x))
195     {
196     case MEM:
197       break;
198 
199     case FLOAT_EXTEND:
200       /* Handle float extensions of constant pool references.  */
201       tmp = XEXP (x, 0);
202       c = avoid_constant_pool_reference (tmp);
203       if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
204 	{
205 	  REAL_VALUE_TYPE d;
206 
207 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 	}
210       return x;
211 
212     default:
213       return x;
214     }
215 
216   if (GET_MODE (x) == BLKmode)
217     return x;
218 
219   addr = XEXP (x, 0);
220 
221   /* Call target hook to avoid the effects of -fpic etc....  */
222   addr = targetm.delegitimize_address (addr);
223 
224   /* Split the address into a base and integer offset.  */
225   if (GET_CODE (addr) == CONST
226       && GET_CODE (XEXP (addr, 0)) == PLUS
227       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228     {
229       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230       addr = XEXP (XEXP (addr, 0), 0);
231     }
232 
233   if (GET_CODE (addr) == LO_SUM)
234     addr = XEXP (addr, 1);
235 
236   /* If this is a constant pool reference, we can turn it into its
237      constant and hope that simplifications happen.  */
238   if (GET_CODE (addr) == SYMBOL_REF
239       && CONSTANT_POOL_ADDRESS_P (addr))
240     {
241       c = get_pool_constant (addr);
242       cmode = get_pool_mode (addr);
243 
244       /* If we're accessing the constant in a different mode than it was
245          originally stored, attempt to fix that up via subreg simplifications.
246          If that fails we have no choice but to return the original memory.  */
247       if ((offset != 0 || cmode != GET_MODE (x))
248 	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
249         {
250           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251           if (tem && CONSTANT_P (tem))
252             return tem;
253         }
254       else
255         return c;
256     }
257 
258   return x;
259 }
260 
261 /* Simplify a MEM based on its attributes.  This is the default
262    delegitimize_address target hook, and it's recommended that every
263    overrider call it.  */
264 
265 rtx
266 delegitimize_mem_from_attrs (rtx x)
267 {
268   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
269      use their base addresses as equivalent.  */
270   if (MEM_P (x)
271       && MEM_EXPR (x)
272       && MEM_OFFSET_KNOWN_P (x))
273     {
274       tree decl = MEM_EXPR (x);
275       enum machine_mode mode = GET_MODE (x);
276       HOST_WIDE_INT offset = 0;
277 
278       switch (TREE_CODE (decl))
279 	{
280 	default:
281 	  decl = NULL;
282 	  break;
283 
284 	case VAR_DECL:
285 	  break;
286 
287 	case ARRAY_REF:
288 	case ARRAY_RANGE_REF:
289 	case COMPONENT_REF:
290 	case BIT_FIELD_REF:
291 	case REALPART_EXPR:
292 	case IMAGPART_EXPR:
293 	case VIEW_CONVERT_EXPR:
294 	  {
295 	    HOST_WIDE_INT bitsize, bitpos;
296 	    tree toffset;
297 	    int unsignedp = 0, volatilep = 0;
298 
299 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
300 					&mode, &unsignedp, &volatilep, false);
301 	    if (bitsize != GET_MODE_BITSIZE (mode)
302 		|| (bitpos % BITS_PER_UNIT)
303 		|| (toffset && !host_integerp (toffset, 0)))
304 	      decl = NULL;
305 	    else
306 	      {
307 		offset += bitpos / BITS_PER_UNIT;
308 		if (toffset)
309 		  offset += TREE_INT_CST_LOW (toffset);
310 	      }
311 	    break;
312 	  }
313 	}
314 
315       if (decl
316 	  && mode == GET_MODE (x)
317 	  && TREE_CODE (decl) == VAR_DECL
318 	  && (TREE_STATIC (decl)
319 	      || DECL_THREAD_LOCAL_P (decl))
320 	  && DECL_RTL_SET_P (decl)
321 	  && MEM_P (DECL_RTL (decl)))
322 	{
323 	  rtx newx;
324 
325 	  offset += MEM_OFFSET (x);
326 
327 	  newx = DECL_RTL (decl);
328 
329 	  if (MEM_P (newx))
330 	    {
331 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 
333 	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  That is the case if there's no OFFSET
		 and the old address X is identical to NEWX, or if X is of
		 the form (plus NEWX OFFSET), or NEWX is of the form
337 		 (plus Y (const_int Z)) and X is that with the offset
338 		 added: (plus Y (const_int Z+OFFSET)).  */
339 	      if (!((offset == 0
340 		     || (GET_CODE (o) == PLUS
341 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
342 			 && (offset == INTVAL (XEXP (o, 1))
343 			     || (GET_CODE (n) == PLUS
344 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
345 				 && (INTVAL (XEXP (n, 1)) + offset
346 				     == INTVAL (XEXP (o, 1)))
347 				 && (n = XEXP (n, 0))))
348 			 && (o = XEXP (o, 0))))
349 		    && rtx_equal_p (o, n)))
350 		x = adjust_address_nv (newx, mode, offset);
351 	    }
352 	  else if (GET_MODE (x) == GET_MODE (newx)
353 		   && offset == 0)
354 	    x = newx;
355 	}
356     }
357 
358   return x;
359 }
360 
361 /* Make a unary operation by first seeing if it folds and otherwise making
362    the specified operation.  */
363 
364 rtx
365 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
366 		    enum machine_mode op_mode)
367 {
368   rtx tem;
369 
370   /* If this simplifies, use it.  */
371   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
372     return tem;
373 
374   return gen_rtx_fmt_e (code, mode, op);
375 }
376 
377 /* Likewise for ternary operations.  */
378 
379 rtx
380 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
381 		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 {
383   rtx tem;
384 
385   /* If this simplifies, use it.  */
386   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
387 					      op0, op1, op2)))
388     return tem;
389 
390   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 }
392 
393 /* Likewise, for relational operations.
394    CMP_MODE specifies mode comparison is done in.  */
395 
396 rtx
397 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
398 			 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 {
400   rtx tem;
401 
402   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
403 						 op0, op1)))
404     return tem;
405 
406   return gen_rtx_fmt_ee (code, mode, op0, op1);
407 }
408 
409 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
410    and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
412    result.  */
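/* A typical use of FN is to recognize one particular kind of subexpression
   (for example a given hard register) and return the value to substitute
   for it, returning NULL for everything else.  */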
413 
414 rtx
415 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
416 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 {
418   enum rtx_code code = GET_CODE (x);
419   enum machine_mode mode = GET_MODE (x);
420   enum machine_mode op_mode;
421   const char *fmt;
422   rtx op0, op1, op2, newx, op;
423   rtvec vec, newvec;
424   int i, j;
425 
426   if (__builtin_expect (fn != NULL, 0))
427     {
428       newx = fn (x, old_rtx, data);
429       if (newx)
430 	return newx;
431     }
432   else if (rtx_equal_p (x, old_rtx))
433     return copy_rtx ((rtx) data);
434 
435   switch (GET_RTX_CLASS (code))
436     {
437     case RTX_UNARY:
438       op0 = XEXP (x, 0);
439       op_mode = GET_MODE (op0);
440       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
441       if (op0 == XEXP (x, 0))
442 	return x;
443       return simplify_gen_unary (code, mode, op0, op_mode);
444 
445     case RTX_BIN_ARITH:
446     case RTX_COMM_ARITH:
447       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
448       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
449       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
450 	return x;
451       return simplify_gen_binary (code, mode, op0, op1);
452 
453     case RTX_COMPARE:
454     case RTX_COMM_COMPARE:
455       op0 = XEXP (x, 0);
456       op1 = XEXP (x, 1);
457       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
458       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
459       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
460       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
461 	return x;
462       return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 
464     case RTX_TERNARY:
465     case RTX_BITFIELD_OPS:
466       op0 = XEXP (x, 0);
467       op_mode = GET_MODE (op0);
468       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
469       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
470       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
471       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
472 	return x;
473       if (op_mode == VOIDmode)
474 	op_mode = GET_MODE (op0);
475       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 
477     case RTX_EXTRA:
478       if (code == SUBREG)
479 	{
480 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
481 	  if (op0 == SUBREG_REG (x))
482 	    return x;
483 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
484 				     GET_MODE (SUBREG_REG (x)),
485 				     SUBREG_BYTE (x));
486 	  return op0 ? op0 : x;
487 	}
488       break;
489 
490     case RTX_OBJ:
491       if (code == MEM)
492 	{
493 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
494 	  if (op0 == XEXP (x, 0))
495 	    return x;
496 	  return replace_equiv_address_nv (x, op0);
497 	}
498       else if (code == LO_SUM)
499 	{
500 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 
503 	  /* (lo_sum (high x) x) -> x  */
504 	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
505 	    return op1;
506 
507 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
508 	    return x;
509 	  return gen_rtx_LO_SUM (mode, op0, op1);
510 	}
511       break;
512 
513     default:
514       break;
515     }
516 
517   newx = x;
518   fmt = GET_RTX_FORMAT (code);
519   for (i = 0; fmt[i]; i++)
520     switch (fmt[i])
521       {
522       case 'E':
523 	vec = XVEC (x, i);
524 	newvec = XVEC (newx, i);
525 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 	  {
527 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
528 					  old_rtx, fn, data);
529 	    if (op != RTVEC_ELT (vec, j))
530 	      {
531 		if (newvec == vec)
532 		  {
533 		    newvec = shallow_copy_rtvec (vec);
534 		    if (x == newx)
535 		      newx = shallow_copy_rtx (x);
536 		    XVEC (newx, i) = newvec;
537 		  }
538 		RTVEC_ELT (newvec, j) = op;
539 	      }
540 	  }
541 	break;
542 
543       case 'e':
544 	if (XEXP (x, i))
545 	  {
546 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
547 	    if (op != XEXP (x, i))
548 	      {
549 		if (x == newx)
550 		  newx = shallow_copy_rtx (x);
551 		XEXP (newx, i) = op;
552 	      }
553 	  }
554 	break;
555       }
556   return newx;
557 }
558 
559 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
560    resulting RTX.  Return a new RTX which is as simplified as possible.  */
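/* For example, replacing (reg:SI 1) with (const_int 8) in
   (plus:SI (reg:SI 1) (const_int 4)) should fold the result to
   (const_int 12).  */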
561 
562 rtx
563 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 {
565   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 }
567 
568 /* Try to simplify a unary operation CODE whose output mode is to be
569    MODE with input operand OP whose mode was originally OP_MODE.
570    Return zero if no simplification can be made.  */
571 rtx
572 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
573 			  rtx op, enum machine_mode op_mode)
574 {
575   rtx trueop, tem;
576 
577   trueop = avoid_constant_pool_reference (op);
578 
579   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
580   if (tem)
581     return tem;
582 
583   return simplify_unary_operation_1 (code, mode, op);
584 }
585 
586 /* Perform some simplifications we can do even if the operands
587    aren't constant.  */
588 static rtx
589 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
590 {
591   enum rtx_code reversed;
592   rtx temp;
593 
594   switch (code)
595     {
596     case NOT:
597       /* (not (not X)) == X.  */
598       if (GET_CODE (op) == NOT)
599 	return XEXP (op, 0);
600 
601       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
602 	 comparison is all ones.   */
603       if (COMPARISON_P (op)
604 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
605 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
606 	return simplify_gen_relational (reversed, mode, VOIDmode,
607 					XEXP (op, 0), XEXP (op, 1));
608 
609       /* (not (plus X -1)) can become (neg X).  */
610       if (GET_CODE (op) == PLUS
611 	  && XEXP (op, 1) == constm1_rtx)
612 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
613 
614       /* Similarly, (not (neg X)) is (plus X -1).  */
615       if (GET_CODE (op) == NEG)
616 	return plus_constant (XEXP (op, 0), -1);
617 
618       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
619       if (GET_CODE (op) == XOR
620 	  && CONST_INT_P (XEXP (op, 1))
621 	  && (temp = simplify_unary_operation (NOT, mode,
622 					       XEXP (op, 1), mode)) != 0)
623 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
624 
625       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
626       if (GET_CODE (op) == PLUS
627 	  && CONST_INT_P (XEXP (op, 1))
628 	  && mode_signbit_p (mode, XEXP (op, 1))
629 	  && (temp = simplify_unary_operation (NOT, mode,
630 					       XEXP (op, 1), mode)) != 0)
631 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
632 
633 
634       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
635 	 operands other than 1, but that is not valid.  We could do a
636 	 similar simplification for (not (lshiftrt C X)) where C is
637 	 just the sign bit, but this doesn't seem common enough to
638 	 bother with.  */
639       if (GET_CODE (op) == ASHIFT
640 	  && XEXP (op, 0) == const1_rtx)
641 	{
642 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
643 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
644 	}
645 
646       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
647 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
648 	 so we can perform the above simplification.  */
649 
650       if (STORE_FLAG_VALUE == -1
651 	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
653 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
654 	return simplify_gen_relational (GE, mode, VOIDmode,
655 					XEXP (op, 0), const0_rtx);
656 
657 
658       if (GET_CODE (op) == SUBREG
659 	  && subreg_lowpart_p (op)
660 	  && (GET_MODE_SIZE (GET_MODE (op))
661 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
662 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
663 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
664 	{
665 	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
666 	  rtx x;
667 
668 	  x = gen_rtx_ROTATE (inner_mode,
669 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
670 						  inner_mode),
671 			      XEXP (SUBREG_REG (op), 1));
672 	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
673 	}
674 
675       /* Apply De Morgan's laws to reduce number of patterns for machines
676 	 with negating logical insns (and-not, nand, etc.).  If result has
677 	 only one NOT, put it first, since that is how the patterns are
678 	 coded.  */
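      /* For example, (not (ior X Y)) becomes (and (not X) (not Y)), and
	 (not (and X Y)) becomes (ior (not X) (not Y)).  */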
679 
680       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
681 	{
682 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
683 	  enum machine_mode op_mode;
684 
685 	  op_mode = GET_MODE (in1);
686 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
687 
688 	  op_mode = GET_MODE (in2);
689 	  if (op_mode == VOIDmode)
690 	    op_mode = mode;
691 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
692 
693 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
694 	    {
695 	      rtx tem = in2;
696 	      in2 = in1; in1 = tem;
697 	    }
698 
699 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
700 				 mode, in1, in2);
701 	}
702       break;
703 
704     case NEG:
705       /* (neg (neg X)) == X.  */
706       if (GET_CODE (op) == NEG)
707 	return XEXP (op, 0);
708 
709       /* (neg (plus X 1)) can become (not X).  */
710       if (GET_CODE (op) == PLUS
711 	  && XEXP (op, 1) == const1_rtx)
712 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
713 
714       /* Similarly, (neg (not X)) is (plus X 1).  */
715       if (GET_CODE (op) == NOT)
716 	return plus_constant (XEXP (op, 0), 1);
717 
718       /* (neg (minus X Y)) can become (minus Y X).  This transformation
719 	 isn't safe for modes with signed zeros, since if X and Y are
720 	 both +0, (minus Y X) is the same as (minus X Y).  If the
721 	 rounding mode is towards +infinity (or -infinity) then the two
722 	 expressions will be rounded differently.  */
723       if (GET_CODE (op) == MINUS
724 	  && !HONOR_SIGNED_ZEROS (mode)
725 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
726 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
727 
728       if (GET_CODE (op) == PLUS
729 	  && !HONOR_SIGNED_ZEROS (mode)
730 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
731 	{
732 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
733 	  if (CONST_INT_P (XEXP (op, 1))
734 	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
735 	    {
736 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
737 	      if (temp)
738 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
739 	    }
740 
741 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
742 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
743 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
744 	}
745 
746       /* (neg (mult A B)) becomes (mult A (neg B)).
747 	 This works even for floating-point values.  */
748       if (GET_CODE (op) == MULT
749 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
750 	{
751 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
752 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
753 	}
754 
755       /* NEG commutes with ASHIFT since it is multiplication.  Only do
756 	 this if we can then eliminate the NEG (e.g., if the operand
757 	 is a constant).  */
758       if (GET_CODE (op) == ASHIFT)
759 	{
760 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
761 	  if (temp)
762 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
763 	}
764 
765       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
766 	 C is equal to the width of MODE minus 1.  */
767       if (GET_CODE (op) == ASHIFTRT
768 	  && CONST_INT_P (XEXP (op, 1))
769 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
770 	return simplify_gen_binary (LSHIFTRT, mode,
771 				    XEXP (op, 0), XEXP (op, 1));
772 
773       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
774 	 C is equal to the width of MODE minus 1.  */
775       if (GET_CODE (op) == LSHIFTRT
776 	  && CONST_INT_P (XEXP (op, 1))
777 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
778 	return simplify_gen_binary (ASHIFTRT, mode,
779 				    XEXP (op, 0), XEXP (op, 1));
780 
781       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
782       if (GET_CODE (op) == XOR
783 	  && XEXP (op, 1) == const1_rtx
784 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
785 	return plus_constant (XEXP (op, 0), -1);
786 
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.
	 In both cases, C is one less than the precision of the mode of X.  */
789       if (GET_CODE (op) == LT
790 	  && XEXP (op, 1) == const0_rtx
791 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
792 	{
793 	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
794 	  int isize = GET_MODE_PRECISION (inner);
795 	  if (STORE_FLAG_VALUE == 1)
796 	    {
797 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
798 					  GEN_INT (isize - 1));
799 	      if (mode == inner)
800 		return temp;
801 	      if (GET_MODE_PRECISION (mode) > isize)
802 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
803 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
804 	    }
805 	  else if (STORE_FLAG_VALUE == -1)
806 	    {
807 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
808 					  GEN_INT (isize - 1));
809 	      if (mode == inner)
810 		return temp;
811 	      if (GET_MODE_PRECISION (mode) > isize)
812 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
813 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
814 	    }
815 	}
816       break;
817 
818     case TRUNCATE:
819       /* We can't handle truncation to a partial integer mode here
820          because we don't know the real bitsize of the partial
821          integer mode.  */
822       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
823         break;
824 
825       /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
826       if ((GET_CODE (op) == SIGN_EXTEND
827 	   || GET_CODE (op) == ZERO_EXTEND)
828 	  && GET_MODE (XEXP (op, 0)) == mode)
829 	return XEXP (op, 0);
830 
831       /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
832 	 (OP:SI foo:SI) if OP is NEG or ABS.  */
833       if ((GET_CODE (op) == ABS
834 	   || GET_CODE (op) == NEG)
835 	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
836 	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
837 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
838 	return simplify_gen_unary (GET_CODE (op), mode,
839 				   XEXP (XEXP (op, 0), 0), mode);
840 
841       /* (truncate:A (subreg:B (truncate:C X) 0)) is
842 	 (truncate:A X).  */
843       if (GET_CODE (op) == SUBREG
844 	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
845 	  && subreg_lowpart_p (op))
846 	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
847 				   GET_MODE (XEXP (SUBREG_REG (op), 0)));
848 
849       /* If we know that the value is already truncated, we can
850          replace the TRUNCATE with a SUBREG.  Note that this is also
851          valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition for
853          truncation.  But don't do this for an (LSHIFTRT (MULT ...))
854          since this will cause problems with the umulXi3_highpart
855          patterns.  */
856       if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
857 	   ? (num_sign_bit_copies (op, GET_MODE (op))
858 	      > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
859 				- GET_MODE_PRECISION (mode)))
860 	   : truncated_to_mode (mode, op))
861 	  && ! (GET_CODE (op) == LSHIFTRT
862 		&& GET_CODE (XEXP (op, 0)) == MULT))
863 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
864 
865       /* A truncate of a comparison can be replaced with a subreg if
866          STORE_FLAG_VALUE permits.  This is like the previous test,
867          but it works even if the comparison is done in a mode larger
868          than HOST_BITS_PER_WIDE_INT.  */
869       if (HWI_COMPUTABLE_MODE_P (mode)
870 	  && COMPARISON_P (op)
871 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
872 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
873       break;
874 
875     case FLOAT_TRUNCATE:
876       if (DECIMAL_FLOAT_MODE_P (mode))
877 	break;
878 
879       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
880       if (GET_CODE (op) == FLOAT_EXTEND
881 	  && GET_MODE (XEXP (op, 0)) == mode)
882 	return XEXP (op, 0);
883 
884       /* (float_truncate:SF (float_truncate:DF foo:XF))
885          = (float_truncate:SF foo:XF).
886 	 This may eliminate double rounding, so it is unsafe.
887 
888          (float_truncate:SF (float_extend:XF foo:DF))
889          = (float_truncate:SF foo:DF).
890 
891          (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
893       if ((GET_CODE (op) == FLOAT_TRUNCATE
894 	   && flag_unsafe_math_optimizations)
895 	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);
902 
903       /*  (float_truncate (float x)) is (float x)  */
904       if (GET_CODE (op) == FLOAT
905 	  && (flag_unsafe_math_optimizations
906 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
907 		  && ((unsigned)significand_size (GET_MODE (op))
908 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
909 			  - num_sign_bit_copies (XEXP (op, 0),
910 						 GET_MODE (XEXP (op, 0))))))))
911 	return simplify_gen_unary (FLOAT, mode,
912 				   XEXP (op, 0),
913 				   GET_MODE (XEXP (op, 0)));
914 
915       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
916 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
917       if ((GET_CODE (op) == ABS
918 	   || GET_CODE (op) == NEG)
919 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
920 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
921 	return simplify_gen_unary (GET_CODE (op), mode,
922 				   XEXP (XEXP (op, 0), 0), mode);
923 
924       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
925 	 is (float_truncate:SF x).  */
926       if (GET_CODE (op) == SUBREG
927 	  && subreg_lowpart_p (op)
928 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
929 	return SUBREG_REG (op);
930       break;
931 
932     case FLOAT_EXTEND:
933       if (DECIMAL_FLOAT_MODE_P (mode))
934 	break;
935 
936       /*  (float_extend (float_extend x)) is (float_extend x)
937 
938 	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.  */
941       if (GET_CODE (op) == FLOAT_EXTEND
942 	  || (GET_CODE (op) == FLOAT
943 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
944 	      && ((unsigned)significand_size (GET_MODE (op))
945 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
946 		      - num_sign_bit_copies (XEXP (op, 0),
947 					     GET_MODE (XEXP (op, 0)))))))
948 	return simplify_gen_unary (GET_CODE (op), mode,
949 				   XEXP (op, 0),
950 				   GET_MODE (XEXP (op, 0)));
951 
952       break;
953 
954     case ABS:
955       /* (abs (neg <foo>)) -> (abs <foo>) */
956       if (GET_CODE (op) == NEG)
957 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
958 				   GET_MODE (XEXP (op, 0)));
959 
960       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
961          do nothing.  */
962       if (GET_MODE (op) == VOIDmode)
963 	break;
964 
965       /* If operand is something known to be positive, ignore the ABS.  */
966       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
967 	  || val_signbit_known_clear_p (GET_MODE (op),
968 					nonzero_bits (op, GET_MODE (op))))
969 	return op;
970 
971       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
972       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
973 	return gen_rtx_NEG (mode, op);
974 
975       break;
976 
977     case FFS:
978       /* (ffs (*_extend <X>)) = (ffs <X>) */
979       if (GET_CODE (op) == SIGN_EXTEND
980 	  || GET_CODE (op) == ZERO_EXTEND)
981 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
982 				   GET_MODE (XEXP (op, 0)));
983       break;
984 
985     case POPCOUNT:
986       switch (GET_CODE (op))
987 	{
988 	case BSWAP:
989 	case ZERO_EXTEND:
990 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
991 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
992 				     GET_MODE (XEXP (op, 0)));
993 
994 	case ROTATE:
995 	case ROTATERT:
996 	  /* Rotations don't affect popcount.  */
997 	  if (!side_effects_p (XEXP (op, 1)))
998 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 				       GET_MODE (XEXP (op, 0)));
1000 	  break;
1001 
1002 	default:
1003 	  break;
1004 	}
1005       break;
1006 
1007     case PARITY:
1008       switch (GET_CODE (op))
1009 	{
1010 	case NOT:
1011 	case BSWAP:
1012 	case ZERO_EXTEND:
1013 	case SIGN_EXTEND:
1014 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1015 				     GET_MODE (XEXP (op, 0)));
1016 
1017 	case ROTATE:
1018 	case ROTATERT:
1019 	  /* Rotations don't affect parity.  */
1020 	  if (!side_effects_p (XEXP (op, 1)))
1021 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 				       GET_MODE (XEXP (op, 0)));
1023 	  break;
1024 
1025 	default:
1026 	  break;
1027 	}
1028       break;
1029 
1030     case BSWAP:
1031       /* (bswap (bswap x)) -> x.  */
1032       if (GET_CODE (op) == BSWAP)
1033 	return XEXP (op, 0);
1034       break;
1035 
1036     case FLOAT:
1037       /* (float (sign_extend <X>)) = (float <X>).  */
1038       if (GET_CODE (op) == SIGN_EXTEND)
1039 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1040 				   GET_MODE (XEXP (op, 0)));
1041       break;
1042 
1043     case SIGN_EXTEND:
1044       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1045 	 becomes just the MINUS if its mode is MODE.  This allows
1046 	 folding switch statements on machines using casesi (such as
1047 	 the VAX).  */
1048       if (GET_CODE (op) == TRUNCATE
1049 	  && GET_MODE (XEXP (op, 0)) == mode
1050 	  && GET_CODE (XEXP (op, 0)) == MINUS
1051 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1052 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1053 	return XEXP (op, 0);
1054 
1055       /* Extending a widening multiplication should be canonicalized to
1056 	 a wider widening multiplication.  */
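      /* For example, (sign_extend:DI (mult:SI (sign_extend:SI X:HI)
	 (sign_extend:SI Y:HI))) should become
	 (mult:DI (sign_extend:DI X:HI) (sign_extend:DI Y:HI)).  */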
1057       if (GET_CODE (op) == MULT)
1058 	{
1059 	  rtx lhs = XEXP (op, 0);
1060 	  rtx rhs = XEXP (op, 1);
1061 	  enum rtx_code lcode = GET_CODE (lhs);
1062 	  enum rtx_code rcode = GET_CODE (rhs);
1063 
1064 	  /* Widening multiplies usually extend both operands, but sometimes
1065 	     they use a shift to extract a portion of a register.  */
1066 	  if ((lcode == SIGN_EXTEND
1067 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1068 	      && (rcode == SIGN_EXTEND
1069 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1070 	    {
1071 	      enum machine_mode lmode = GET_MODE (lhs);
1072 	      enum machine_mode rmode = GET_MODE (rhs);
1073 	      int bits;
1074 
1075 	      if (lcode == ASHIFTRT)
1076 		/* Number of bits not shifted off the end.  */
1077 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1078 	      else /* lcode == SIGN_EXTEND */
1079 		/* Size of inner mode.  */
1080 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1081 
1082 	      if (rcode == ASHIFTRT)
1083 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1084 	      else /* rcode == SIGN_EXTEND */
1085 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1086 
	      /* We can only widen multiplies if the result is mathematically
1088 		 equivalent.  I.e. if overflow was impossible.  */
1089 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1090 		return simplify_gen_binary
1091 			 (MULT, mode,
1092 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1093 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1094 	    }
1095 	}
1096 
1097       /* Check for a sign extension of a subreg of a promoted
1098 	 variable, where the promotion is sign-extended, and the
1099 	 target mode is the same as the variable's promotion.  */
1100       if (GET_CODE (op) == SUBREG
1101 	  && SUBREG_PROMOTED_VAR_P (op)
1102 	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1103 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1104 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 
1106       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1107 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1108       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1109 	{
1110 	  gcc_assert (GET_MODE_BITSIZE (mode)
1111 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1112 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1113 				     GET_MODE (XEXP (op, 0)));
1114 	}
1115 
1116       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is a mode O with
1118 	 GET_MODE_BITSIZE (N) - I bits.
1119 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1120 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1121       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1122 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1123 	  && CONST_INT_P (XEXP (op, 1))
1124 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1125 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1126 	{
1127 	  enum machine_mode tmode
1128 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1129 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1130 	  gcc_assert (GET_MODE_BITSIZE (mode)
1131 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1132 	  if (tmode != BLKmode)
1133 	    {
1134 	      rtx inner =
1135 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1136 	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1137 					 ? SIGN_EXTEND : ZERO_EXTEND,
1138 					 mode, inner, tmode);
1139 	    }
1140 	}
1141 
1142 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
1144 	 we can do this only if the target does not support different pointer
1145 	 or address modes depending on the address space.  */
1146       if (target_default_pointer_address_modes_p ()
1147 	  && ! POINTERS_EXTEND_UNSIGNED
1148 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1149 	  && (CONSTANT_P (op)
1150 	      || (GET_CODE (op) == SUBREG
1151 		  && REG_P (SUBREG_REG (op))
1152 		  && REG_POINTER (SUBREG_REG (op))
1153 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1154 	return convert_memory_address (Pmode, op);
1155 #endif
1156       break;
1157 
1158     case ZERO_EXTEND:
1159       /* Check for a zero extension of a subreg of a promoted
1160 	 variable, where the promotion is zero-extended, and the
1161 	 target mode is the same as the variable's promotion.  */
1162       if (GET_CODE (op) == SUBREG
1163 	  && SUBREG_PROMOTED_VAR_P (op)
1164 	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1165 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1166 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
1167 
1168       /* Extending a widening multiplication should be canonicalized to
1169 	 a wider widening multiplication.  */
1170       if (GET_CODE (op) == MULT)
1171 	{
1172 	  rtx lhs = XEXP (op, 0);
1173 	  rtx rhs = XEXP (op, 1);
1174 	  enum rtx_code lcode = GET_CODE (lhs);
1175 	  enum rtx_code rcode = GET_CODE (rhs);
1176 
1177 	  /* Widening multiplies usually extend both operands, but sometimes
1178 	     they use a shift to extract a portion of a register.  */
1179 	  if ((lcode == ZERO_EXTEND
1180 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1181 	      && (rcode == ZERO_EXTEND
1182 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1183 	    {
1184 	      enum machine_mode lmode = GET_MODE (lhs);
1185 	      enum machine_mode rmode = GET_MODE (rhs);
1186 	      int bits;
1187 
1188 	      if (lcode == LSHIFTRT)
1189 		/* Number of bits not shifted off the end.  */
1190 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1191 	      else /* lcode == ZERO_EXTEND */
1192 		/* Size of inner mode.  */
1193 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1194 
1195 	      if (rcode == LSHIFTRT)
1196 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1197 	      else /* rcode == ZERO_EXTEND */
1198 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1199 
	      /* We can only widen multiplies if the result is mathematically
1201 		 equivalent.  I.e. if overflow was impossible.  */
1202 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1203 		return simplify_gen_binary
1204 			 (MULT, mode,
1205 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1206 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1207 	    }
1208 	}
1209 
1210       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1211       if (GET_CODE (op) == ZERO_EXTEND)
1212 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1213 				   GET_MODE (XEXP (op, 0)));
1214 
1215       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is a mode O with
1217 	 GET_MODE_BITSIZE (N) - I bits.  */
1218       if (GET_CODE (op) == LSHIFTRT
1219 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1220 	  && CONST_INT_P (XEXP (op, 1))
1221 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1222 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1223 	{
1224 	  enum machine_mode tmode
1225 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1226 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1227 	  if (tmode != BLKmode)
1228 	    {
1229 	      rtx inner =
1230 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1231 	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1232 	    }
1233 	}
1234 
1235 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
1237 	 we can do this only if the target does not support different pointer
1238 	 or address modes depending on the address space.  */
1239       if (target_default_pointer_address_modes_p ()
1240 	  && POINTERS_EXTEND_UNSIGNED > 0
1241 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1242 	  && (CONSTANT_P (op)
1243 	      || (GET_CODE (op) == SUBREG
1244 		  && REG_P (SUBREG_REG (op))
1245 		  && REG_POINTER (SUBREG_REG (op))
1246 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1247 	return convert_memory_address (Pmode, op);
1248 #endif
1249       break;
1250 
1251     default:
1252       break;
1253     }
1254 
1255   return 0;
1256 }
1257 
1258 /* Try to compute the value of a unary operation CODE whose output mode is to
1259    be MODE with input operand OP whose mode was originally OP_MODE.
1260    Return zero if the value cannot be computed.  */
1261 rtx
1262 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1263 				rtx op, enum machine_mode op_mode)
1264 {
1265   unsigned int width = GET_MODE_PRECISION (mode);
1266   unsigned int op_width = GET_MODE_PRECISION (op_mode);
1267 
1268   if (code == VEC_DUPLICATE)
1269     {
1270       gcc_assert (VECTOR_MODE_P (mode));
1271       if (GET_MODE (op) != VOIDmode)
1272       {
1273 	if (!VECTOR_MODE_P (GET_MODE (op)))
1274 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1275 	else
1276 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1277 						(GET_MODE (op)));
1278       }
1279       if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1280 	  || GET_CODE (op) == CONST_VECTOR)
1281 	{
1282           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1283           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1284 	  rtvec v = rtvec_alloc (n_elts);
1285 	  unsigned int i;
1286 
1287 	  if (GET_CODE (op) != CONST_VECTOR)
1288 	    for (i = 0; i < n_elts; i++)
1289 	      RTVEC_ELT (v, i) = op;
1290 	  else
1291 	    {
1292 	      enum machine_mode inmode = GET_MODE (op);
1293               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1294               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1295 
1296 	      gcc_assert (in_n_elts < n_elts);
1297 	      gcc_assert ((n_elts % in_n_elts) == 0);
1298 	      for (i = 0; i < n_elts; i++)
1299 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1300 	    }
1301 	  return gen_rtx_CONST_VECTOR (mode, v);
1302 	}
1303     }
1304 
1305   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1306     {
1307       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1308       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1309       enum machine_mode opmode = GET_MODE (op);
1310       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1311       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1312       rtvec v = rtvec_alloc (n_elts);
1313       unsigned int i;
1314 
1315       gcc_assert (op_n_elts == n_elts);
1316       for (i = 0; i < n_elts; i++)
1317 	{
1318 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1319 					    CONST_VECTOR_ELT (op, i),
1320 					    GET_MODE_INNER (opmode));
1321 	  if (!x)
1322 	    return 0;
1323 	  RTVEC_ELT (v, i) = x;
1324 	}
1325       return gen_rtx_CONST_VECTOR (mode, v);
1326     }
1327 
1328   /* The order of these tests is critical so that, for example, we don't
1329      check the wrong mode (input vs. output) for a conversion operation,
1330      such as FIX.  At some point, this should be simplified.  */
1331 
1332   if (code == FLOAT && GET_MODE (op) == VOIDmode
1333       && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1334     {
1335       HOST_WIDE_INT hv, lv;
1336       REAL_VALUE_TYPE d;
1337 
1338       if (CONST_INT_P (op))
1339 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1340       else
1341 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1342 
1343       REAL_VALUE_FROM_INT (d, lv, hv, mode);
1344       d = real_value_truncate (mode, d);
1345       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1346     }
1347   else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1348 	   && (GET_CODE (op) == CONST_DOUBLE
1349 	       || CONST_INT_P (op)))
1350     {
1351       HOST_WIDE_INT hv, lv;
1352       REAL_VALUE_TYPE d;
1353 
1354       if (CONST_INT_P (op))
1355 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1356       else
1357 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1358 
1359       if (op_mode == VOIDmode)
1360 	{
1361 	  /* We don't know how to interpret negative-looking numbers in
1362 	     this case, so don't try to fold those.  */
1363 	  if (hv < 0)
1364 	    return 0;
1365 	}
1366       else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1367 	;
1368       else
1369 	hv = 0, lv &= GET_MODE_MASK (op_mode);
1370 
1371       REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1372       d = real_value_truncate (mode, d);
1373       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1374     }
1375 
1376   if (CONST_INT_P (op)
1377       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1378     {
1379       HOST_WIDE_INT arg0 = INTVAL (op);
1380       HOST_WIDE_INT val;
1381 
1382       switch (code)
1383 	{
1384 	case NOT:
1385 	  val = ~ arg0;
1386 	  break;
1387 
1388 	case NEG:
1389 	  val = - arg0;
1390 	  break;
1391 
1392 	case ABS:
1393 	  val = (arg0 >= 0 ? arg0 : - arg0);
1394 	  break;
1395 
1396 	case FFS:
1397 	  arg0 &= GET_MODE_MASK (mode);
1398 	  val = ffs_hwi (arg0);
1399 	  break;
1400 
1401 	case CLZ:
1402 	  arg0 &= GET_MODE_MASK (mode);
1403 	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1404 	    ;
1405 	  else
1406 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1407 	  break;
1408 
1409 	case CLRSB:
1410 	  arg0 &= GET_MODE_MASK (mode);
1411 	  if (arg0 == 0)
1412 	    val = GET_MODE_PRECISION (mode) - 1;
1413 	  else if (arg0 >= 0)
1414 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1415 	  else if (arg0 < 0)
1416 	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1417 	  break;
1418 
1419 	case CTZ:
1420 	  arg0 &= GET_MODE_MASK (mode);
1421 	  if (arg0 == 0)
1422 	    {
1423 	      /* Even if the value at zero is undefined, we have to come
1424 		 up with some replacement.  Seems good enough.  */
1425 	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1426 		val = GET_MODE_PRECISION (mode);
1427 	    }
1428 	  else
1429 	    val = ctz_hwi (arg0);
1430 	  break;
1431 
1432 	case POPCOUNT:
1433 	  arg0 &= GET_MODE_MASK (mode);
1434 	  val = 0;
1435 	  while (arg0)
1436 	    val++, arg0 &= arg0 - 1;
1437 	  break;
1438 
1439 	case PARITY:
1440 	  arg0 &= GET_MODE_MASK (mode);
1441 	  val = 0;
1442 	  while (arg0)
1443 	    val++, arg0 &= arg0 - 1;
1444 	  val &= 1;
1445 	  break;
1446 
1447 	case BSWAP:
1448 	  {
1449 	    unsigned int s;
1450 
1451 	    val = 0;
1452 	    for (s = 0; s < width; s += 8)
1453 	      {
1454 		unsigned int d = width - s - 8;
1455 		unsigned HOST_WIDE_INT byte;
1456 		byte = (arg0 >> s) & 0xff;
1457 		val |= byte << d;
1458 	      }
1459 	  }
1460 	  break;
1461 
1462 	case TRUNCATE:
1463 	  val = arg0;
1464 	  break;
1465 
1466 	case ZERO_EXTEND:
1467 	  /* When zero-extending a CONST_INT, we need to know its
1468              original mode.  */
1469 	  gcc_assert (op_mode != VOIDmode);
1470 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1471 	    {
1472 	      /* If we were really extending the mode,
1473 		 we would have to distinguish between zero-extension
1474 		 and sign-extension.  */
1475 	      gcc_assert (width == op_width);
1476 	      val = arg0;
1477 	    }
1478 	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 	    val = arg0 & GET_MODE_MASK (op_mode);
1480 	  else
1481 	    return 0;
1482 	  break;
1483 
1484 	case SIGN_EXTEND:
1485 	  if (op_mode == VOIDmode)
1486 	    op_mode = mode;
1487 	  op_width = GET_MODE_PRECISION (op_mode);
1488 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1489 	    {
1490 	      /* If we were really extending the mode,
1491 		 we would have to distinguish between zero-extension
1492 		 and sign-extension.  */
1493 	      gcc_assert (width == op_width);
1494 	      val = arg0;
1495 	    }
1496 	  else if (op_width < HOST_BITS_PER_WIDE_INT)
1497 	    {
1498 	      val = arg0 & GET_MODE_MASK (op_mode);
1499 	      if (val_signbit_known_set_p (op_mode, val))
1500 		val |= ~GET_MODE_MASK (op_mode);
1501 	    }
1502 	  else
1503 	    return 0;
1504 	  break;
1505 
1506 	case SQRT:
1507 	case FLOAT_EXTEND:
1508 	case FLOAT_TRUNCATE:
1509 	case SS_TRUNCATE:
1510 	case US_TRUNCATE:
1511 	case SS_NEG:
1512 	case US_NEG:
1513 	case SS_ABS:
1514 	  return 0;
1515 
1516 	default:
1517 	  gcc_unreachable ();
1518 	}
1519 
1520       return gen_int_mode (val, mode);
1521     }
1522 
1523   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1524      for a DImode operation on a CONST_INT.  */
1525   else if (GET_MODE (op) == VOIDmode
1526 	   && width <= HOST_BITS_PER_WIDE_INT * 2
1527 	   && (GET_CODE (op) == CONST_DOUBLE
1528 	       || CONST_INT_P (op)))
1529     {
1530       unsigned HOST_WIDE_INT l1, lv;
1531       HOST_WIDE_INT h1, hv;
1532 
1533       if (GET_CODE (op) == CONST_DOUBLE)
1534 	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1535       else
1536 	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1537 
1538       switch (code)
1539 	{
1540 	case NOT:
1541 	  lv = ~ l1;
1542 	  hv = ~ h1;
1543 	  break;
1544 
1545 	case NEG:
1546 	  neg_double (l1, h1, &lv, &hv);
1547 	  break;
1548 
1549 	case ABS:
1550 	  if (h1 < 0)
1551 	    neg_double (l1, h1, &lv, &hv);
1552 	  else
1553 	    lv = l1, hv = h1;
1554 	  break;
1555 
1556 	case FFS:
1557 	  hv = 0;
1558 	  if (l1 != 0)
1559 	    lv = ffs_hwi (l1);
1560 	  else if (h1 != 0)
1561 	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1562 	  else
1563 	    lv = 0;
1564 	  break;
1565 
1566 	case CLZ:
1567 	  hv = 0;
1568 	  if (h1 != 0)
1569 	    lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1570 	      - HOST_BITS_PER_WIDE_INT;
1571 	  else if (l1 != 0)
1572 	    lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1573 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1574 	    lv = GET_MODE_PRECISION (mode);
1575 	  break;
1576 
1577 	case CTZ:
1578 	  hv = 0;
1579 	  if (l1 != 0)
1580 	    lv = ctz_hwi (l1);
1581 	  else if (h1 != 0)
1582 	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1583 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1584 	    lv = GET_MODE_PRECISION (mode);
1585 	  break;
1586 
1587 	case POPCOUNT:
1588 	  hv = 0;
1589 	  lv = 0;
1590 	  while (l1)
1591 	    lv++, l1 &= l1 - 1;
1592 	  while (h1)
1593 	    lv++, h1 &= h1 - 1;
1594 	  break;
1595 
1596 	case PARITY:
1597 	  hv = 0;
1598 	  lv = 0;
1599 	  while (l1)
1600 	    lv++, l1 &= l1 - 1;
1601 	  while (h1)
1602 	    lv++, h1 &= h1 - 1;
1603 	  lv &= 1;
1604 	  break;
1605 
1606 	case BSWAP:
1607 	  {
1608 	    unsigned int s;
1609 
1610 	    hv = 0;
1611 	    lv = 0;
1612 	    for (s = 0; s < width; s += 8)
1613 	      {
1614 		unsigned int d = width - s - 8;
1615 		unsigned HOST_WIDE_INT byte;
1616 
1617 		if (s < HOST_BITS_PER_WIDE_INT)
1618 		  byte = (l1 >> s) & 0xff;
1619 		else
1620 		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1621 
1622 		if (d < HOST_BITS_PER_WIDE_INT)
1623 		  lv |= byte << d;
1624 		else
1625 		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1626 	      }
1627 	  }
1628 	  break;
1629 
1630 	case TRUNCATE:
1631 	  /* This is just a change-of-mode, so do nothing.  */
1632 	  lv = l1, hv = h1;
1633 	  break;
1634 
1635 	case ZERO_EXTEND:
1636 	  gcc_assert (op_mode != VOIDmode);
1637 
1638 	  if (op_width > HOST_BITS_PER_WIDE_INT)
1639 	    return 0;
1640 
1641 	  hv = 0;
1642 	  lv = l1 & GET_MODE_MASK (op_mode);
1643 	  break;
1644 
1645 	case SIGN_EXTEND:
1646 	  if (op_mode == VOIDmode
1647 	      || op_width > HOST_BITS_PER_WIDE_INT)
1648 	    return 0;
1649 	  else
1650 	    {
1651 	      lv = l1 & GET_MODE_MASK (op_mode);
1652 	      if (val_signbit_known_set_p (op_mode, lv))
1653 		lv |= ~GET_MODE_MASK (op_mode);
1654 
1655 	      hv = HWI_SIGN_EXTEND (lv);
1656 	    }
1657 	  break;
1658 
1659 	case SQRT:
1660 	  return 0;
1661 
1662 	default:
1663 	  return 0;
1664 	}
1665 
1666       return immed_double_const (lv, hv, mode);
1667     }
1668 
1669   else if (GET_CODE (op) == CONST_DOUBLE
1670 	   && SCALAR_FLOAT_MODE_P (mode)
1671 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1672     {
1673       REAL_VALUE_TYPE d, t;
1674       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1675 
1676       switch (code)
1677 	{
1678 	case SQRT:
1679 	  if (HONOR_SNANS (mode) && real_isnan (&d))
1680 	    return 0;
1681 	  real_sqrt (&t, mode, &d);
1682 	  d = t;
1683 	  break;
1684 	case ABS:
1685 	  d = real_value_abs (&d);
1686 	  break;
1687 	case NEG:
1688 	  d = real_value_negate (&d);
1689 	  break;
1690 	case FLOAT_TRUNCATE:
1691 	  d = real_value_truncate (mode, d);
1692 	  break;
1693 	case FLOAT_EXTEND:
          /* All this does is change the mode, unless the mode
             class is changing as well.  */
1696 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1697 	    real_convert (&d, mode, &d);
1698 	  break;
1699 	case FIX:
1700 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1701 	  break;
1702 	case NOT:
1703 	  {
1704 	    long tmp[4];
1705 	    int i;
1706 
1707 	    real_to_target (tmp, &d, GET_MODE (op));
1708 	    for (i = 0; i < 4; i++)
1709 	      tmp[i] = ~tmp[i];
1710 	    real_from_target (&d, tmp, mode);
1711 	    break;
1712 	  }
1713 	default:
1714 	  gcc_unreachable ();
1715 	}
1716       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1717     }
1718 
1719   else if (GET_CODE (op) == CONST_DOUBLE
1720 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1721 	   && GET_MODE_CLASS (mode) == MODE_INT
1722 	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1723     {
1724       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1725 	 operators are intentionally left unspecified (to ease implementation
1726 	 by target backends), for consistency, this routine implements the
1727 	 same semantics for constant folding as used by the middle-end.  */
1728 
1729       /* This was formerly used only for non-IEEE float.
1730 	 eggert@twinsun.com says it is safe for IEEE also.  */
1731       HOST_WIDE_INT xh, xl, th, tl;
1732       REAL_VALUE_TYPE x, t;
1733       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1734       switch (code)
1735 	{
1736 	case FIX:
1737 	  if (REAL_VALUE_ISNAN (x))
1738 	    return const0_rtx;
1739 
1740 	  /* Test against the signed upper bound.  */
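          /* For example, for a 32-bit MODE on a host with a 64-bit
             HOST_WIDE_INT this computes th = 0 and tl = 0x7fffffff,
             i.e. the largest representable signed value.  */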
1741 	  if (width > HOST_BITS_PER_WIDE_INT)
1742 	    {
1743 	      th = ((unsigned HOST_WIDE_INT) 1
1744 		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1745 	      tl = -1;
1746 	    }
1747 	  else
1748 	    {
1749 	      th = 0;
1750 	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1751 	    }
1752 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1753 	  if (REAL_VALUES_LESS (t, x))
1754 	    {
1755 	      xh = th;
1756 	      xl = tl;
1757 	      break;
1758 	    }
1759 
1760 	  /* Test against the signed lower bound.  */
1761 	  if (width > HOST_BITS_PER_WIDE_INT)
1762 	    {
1763 	      th = (unsigned HOST_WIDE_INT) (-1)
1764 		   << (width - HOST_BITS_PER_WIDE_INT - 1);
1765 	      tl = 0;
1766 	    }
1767 	  else
1768 	    {
1769 	      th = -1;
1770 	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1771 	    }
1772 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1773 	  if (REAL_VALUES_LESS (x, t))
1774 	    {
1775 	      xh = th;
1776 	      xl = tl;
1777 	      break;
1778 	    }
1779 	  REAL_VALUE_TO_INT (&xl, &xh, x);
1780 	  break;
1781 
1782 	case UNSIGNED_FIX:
1783 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1784 	    return const0_rtx;
1785 
1786 	  /* Test against the unsigned upper bound.  */
1787 	  if (width == 2*HOST_BITS_PER_WIDE_INT)
1788 	    {
1789 	      th = -1;
1790 	      tl = -1;
1791 	    }
1792 	  else if (width >= HOST_BITS_PER_WIDE_INT)
1793 	    {
1794 	      th = ((unsigned HOST_WIDE_INT) 1
1795 		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1796 	      tl = -1;
1797 	    }
1798 	  else
1799 	    {
1800 	      th = 0;
1801 	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1802 	    }
1803 	  real_from_integer (&t, VOIDmode, tl, th, 1);
1804 	  if (REAL_VALUES_LESS (t, x))
1805 	    {
1806 	      xh = th;
1807 	      xl = tl;
1808 	      break;
1809 	    }
1810 
1811 	  REAL_VALUE_TO_INT (&xl, &xh, x);
1812 	  break;
1813 
1814 	default:
1815 	  gcc_unreachable ();
1816 	}
1817       return immed_double_const (xl, xh, mode);
1818     }
1819 
1820   return NULL_RTX;
1821 }
1822 
1823 /* Subroutine of simplify_binary_operation to simplify a commutative,
1824    associative binary operation CODE with result mode MODE, operating
1825    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1826    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1827    canonicalization is possible.  */
1828 
1829 static rtx
1830 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1831 				rtx op0, rtx op1)
1832 {
1833   rtx tem;
1834 
1835   /* Linearize the operator to the left.  */
1836   if (GET_CODE (op1) == code)
1837     {
1838       /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
1839       if (GET_CODE (op0) == code)
1840 	{
1841 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1842 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1843 	}
1844 
1845       /* "a op (b op c)" becomes "(b op c) op a".  */
1846       if (! swap_commutative_operands_p (op1, op0))
1847 	return simplify_gen_binary (code, mode, op1, op0);
1848 
1849       tem = op0;
1850       op0 = op1;
1851       op1 = tem;
1852     }
1853 
1854   if (GET_CODE (op0) == code)
1855     {
1856       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1857       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1858 	{
1859 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1860 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1861 	}
1862 
1863       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1864       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1865       if (tem != 0)
1866         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1867 
1868       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1869       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1870       if (tem != 0)
1871         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1872     }
1873 
1874   return 0;
1875 }
1876 
1877 
1878 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1879    and OP1.  Return 0 if no simplification is possible.
1880 
1881    Don't use this for relational operations such as EQ or LT.
1882    Use simplify_relational_operation instead.  */
1883 rtx
1884 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1885 			   rtx op0, rtx op1)
1886 {
1887   rtx trueop0, trueop1;
1888   rtx tem;
1889 
1890   /* Relational operations don't work here.  We must know the mode
1891      of the operands in order to do the comparison correctly.
1892      Assuming a full word can give incorrect results.
1893      Consider comparing 128 with -128 in QImode.  */
1894   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1895   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1896 
1897   /* Make sure the constant is second.  */
1898   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1899       && swap_commutative_operands_p (op0, op1))
1900     {
1901       tem = op0, op0 = op1, op1 = tem;
1902     }
1903 
1904   trueop0 = avoid_constant_pool_reference (op0);
1905   trueop1 = avoid_constant_pool_reference (op1);
1906 
1907   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1908   if (tem)
1909     return tem;
1910   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1911 }
1912 
1913 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
1914    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
1915    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1916    actual constants.  */
1917 
1918 static rtx
1919 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1920 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1921 {
1922   rtx tem, reversed, opleft, opright;
1923   HOST_WIDE_INT val;
1924   unsigned int width = GET_MODE_PRECISION (mode);
1925 
1926   /* Even if we can't compute a constant result,
1927      there are some cases worth simplifying.  */
1928 
1929   switch (code)
1930     {
1931     case PLUS:
1932       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
1933 	 when x is NaN, infinite, or finite and nonzero.  They aren't
1934 	 when x is -0 and the rounding mode is not towards -infinity,
1935 	 since (-0) + 0 is then 0.  */
1936       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1937 	return op0;
1938 
1939       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1940 	 transformations are safe even for IEEE.  */
1941       if (GET_CODE (op0) == NEG)
1942 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1943       else if (GET_CODE (op1) == NEG)
1944 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1945 
1946       /* (~a) + 1 -> -a */
1947       if (INTEGRAL_MODE_P (mode)
1948 	  && GET_CODE (op0) == NOT
1949 	  && trueop1 == const1_rtx)
1950 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1951 
1952       /* Handle both-operands-constant cases.  We can only add
1953 	 CONST_INTs to constants since the sum of relocatable symbols
1954 	 can't be handled by most assemblers.  Don't add CONST_INT
1955 	 to CONST_INT since overflow won't be computed properly if wider
1956 	 than HOST_BITS_PER_WIDE_INT.  */
1957 
1958       if ((GET_CODE (op0) == CONST
1959 	   || GET_CODE (op0) == SYMBOL_REF
1960 	   || GET_CODE (op0) == LABEL_REF)
1961 	  && CONST_INT_P (op1))
1962 	return plus_constant (op0, INTVAL (op1));
1963       else if ((GET_CODE (op1) == CONST
1964 		|| GET_CODE (op1) == SYMBOL_REF
1965 		|| GET_CODE (op1) == LABEL_REF)
1966 	       && CONST_INT_P (op0))
1967 	return plus_constant (op1, INTVAL (op0));
1968 
1969       /* See if this is something like X * C - X or vice versa or
1970 	 if the multiplication is written as a shift.  If so, we can
1971 	 distribute and make a new multiply, shift, or maybe just
1972 	 have X (if C is 2 in the example above).  But don't make
1973 	 something more expensive than we had before.  */
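      /* For example, (plus (mult X 3) X) can become (mult X 4), and
	 (plus (ashift X 2) X) can become (mult X 5), provided the
	 result is not costlier than the original expression.  */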
1974 
1975       if (SCALAR_INT_MODE_P (mode))
1976 	{
1977 	  double_int coeff0, coeff1;
1978 	  rtx lhs = op0, rhs = op1;
1979 
1980 	  coeff0 = double_int_one;
1981 	  coeff1 = double_int_one;
1982 
1983 	  if (GET_CODE (lhs) == NEG)
1984 	    {
1985 	      coeff0 = double_int_minus_one;
1986 	      lhs = XEXP (lhs, 0);
1987 	    }
1988 	  else if (GET_CODE (lhs) == MULT
1989 		   && CONST_INT_P (XEXP (lhs, 1)))
1990 	    {
1991 	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1992 	      lhs = XEXP (lhs, 0);
1993 	    }
1994 	  else if (GET_CODE (lhs) == ASHIFT
1995 		   && CONST_INT_P (XEXP (lhs, 1))
1996                    && INTVAL (XEXP (lhs, 1)) >= 0
1997 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1998 	    {
1999 	      coeff0 = double_int_setbit (double_int_zero,
2000 					  INTVAL (XEXP (lhs, 1)));
2001 	      lhs = XEXP (lhs, 0);
2002 	    }
2003 
2004 	  if (GET_CODE (rhs) == NEG)
2005 	    {
2006 	      coeff1 = double_int_minus_one;
2007 	      rhs = XEXP (rhs, 0);
2008 	    }
2009 	  else if (GET_CODE (rhs) == MULT
2010 		   && CONST_INT_P (XEXP (rhs, 1)))
2011 	    {
2012 	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2013 	      rhs = XEXP (rhs, 0);
2014 	    }
2015 	  else if (GET_CODE (rhs) == ASHIFT
2016 		   && CONST_INT_P (XEXP (rhs, 1))
2017 		   && INTVAL (XEXP (rhs, 1)) >= 0
2018 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2019 	    {
2020 	      coeff1 = double_int_setbit (double_int_zero,
2021 					  INTVAL (XEXP (rhs, 1)));
2022 	      rhs = XEXP (rhs, 0);
2023 	    }
2024 
2025 	  if (rtx_equal_p (lhs, rhs))
2026 	    {
2027 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2028 	      rtx coeff;
2029 	      double_int val;
2030 	      bool speed = optimize_function_for_speed_p (cfun);
2031 
2032 	      val = double_int_add (coeff0, coeff1);
2033 	      coeff = immed_double_int_const (val, mode);
2034 
2035 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2036 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2037 		? tem : 0;
2038 	    }
2039 	}
2040 
2041       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
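      /* Adding the sign-bit constant can only flip the most significant
	 bit (any carry out of it is discarded), which is exactly what
	 XORing with it does, so the outer PLUS folds into the XOR mask.  */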
2042       if ((CONST_INT_P (op1)
2043 	   || GET_CODE (op1) == CONST_DOUBLE)
2044 	  && GET_CODE (op0) == XOR
2045 	  && (CONST_INT_P (XEXP (op0, 1))
2046 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2047 	  && mode_signbit_p (mode, op1))
2048 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2049 				    simplify_gen_binary (XOR, mode, op1,
2050 							 XEXP (op0, 1)));
2051 
2052       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2053       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2054 	  && GET_CODE (op0) == MULT
2055 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2056 	{
2057 	  rtx in1, in2;
2058 
2059 	  in1 = XEXP (XEXP (op0, 0), 0);
2060 	  in2 = XEXP (op0, 1);
2061 	  return simplify_gen_binary (MINUS, mode, op1,
2062 				      simplify_gen_binary (MULT, mode,
2063 							   in1, in2));
2064 	}
2065 
2066       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2067 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2068 	 is 1.  */
2069       if (COMPARISON_P (op0)
2070 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2071 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2072 	  && (reversed = reversed_comparison (op0, mode)))
2073 	return
2074 	  simplify_gen_unary (NEG, mode, reversed, mode);
2075 
2076       /* If one of the operands is a PLUS or a MINUS, see if we can
2077 	 simplify this by the associative law.
2078 	 Don't use the associative law for floating point.
2079 	 The inaccuracy makes it nonassociative,
2080 	 and subtle programs can break if operations are associated.  */
2081 
2082       if (INTEGRAL_MODE_P (mode)
2083 	  && (plus_minus_operand_p (op0)
2084 	      || plus_minus_operand_p (op1))
2085 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2086 	return tem;
2087 
2088       /* Reassociate floating point addition only when the user
2089 	 specifies associative math operations.  */
2090       if (FLOAT_MODE_P (mode)
2091 	  && flag_associative_math)
2092 	{
2093 	  tem = simplify_associative_operation (code, mode, op0, op1);
2094 	  if (tem)
2095 	    return tem;
2096 	}
2097       break;
2098 
2099     case COMPARE:
2100       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2101       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2102 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2103 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2104 	{
2105 	  rtx xop00 = XEXP (op0, 0);
2106 	  rtx xop10 = XEXP (op1, 0);
2107 
2108 #ifdef HAVE_cc0
2109 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2110 #else
2111 	    if (REG_P (xop00) && REG_P (xop10)
2112 		&& GET_MODE (xop00) == GET_MODE (xop10)
2113 		&& REGNO (xop00) == REGNO (xop10)
2114 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2115 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2116 #endif
2117 	      return xop00;
2118 	}
2119       break;
2120 
2121     case MINUS:
2122       /* We can't assume x-x is 0 even with non-IEEE floating point,
2123 	 but since it is zero except in very strange circumstances, we
2124 	 will treat it as zero with -ffinite-math-only.  */
2125       if (rtx_equal_p (trueop0, trueop1)
2126 	  && ! side_effects_p (op0)
2127 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2128 	return CONST0_RTX (mode);
2129 
2130       /* Change subtraction from zero into negation.  (0 - x) is the
2131 	 same as -x when x is NaN, infinite, or finite and nonzero.
2132 	 But if the mode has signed zeros, and does not round towards
2133 	 -infinity, then 0 - 0 is 0, not -0.  */
2134       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2135 	return simplify_gen_unary (NEG, mode, op1, mode);
2136 
2137       /* (-1 - a) is ~a.  */
2138       if (trueop0 == constm1_rtx)
2139 	return simplify_gen_unary (NOT, mode, op1, mode);
2140 
2141       /* Subtracting 0 has no effect unless the mode has signed zeros
2142 	 and supports rounding towards -infinity.  In such a case,
2143 	 0 - 0 is -0.  */
2144       if (!(HONOR_SIGNED_ZEROS (mode)
2145 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2146 	  && trueop1 == CONST0_RTX (mode))
2147 	return op0;
2148 
2149       /* See if this is something like X * C - X or vice versa or
2150 	 if the multiplication is written as a shift.  If so, we can
2151 	 distribute and make a new multiply, shift, or maybe just
2152 	 have X (if C is 2 in the example above).  But don't make
2153 	 something more expensive than we had before.  */
2154 
2155       if (SCALAR_INT_MODE_P (mode))
2156 	{
2157 	  double_int coeff0, negcoeff1;
2158 	  rtx lhs = op0, rhs = op1;
2159 
2160 	  coeff0 = double_int_one;
2161 	  negcoeff1 = double_int_minus_one;
2162 
2163 	  if (GET_CODE (lhs) == NEG)
2164 	    {
2165 	      coeff0 = double_int_minus_one;
2166 	      lhs = XEXP (lhs, 0);
2167 	    }
2168 	  else if (GET_CODE (lhs) == MULT
2169 		   && CONST_INT_P (XEXP (lhs, 1)))
2170 	    {
2171 	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2172 	      lhs = XEXP (lhs, 0);
2173 	    }
2174 	  else if (GET_CODE (lhs) == ASHIFT
2175 		   && CONST_INT_P (XEXP (lhs, 1))
2176 		   && INTVAL (XEXP (lhs, 1)) >= 0
2177 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2178 	    {
2179 	      coeff0 = double_int_setbit (double_int_zero,
2180 					  INTVAL (XEXP (lhs, 1)));
2181 	      lhs = XEXP (lhs, 0);
2182 	    }
2183 
2184 	  if (GET_CODE (rhs) == NEG)
2185 	    {
2186 	      negcoeff1 = double_int_one;
2187 	      rhs = XEXP (rhs, 0);
2188 	    }
2189 	  else if (GET_CODE (rhs) == MULT
2190 		   && CONST_INT_P (XEXP (rhs, 1)))
2191 	    {
2192 	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2193 	      rhs = XEXP (rhs, 0);
2194 	    }
2195 	  else if (GET_CODE (rhs) == ASHIFT
2196 		   && CONST_INT_P (XEXP (rhs, 1))
2197 		   && INTVAL (XEXP (rhs, 1)) >= 0
2198 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2199 	    {
2200 	      negcoeff1 = double_int_setbit (double_int_zero,
2201 					     INTVAL (XEXP (rhs, 1)));
2202 	      negcoeff1 = double_int_neg (negcoeff1);
2203 	      rhs = XEXP (rhs, 0);
2204 	    }
2205 
2206 	  if (rtx_equal_p (lhs, rhs))
2207 	    {
2208 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2209 	      rtx coeff;
2210 	      double_int val;
2211 	      bool speed = optimize_function_for_speed_p (cfun);
2212 
2213 	      val = double_int_add (coeff0, negcoeff1);
2214 	      coeff = immed_double_int_const (val, mode);
2215 
2216 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2217 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2218 		? tem : 0;
2219 	    }
2220 	}
2221 
2222       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2223       if (GET_CODE (op1) == NEG)
2224 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2225 
2226       /* (-x - c) may be simplified as (-c - x).  */
2227       if (GET_CODE (op0) == NEG
2228 	  && (CONST_INT_P (op1)
2229 	      || GET_CODE (op1) == CONST_DOUBLE))
2230 	{
2231 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2232 	  if (tem)
2233 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2234 	}
2235 
2236       /* Don't let a relocatable value get a negative coeff.  */
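      /* For example, (minus (symbol_ref X) (const_int 4)) is rewritten
	 as the equivalent addition of (const_int -4) to (symbol_ref X).  */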
2237       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2238 	return simplify_gen_binary (PLUS, mode,
2239 				    op0,
2240 				    neg_const_int (mode, op1));
2241 
2242       /* (x - (x & y)) -> (x & ~y) */
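      /* Every bit set in (x & y) is also set in x, so the subtraction
	 never borrows and simply clears those bits, which is exactly
	 what (x & ~y) computes.  */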
2243       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2244 	{
2245 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2246 	    {
2247 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2248 					GET_MODE (XEXP (op1, 1)));
2249 	      return simplify_gen_binary (AND, mode, op0, tem);
2250 	    }
2251 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2252 	    {
2253 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2254 					GET_MODE (XEXP (op1, 0)));
2255 	      return simplify_gen_binary (AND, mode, op0, tem);
2256 	    }
2257 	}
2258 
2259       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2260 	 by reversing the comparison code if valid.  */
2261       if (STORE_FLAG_VALUE == 1
2262 	  && trueop0 == const1_rtx
2263 	  && COMPARISON_P (op1)
2264 	  && (reversed = reversed_comparison (op1, mode)))
2265 	return reversed;
2266 
2267       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2268       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2269 	  && GET_CODE (op1) == MULT
2270 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2271 	{
2272 	  rtx in1, in2;
2273 
2274 	  in1 = XEXP (XEXP (op1, 0), 0);
2275 	  in2 = XEXP (op1, 1);
2276 	  return simplify_gen_binary (PLUS, mode,
2277 				      simplify_gen_binary (MULT, mode,
2278 							   in1, in2),
2279 				      op0);
2280 	}
2281 
2282       /* Canonicalize (minus (neg A) (mult B C)) to
2283 	 (minus (mult (neg B) C) A).  */
2284       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2285 	  && GET_CODE (op1) == MULT
2286 	  && GET_CODE (op0) == NEG)
2287 	{
2288 	  rtx in1, in2;
2289 
2290 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2291 	  in2 = XEXP (op1, 1);
2292 	  return simplify_gen_binary (MINUS, mode,
2293 				      simplify_gen_binary (MULT, mode,
2294 							   in1, in2),
2295 				      XEXP (op0, 0));
2296 	}
2297 
2298       /* If one of the operands is a PLUS or a MINUS, see if we can
2299 	 simplify this by the associative law.  This will, for example,
2300          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2301 	 Don't use the associative law for floating point.
2302 	 The inaccuracy makes it nonassociative,
2303 	 and subtle programs can break if operations are associated.  */
2304 
2305       if (INTEGRAL_MODE_P (mode)
2306 	  && (plus_minus_operand_p (op0)
2307 	      || plus_minus_operand_p (op1))
2308 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2309 	return tem;
2310       break;
2311 
2312     case MULT:
2313       if (trueop1 == constm1_rtx)
2314 	return simplify_gen_unary (NEG, mode, op0, mode);
2315 
2316       if (GET_CODE (op0) == NEG)
2317 	{
2318 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, via simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2323 	  if (temp
2324 	      && GET_CODE (op1) == MULT
2325 	      && GET_CODE (temp) == MULT
2326 	      && XEXP (op1, 0) == XEXP (temp, 0)
2327 	      && GET_CODE (XEXP (temp, 1)) == NEG
2328 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2329 	    temp = NULL_RTX;
2330 	  if (temp)
2331 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2332 	}
2333       if (GET_CODE (op1) == NEG)
2334 	{
2335 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could, via simplify_associative_operation, move
	     the NEG around again and recurse endlessly.  */
2340 	  if (temp
2341 	      && GET_CODE (op0) == MULT
2342 	      && GET_CODE (temp) == MULT
2343 	      && XEXP (op0, 0) == XEXP (temp, 0)
2344 	      && GET_CODE (XEXP (temp, 1)) == NEG
2345 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2346 	    temp = NULL_RTX;
2347 	  if (temp)
2348 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2349 	}
2350 
2351       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2352 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2353 	 when the mode has signed zeros, since multiplying a negative
2354 	 number by 0 will give -0, not 0.  */
2355       if (!HONOR_NANS (mode)
2356 	  && !HONOR_SIGNED_ZEROS (mode)
2357 	  && trueop1 == CONST0_RTX (mode)
2358 	  && ! side_effects_p (op0))
2359 	return op1;
2360 
2361       /* In IEEE floating point, x*1 is not equivalent to x for
2362 	 signalling NaNs.  */
2363       if (!HONOR_SNANS (mode)
2364 	  && trueop1 == CONST1_RTX (mode))
2365 	return op0;
2366 
      /* Convert a multiply by a constant power of two into a shift.  */
2369       if (CONST_INT_P (trueop1)
2370 	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2371 	  /* If the mode is larger than the host word size, and the
2372 	     uppermost bit is set, then this isn't a power of two due
2373 	     to implicit sign extension.  */
2374 	  && (width <= HOST_BITS_PER_WIDE_INT
2375 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2376 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2377 
2378       /* Likewise for multipliers wider than a word.  */
2379       if (GET_CODE (trueop1) == CONST_DOUBLE
2380 	  && (GET_MODE (trueop1) == VOIDmode
2381 	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2382 	  && GET_MODE (op0) == mode
2383 	  && CONST_DOUBLE_LOW (trueop1) == 0
2384 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2385 	return simplify_gen_binary (ASHIFT, mode, op0,
2386 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2387 
2388       /* x*2 is x+x and x*(-1) is -x */
2389       if (GET_CODE (trueop1) == CONST_DOUBLE
2390 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2391 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2392 	  && GET_MODE (op0) == mode)
2393 	{
2394 	  REAL_VALUE_TYPE d;
2395 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2396 
2397 	  if (REAL_VALUES_EQUAL (d, dconst2))
2398 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2399 
2400 	  if (!HONOR_SNANS (mode)
2401 	      && REAL_VALUES_EQUAL (d, dconstm1))
2402 	    return simplify_gen_unary (NEG, mode, op0, mode);
2403 	}
2404 
2405       /* Optimize -x * -x as x * x.  */
2406       if (FLOAT_MODE_P (mode)
2407 	  && GET_CODE (op0) == NEG
2408 	  && GET_CODE (op1) == NEG
2409 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2410 	  && !side_effects_p (XEXP (op0, 0)))
2411 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2412 
2413       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2414       if (SCALAR_FLOAT_MODE_P (mode)
2415 	  && GET_CODE (op0) == ABS
2416 	  && GET_CODE (op1) == ABS
2417 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2418 	  && !side_effects_p (XEXP (op0, 0)))
2419 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2420 
2421       /* Reassociate multiplication, but for floating point MULTs
2422 	 only when the user specifies unsafe math optimizations.  */
2423       if (! FLOAT_MODE_P (mode)
2424 	  || flag_unsafe_math_optimizations)
2425 	{
2426 	  tem = simplify_associative_operation (code, mode, op0, op1);
2427 	  if (tem)
2428 	    return tem;
2429 	}
2430       break;
2431 
2432     case IOR:
2433       if (trueop1 == CONST0_RTX (mode))
2434 	return op0;
2435       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2436 	return op1;
2437       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2438 	return op0;
2439       /* A | (~A) -> -1 */
2440       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2441 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2442 	  && ! side_effects_p (op0)
2443 	  && SCALAR_INT_MODE_P (mode))
2444 	return constm1_rtx;
2445 
2446       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2447       if (CONST_INT_P (op1)
2448 	  && HWI_COMPUTABLE_MODE_P (mode)
2449 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2450 	return op1;
2451 
2452       /* Canonicalize (X & C1) | C2.  */
2453       if (GET_CODE (op0) == AND
2454 	  && CONST_INT_P (trueop1)
2455 	  && CONST_INT_P (XEXP (op0, 1)))
2456 	{
2457 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2458 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2459 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2460 
	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2462 	  if ((c1 & c2) == c1
2463 	      && !side_effects_p (XEXP (op0, 0)))
2464 	    return trueop1;
2465 
2466 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2467 	  if (((c1|c2) & mask) == mask)
2468 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2469 
2470 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2471 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2472 	    {
2473 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2474 					 gen_int_mode (c1 & ~c2, mode));
2475 	      return simplify_gen_binary (IOR, mode, tem, op1);
2476 	    }
2477 	}
2478 
2479       /* Convert (A & B) | A to A.  */
2480       if (GET_CODE (op0) == AND
2481 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2482 	      || rtx_equal_p (XEXP (op0, 1), op1))
2483 	  && ! side_effects_p (XEXP (op0, 0))
2484 	  && ! side_effects_p (XEXP (op0, 1)))
2485 	return op1;
2486 
2487       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2488          mode size to (rotate A CX).  */
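      /* For example, in SImode (ior (ashift A (const_int 3))
	 (lshiftrt A (const_int 29))) becomes (rotate A (const_int 3)),
	 since 3 + 29 equals the 32-bit precision.  */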
2489 
2490       if (GET_CODE (op1) == ASHIFT
2491           || GET_CODE (op1) == SUBREG)
2492         {
2493 	  opleft = op1;
2494 	  opright = op0;
2495 	}
2496       else
2497         {
2498 	  opright = op1;
2499 	  opleft = op0;
2500 	}
2501 
2502       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2503           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2504           && CONST_INT_P (XEXP (opleft, 1))
2505           && CONST_INT_P (XEXP (opright, 1))
2506           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2507               == GET_MODE_PRECISION (mode)))
2508         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2509 
2510       /* Same, but for ashift that has been "simplified" to a wider mode
2511         by simplify_shift_const.  */
2512 
2513       if (GET_CODE (opleft) == SUBREG
2514           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2515           && GET_CODE (opright) == LSHIFTRT
2516           && GET_CODE (XEXP (opright, 0)) == SUBREG
2517           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2518           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2519           && (GET_MODE_SIZE (GET_MODE (opleft))
2520               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2521           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2522                           SUBREG_REG (XEXP (opright, 0)))
2523           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2524           && CONST_INT_P (XEXP (opright, 1))
2525           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2526               == GET_MODE_PRECISION (mode)))
2527         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2528                                XEXP (SUBREG_REG (opleft), 1));
2529 
      /* If we have (ior (and X C1) C2), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
2532       if (CONST_INT_P (op1)
2533 	  && (HWI_COMPUTABLE_MODE_P (mode)
2534 	      || INTVAL (op1) > 0)
2535 	  && GET_CODE (op0) == AND
2536 	  && CONST_INT_P (XEXP (op0, 1))
2537 	  && CONST_INT_P (op1)
2538 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2539 	return simplify_gen_binary (IOR, mode,
2540 				    simplify_gen_binary
2541 					  (AND, mode, XEXP (op0, 0),
2542 					   GEN_INT (UINTVAL (XEXP (op0, 1))
2543 						    & ~UINTVAL (op1))),
2544 				    op1);
2545 
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  If OP1 is a CONST_INT and the
	 PLUS does not affect any of the bits in OP1, we can do the
	 IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
2551       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2552           && GET_CODE (XEXP (op0, 0)) == PLUS
2553           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2554           && CONST_INT_P (XEXP (op0, 1))
2555           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2556         {
2557           int count = INTVAL (XEXP (op0, 1));
2558           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2559 
2560           if (mask >> count == INTVAL (trueop1)
2561               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2562 	    return simplify_gen_binary (ASHIFTRT, mode,
2563 					plus_constant (XEXP (op0, 0), mask),
2564 					XEXP (op0, 1));
2565         }
2566 
2567       tem = simplify_associative_operation (code, mode, op0, op1);
2568       if (tem)
2569 	return tem;
2570       break;
2571 
2572     case XOR:
2573       if (trueop1 == CONST0_RTX (mode))
2574 	return op0;
2575       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2576 	return simplify_gen_unary (NOT, mode, op0, mode);
2577       if (rtx_equal_p (trueop0, trueop1)
2578 	  && ! side_effects_p (op0)
2579 	  && GET_MODE_CLASS (mode) != MODE_CC)
2580 	 return CONST0_RTX (mode);
2581 
2582       /* Canonicalize XOR of the most significant bit to PLUS.  */
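      /* For example, in QImode (xor X (const_int -128)) and
	 (plus X (const_int -128)) both just flip the sign bit,
	 so the PLUS form is used as the canonical one.  */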
2583       if ((CONST_INT_P (op1)
2584 	   || GET_CODE (op1) == CONST_DOUBLE)
2585 	  && mode_signbit_p (mode, op1))
2586 	return simplify_gen_binary (PLUS, mode, op0, op1);
2587       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2588       if ((CONST_INT_P (op1)
2589 	   || GET_CODE (op1) == CONST_DOUBLE)
2590 	  && GET_CODE (op0) == PLUS
2591 	  && (CONST_INT_P (XEXP (op0, 1))
2592 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2593 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2594 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2595 				    simplify_gen_binary (XOR, mode, op1,
2596 							 XEXP (op0, 1)));
2597 
      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotations
	 encoded as an XOR of shifts and possibly enables other
	 simplifications.  */
2601 
2602       if (HWI_COMPUTABLE_MODE_P (mode)
2603 	  && (nonzero_bits (op0, mode)
2604 	      & nonzero_bits (op1, mode)) == 0)
2605 	return (simplify_gen_binary (IOR, mode, op0, op1));
2606 
2607       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2608 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2609 	 (NOT y).  */
2610       {
2611 	int num_negated = 0;
2612 
2613 	if (GET_CODE (op0) == NOT)
2614 	  num_negated++, op0 = XEXP (op0, 0);
2615 	if (GET_CODE (op1) == NOT)
2616 	  num_negated++, op1 = XEXP (op1, 0);
2617 
2618 	if (num_negated == 2)
2619 	  return simplify_gen_binary (XOR, mode, op0, op1);
2620 	else if (num_negated == 1)
2621 	  return simplify_gen_unary (NOT, mode,
2622 				     simplify_gen_binary (XOR, mode, op0, op1),
2623 				     mode);
2624       }
2625 
2626       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2627 	 correspond to a machine insn or result in further simplifications
2628 	 if B is a constant.  */
2629 
2630       if (GET_CODE (op0) == AND
2631 	  && rtx_equal_p (XEXP (op0, 1), op1)
2632 	  && ! side_effects_p (op1))
2633 	return simplify_gen_binary (AND, mode,
2634 				    simplify_gen_unary (NOT, mode,
2635 							XEXP (op0, 0), mode),
2636 				    op1);
2637 
2638       else if (GET_CODE (op0) == AND
2639 	       && rtx_equal_p (XEXP (op0, 0), op1)
2640 	       && ! side_effects_p (op1))
2641 	return simplify_gen_binary (AND, mode,
2642 				    simplify_gen_unary (NOT, mode,
2643 							XEXP (op0, 1), mode),
2644 				    op1);
2645 
2646       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2647 	 we can transform like this:
2648             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2649                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2650                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2651 	 Attempt a few simplifications when B and C are both constants.  */
2652       if (GET_CODE (op0) == AND
2653 	  && CONST_INT_P (op1)
2654 	  && CONST_INT_P (XEXP (op0, 1)))
2655 	{
2656 	  rtx a = XEXP (op0, 0);
2657 	  rtx b = XEXP (op0, 1);
2658 	  rtx c = op1;
2659 	  HOST_WIDE_INT bval = INTVAL (b);
2660 	  HOST_WIDE_INT cval = INTVAL (c);
2661 
2662 	  rtx na_c
2663 	    = simplify_binary_operation (AND, mode,
2664 					 simplify_gen_unary (NOT, mode, a, mode),
2665 					 c);
2666 	  if ((~cval & bval) == 0)
2667 	    {
2668 	      /* Try to simplify ~A&C | ~B&C.  */
2669 	      if (na_c != NULL_RTX)
2670 		return simplify_gen_binary (IOR, mode, na_c,
2671 					    GEN_INT (~bval & cval));
2672 	    }
2673 	  else
2674 	    {
2675 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2676 	      if (na_c == const0_rtx)
2677 		{
2678 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2679 						    GEN_INT (~cval & bval));
2680 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2681 					      GEN_INT (~bval & cval));
2682 		}
2683 	    }
2684 	}
2685 
2686       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2687 	 comparison if STORE_FLAG_VALUE is 1.  */
2688       if (STORE_FLAG_VALUE == 1
2689 	  && trueop1 == const1_rtx
2690 	  && COMPARISON_P (op0)
2691 	  && (reversed = reversed_comparison (op0, mode)))
2692 	return reversed;
2693 
2694       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2695 	 is (lt foo (const_int 0)), so we can perform the above
2696 	 simplification if STORE_FLAG_VALUE is 1.  */
2697 
2698       if (STORE_FLAG_VALUE == 1
2699 	  && trueop1 == const1_rtx
2700 	  && GET_CODE (op0) == LSHIFTRT
2701 	  && CONST_INT_P (XEXP (op0, 1))
2702 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2703 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2704 
      /* (xor (comparison foo bar) (const_int sign-bit)) can become the
	 reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
2707       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2708 	  && trueop1 == const_true_rtx
2709 	  && COMPARISON_P (op0)
2710 	  && (reversed = reversed_comparison (op0, mode)))
2711 	return reversed;
2712 
2713       tem = simplify_associative_operation (code, mode, op0, op1);
2714       if (tem)
2715 	return tem;
2716       break;
2717 
2718     case AND:
2719       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2720 	return trueop1;
2721       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2722 	return op0;
2723       if (HWI_COMPUTABLE_MODE_P (mode))
2724 	{
2725 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2726 	  HOST_WIDE_INT nzop1;
2727 	  if (CONST_INT_P (trueop1))
2728 	    {
2729 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2730 	      /* If we are turning off bits already known off in OP0, we need
2731 		 not do an AND.  */
2732 	      if ((nzop0 & ~val1) == 0)
2733 		return op0;
2734 	    }
2735 	  nzop1 = nonzero_bits (trueop1, mode);
2736 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2737 	  if ((nzop1 & nzop0) == 0
2738 	      && !side_effects_p (op0) && !side_effects_p (op1))
2739 	    return CONST0_RTX (mode);
2740 	}
2741       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2742 	  && GET_MODE_CLASS (mode) != MODE_CC)
2743 	return op0;
2744       /* A & (~A) -> 0 */
2745       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2746 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2747 	  && ! side_effects_p (op0)
2748 	  && GET_MODE_CLASS (mode) != MODE_CC)
2749 	return CONST0_RTX (mode);
2750 
2751       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2752 	 there are no nonzero bits of C outside of X's mode.  */
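      /* For example, (and (sign_extend:SI X:QI) (const_int 0x7f)) can
	 become (zero_extend:SI (and:QI X (const_int 0x7f))): the mask
	 discards every bit the extension could have set.  */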
2753       if ((GET_CODE (op0) == SIGN_EXTEND
2754 	   || GET_CODE (op0) == ZERO_EXTEND)
2755 	  && CONST_INT_P (trueop1)
2756 	  && HWI_COMPUTABLE_MODE_P (mode)
2757 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2758 	      & UINTVAL (trueop1)) == 0)
2759 	{
2760 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2761 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2762 				     gen_int_mode (INTVAL (trueop1),
2763 						   imode));
2764 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2765 	}
2766 
2767       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2768 	 we might be able to further simplify the AND with X and potentially
2769 	 remove the truncation altogether.  */
2770       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2771 	{
2772 	  rtx x = XEXP (op0, 0);
2773 	  enum machine_mode xmode = GET_MODE (x);
2774 	  tem = simplify_gen_binary (AND, xmode, x,
2775 				     gen_int_mode (INTVAL (trueop1), xmode));
2776 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2777 	}
2778 
2779       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2780       if (GET_CODE (op0) == IOR
2781 	  && CONST_INT_P (trueop1)
2782 	  && CONST_INT_P (XEXP (op0, 1)))
2783 	{
2784 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2785 	  return simplify_gen_binary (IOR, mode,
2786 				      simplify_gen_binary (AND, mode,
2787 							   XEXP (op0, 0), op1),
2788 				      gen_int_mode (tmp, mode));
2789 	}
2790 
2791       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2792 	 insn (and may simplify more).  */
2793       if (GET_CODE (op0) == XOR
2794 	  && rtx_equal_p (XEXP (op0, 0), op1)
2795 	  && ! side_effects_p (op1))
2796 	return simplify_gen_binary (AND, mode,
2797 				    simplify_gen_unary (NOT, mode,
2798 							XEXP (op0, 1), mode),
2799 				    op1);
2800 
2801       if (GET_CODE (op0) == XOR
2802 	  && rtx_equal_p (XEXP (op0, 1), op1)
2803 	  && ! side_effects_p (op1))
2804 	return simplify_gen_binary (AND, mode,
2805 				    simplify_gen_unary (NOT, mode,
2806 							XEXP (op0, 0), mode),
2807 				    op1);
2808 
2809       /* Similarly for (~(A ^ B)) & A.  */
2810       if (GET_CODE (op0) == NOT
2811 	  && GET_CODE (XEXP (op0, 0)) == XOR
2812 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2813 	  && ! side_effects_p (op1))
2814 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2815 
2816       if (GET_CODE (op0) == NOT
2817 	  && GET_CODE (XEXP (op0, 0)) == XOR
2818 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2819 	  && ! side_effects_p (op1))
2820 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2821 
2822       /* Convert (A | B) & A to A.  */
2823       if (GET_CODE (op0) == IOR
2824 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2825 	      || rtx_equal_p (XEXP (op0, 1), op1))
2826 	  && ! side_effects_p (XEXP (op0, 0))
2827 	  && ! side_effects_p (XEXP (op0, 1)))
2828 	return op1;
2829 
2830       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2831 	 ((A & N) + B) & M -> (A + B) & M
2832 	 Similarly if (N & M) == 0,
2833 	 ((A | N) + B) & M -> (A + B) & M
2834 	 and for - instead of + and/or ^ instead of |.
2835          Also, if (N & M) == 0, then
2836 	 (A +- N) & M -> A & M.  */
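      /* For example, with M == 0xff and N == 0x1ff,
	 ((A & 0x1ff) + B) & 0xff simplifies to (A + B) & 0xff: the low
	 eight bits of a sum depend only on the low eight bits of the
	 addends, and the inner mask does not change those bits of A.  */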
2837       if (CONST_INT_P (trueop1)
2838 	  && HWI_COMPUTABLE_MODE_P (mode)
2839 	  && ~UINTVAL (trueop1)
2840 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2841 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2842 	{
2843 	  rtx pmop[2];
2844 	  int which;
2845 
2846 	  pmop[0] = XEXP (op0, 0);
2847 	  pmop[1] = XEXP (op0, 1);
2848 
2849 	  if (CONST_INT_P (pmop[1])
2850 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2851 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
2852 
2853 	  for (which = 0; which < 2; which++)
2854 	    {
2855 	      tem = pmop[which];
2856 	      switch (GET_CODE (tem))
2857 		{
2858 		case AND:
2859 		  if (CONST_INT_P (XEXP (tem, 1))
2860 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2861 		      == UINTVAL (trueop1))
2862 		    pmop[which] = XEXP (tem, 0);
2863 		  break;
2864 		case IOR:
2865 		case XOR:
2866 		  if (CONST_INT_P (XEXP (tem, 1))
2867 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2868 		    pmop[which] = XEXP (tem, 0);
2869 		  break;
2870 		default:
2871 		  break;
2872 		}
2873 	    }
2874 
2875 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2876 	    {
2877 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
2878 					 pmop[0], pmop[1]);
2879 	      return simplify_gen_binary (code, mode, tem, op1);
2880 	    }
2881 	}
2882 
      /* (and X (ior (not X) Y)) -> (and X Y) */
2884       if (GET_CODE (op1) == IOR
2885 	  && GET_CODE (XEXP (op1, 0)) == NOT
2886 	  && op0 == XEXP (XEXP (op1, 0), 0))
2887        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2888 
2889       /* (and (ior (not X) Y) X) -> (and X Y) */
2890       if (GET_CODE (op0) == IOR
2891 	  && GET_CODE (XEXP (op0, 0)) == NOT
2892 	  && op1 == XEXP (XEXP (op0, 0), 0))
2893 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2894 
2895       tem = simplify_associative_operation (code, mode, op0, op1);
2896       if (tem)
2897 	return tem;
2898       break;
2899 
2900     case UDIV:
2901       /* 0/x is 0 (or x&0 if x has side-effects).  */
2902       if (trueop0 == CONST0_RTX (mode))
2903 	{
2904 	  if (side_effects_p (op1))
2905 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2906 	  return trueop0;
2907 	}
2908       /* x/1 is x.  */
2909       if (trueop1 == CONST1_RTX (mode))
2910 	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2911       /* Convert divide by power of two into shift.  */
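      /* For example, (udiv X (const_int 8)) becomes
	 (lshiftrt X (const_int 3)); this is safe because UDIV is an
	 unsigned division.  */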
2912       if (CONST_INT_P (trueop1)
2913 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2914 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2915       break;
2916 
2917     case DIV:
2918       /* Handle floating point and integers separately.  */
2919       if (SCALAR_FLOAT_MODE_P (mode))
2920 	{
2921 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
2922 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
2923 	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0.  */
2925 	  if (trueop0 == CONST0_RTX (mode)
2926 	      && !HONOR_NANS (mode)
2927 	      && !HONOR_SIGNED_ZEROS (mode)
2928 	      && ! side_effects_p (op1))
2929 	    return op0;
2930 	  /* x/1.0 is x.  */
2931 	  if (trueop1 == CONST1_RTX (mode)
2932 	      && !HONOR_SNANS (mode))
2933 	    return op0;
2934 
2935 	  if (GET_CODE (trueop1) == CONST_DOUBLE
2936 	      && trueop1 != CONST0_RTX (mode))
2937 	    {
2938 	      REAL_VALUE_TYPE d;
2939 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2940 
2941 	      /* x/-1.0 is -x.  */
2942 	      if (REAL_VALUES_EQUAL (d, dconstm1)
2943 		  && !HONOR_SNANS (mode))
2944 		return simplify_gen_unary (NEG, mode, op0, mode);
2945 
2946 	      /* Change FP division by a constant into multiplication.
2947 		 Only do this with -freciprocal-math.  */
2948 	      if (flag_reciprocal_math
2949 		  && !REAL_VALUES_EQUAL (d, dconst0))
2950 		{
2951 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2952 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2953 		  return simplify_gen_binary (MULT, mode, op0, tem);
2954 		}
2955 	    }
2956 	}
2957       else if (SCALAR_INT_MODE_P (mode))
2958 	{
2959 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
2960 	  if (trueop0 == CONST0_RTX (mode)
2961 	      && !cfun->can_throw_non_call_exceptions)
2962 	    {
2963 	      if (side_effects_p (op1))
2964 		return simplify_gen_binary (AND, mode, op1, trueop0);
2965 	      return trueop0;
2966 	    }
2967 	  /* x/1 is x.  */
2968 	  if (trueop1 == CONST1_RTX (mode))
2969 	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2970 	  /* x/-1 is -x.  */
2971 	  if (trueop1 == constm1_rtx)
2972 	    {
2973 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2974 	      return simplify_gen_unary (NEG, mode, x, mode);
2975 	    }
2976 	}
2977       break;
2978 
2979     case UMOD:
2980       /* 0%x is 0 (or x&0 if x has side-effects).  */
2981       if (trueop0 == CONST0_RTX (mode))
2982 	{
2983 	  if (side_effects_p (op1))
2984 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2985 	  return trueop0;
2986 	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
2988       if (trueop1 == CONST1_RTX (mode))
2989 	{
2990 	  if (side_effects_p (op0))
2991 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2992 	  return CONST0_RTX (mode);
2993 	}
2994       /* Implement modulus by power of two as AND.  */
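      /* For example, (umod X (const_int 8)) becomes
	 (and X (const_int 7)).  */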
2995       if (CONST_INT_P (trueop1)
2996 	  && exact_log2 (UINTVAL (trueop1)) > 0)
2997 	return simplify_gen_binary (AND, mode, op0,
2998 				    GEN_INT (INTVAL (op1) - 1));
2999       break;
3000 
3001     case MOD:
3002       /* 0%x is 0 (or x&0 if x has side-effects).  */
3003       if (trueop0 == CONST0_RTX (mode))
3004 	{
3005 	  if (side_effects_p (op1))
3006 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3007 	  return trueop0;
3008 	}
      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3010       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3011 	{
3012 	  if (side_effects_p (op0))
3013 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3014 	  return CONST0_RTX (mode);
3015 	}
3016       break;
3017 
3018     case ROTATERT:
3019     case ROTATE:
3020     case ASHIFTRT:
3021       if (trueop1 == CONST0_RTX (mode))
3022 	return op0;
3023       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3024 	return op0;
      /* Rotating ~0 always results in ~0; the same holds for an
	 arithmetic right shift of ~0.  */
3026       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3027 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3028 	  && ! side_effects_p (op1))
3029 	return op0;
3030     canonicalize_shift:
3031       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3032 	{
3033 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3034 	  if (val != INTVAL (op1))
3035 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3036 	}
3037       break;
3038 
3039     case ASHIFT:
3040     case SS_ASHIFT:
3041     case US_ASHIFT:
3042       if (trueop1 == CONST0_RTX (mode))
3043 	return op0;
3044       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3045 	return op0;
3046       goto canonicalize_shift;
3047 
3048     case LSHIFTRT:
3049       if (trueop1 == CONST0_RTX (mode))
3050 	return op0;
3051       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3052 	return op0;
3053       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
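      /* When CLZ is defined at zero to the mode precision (a power of
	 two equal to 1 << C), the shifted result is 1 exactly when the
	 operand is zero and 0 otherwise, i.e. (eq X 0) given
	 STORE_FLAG_VALUE == 1.  */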
3054       if (GET_CODE (op0) == CLZ
3055 	  && CONST_INT_P (trueop1)
3056 	  && STORE_FLAG_VALUE == 1
3057 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3058 	{
3059 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3060 	  unsigned HOST_WIDE_INT zero_val = 0;
3061 
3062 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3063 	      && zero_val == GET_MODE_PRECISION (imode)
3064 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3065 	    return simplify_gen_relational (EQ, mode, imode,
3066 					    XEXP (op0, 0), const0_rtx);
3067 	}
3068       goto canonicalize_shift;
3069 
3070     case SMIN:
3071       if (width <= HOST_BITS_PER_WIDE_INT
3072 	  && mode_signbit_p (mode, trueop1)
3073 	  && ! side_effects_p (op0))
3074 	return op1;
3075       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3076 	return op0;
3077       tem = simplify_associative_operation (code, mode, op0, op1);
3078       if (tem)
3079 	return tem;
3080       break;
3081 
3082     case SMAX:
3083       if (width <= HOST_BITS_PER_WIDE_INT
3084 	  && CONST_INT_P (trueop1)
3085 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3086 	  && ! side_effects_p (op0))
3087 	return op1;
3088       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3089 	return op0;
3090       tem = simplify_associative_operation (code, mode, op0, op1);
3091       if (tem)
3092 	return tem;
3093       break;
3094 
3095     case UMIN:
3096       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3097 	return op1;
3098       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3099 	return op0;
3100       tem = simplify_associative_operation (code, mode, op0, op1);
3101       if (tem)
3102 	return tem;
3103       break;
3104 
3105     case UMAX:
3106       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3107 	return op1;
3108       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3109 	return op0;
3110       tem = simplify_associative_operation (code, mode, op0, op1);
3111       if (tem)
3112 	return tem;
3113       break;
3114 
3115     case SS_PLUS:
3116     case US_PLUS:
3117     case SS_MINUS:
3118     case US_MINUS:
3119     case SS_MULT:
3120     case US_MULT:
3121     case SS_DIV:
3122     case US_DIV:
3123       /* ??? There are simplifications that can be done.  */
3124       return 0;
3125 
3126     case VEC_SELECT:
3127       if (!VECTOR_MODE_P (mode))
3128 	{
3129 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3130 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3131 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3132 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3133 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3134 
3135 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3136 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3137 						      (trueop1, 0, 0)));
3138 
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with an optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain of
	     nested VEC_SELECT expressions.  When the input operand is a
	     memory operand, this operation can be simplified to a simple
	     scalar load from an offset memory address.  */
3145 	  if (GET_CODE (trueop0) == VEC_SELECT)
3146 	    {
3147 	      rtx op0 = XEXP (trueop0, 0);
3148 	      rtx op1 = XEXP (trueop0, 1);
3149 
3150 	      enum machine_mode opmode = GET_MODE (op0);
3151 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3152 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3153 
3154 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3155 	      int elem;
3156 
3157 	      rtvec vec;
3158 	      rtx tmp_op, tmp;
3159 
3160 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3161 	      gcc_assert (i < n_elts);
3162 
	      /* Select the element pointed to by the nested selector.  */
3164 	      elem = INTVAL (XVECEXP (op1, 0, i));
3165 
3166 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3167 	      if (GET_CODE (op0) == VEC_CONCAT)
3168 		{
3169 		  rtx op00 = XEXP (op0, 0);
3170 		  rtx op01 = XEXP (op0, 1);
3171 
3172 		  enum machine_mode mode00, mode01;
3173 		  int n_elts00, n_elts01;
3174 
3175 		  mode00 = GET_MODE (op00);
3176 		  mode01 = GET_MODE (op01);
3177 
3178 		  /* Find out number of elements of each operand.  */
3179 		  if (VECTOR_MODE_P (mode00))
3180 		    {
3181 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3182 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3183 		    }
3184 		  else
3185 		    n_elts00 = 1;
3186 
3187 		  if (VECTOR_MODE_P (mode01))
3188 		    {
3189 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3190 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3191 		    }
3192 		  else
3193 		    n_elts01 = 1;
3194 
3195 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3196 
		  /* Select the correct operand of VEC_CONCAT and adjust
		     the selector.  The first N_ELTS00 elements come
		     from OP00.  */
		  if (elem < n_elts00)
3200 		    tmp_op = op00;
3201 		  else
3202 		    {
3203 		      tmp_op = op01;
3204 		      elem -= n_elts00;
3205 		    }
3206 		}
3207 	      else
3208 		tmp_op = op0;
3209 
3210 	      vec = rtvec_alloc (1);
3211 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3212 
3213 	      tmp = gen_rtx_fmt_ee (code, mode,
3214 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3215 	      return tmp;
3216 	    }
3217 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3218 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3219 	    return XEXP (trueop0, 0);
3220 	}
3221       else
3222 	{
3223 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3224 	  gcc_assert (GET_MODE_INNER (mode)
3225 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3226 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3227 
3228 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3229 	    {
3230 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3231 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3232 	      rtvec v = rtvec_alloc (n_elts);
3233 	      unsigned int i;
3234 
3235 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3236 	      for (i = 0; i < n_elts; i++)
3237 		{
3238 		  rtx x = XVECEXP (trueop1, 0, i);
3239 
3240 		  gcc_assert (CONST_INT_P (x));
3241 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3242 						       INTVAL (x));
3243 		}
3244 
3245 	      return gen_rtx_CONST_VECTOR (mode, v);
3246 	    }
3247 	}
3248 
3249       if (XVECLEN (trueop1, 0) == 1
3250 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3251 	  && GET_CODE (trueop0) == VEC_CONCAT)
3252 	{
3253 	  rtx vec = trueop0;
3254 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3255 
3256 	  /* Try to find the element in the VEC_CONCAT.  */
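	  /* For instance (illustrative), given
	       (vec_select:SI (vec_concat:V2SI (reg:SI a) (reg:SI b))
			      (parallel [(const_int 1)]))
	     OFFSET starts at 4; the first VEC_CONCAT operand is 4 bytes
	     wide, so we step into the second operand with OFFSET 0 and
	     return (reg:SI b).  */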
3257 	  while (GET_MODE (vec) != mode
3258 		 && GET_CODE (vec) == VEC_CONCAT)
3259 	    {
3260 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3261 	      if (offset < vec_size)
3262 		vec = XEXP (vec, 0);
3263 	      else
3264 		{
3265 		  offset -= vec_size;
3266 		  vec = XEXP (vec, 1);
3267 		}
3268 	      vec = avoid_constant_pool_reference (vec);
3269 	    }
3270 
3271 	  if (GET_MODE (vec) == mode)
3272 	    return vec;
3273 	}
3274 
3275       return 0;
3276     case VEC_CONCAT:
3277       {
3278 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3279 				      ? GET_MODE (trueop0)
3280 				      : GET_MODE_INNER (mode));
3281 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3282 				      ? GET_MODE (trueop1)
3283 				      : GET_MODE_INNER (mode));
3284 
3285 	gcc_assert (VECTOR_MODE_P (mode));
3286 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3287 		    == GET_MODE_SIZE (mode));
3288 
3289 	if (VECTOR_MODE_P (op0_mode))
3290 	  gcc_assert (GET_MODE_INNER (mode)
3291 		      == GET_MODE_INNER (op0_mode));
3292 	else
3293 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3294 
3295 	if (VECTOR_MODE_P (op1_mode))
3296 	  gcc_assert (GET_MODE_INNER (mode)
3297 		      == GET_MODE_INNER (op1_mode));
3298 	else
3299 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3300 
3301 	if ((GET_CODE (trueop0) == CONST_VECTOR
3302 	     || CONST_INT_P (trueop0)
3303 	     || GET_CODE (trueop0) == CONST_DOUBLE)
3304 	    && (GET_CODE (trueop1) == CONST_VECTOR
3305 		|| CONST_INT_P (trueop1)
3306 		|| GET_CODE (trueop1) == CONST_DOUBLE))
3307 	  {
3308 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3309 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3310 	    rtvec v = rtvec_alloc (n_elts);
3311 	    unsigned int i;
3312 	    unsigned in_n_elts = 1;
3313 
3314 	    if (VECTOR_MODE_P (op0_mode))
3315 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3316 	    for (i = 0; i < n_elts; i++)
3317 	      {
3318 		if (i < in_n_elts)
3319 		  {
3320 		    if (!VECTOR_MODE_P (op0_mode))
3321 		      RTVEC_ELT (v, i) = trueop0;
3322 		    else
3323 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3324 		  }
3325 		else
3326 		  {
3327 		    if (!VECTOR_MODE_P (op1_mode))
3328 		      RTVEC_ELT (v, i) = trueop1;
3329 		    else
3330 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3331 							   i - in_n_elts);
3332 		  }
3333 	      }
3334 
3335 	    return gen_rtx_CONST_VECTOR (mode, v);
3336 	  }
3337       }
3338       return 0;
3339 
3340     default:
3341       gcc_unreachable ();
3342     }
3343 
3344   return 0;
3345 }
3346 
3347 rtx
3348 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3349 				 rtx op0, rtx op1)
3350 {
3351   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3352   HOST_WIDE_INT val;
3353   unsigned int width = GET_MODE_PRECISION (mode);
3354 
3355   if (VECTOR_MODE_P (mode)
3356       && code != VEC_CONCAT
3357       && GET_CODE (op0) == CONST_VECTOR
3358       && GET_CODE (op1) == CONST_VECTOR)
3359     {
3360       unsigned n_elts = GET_MODE_NUNITS (mode);
3361       enum machine_mode op0mode = GET_MODE (op0);
3362       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3363       enum machine_mode op1mode = GET_MODE (op1);
3364       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3365       rtvec v = rtvec_alloc (n_elts);
3366       unsigned int i;
3367 
3368       gcc_assert (op0_n_elts == n_elts);
3369       gcc_assert (op1_n_elts == n_elts);
3370       for (i = 0; i < n_elts; i++)
3371 	{
3372 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3373 					     CONST_VECTOR_ELT (op0, i),
3374 					     CONST_VECTOR_ELT (op1, i));
3375 	  if (!x)
3376 	    return 0;
3377 	  RTVEC_ELT (v, i) = x;
3378 	}
3379 
3380       return gen_rtx_CONST_VECTOR (mode, v);
3381     }
3382 
3383   if (VECTOR_MODE_P (mode)
3384       && code == VEC_CONCAT
3385       && (CONST_INT_P (op0)
3386 	  || GET_CODE (op0) == CONST_DOUBLE
3387 	  || GET_CODE (op0) == CONST_FIXED)
3388       && (CONST_INT_P (op1)
3389 	  || GET_CODE (op1) == CONST_DOUBLE
3390 	  || GET_CODE (op1) == CONST_FIXED))
3391     {
3392       unsigned n_elts = GET_MODE_NUNITS (mode);
3393       rtvec v = rtvec_alloc (n_elts);
3394 
3395       gcc_assert (n_elts >= 2);
3396       if (n_elts == 2)
3397 	{
3398 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3399 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3400 
3401 	  RTVEC_ELT (v, 0) = op0;
3402 	  RTVEC_ELT (v, 1) = op1;
3403 	}
3404       else
3405 	{
3406 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3407 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3408 	  unsigned i;
3409 
3410 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3411 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3412 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3413 
3414 	  for (i = 0; i < op0_n_elts; ++i)
3415 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3416 	  for (i = 0; i < op1_n_elts; ++i)
3417 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3418 	}
3419 
3420       return gen_rtx_CONST_VECTOR (mode, v);
3421     }
3422 
3423   if (SCALAR_FLOAT_MODE_P (mode)
3424       && GET_CODE (op0) == CONST_DOUBLE
3425       && GET_CODE (op1) == CONST_DOUBLE
3426       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3427     {
3428       if (code == AND
3429 	  || code == IOR
3430 	  || code == XOR)
3431 	{
3432 	  long tmp0[4];
3433 	  long tmp1[4];
3434 	  REAL_VALUE_TYPE r;
3435 	  int i;
3436 
3437 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3438 			  GET_MODE (op0));
3439 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3440 			  GET_MODE (op1));
3441 	  for (i = 0; i < 4; i++)
3442 	    {
3443 	      switch (code)
3444 	      {
3445 	      case AND:
3446 		tmp0[i] &= tmp1[i];
3447 		break;
3448 	      case IOR:
3449 		tmp0[i] |= tmp1[i];
3450 		break;
3451 	      case XOR:
3452 		tmp0[i] ^= tmp1[i];
3453 		break;
3454 	      default:
3455 		gcc_unreachable ();
3456 	      }
3457 	    }
3458 	   real_from_target (&r, tmp0, mode);
3459 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3460 	}
3461       else
3462 	{
3463 	  REAL_VALUE_TYPE f0, f1, value, result;
3464 	  bool inexact;
3465 
3466 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3467 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3468 	  real_convert (&f0, mode, &f0);
3469 	  real_convert (&f1, mode, &f1);
3470 
3471 	  if (HONOR_SNANS (mode)
3472 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3473 	    return 0;
3474 
3475 	  if (code == DIV
3476 	      && REAL_VALUES_EQUAL (f1, dconst0)
3477 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3478 	    return 0;
3479 
3480 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3481 	      && flag_trapping_math
3482 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3483 	    {
3484 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3485 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3486 
3487 	      switch (code)
3488 		{
3489 		case PLUS:
3490 		  /* Inf + -Inf = NaN plus exception.  */
3491 		  if (s0 != s1)
3492 		    return 0;
3493 		  break;
3494 		case MINUS:
3495 		  /* Inf - Inf = NaN plus exception.  */
3496 		  if (s0 == s1)
3497 		    return 0;
3498 		  break;
3499 		case DIV:
3500 		  /* Inf / Inf = NaN plus exception.  */
3501 		  return 0;
3502 		default:
3503 		  break;
3504 		}
3505 	    }
3506 
3507 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3508 	      && flag_trapping_math
3509 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3510 		  || (REAL_VALUE_ISINF (f1)
3511 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3512 	    /* Inf * 0 = NaN plus exception.  */
3513 	    return 0;
3514 
3515 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3516 				     &f0, &f1);
3517 	  real_convert (&result, mode, &value);
3518 
3519 	  /* Don't constant fold this floating point operation if the
3520 	     result has overflowed and flag_trapping_math is set.  */
3521 
3522 	  if (flag_trapping_math
3523 	      && MODE_HAS_INFINITIES (mode)
3524 	      && REAL_VALUE_ISINF (result)
3525 	      && !REAL_VALUE_ISINF (f0)
3526 	      && !REAL_VALUE_ISINF (f1))
3527 	    /* Overflow plus exception.  */
3528 	    return 0;
3529 
3530 	  /* Don't constant fold this floating point operation if the
3531 	     result may depend upon the run-time rounding mode and
3532 	     flag_rounding_math is set, or if GCC's software emulation
3533 	     is unable to accurately represent the result.  */
3534 
3535 	  if ((flag_rounding_math
3536 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3537 	      && (inexact || !real_identical (&result, &value)))
3538 	    return NULL_RTX;
3539 
3540 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3541 	}
3542     }
3543 
3544   /* We can fold some multi-word operations.  */
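  /* E.g. with a 64-bit HOST_WIDE_INT, adding two TImode constants is done
     below as a single double_int addition and re-wrapped by
     immed_double_int_const (an illustrative case).  */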
3545   if (GET_MODE_CLASS (mode) == MODE_INT
3546       && width == HOST_BITS_PER_DOUBLE_INT
3547       && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3548       && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3549     {
3550       double_int o0, o1, res, tmp;
3551 
3552       o0 = rtx_to_double_int (op0);
3553       o1 = rtx_to_double_int (op1);
3554 
3555       switch (code)
3556 	{
3557 	case MINUS:
3558 	  /* A - B == A + (-B).  */
3559 	  o1 = double_int_neg (o1);
3560 
3561 	  /* Fall through.  */
3562 
3563 	case PLUS:
3564 	  res = double_int_add (o0, o1);
3565 	  break;
3566 
3567 	case MULT:
3568 	  res = double_int_mul (o0, o1);
3569 	  break;
3570 
3571 	case DIV:
3572 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3573 				    o0.low, o0.high, o1.low, o1.high,
3574 				    &res.low, &res.high,
3575 				    &tmp.low, &tmp.high))
3576 	    return 0;
3577 	  break;
3578 
3579 	case MOD:
3580 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3581 				    o0.low, o0.high, o1.low, o1.high,
3582 				    &tmp.low, &tmp.high,
3583 				    &res.low, &res.high))
3584 	    return 0;
3585 	  break;
3586 
3587 	case UDIV:
3588 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3589 				    o0.low, o0.high, o1.low, o1.high,
3590 				    &res.low, &res.high,
3591 				    &tmp.low, &tmp.high))
3592 	    return 0;
3593 	  break;
3594 
3595 	case UMOD:
3596 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3597 				    o0.low, o0.high, o1.low, o1.high,
3598 				    &tmp.low, &tmp.high,
3599 				    &res.low, &res.high))
3600 	    return 0;
3601 	  break;
3602 
3603 	case AND:
3604 	  res = double_int_and (o0, o1);
3605 	  break;
3606 
3607 	case IOR:
3608 	  res = double_int_ior (o0, o1);
3609 	  break;
3610 
3611 	case XOR:
3612 	  res = double_int_xor (o0, o1);
3613 	  break;
3614 
3615 	case SMIN:
3616 	  res = double_int_smin (o0, o1);
3617 	  break;
3618 
3619 	case SMAX:
3620 	  res = double_int_smax (o0, o1);
3621 	  break;
3622 
3623 	case UMIN:
3624 	  res = double_int_umin (o0, o1);
3625 	  break;
3626 
3627 	case UMAX:
3628 	  res = double_int_umax (o0, o1);
3629 	  break;
3630 
3631 	case LSHIFTRT:   case ASHIFTRT:
3632 	case ASHIFT:
3633 	case ROTATE:     case ROTATERT:
3634 	  {
3635 	    unsigned HOST_WIDE_INT cnt;
3636 
3637 	    if (SHIFT_COUNT_TRUNCATED)
3638 	      o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
3639 
3640 	    if (!double_int_fits_in_uhwi_p (o1)
3641 	        || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3642 	      return 0;
3643 
3644 	    cnt = double_int_to_uhwi (o1);
3645 
3646 	    if (code == LSHIFTRT || code == ASHIFTRT)
3647 	      res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3648 				       code == ASHIFTRT);
3649 	    else if (code == ASHIFT)
3650 	      res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3651 				       true);
3652 	    else if (code == ROTATE)
3653 	      res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3654 	    else /* code == ROTATERT */
3655 	      res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3656 	  }
3657 	  break;
3658 
3659 	default:
3660 	  return 0;
3661 	}
3662 
3663       return immed_double_int_const (res, mode);
3664     }
3665 
3666   if (CONST_INT_P (op0) && CONST_INT_P (op1)
3667       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3668     {
3669       /* Get the integer argument values in two forms:
3670          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
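      /* E.g. an all-ones QImode operand, (const_int -1), ends up with
	 ARG0 == 0xff and ARG0S == -1 once the mode mask and sign
	 extension below have been applied (an illustrative case).  */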
3671 
3672       arg0 = INTVAL (op0);
3673       arg1 = INTVAL (op1);
3674 
3675       if (width < HOST_BITS_PER_WIDE_INT)
3676         {
3677           arg0 &= GET_MODE_MASK (mode);
3678           arg1 &= GET_MODE_MASK (mode);
3679 
3680           arg0s = arg0;
3681 	  if (val_signbit_known_set_p (mode, arg0s))
3682 	    arg0s |= ~GET_MODE_MASK (mode);
3683 
3684           arg1s = arg1;
3685 	  if (val_signbit_known_set_p (mode, arg1s))
3686 	    arg1s |= ~GET_MODE_MASK (mode);
3687 	}
3688       else
3689 	{
3690 	  arg0s = arg0;
3691 	  arg1s = arg1;
3692 	}
3693 
3694       /* Compute the value of the arithmetic.  */
3695 
3696       switch (code)
3697 	{
3698 	case PLUS:
3699 	  val = arg0s + arg1s;
3700 	  break;
3701 
3702 	case MINUS:
3703 	  val = arg0s - arg1s;
3704 	  break;
3705 
3706 	case MULT:
3707 	  val = arg0s * arg1s;
3708 	  break;
3709 
3710 	case DIV:
3711 	  if (arg1s == 0
3712 	      || ((unsigned HOST_WIDE_INT) arg0s
3713 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3714 		  && arg1s == -1))
3715 	    return 0;
3716 	  val = arg0s / arg1s;
3717 	  break;
3718 
3719 	case MOD:
3720 	  if (arg1s == 0
3721 	      || ((unsigned HOST_WIDE_INT) arg0s
3722 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3723 		  && arg1s == -1))
3724 	    return 0;
3725 	  val = arg0s % arg1s;
3726 	  break;
3727 
3728 	case UDIV:
3729 	  if (arg1 == 0
3730 	      || ((unsigned HOST_WIDE_INT) arg0s
3731 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3732 		  && arg1s == -1))
3733 	    return 0;
3734 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3735 	  break;
3736 
3737 	case UMOD:
3738 	  if (arg1 == 0
3739 	      || ((unsigned HOST_WIDE_INT) arg0s
3740 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3741 		  && arg1s == -1))
3742 	    return 0;
3743 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3744 	  break;
3745 
3746 	case AND:
3747 	  val = arg0 & arg1;
3748 	  break;
3749 
3750 	case IOR:
3751 	  val = arg0 | arg1;
3752 	  break;
3753 
3754 	case XOR:
3755 	  val = arg0 ^ arg1;
3756 	  break;
3757 
3758 	case LSHIFTRT:
3759 	case ASHIFT:
3760 	case ASHIFTRT:
3761 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3762 	     the value is in range.  We can't return any old value for
3763 	     out-of-range arguments because either the middle-end (via
3764 	     shift_truncation_mask) or the back-end might be relying on
3765 	     target-specific knowledge.  Nor can we rely on
3766 	     shift_truncation_mask, since the shift might not be part of an
3767 	     ashlM3, lshrM3 or ashrM3 instruction.  */
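	  /* E.g. (ashift:SI (const_int 1) (const_int 40)) folds to
	     (const_int 256) only if SHIFT_COUNT_TRUNCATED reduces the
	     count to 40 % 32 == 8; otherwise the out-of-range shift is
	     left alone (an illustrative case).  */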
3768 	  if (SHIFT_COUNT_TRUNCATED)
3769 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3770 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3771 	    return 0;
3772 
3773 	  val = (code == ASHIFT
3774 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3775 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3776 
3777 	  /* Sign-extend the result for arithmetic right shifts.  */
3778 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3779 	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3780 	  break;
3781 
3782 	case ROTATERT:
3783 	  if (arg1 < 0)
3784 	    return 0;
3785 
3786 	  arg1 %= width;
3787 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3788 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3789 	  break;
3790 
3791 	case ROTATE:
3792 	  if (arg1 < 0)
3793 	    return 0;
3794 
3795 	  arg1 %= width;
3796 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3797 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3798 	  break;
3799 
3800 	case COMPARE:
3801 	  /* Do nothing here.  */
3802 	  return 0;
3803 
3804 	case SMIN:
3805 	  val = arg0s <= arg1s ? arg0s : arg1s;
3806 	  break;
3807 
3808 	case UMIN:
3809 	  val = ((unsigned HOST_WIDE_INT) arg0
3810 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3811 	  break;
3812 
3813 	case SMAX:
3814 	  val = arg0s > arg1s ? arg0s : arg1s;
3815 	  break;
3816 
3817 	case UMAX:
3818 	  val = ((unsigned HOST_WIDE_INT) arg0
3819 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3820 	  break;
3821 
3822 	case SS_PLUS:
3823 	case US_PLUS:
3824 	case SS_MINUS:
3825 	case US_MINUS:
3826 	case SS_MULT:
3827 	case US_MULT:
3828 	case SS_DIV:
3829 	case US_DIV:
3830 	case SS_ASHIFT:
3831 	case US_ASHIFT:
3832 	  /* ??? There are simplifications that can be done.  */
3833 	  return 0;
3834 
3835 	default:
3836 	  gcc_unreachable ();
3837 	}
3838 
3839       return gen_int_mode (val, mode);
3840     }
3841 
3842   return NULL_RTX;
3843 }
3844 
3845 
3846 
3847 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3848    PLUS or MINUS.
3849 
3850    Rather than test for specific cases, we do this by a brute-force
3851    method and do all possible simplifications until no more changes
3852    occur.  Then we rebuild the operation.  */
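/* For example (an illustrative case), simplifying
     (plus (plus x (const_int 3)) (minus y (const_int 3)))
   lets the two constants cancel, leaving a single PLUS of x and y.  */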
3853 
3854 struct simplify_plus_minus_op_data
3855 {
3856   rtx op;
3857   short neg;
3858 };
3859 
3860 static bool
3861 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3862 {
3863   int result;
3864 
3865   result = (commutative_operand_precedence (y)
3866 	    - commutative_operand_precedence (x));
3867   if (result)
3868     return result > 0;
3869 
3870   /* Group together equal REGs to do more simplification.  */
3871   if (REG_P (x) && REG_P (y))
3872     return REGNO (x) > REGNO (y);
3873   else
3874     return false;
3875 }
3876 
3877 static rtx
3878 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3879 		     rtx op1)
3880 {
3881   struct simplify_plus_minus_op_data ops[8];
3882   rtx result, tem;
3883   int n_ops = 2, input_ops = 2;
3884   int changed, n_constants = 0, canonicalized = 0;
3885   int i, j;
3886 
3887   memset (ops, 0, sizeof ops);
3888 
3889   /* Set up the two operands and then expand them until nothing has been
3890      changed.  If we run out of room in our array, give up; this should
3891      almost never happen.  */
3892 
3893   ops[0].op = op0;
3894   ops[0].neg = 0;
3895   ops[1].op = op1;
3896   ops[1].neg = (code == MINUS);
3897 
3898   do
3899     {
3900       changed = 0;
3901 
3902       for (i = 0; i < n_ops; i++)
3903 	{
3904 	  rtx this_op = ops[i].op;
3905 	  int this_neg = ops[i].neg;
3906 	  enum rtx_code this_code = GET_CODE (this_op);
3907 
3908 	  switch (this_code)
3909 	    {
3910 	    case PLUS:
3911 	    case MINUS:
3912 	      if (n_ops == 7)
3913 		return NULL_RTX;
3914 
3915 	      ops[n_ops].op = XEXP (this_op, 1);
3916 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3917 	      n_ops++;
3918 
3919 	      ops[i].op = XEXP (this_op, 0);
3920 	      input_ops++;
3921 	      changed = 1;
3922 	      canonicalized |= this_neg;
3923 	      break;
3924 
3925 	    case NEG:
3926 	      ops[i].op = XEXP (this_op, 0);
3927 	      ops[i].neg = ! this_neg;
3928 	      changed = 1;
3929 	      canonicalized = 1;
3930 	      break;
3931 
3932 	    case CONST:
3933 	      if (n_ops < 7
3934 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
3935 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3936 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3937 		{
3938 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
3939 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3940 		  ops[n_ops].neg = this_neg;
3941 		  n_ops++;
3942 		  changed = 1;
3943 	          canonicalized = 1;
3944 		}
3945 	      break;
3946 
3947 	    case NOT:
3948 	      /* ~a -> (-a - 1) */
3949 	      if (n_ops != 7)
3950 		{
3951 		  ops[n_ops].op = CONSTM1_RTX (mode);
3952 		  ops[n_ops++].neg = this_neg;
3953 		  ops[i].op = XEXP (this_op, 0);
3954 		  ops[i].neg = !this_neg;
3955 		  changed = 1;
3956 	          canonicalized = 1;
3957 		}
3958 	      break;
3959 
3960 	    case CONST_INT:
3961 	      n_constants++;
3962 	      if (this_neg)
3963 		{
3964 		  ops[i].op = neg_const_int (mode, this_op);
3965 		  ops[i].neg = 0;
3966 		  changed = 1;
3967 	          canonicalized = 1;
3968 		}
3969 	      break;
3970 
3971 	    default:
3972 	      break;
3973 	    }
3974 	}
3975     }
3976   while (changed);
3977 
3978   if (n_constants > 1)
3979     canonicalized = 1;
3980 
3981   gcc_assert (n_ops >= 2);
3982 
3983   /* If we only have two operands, we can avoid the loops.  */
3984   if (n_ops == 2)
3985     {
3986       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3987       rtx lhs, rhs;
3988 
3989       /* Get the two operands.  Be careful with the order, especially for
3990 	 the cases where code == MINUS.  */
3991       if (ops[0].neg && ops[1].neg)
3992 	{
3993 	  lhs = gen_rtx_NEG (mode, ops[0].op);
3994 	  rhs = ops[1].op;
3995 	}
3996       else if (ops[0].neg)
3997 	{
3998 	  lhs = ops[1].op;
3999 	  rhs = ops[0].op;
4000 	}
4001       else
4002 	{
4003 	  lhs = ops[0].op;
4004 	  rhs = ops[1].op;
4005 	}
4006 
4007       return simplify_const_binary_operation (code, mode, lhs, rhs);
4008     }
4009 
4010   /* Now simplify each pair of operands until nothing changes.  */
4011   do
4012     {
4013       /* Insertion sort is good enough for an eight-element array.  */
4014       for (i = 1; i < n_ops; i++)
4015         {
4016           struct simplify_plus_minus_op_data save;
4017           j = i - 1;
4018           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4019 	    continue;
4020 
4021           canonicalized = 1;
4022           save = ops[i];
4023           do
4024 	    ops[j + 1] = ops[j];
4025           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4026           ops[j + 1] = save;
4027         }
4028 
4029       changed = 0;
4030       for (i = n_ops - 1; i > 0; i--)
4031 	for (j = i - 1; j >= 0; j--)
4032 	  {
4033 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4034 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4035 
4036 	    if (lhs != 0 && rhs != 0)
4037 	      {
4038 		enum rtx_code ncode = PLUS;
4039 
4040 		if (lneg != rneg)
4041 		  {
4042 		    ncode = MINUS;
4043 		    if (lneg)
4044 		      tem = lhs, lhs = rhs, rhs = tem;
4045 		  }
4046 		else if (swap_commutative_operands_p (lhs, rhs))
4047 		  tem = lhs, lhs = rhs, rhs = tem;
4048 
4049 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4050 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4051 		  {
4052 		    rtx tem_lhs, tem_rhs;
4053 
4054 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4055 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4056 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4057 
4058 		    if (tem && !CONSTANT_P (tem))
4059 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4060 		  }
4061 		else
4062 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4063 
4064 		/* Reject "simplifications" that just wrap the two
4065 		   arguments in a CONST.  Failure to do so can result
4066 		   in infinite recursion with simplify_binary_operation
4067 		   when it calls us to simplify CONST operations.  */
4068 		if (tem
4069 		    && ! (GET_CODE (tem) == CONST
4070 			  && GET_CODE (XEXP (tem, 0)) == ncode
4071 			  && XEXP (XEXP (tem, 0), 0) == lhs
4072 			  && XEXP (XEXP (tem, 0), 1) == rhs))
4073 		  {
4074 		    lneg &= rneg;
4075 		    if (GET_CODE (tem) == NEG)
4076 		      tem = XEXP (tem, 0), lneg = !lneg;
4077 		    if (CONST_INT_P (tem) && lneg)
4078 		      tem = neg_const_int (mode, tem), lneg = 0;
4079 
4080 		    ops[i].op = tem;
4081 		    ops[i].neg = lneg;
4082 		    ops[j].op = NULL_RTX;
4083 		    changed = 1;
4084 		    canonicalized = 1;
4085 		  }
4086 	      }
4087 	  }
4088 
4089       /* If nothing changed, fail.  */
4090       if (!canonicalized)
4091         return NULL_RTX;
4092 
4093       /* Pack all the operands to the lower-numbered entries.  */
4094       for (i = 0, j = 0; j < n_ops; j++)
4095         if (ops[j].op)
4096           {
4097 	    ops[i] = ops[j];
4098 	    i++;
4099           }
4100       n_ops = i;
4101     }
4102   while (changed);
4103 
4104   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4105   if (n_ops == 2
4106       && CONST_INT_P (ops[1].op)
4107       && CONSTANT_P (ops[0].op)
4108       && ops[0].neg)
4109     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4110 
4111   /* We suppressed creation of trivial CONST expressions in the
4112      combination loop to avoid recursion.  Create one manually now.
4113      The combination loop should have ensured that there is exactly
4114      one CONST_INT, and the sort will have ensured that it is last
4115      in the array and that any other constant will be next-to-last.  */
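  /* E.g. if the array ends with { (symbol_ref X), (const_int 12) },
     plus_constant folds the constant into the SYMBOL_REF, giving
     (const (plus (symbol_ref X) (const_int 12))) (an illustrative case).  */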
4116 
4117   if (n_ops > 1
4118       && CONST_INT_P (ops[n_ops - 1].op)
4119       && CONSTANT_P (ops[n_ops - 2].op))
4120     {
4121       rtx value = ops[n_ops - 1].op;
4122       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4123 	value = neg_const_int (mode, value);
4124       ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4125       n_ops--;
4126     }
4127 
4128   /* Put a non-negated operand first, if possible.  */
4129 
4130   for (i = 0; i < n_ops && ops[i].neg; i++)
4131     continue;
4132   if (i == n_ops)
4133     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4134   else if (i != 0)
4135     {
4136       tem = ops[0].op;
4137       ops[0] = ops[i];
4138       ops[i].op = tem;
4139       ops[i].neg = 1;
4140     }
4141 
4142   /* Now make the result by performing the requested operations.  */
4143   result = ops[0].op;
4144   for (i = 1; i < n_ops; i++)
4145     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4146 			     mode, result, ops[i].op);
4147 
4148   return result;
4149 }
4150 
4151 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4152 static bool
4153 plus_minus_operand_p (const_rtx x)
4154 {
4155   return GET_CODE (x) == PLUS
4156          || GET_CODE (x) == MINUS
4157 	 || (GET_CODE (x) == CONST
4158 	     && GET_CODE (XEXP (x, 0)) == PLUS
4159 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4160 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4161 }
4162 
4163 /* Like simplify_binary_operation except used for relational operators.
4164    MODE is the mode of the result.  If MODE is VOIDmode, the operands
4165    must not both also be VOIDmode.
4166 
4167    CMP_MODE specifies the mode in which the comparison is done, so it is
4168    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4169    the operands or, if both are VOIDmode, the operands are compared in
4170    "infinite precision".  */
4171 rtx
4172 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4173 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
4174 {
4175   rtx tem, trueop0, trueop1;
4176 
4177   if (cmp_mode == VOIDmode)
4178     cmp_mode = GET_MODE (op0);
4179   if (cmp_mode == VOIDmode)
4180     cmp_mode = GET_MODE (op1);
4181 
4182   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4183   if (tem)
4184     {
4185       if (SCALAR_FLOAT_MODE_P (mode))
4186 	{
4187           if (tem == const0_rtx)
4188             return CONST0_RTX (mode);
4189 #ifdef FLOAT_STORE_FLAG_VALUE
4190 	  {
4191 	    REAL_VALUE_TYPE val;
4192 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4193 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4194 	  }
4195 #else
4196 	  return NULL_RTX;
4197 #endif
4198 	}
4199       if (VECTOR_MODE_P (mode))
4200 	{
4201 	  if (tem == const0_rtx)
4202 	    return CONST0_RTX (mode);
4203 #ifdef VECTOR_STORE_FLAG_VALUE
4204 	  {
4205 	    int i, units;
4206 	    rtvec v;
4207 
4208 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4209 	    if (val == NULL_RTX)
4210 	      return NULL_RTX;
4211 	    if (val == const1_rtx)
4212 	      return CONST1_RTX (mode);
4213 
4214 	    units = GET_MODE_NUNITS (mode);
4215 	    v = rtvec_alloc (units);
4216 	    for (i = 0; i < units; i++)
4217 	      RTVEC_ELT (v, i) = val;
4218 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4219 	  }
4220 #else
4221 	  return NULL_RTX;
4222 #endif
4223 	}
4224 
4225       return tem;
4226     }
4227 
4228   /* For the following tests, ensure const0_rtx is op1.  */
4229   if (swap_commutative_operands_p (op0, op1)
4230       || (op0 == const0_rtx && op1 != const0_rtx))
4231     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4232 
4233   /* If op0 is a compare, extract the comparison arguments from it.  */
4234   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4235     return simplify_gen_relational (code, mode, VOIDmode,
4236 				    XEXP (op0, 0), XEXP (op0, 1));
4237 
4238   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4239       || CC0_P (op0))
4240     return NULL_RTX;
4241 
4242   trueop0 = avoid_constant_pool_reference (op0);
4243   trueop1 = avoid_constant_pool_reference (op1);
4244   return simplify_relational_operation_1 (code, mode, cmp_mode,
4245 		  			  trueop0, trueop1);
4246 }
4247 
4248 /* This part of simplify_relational_operation is only used when CMP_MODE
4249    is not in class MODE_CC (i.e. it is a real comparison).
4250 
4251    MODE is the mode of the result, while CMP_MODE specifies the mode in
4252    which the comparison is done, so it is the mode of the operands.  */
4253 
4254 static rtx
4255 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4256 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4257 {
4258   enum rtx_code op0code = GET_CODE (op0);
4259 
4260   if (op1 == const0_rtx && COMPARISON_P (op0))
4261     {
4262       /* If op0 is a comparison, extract the comparison arguments
4263          from it.  */
4264       if (code == NE)
4265 	{
4266 	  if (GET_MODE (op0) == mode)
4267 	    return simplify_rtx (op0);
4268 	  else
4269 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4270 					    XEXP (op0, 0), XEXP (op0, 1));
4271 	}
4272       else if (code == EQ)
4273 	{
4274 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4275 	  if (new_code != UNKNOWN)
4276 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4277 					    XEXP (op0, 0), XEXP (op0, 1));
4278 	}
4279     }
4280 
4281   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4282      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
4283   if ((code == LTU || code == GEU)
4284       && GET_CODE (op0) == PLUS
4285       && CONST_INT_P (XEXP (op0, 1))
4286       && (rtx_equal_p (op1, XEXP (op0, 0))
4287 	  || rtx_equal_p (op1, XEXP (op0, 1))))
4288     {
4289       rtx new_cmp
4290 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4291       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4292 				      cmp_mode, XEXP (op0, 0), new_cmp);
4293     }
4294 
4295   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4296   if ((code == LTU || code == GEU)
4297       && GET_CODE (op0) == PLUS
4298       && rtx_equal_p (op1, XEXP (op0, 1))
4299       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4300       && !rtx_equal_p (op1, XEXP (op0, 0)))
4301     return simplify_gen_relational (code, mode, cmp_mode, op0,
4302 				    copy_rtx (XEXP (op0, 0)));
4303 
4304   if (op1 == const0_rtx)
4305     {
4306       /* Canonicalize (GTU x 0) as (NE x 0).  */
4307       if (code == GTU)
4308         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4309       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4310       if (code == LEU)
4311         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4312     }
4313   else if (op1 == const1_rtx)
4314     {
4315       switch (code)
4316         {
4317         case GE:
4318 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4319 	  return simplify_gen_relational (GT, mode, cmp_mode,
4320 					  op0, const0_rtx);
4321 	case GEU:
4322 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4323 	  return simplify_gen_relational (NE, mode, cmp_mode,
4324 					  op0, const0_rtx);
4325 	case LT:
4326 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4327 	  return simplify_gen_relational (LE, mode, cmp_mode,
4328 					  op0, const0_rtx);
4329 	case LTU:
4330 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4331 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4332 					  op0, const0_rtx);
4333 	default:
4334 	  break;
4335 	}
4336     }
4337   else if (op1 == constm1_rtx)
4338     {
4339       /* Canonicalize (LE x -1) as (LT x 0).  */
4340       if (code == LE)
4341         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4342       /* Canonicalize (GT x -1) as (GE x 0).  */
4343       if (code == GT)
4344         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4345     }
4346 
4347   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
4348   if ((code == EQ || code == NE)
4349       && (op0code == PLUS || op0code == MINUS)
4350       && CONSTANT_P (op1)
4351       && CONSTANT_P (XEXP (op0, 1))
4352       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4353     {
4354       rtx x = XEXP (op0, 0);
4355       rtx c = XEXP (op0, 1);
4356       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4357       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4358 
4359       /* Detect an infinite recursive condition, where we oscillate at this
4360 	 simplification case between:
4361 	    A + B == C  <--->  C - B == A,
4362 	 where A, B, and C are all constants with non-simplifiable expressions,
4363 	 usually SYMBOL_REFs.  */
4364       if (GET_CODE (tem) == invcode
4365 	  && CONSTANT_P (x)
4366 	  && rtx_equal_p (c, XEXP (tem, 1)))
4367 	return NULL_RTX;
4368 
4369       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4370     }
4371 
4372   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4373      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4374   if (code == NE
4375       && op1 == const0_rtx
4376       && GET_MODE_CLASS (mode) == MODE_INT
4377       && cmp_mode != VOIDmode
4378       /* ??? Work-around BImode bugs in the ia64 backend.  */
4379       && mode != BImode
4380       && cmp_mode != BImode
4381       && nonzero_bits (op0, cmp_mode) == 1
4382       && STORE_FLAG_VALUE == 1)
4383     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4384 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4385 	   : lowpart_subreg (mode, op0, cmp_mode);
4386 
4387   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4388   if ((code == EQ || code == NE)
4389       && op1 == const0_rtx
4390       && op0code == XOR)
4391     return simplify_gen_relational (code, mode, cmp_mode,
4392 				    XEXP (op0, 0), XEXP (op0, 1));
4393 
4394   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4395   if ((code == EQ || code == NE)
4396       && op0code == XOR
4397       && rtx_equal_p (XEXP (op0, 0), op1)
4398       && !side_effects_p (XEXP (op0, 0)))
4399     return simplify_gen_relational (code, mode, cmp_mode,
4400 				    XEXP (op0, 1), const0_rtx);
4401 
4402   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4403   if ((code == EQ || code == NE)
4404       && op0code == XOR
4405       && rtx_equal_p (XEXP (op0, 1), op1)
4406       && !side_effects_p (XEXP (op0, 1)))
4407     return simplify_gen_relational (code, mode, cmp_mode,
4408 				    XEXP (op0, 0), const0_rtx);
4409 
4410   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4411   if ((code == EQ || code == NE)
4412       && op0code == XOR
4413       && (CONST_INT_P (op1)
4414 	  || GET_CODE (op1) == CONST_DOUBLE)
4415       && (CONST_INT_P (XEXP (op0, 1))
4416 	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4417     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4418 				    simplify_gen_binary (XOR, cmp_mode,
4419 							 XEXP (op0, 1), op1));
4420 
4421   if (op0code == POPCOUNT && op1 == const0_rtx)
4422     switch (code)
4423       {
4424       case EQ:
4425       case LE:
4426       case LEU:
4427 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4428 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4429 					XEXP (op0, 0), const0_rtx);
4430 
4431       case NE:
4432       case GT:
4433       case GTU:
4434 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4435 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4436 					XEXP (op0, 0), const0_rtx);
4437 
4438       default:
4439 	break;
4440       }
4441 
4442   return NULL_RTX;
4443 }
4444 
4445 enum
4446 {
4447   CMP_EQ = 1,
4448   CMP_LT = 2,
4449   CMP_GT = 4,
4450   CMP_LTU = 8,
4451   CMP_GTU = 16
4452 };
4453 
4454 
4455 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4456    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4457    For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4458    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4459    For floating-point comparisons, assume that the operands were ordered.  */
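/* For instance (illustrative), -1 compared with 1 is "less than" signed
   but "greater than" unsigned, so the caller passes CMP_LT | CMP_GTU;
   GE then yields const0_rtx while GEU yields const_true_rtx.  */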
4460 
4461 static rtx
4462 comparison_result (enum rtx_code code, int known_results)
4463 {
4464   switch (code)
4465     {
4466     case EQ:
4467     case UNEQ:
4468       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4469     case NE:
4470     case LTGT:
4471       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4472 
4473     case LT:
4474     case UNLT:
4475       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4476     case GE:
4477     case UNGE:
4478       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4479 
4480     case GT:
4481     case UNGT:
4482       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4483     case LE:
4484     case UNLE:
4485       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4486 
4487     case LTU:
4488       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4489     case GEU:
4490       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4491 
4492     case GTU:
4493       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4494     case LEU:
4495       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4496 
4497     case ORDERED:
4498       return const_true_rtx;
4499     case UNORDERED:
4500       return const0_rtx;
4501     default:
4502       gcc_unreachable ();
4503     }
4504 }
4505 
4506 /* Check if the given comparison (done in the given MODE) is actually a
4507    tautology or a contradiction.
4508    If no simplification is possible, this function returns zero.
4509    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4510 
4511 rtx
4512 simplify_const_relational_operation (enum rtx_code code,
4513 				     enum machine_mode mode,
4514 				     rtx op0, rtx op1)
4515 {
4516   rtx tem;
4517   rtx trueop0;
4518   rtx trueop1;
4519 
4520   gcc_assert (mode != VOIDmode
4521 	      || (GET_MODE (op0) == VOIDmode
4522 		  && GET_MODE (op1) == VOIDmode));
4523 
4524   /* If op0 is a compare, extract the comparison arguments from it.  */
4525   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4526     {
4527       op1 = XEXP (op0, 1);
4528       op0 = XEXP (op0, 0);
4529 
4530       if (GET_MODE (op0) != VOIDmode)
4531 	mode = GET_MODE (op0);
4532       else if (GET_MODE (op1) != VOIDmode)
4533 	mode = GET_MODE (op1);
4534       else
4535 	return 0;
4536     }
4537 
4538   /* We can't simplify MODE_CC values since we don't know what the
4539      actual comparison is.  */
4540   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4541     return 0;
4542 
4543   /* Make sure the constant is second.  */
4544   if (swap_commutative_operands_p (op0, op1))
4545     {
4546       tem = op0, op0 = op1, op1 = tem;
4547       code = swap_condition (code);
4548     }
4549 
4550   trueop0 = avoid_constant_pool_reference (op0);
4551   trueop1 = avoid_constant_pool_reference (op1);
4552 
4553   /* For integer comparisons of A and B maybe we can simplify A - B and can
4554      then simplify a comparison of that with zero.  If A and B are both either
4555      a register or a CONST_INT, this can't help; testing for these cases will
4556      prevent infinite recursion here and speed things up.
4557 
4558      We can only do this for EQ and NE comparisons as otherwise we may
4559      lose or introduce overflow which we cannot disregard as undefined,
4560      since we do not know the signedness of the operation on either the
4561      left or the right hand side of the comparison.  */
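  /* E.g. (eq (plus x (const_int 4)) x): the subtraction below simplifies
     to (const_int 4), which compares unequal to zero, so the whole
     comparison folds to const0_rtx (an illustrative case).  */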
4562 
4563   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4564       && (code == EQ || code == NE)
4565       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4566 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4567       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4568       /* We cannot do this if tem is a nonzero address.  */
4569       && ! nonzero_address_p (tem))
4570     return simplify_const_relational_operation (signed_condition (code),
4571 						mode, tem, const0_rtx);
4572 
4573   if (! HONOR_NANS (mode) && code == ORDERED)
4574     return const_true_rtx;
4575 
4576   if (! HONOR_NANS (mode) && code == UNORDERED)
4577     return const0_rtx;
4578 
4579   /* For modes without NaNs, if the two operands are equal, we know the
4580      result except if they have side-effects.  Even with NaNs we know
4581      the result of unordered comparisons and, if signaling NaNs are
4582      irrelevant, also the result of LT/GT/LTGT.  */
4583   if ((! HONOR_NANS (GET_MODE (trueop0))
4584        || code == UNEQ || code == UNLE || code == UNGE
4585        || ((code == LT || code == GT || code == LTGT)
4586 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
4587       && rtx_equal_p (trueop0, trueop1)
4588       && ! side_effects_p (trueop0))
4589     return comparison_result (code, CMP_EQ);
4590 
4591   /* If the operands are floating-point constants, see if we can fold
4592      the result.  */
4593   if (GET_CODE (trueop0) == CONST_DOUBLE
4594       && GET_CODE (trueop1) == CONST_DOUBLE
4595       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4596     {
4597       REAL_VALUE_TYPE d0, d1;
4598 
4599       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4600       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4601 
4602       /* Comparisons are unordered iff at least one of the values is NaN.  */
4603       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4604 	switch (code)
4605 	  {
4606 	  case UNEQ:
4607 	  case UNLT:
4608 	  case UNGT:
4609 	  case UNLE:
4610 	  case UNGE:
4611 	  case NE:
4612 	  case UNORDERED:
4613 	    return const_true_rtx;
4614 	  case EQ:
4615 	  case LT:
4616 	  case GT:
4617 	  case LE:
4618 	  case GE:
4619 	  case LTGT:
4620 	  case ORDERED:
4621 	    return const0_rtx;
4622 	  default:
4623 	    return 0;
4624 	  }
4625 
4626       return comparison_result (code,
4627 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4628 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4629     }
4630 
4631   /* Otherwise, see if the operands are both integers.  */
4632   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4633        && (GET_CODE (trueop0) == CONST_DOUBLE
4634 	   || CONST_INT_P (trueop0))
4635        && (GET_CODE (trueop1) == CONST_DOUBLE
4636 	   || CONST_INT_P (trueop1)))
4637     {
4638       int width = GET_MODE_PRECISION (mode);
4639       HOST_WIDE_INT l0s, h0s, l1s, h1s;
4640       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4641 
4642       /* Get the two words comprising each integer constant.  */
4643       if (GET_CODE (trueop0) == CONST_DOUBLE)
4644 	{
4645 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4646 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4647 	}
4648       else
4649 	{
4650 	  l0u = l0s = INTVAL (trueop0);
4651 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
4652 	}
4653 
4654       if (GET_CODE (trueop1) == CONST_DOUBLE)
4655 	{
4656 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4657 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4658 	}
4659       else
4660 	{
4661 	  l1u = l1s = INTVAL (trueop1);
4662 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
4663 	}
4664 
4665       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4666 	 we have to sign or zero-extend the values.  */
4667       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4668 	{
4669 	  l0u &= GET_MODE_MASK (mode);
4670 	  l1u &= GET_MODE_MASK (mode);
4671 
4672 	  if (val_signbit_known_set_p (mode, l0s))
4673 	    l0s |= ~GET_MODE_MASK (mode);
4674 
4675 	  if (val_signbit_known_set_p (mode, l1s))
4676 	    l1s |= ~GET_MODE_MASK (mode);
4677 	}
4678       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4679 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4680 
4681       if (h0u == h1u && l0u == l1u)
4682 	return comparison_result (code, CMP_EQ);
4683       else
4684 	{
4685 	  int cr;
4686 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4687 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4688 	  return comparison_result (code, cr);
4689 	}
4690     }
4691 
4692   /* Optimize comparisons with upper and lower bounds.  */
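  /* E.g. if nonzero_bits shows OP0 fits in 8 bits, MMIN/MMAX become
     0/255, so (gtu OP0 (const_int 255)) folds to const0_rtx and
     (leu OP0 (const_int 255)) to const_true_rtx (an illustrative case).  */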
4693   if (HWI_COMPUTABLE_MODE_P (mode)
4694       && CONST_INT_P (trueop1))
4695     {
4696       int sign;
4697       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4698       HOST_WIDE_INT val = INTVAL (trueop1);
4699       HOST_WIDE_INT mmin, mmax;
4700 
4701       if (code == GEU
4702 	  || code == LEU
4703 	  || code == GTU
4704 	  || code == LTU)
4705 	sign = 0;
4706       else
4707 	sign = 1;
4708 
4709       /* Get a reduced range if the sign bit is zero.  */
4710       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4711 	{
4712 	  mmin = 0;
4713 	  mmax = nonzero;
4714 	}
4715       else
4716 	{
4717 	  rtx mmin_rtx, mmax_rtx;
4718 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4719 
4720 	  mmin = INTVAL (mmin_rtx);
4721 	  mmax = INTVAL (mmax_rtx);
4722 	  if (sign)
4723 	    {
4724 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4725 
4726 	      mmin >>= (sign_copies - 1);
4727 	      mmax >>= (sign_copies - 1);
4728 	    }
4729 	}
4730 
4731       switch (code)
4732 	{
4733 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
4734 	case GEU:
4735 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4736 	    return const_true_rtx;
4737 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4738 	    return const0_rtx;
4739 	  break;
4740 	case GE:
4741 	  if (val <= mmin)
4742 	    return const_true_rtx;
4743 	  if (val > mmax)
4744 	    return const0_rtx;
4745 	  break;
4746 
4747 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
4748 	case LEU:
4749 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4750 	    return const_true_rtx;
4751 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4752 	    return const0_rtx;
4753 	  break;
4754 	case LE:
4755 	  if (val >= mmax)
4756 	    return const_true_rtx;
4757 	  if (val < mmin)
4758 	    return const0_rtx;
4759 	  break;
4760 
4761 	case EQ:
4762 	  /* x == y is always false for y out of range.  */
4763 	  if (val < mmin || val > mmax)
4764 	    return const0_rtx;
4765 	  break;
4766 
4767 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
4768 	case GTU:
4769 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4770 	    return const0_rtx;
4771 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4772 	    return const_true_rtx;
4773 	  break;
4774 	case GT:
4775 	  if (val >= mmax)
4776 	    return const0_rtx;
4777 	  if (val < mmin)
4778 	    return const_true_rtx;
4779 	  break;
4780 
4781 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
4782 	case LTU:
4783 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4784 	    return const0_rtx;
4785 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4786 	    return const_true_rtx;
4787 	  break;
4788 	case LT:
4789 	  if (val <= mmin)
4790 	    return const0_rtx;
4791 	  if (val > mmax)
4792 	    return const_true_rtx;
4793 	  break;
4794 
4795 	case NE:
4796 	  /* x != y is always true for y out of range.  */
4797 	  if (val < mmin || val > mmax)
4798 	    return const_true_rtx;
4799 	  break;
4800 
4801 	default:
4802 	  break;
4803 	}
4804     }
4805 
4806   /* Optimize integer comparisons with zero.  */
4807   if (trueop1 == const0_rtx)
4808     {
4809       /* Some addresses are known to be nonzero.  We don't know
4810 	 their sign, but equality comparisons are known.  */
4811       if (nonzero_address_p (trueop0))
4812 	{
4813 	  if (code == EQ || code == LEU)
4814 	    return const0_rtx;
4815 	  if (code == NE || code == GTU)
4816 	    return const_true_rtx;
4817 	}
4818 
4819       /* See if the first operand is an IOR with a constant.  If so, we
4820 	 may be able to determine the result of this comparison.  */
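      /* E.g. (ne (ior x (const_int 4)) (const_int 0)) must be true,
	 since bit 2 of the IOR is always set; the switch below returns
	 const_true_rtx for it (an illustrative case).  */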
4821       if (GET_CODE (op0) == IOR)
4822 	{
4823 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4824 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4825 	    {
4826 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4827 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4828 			      && (UINTVAL (inner_const)
4829 				  & ((unsigned HOST_WIDE_INT) 1
4830 				     << sign_bitnum)));
4831 
4832 	      switch (code)
4833 		{
4834 		case EQ:
4835 		case LEU:
4836 		  return const0_rtx;
4837 		case NE:
4838 		case GTU:
4839 		  return const_true_rtx;
4840 		case LT:
4841 		case LE:
4842 		  if (has_sign)
4843 		    return const_true_rtx;
4844 		  break;
4845 		case GT:
4846 		case GE:
4847 		  if (has_sign)
4848 		    return const0_rtx;
4849 		  break;
4850 		default:
4851 		  break;
4852 		}
4853 	    }
4854 	}
4855     }
4856 
4857   /* Optimize comparison of ABS with zero.  */
4858   if (trueop1 == CONST0_RTX (mode)
4859       && (GET_CODE (trueop0) == ABS
4860 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
4861 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4862     {
4863       switch (code)
4864 	{
4865 	case LT:
4866 	  /* Optimize abs(x) < 0.0.  */
4867 	  if (!HONOR_SNANS (mode)
4868 	      && (!INTEGRAL_MODE_P (mode)
4869 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4870 	    {
4871 	      if (INTEGRAL_MODE_P (mode)
4872 		  && (issue_strict_overflow_warning
4873 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4874 		warning (OPT_Wstrict_overflow,
4875 			 ("assuming signed overflow does not occur when "
4876 			  "assuming abs (x) < 0 is false"));
4877 	       return const0_rtx;
4878 	    }
4879 	  break;
4880 
4881 	case GE:
4882 	  /* Optimize abs(x) >= 0.0.  */
4883 	  if (!HONOR_NANS (mode)
4884 	      && (!INTEGRAL_MODE_P (mode)
4885 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4886 	    {
4887 	      if (INTEGRAL_MODE_P (mode)
4888 	          && (issue_strict_overflow_warning
4889 	    	  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4890 	        warning (OPT_Wstrict_overflow,
4891 			 ("assuming signed overflow does not occur when "
4892 			  "assuming abs (x) >= 0 is true"));
4893 	      return const_true_rtx;
4894 	    }
4895 	  break;
4896 
4897 	case UNGE:
4898 	  /* Optimize ! (abs(x) < 0.0).  */
4899 	  return const_true_rtx;
4900 
4901 	default:
4902 	  break;
4903 	}
4904     }
4905 
4906   return 0;
4907 }
4908 
4909 /* Simplify CODE, an operation with result mode MODE and three operands,
4910    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4911    a constant.  Return 0 if no simplification is possible.  */
4912 
4913 rtx
4914 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4915 			    enum machine_mode op0_mode, rtx op0, rtx op1,
4916 			    rtx op2)
4917 {
4918   unsigned int width = GET_MODE_PRECISION (mode);
4919   bool any_change = false;
4920   rtx tem;
4921 
4922   /* VOIDmode means "infinite" precision.  */
4923   if (width == 0)
4924     width = HOST_BITS_PER_WIDE_INT;
4925 
4926   switch (code)
4927     {
4928     case FMA:
4929       /* Simplify negations around the multiplication.  */
4930       /* -a * -b + c  =>  a * b + c.  */
4931       if (GET_CODE (op0) == NEG)
4932 	{
4933 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
4934 	  if (tem)
4935 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4936 	}
4937       else if (GET_CODE (op1) == NEG)
4938 	{
4939 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
4940 	  if (tem)
4941 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4942 	}
4943 
4944       /* Canonicalize the two multiplication operands.  */
4945       /* a * -b + c  =>  -b * a + c.  */
4946       if (swap_commutative_operands_p (op0, op1))
4947 	tem = op0, op0 = op1, op1 = tem, any_change = true;
4948 
4949       if (any_change)
4950 	return gen_rtx_FMA (mode, op0, op1, op2);
4951       return NULL_RTX;
4952 
4953     case SIGN_EXTRACT:
4954     case ZERO_EXTRACT:
4955       if (CONST_INT_P (op0)
4956 	  && CONST_INT_P (op1)
4957 	  && CONST_INT_P (op2)
4958 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4959 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4960 	{
4961 	  /* Extracting a bit-field from a constant.  */
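	  /* E.g. with !BITS_BIG_ENDIAN, extracting 4 bits at position 4
	     from (const_int 0xab) gives 0xa for ZERO_EXTRACT and -6 for
	     SIGN_EXTRACT (an illustrative case).  */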
4962 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
4963 	  HOST_WIDE_INT op1val = INTVAL (op1);
4964 	  HOST_WIDE_INT op2val = INTVAL (op2);
4965 	  if (BITS_BIG_ENDIAN)
4966 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4967 	  else
4968 	    val >>= op2val;
4969 
4970 	  if (HOST_BITS_PER_WIDE_INT != op1val)
4971 	    {
4972 	      /* First zero-extend.  */
4973 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4974 	      /* If desired, propagate sign bit.  */
4975 	      if (code == SIGN_EXTRACT
4976 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4977 		     != 0)
4978 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4979 	    }
4980 
4981 	  return gen_int_mode (val, mode);
4982 	}
4983       break;
4984 
4985     case IF_THEN_ELSE:
4986       if (CONST_INT_P (op0))
4987 	return op0 != const0_rtx ? op1 : op2;
4988 
4989       /* Convert c ? a : a into "a".  */
4990       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4991 	return op1;
4992 
4993       /* Convert a != b ? a : b into "a".  */
4994       if (GET_CODE (op0) == NE
4995 	  && ! side_effects_p (op0)
4996 	  && ! HONOR_NANS (mode)
4997 	  && ! HONOR_SIGNED_ZEROS (mode)
4998 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4999 	       && rtx_equal_p (XEXP (op0, 1), op2))
5000 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5001 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5002 	return op1;
5003 
5004       /* Convert a == b ? a : b into "b".  */
5005       if (GET_CODE (op0) == EQ
5006 	  && ! side_effects_p (op0)
5007 	  && ! HONOR_NANS (mode)
5008 	  && ! HONOR_SIGNED_ZEROS (mode)
5009 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5010 	       && rtx_equal_p (XEXP (op0, 1), op2))
5011 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5012 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5013 	return op2;
5014 
5015       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5016 	{
5017 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5018 					? GET_MODE (XEXP (op0, 1))
5019 					: GET_MODE (XEXP (op0, 0)));
5020 	  rtx temp;
5021 
5022 	  /* Look for happy constants in op1 and op2.  */
5023 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5024 	    {
5025 	      HOST_WIDE_INT t = INTVAL (op1);
5026 	      HOST_WIDE_INT f = INTVAL (op2);
5027 
5028 	      if (t == STORE_FLAG_VALUE && f == 0)
5029 	        code = GET_CODE (op0);
5030 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5031 		{
5032 		  enum rtx_code tmp;
5033 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5034 		  if (tmp == UNKNOWN)
5035 		    break;
5036 		  code = tmp;
5037 		}
5038 	      else
5039 		break;
5040 
5041 	      return simplify_gen_relational (code, mode, cmp_mode,
5042 					      XEXP (op0, 0), XEXP (op0, 1));
5043 	    }
5044 
5045 	  if (cmp_mode == VOIDmode)
5046 	    cmp_mode = op0_mode;
5047 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
5049 						XEXP (op0, 1));
5050 
5051 	  /* See if any simplifications were possible.  */
5052 	  if (temp)
5053 	    {
5054 	      if (CONST_INT_P (temp))
5055 		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5058 	    }
5059 	}
5060       break;
5061 
5062     case VEC_MERGE:
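      /* (vec_merge:M OP0 OP1 OP2) takes element I from OP0 when bit I of
	 OP2 is set and from OP1 otherwise, so a constant OP2 lets us
	 return OP0, return OP1, or build a constant vector directly.  */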
5063       gcc_assert (GET_MODE (op0) == mode);
5064       gcc_assert (GET_MODE (op1) == mode);
5065       gcc_assert (VECTOR_MODE_P (mode));
5066       op2 = avoid_constant_pool_reference (op2);
5067       if (CONST_INT_P (op2))
5068 	{
5069           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5070 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5071 	  int mask = (1 << n_elts) - 1;
5072 
5073 	  if (!(INTVAL (op2) & mask))
5074 	    return op1;
5075 	  if ((INTVAL (op2) & mask) == mask)
5076 	    return op0;
5077 
5078 	  op0 = avoid_constant_pool_reference (op0);
5079 	  op1 = avoid_constant_pool_reference (op1);
5080 	  if (GET_CODE (op0) == CONST_VECTOR
5081 	      && GET_CODE (op1) == CONST_VECTOR)
5082 	    {
5083 	      rtvec v = rtvec_alloc (n_elts);
5084 	      unsigned int i;
5085 
5086 	      for (i = 0; i < n_elts; i++)
5087 		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5088 				    ? CONST_VECTOR_ELT (op0, i)
5089 				    : CONST_VECTOR_ELT (op1, i));
5090 	      return gen_rtx_CONST_VECTOR (mode, v);
5091 	    }
5092 	}
5093       break;
5094 
5095     default:
5096       gcc_unreachable ();
5097     }
5098 
5099   return 0;
5100 }
5101 
5102 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5103    or CONST_VECTOR,
5104    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5105 
5106    Works by unpacking OP into a collection of 8-bit values
5107    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5108    and then repacking them again for OUTERMODE.  */
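/* For instance, on a little-endian target with a 64-bit HOST_WIDE_INT,
   simplifying (subreg:SI (const_int 0x1122334455667788) 0) unpacks the
   constant into the bytes {0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11},
   selects the four bytes starting at offset 0, and repacks them into
   (const_int 0x55667788).  */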
5109 
5110 static rtx
5111 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5112 		       enum machine_mode innermode, unsigned int byte)
5113 {
5114   /* We support up to 512-bit values (for V8DFmode).  */
5115   enum {
5116     max_bitsize = 512,
5117     value_bit = 8,
5118     value_mask = (1 << value_bit) - 1
5119   };
5120   unsigned char value[max_bitsize / value_bit];
5121   int value_start;
5122   int i;
5123   int elem;
5124 
5125   int num_elem;
5126   rtx * elems;
5127   int elem_bitsize;
5128   rtx result_s;
5129   rtvec result_v = NULL;
5130   enum mode_class outer_class;
5131   enum machine_mode outer_submode;
5132 
5133   /* Some ports misuse CCmode.  */
5134   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5135     return op;
5136 
5137   /* We have no way to represent a complex constant at the rtl level.  */
5138   if (COMPLEX_MODE_P (outermode))
5139     return NULL_RTX;
5140 
5141   /* Unpack the value.  */
5142 
5143   if (GET_CODE (op) == CONST_VECTOR)
5144     {
5145       num_elem = CONST_VECTOR_NUNITS (op);
5146       elems = &CONST_VECTOR_ELT (op, 0);
5147       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5148     }
5149   else
5150     {
5151       num_elem = 1;
5152       elems = &op;
5153       elem_bitsize = max_bitsize;
5154     }
  /* If this assertion fails, the value is too complicated;
     reducing value_bit may help.  */
5156   gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* We don't know how to handle endianness of sub-byte units.  */
5158   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5159 
5160   for (elem = 0; elem < num_elem; elem++)
5161     {
5162       unsigned char * vp;
5163       rtx el = elems[elem];
5164 
5165       /* Vectors are kept in target memory order.  (This is probably
5166 	 a mistake.)  */
5167       {
5168 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5169 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5170 			  / BITS_PER_UNIT);
5171 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5172 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5173 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5174 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5175 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5176       }
5177 
5178       switch (GET_CODE (el))
5179 	{
5180 	case CONST_INT:
5181 	  for (i = 0;
5182 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5183 	       i += value_bit)
5184 	    *vp++ = INTVAL (el) >> i;
5185 	  /* CONST_INTs are always logically sign-extended.  */
5186 	  for (; i < elem_bitsize; i += value_bit)
5187 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5188 	  break;
5189 
5190 	case CONST_DOUBLE:
5191 	  if (GET_MODE (el) == VOIDmode)
5192 	    {
5193 	      /* If this triggers, someone should have generated a
5194 		 CONST_INT instead.  */
5195 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5196 
5197 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5198 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5199 	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5200 		{
5201 		  *vp++
5202 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5203 		  i += value_bit;
5204 		}
5205 	      /* It shouldn't matter what's done here, so fill it with
5206 		 zero.  */
5207 	      for (; i < elem_bitsize; i += value_bit)
5208 		*vp++ = 0;
5209 	    }
5210 	  else
5211 	    {
5212 	      long tmp[max_bitsize / 32];
5213 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5214 
5215 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5216 	      gcc_assert (bitsize <= elem_bitsize);
5217 	      gcc_assert (bitsize % value_bit == 0);
5218 
5219 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5220 			      GET_MODE (el));
5221 
5222 	      /* real_to_target produces its result in words affected by
5223 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5224 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5225 	         of SUBREG in rtl.texi.  */
5226 	      for (i = 0; i < bitsize; i += value_bit)
5227 		{
5228 		  int ibase;
5229 		  if (WORDS_BIG_ENDIAN)
5230 		    ibase = bitsize - 1 - i;
5231 		  else
5232 		    ibase = i;
5233 		  *vp++ = tmp[ibase / 32] >> i % 32;
5234 		}
5235 
5236 	      /* It shouldn't matter what's done here, so fill it with
5237 		 zero.  */
5238 	      for (; i < elem_bitsize; i += value_bit)
5239 		*vp++ = 0;
5240 	    }
5241 	  break;
5242 
5243         case CONST_FIXED:
5244 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5245 	    {
5246 	      for (i = 0; i < elem_bitsize; i += value_bit)
5247 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5248 	    }
5249 	  else
5250 	    {
5251 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5252 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5253               for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5254 		   i += value_bit)
5255 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5256 			>> (i - HOST_BITS_PER_WIDE_INT);
5257 	      for (; i < elem_bitsize; i += value_bit)
5258 		*vp++ = 0;
5259 	    }
5260           break;
5261 
5262 	default:
5263 	  gcc_unreachable ();
5264 	}
5265     }
5266 
5267   /* Now, pick the right byte to start with.  */
5268   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5269      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5270      will already have offset 0.  */
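  /* For instance, extracting the SImode lowpart of a DImode value on a
     target where WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN both hold arrives
     here with BYTE == 4, which the adjustment below maps to byte 0.  */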
5271   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5272     {
5273       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5274 			- byte);
5275       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5276       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5277       byte = (subword_byte % UNITS_PER_WORD
5278 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5279     }
5280 
5281   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5282      so if it's become negative it will instead be very large.)  */
5283   gcc_assert (byte < GET_MODE_SIZE (innermode));
5284 
5285   /* Convert from bytes to chunks of size value_bit.  */
5286   value_start = byte * (BITS_PER_UNIT / value_bit);
5287 
5288   /* Re-pack the value.  */
5289 
5290   if (VECTOR_MODE_P (outermode))
5291     {
5292       num_elem = GET_MODE_NUNITS (outermode);
5293       result_v = rtvec_alloc (num_elem);
5294       elems = &RTVEC_ELT (result_v, 0);
5295       outer_submode = GET_MODE_INNER (outermode);
5296     }
5297   else
5298     {
5299       num_elem = 1;
5300       elems = &result_s;
5301       outer_submode = outermode;
5302     }
5303 
5304   outer_class = GET_MODE_CLASS (outer_submode);
5305   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5306 
5307   gcc_assert (elem_bitsize % value_bit == 0);
5308   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5309 
5310   for (elem = 0; elem < num_elem; elem++)
5311     {
5312       unsigned char *vp;
5313 
5314       /* Vectors are stored in target memory order.  (This is probably
5315 	 a mistake.)  */
5316       {
5317 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5318 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5319 			  / BITS_PER_UNIT);
5320 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5321 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5322 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5323 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5324 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5325       }
5326 
5327       switch (outer_class)
5328 	{
5329 	case MODE_INT:
5330 	case MODE_PARTIAL_INT:
5331 	  {
5332 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5333 
5334 	    for (i = 0;
5335 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5336 		 i += value_bit)
5337 	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5338 	    for (; i < elem_bitsize; i += value_bit)
5339 	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5340 		     << (i - HOST_BITS_PER_WIDE_INT);
5341 
5342 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5343 	       know why.  */
5344 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5345 	      elems[elem] = gen_int_mode (lo, outer_submode);
5346 	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5347 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5348 	    else
5349 	      return NULL_RTX;
5350 	  }
5351 	  break;
5352 
5353 	case MODE_FLOAT:
5354 	case MODE_DECIMAL_FLOAT:
5355 	  {
5356 	    REAL_VALUE_TYPE r;
5357 	    long tmp[max_bitsize / 32];
5358 
5359 	    /* real_from_target wants its input in words affected by
5360 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5361 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5362 	       of SUBREG in rtl.texi.  */
5363 	    for (i = 0; i < max_bitsize / 32; i++)
5364 	      tmp[i] = 0;
5365 	    for (i = 0; i < elem_bitsize; i += value_bit)
5366 	      {
5367 		int ibase;
5368 		if (WORDS_BIG_ENDIAN)
5369 		  ibase = elem_bitsize - 1 - i;
5370 		else
5371 		  ibase = i;
5372 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5373 	      }
5374 
5375 	    real_from_target (&r, tmp, outer_submode);
5376 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5377 	  }
5378 	  break;
5379 
5380 	case MODE_FRACT:
5381 	case MODE_UFRACT:
5382 	case MODE_ACCUM:
5383 	case MODE_UACCUM:
5384 	  {
5385 	    FIXED_VALUE_TYPE f;
5386 	    f.data.low = 0;
5387 	    f.data.high = 0;
5388 	    f.mode = outer_submode;
5389 
5390 	    for (i = 0;
5391 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5392 		 i += value_bit)
5393 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5394 	    for (; i < elem_bitsize; i += value_bit)
5395 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5396 			     << (i - HOST_BITS_PER_WIDE_INT));
5397 
5398 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5399           }
5400           break;
5401 
5402 	default:
5403 	  gcc_unreachable ();
5404 	}
5405     }
5406   if (VECTOR_MODE_P (outermode))
5407     return gen_rtx_CONST_VECTOR (outermode, result_v);
5408   else
5409     return result_s;
5410 }
5411 
5412 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5413    Return 0 if no simplifications are possible.  */
5414 rtx
5415 simplify_subreg (enum machine_mode outermode, rtx op,
5416 		 enum machine_mode innermode, unsigned int byte)
5417 {
5418   /* Little bit of sanity checking.  */
5419   gcc_assert (innermode != VOIDmode);
5420   gcc_assert (outermode != VOIDmode);
5421   gcc_assert (innermode != BLKmode);
5422   gcc_assert (outermode != BLKmode);
5423 
5424   gcc_assert (GET_MODE (op) == innermode
5425 	      || GET_MODE (op) == VOIDmode);
5426 
5427   gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5428   gcc_assert (byte < GET_MODE_SIZE (innermode));
5429 
5430   if (outermode == innermode && !byte)
5431     return op;
5432 
5433   if (CONST_INT_P (op)
5434       || GET_CODE (op) == CONST_DOUBLE
5435       || GET_CODE (op) == CONST_FIXED
5436       || GET_CODE (op) == CONST_VECTOR)
5437     return simplify_immed_subreg (outermode, op, innermode, byte);
5438 
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
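  /* For example, (subreg:QI (subreg:HI (reg:SI R) 0) 0) on a little-endian
     target collapses to (subreg:QI (reg:SI R) 0) for a pseudo register R.  */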
5441   if (GET_CODE (op) == SUBREG)
5442     {
5443       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5444       int final_offset = byte + SUBREG_BYTE (op);
5445       rtx newx;
5446 
5447       if (outermode == innermostmode
5448 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5449 	return SUBREG_REG (op);
5450 
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0; on big-endian machines this
	 value would otherwise be negative.  For a moment, undo this
	 exception.  */
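      /* For example, when OUTERMODE is DImode and INNERMODE is SImode,
	 DIFFERENCE below is -4, and on a target where WORDS_BIG_ENDIAN
	 holds FINAL_OFFSET is adjusted by that amount.  */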
5455       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5456 	{
5457 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5458 	  if (WORDS_BIG_ENDIAN)
5459 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5460 	  if (BYTES_BIG_ENDIAN)
5461 	    final_offset += difference % UNITS_PER_WORD;
5462 	}
5463       if (SUBREG_BYTE (op) == 0
5464 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5465 	{
5466 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5467 	  if (WORDS_BIG_ENDIAN)
5468 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5469 	  if (BYTES_BIG_ENDIAN)
5470 	    final_offset += difference % UNITS_PER_WORD;
5471 	}
5472 
5473       /* See whether resulting subreg will be paradoxical.  */
5474       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5475 	{
5476 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5477 	  if (final_offset < 0)
5478 	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
5480 	  if (final_offset % GET_MODE_SIZE (outermode)
5481 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5482 	    return NULL_RTX;
5483 	}
5484       else
5485 	{
5486 	  int offset = 0;
5487 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5488 
	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5491 	  if (WORDS_BIG_ENDIAN)
5492 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5493 	  if (BYTES_BIG_ENDIAN)
5494 	    offset += difference % UNITS_PER_WORD;
5495 	  if (offset == final_offset)
5496 	    final_offset = 0;
5497 	  else
5498 	    return NULL_RTX;
5499 	}
5500 
5501       /* Recurse for further possible simplifications.  */
5502       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5503 			      final_offset);
5504       if (newx)
5505 	return newx;
5506       if (validate_subreg (outermode, innermostmode,
5507 			   SUBREG_REG (op), final_offset))
5508 	{
5509 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5510 	  if (SUBREG_PROMOTED_VAR_P (op)
5511 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5512 	      && GET_MODE_CLASS (outermode) == MODE_INT
5513 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5514 			   GET_MODE_SIZE (innermode),
5515 			   GET_MODE_SIZE (innermostmode))
5516 	      && subreg_lowpart_p (newx))
5517 	    {
5518 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5519 	      SUBREG_PROMOTED_UNSIGNED_SET
5520 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5521 	    }
5522 	  return newx;
5523 	}
5524       return NULL_RTX;
5525     }
5526 
5527   /* Merge implicit and explicit truncations.  */
5528 
5529   if (GET_CODE (op) == TRUNCATE
5530       && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5531       && subreg_lowpart_offset (outermode, innermode) == byte)
5532     return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5533 			       GET_MODE (XEXP (op, 0)));
5534 
5535   /* SUBREG of a hard register => just change the register number
5536      and/or mode.  If the hard register is not valid in that mode,
5537      suppress this simplification.  If the hard register is the stack,
5538      frame, or argument pointer, leave this as a SUBREG.  */
5539 
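  /* For instance, on a little-endian target where hard register 2 is
     valid in both SImode and HImode, (subreg:HI (reg:SI 2) 0) can simply
     become (reg:HI 2).  */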
5540   if (REG_P (op) && HARD_REGISTER_P (op))
5541     {
5542       unsigned int regno, final_regno;
5543 
5544       regno = REGNO (op);
5545       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5546       if (HARD_REGISTER_NUM_P (final_regno))
5547 	{
5548 	  rtx x;
5549 	  int final_offset = byte;
5550 
5551 	  /* Adjust offset for paradoxical subregs.  */
5552 	  if (byte == 0
5553 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5554 	    {
5555 	      int difference = (GET_MODE_SIZE (innermode)
5556 				- GET_MODE_SIZE (outermode));
5557 	      if (WORDS_BIG_ENDIAN)
5558 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5559 	      if (BYTES_BIG_ENDIAN)
5560 		final_offset += difference % UNITS_PER_WORD;
5561 	    }
5562 
5563 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5564 
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
5569 
5570 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5571 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5572 	  return x;
5573 	}
5574     }
5575 
5576   /* If we have a SUBREG of a register that we are replacing and we are
5577      replacing it with a MEM, make a new MEM and try replacing the
5578      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5579      or if we would be widening it.  */
5580 
5581   if (MEM_P (op)
5582       && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
5585       && (! MEM_VOLATILE_P (op)
5586 	  || ! have_insn_for (SET, innermode))
5587       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5588     return adjust_address_nv (op, outermode, byte);
5589 
5590   /* Handle complex values represented as CONCAT
5591      of real and imaginary part.  */
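  /* For example, (subreg:SF (concat:SC R I) 4) selects the imaginary
     part I outright when SFmode is four bytes wide.  */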
5592   if (GET_CODE (op) == CONCAT)
5593     {
5594       unsigned int part_size, final_offset;
5595       rtx part, res;
5596 
5597       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5598       if (byte < part_size)
5599 	{
5600 	  part = XEXP (op, 0);
5601 	  final_offset = byte;
5602 	}
5603       else
5604 	{
5605 	  part = XEXP (op, 1);
5606 	  final_offset = byte - part_size;
5607 	}
5608 
5609       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5610 	return NULL_RTX;
5611 
5612       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5613       if (res)
5614 	return res;
5615       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5616 	return gen_rtx_SUBREG (outermode, part, final_offset);
5617       return NULL_RTX;
5618     }
5619 
5620   /* Optimize SUBREG truncations of zero and sign extended values.  */
5621   if ((GET_CODE (op) == ZERO_EXTEND
5622        || GET_CODE (op) == SIGN_EXTEND)
5623       && SCALAR_INT_MODE_P (innermode)
5624       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5625     {
5626       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5627 
5628       /* If we're requesting the lowpart of a zero or sign extension,
5629 	 there are three possibilities.  If the outermode is the same
5630 	 as the origmode, we can omit both the extension and the subreg.
5631 	 If the outermode is not larger than the origmode, we can apply
5632 	 the truncation without the extension.  Finally, if the outermode
5633 	 is larger than the origmode, but both are integer modes, we
5634 	 can just extend to the appropriate mode.  */
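      /* For example, on a little-endian target,
	 (subreg:QI (zero_extend:SI (reg:QI R)) 0) is just (reg:QI R),
	 while (subreg:HI (zero_extend:SI (reg:QI R)) 0) becomes
	 (zero_extend:HI (reg:QI R)).  */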
5635       if (bitpos == 0)
5636 	{
5637 	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5638 	  if (outermode == origmode)
5639 	    return XEXP (op, 0);
5640 	  if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5641 	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5642 					subreg_lowpart_offset (outermode,
5643 							       origmode));
5644 	  if (SCALAR_INT_MODE_P (outermode))
5645 	    return simplify_gen_unary (GET_CODE (op), outermode,
5646 				       XEXP (op, 0), origmode);
5647 	}
5648 
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
5651       if (GET_CODE (op) == ZERO_EXTEND
5652 	  && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5653 	return CONST0_RTX (outermode);
5654     }
5655 
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and the
     outer subreg is effectively a truncation to the original mode.  */
5659   if ((GET_CODE (op) == LSHIFTRT
5660        || GET_CODE (op) == ASHIFTRT)
5661       && SCALAR_INT_MODE_P (outermode)
5662       && SCALAR_INT_MODE_P (innermode)
5663       /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5664 	 to avoid the possibility that an outer LSHIFTRT shifts by more
5665 	 than the sign extension's sign_bit_copies and introduces zeros
5666 	 into the high bits of the result.  */
5667       && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5668       && CONST_INT_P (XEXP (op, 1))
5669       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5670       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5671       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5672       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5673     return simplify_gen_binary (ASHIFTRT, outermode,
5674 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5675 
  /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5679   if ((GET_CODE (op) == LSHIFTRT
5680        || GET_CODE (op) == ASHIFTRT)
5681       && SCALAR_INT_MODE_P (outermode)
5682       && SCALAR_INT_MODE_P (innermode)
5683       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5684       && CONST_INT_P (XEXP (op, 1))
5685       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5686       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5687       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5688       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5689     return simplify_gen_binary (LSHIFTRT, outermode,
5690 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5691 
  /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5695   if (GET_CODE (op) == ASHIFT
5696       && SCALAR_INT_MODE_P (outermode)
5697       && SCALAR_INT_MODE_P (innermode)
5698       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5699       && CONST_INT_P (XEXP (op, 1))
5700       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5701 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5702       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5703       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5704       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5705     return simplify_gen_binary (ASHIFT, outermode,
5706 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5707 
5708   /* Recognize a word extraction from a multi-word subreg.  */
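  /* For example, on a 32-bit !WORDS_BIG_ENDIAN target,
     (subreg:SI (lshiftrt:DI (reg:DI R) (const_int 32)) 0) becomes
     (subreg:SI (reg:DI R) 4), i.e. the high word of R.  */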
5709   if ((GET_CODE (op) == LSHIFTRT
5710        || GET_CODE (op) == ASHIFTRT)
5711       && SCALAR_INT_MODE_P (innermode)
5712       && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5713       && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5714       && CONST_INT_P (XEXP (op, 1))
5715       && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5716       && INTVAL (XEXP (op, 1)) >= 0
5717       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5718       && byte == subreg_lowpart_offset (outermode, innermode))
5719     {
5720       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5721       return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5722 				  (WORDS_BIG_ENDIAN
5723 				   ? byte - shifted_bytes
5724 				   : byte + shifted_bytes));
5725     }
5726 
5727   /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5728      and try replacing the SUBREG and shift with it.  Don't do this if
5729      the MEM has a mode-dependent address or if we would be widening it.  */
5730 
5731   if ((GET_CODE (op) == LSHIFTRT
5732        || GET_CODE (op) == ASHIFTRT)
5733       && SCALAR_INT_MODE_P (innermode)
5734       && MEM_P (XEXP (op, 0))
5735       && CONST_INT_P (XEXP (op, 1))
5736       && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5737       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5738       && INTVAL (XEXP (op, 1)) > 0
5739       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5740       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5741       && ! MEM_VOLATILE_P (XEXP (op, 0))
5742       && byte == subreg_lowpart_offset (outermode, innermode)
5743       && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5744 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5745     {
5746       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5747       return adjust_address_nv (XEXP (op, 0), outermode,
5748 				(WORDS_BIG_ENDIAN
5749 				 ? byte - shifted_bytes
5750 				 : byte + shifted_bytes));
5751     }
5752 
5753   return NULL_RTX;
5754 }
5755 
5756 /* Make a SUBREG operation or equivalent if it folds.  */
5757 
5758 rtx
5759 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5760 		     enum machine_mode innermode, unsigned int byte)
5761 {
5762   rtx newx;
5763 
5764   newx = simplify_subreg (outermode, op, innermode, byte);
5765   if (newx)
5766     return newx;
5767 
5768   if (GET_CODE (op) == SUBREG
5769       || GET_CODE (op) == CONCAT
5770       || GET_MODE (op) == VOIDmode)
5771     return NULL_RTX;
5772 
5773   if (validate_subreg (outermode, innermode, op, byte))
5774     return gen_rtx_SUBREG (outermode, op, byte);
5775 
5776   return NULL_RTX;
5777 }
5778 
5779 /* Simplify X, an rtx expression.
5780 
5781    Return the simplified expression or NULL if no simplifications
5782    were possible.
5783 
5784    This is the preferred entry point into the simplification routines;
5785    however, we still allow passes to call the more specific routines.
5786 
5787    Right now GCC has three (yes, three) major bodies of RTL simplification
5788    code that need to be unified.
5789 
5790 	1. fold_rtx in cse.c.  This code uses various CSE specific
5791 	   information to aid in RTL simplification.
5792 
	2. combine_simplify_rtx in combine.c.  Similar to fold_rtx, except
	   that it uses combine specific information to aid in RTL
	   simplification.
5796 
5797 	3. The routines in this file.
5798 
5799 
5800    Long term we want to only have one body of simplification code; to
5801    get to that state I recommend the following steps:
5802 
	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.
5805 
5806 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
5807 	   use this routine whenever possible.
5808 
5809 	3. Allow for pass dependent state to be provided to these
5810 	   routines and add simplifications based on the pass dependent
5811 	   state.  Remove code from cse.c & combine.c that becomes
5812 	   redundant/dead.
5813 
    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added in four places (three for RTL
    simplification and one for tree simplification).  */
5818 
5819 rtx
5820 simplify_rtx (const_rtx x)
5821 {
5822   const enum rtx_code code = GET_CODE (x);
5823   const enum machine_mode mode = GET_MODE (x);
5824 
5825   switch (GET_RTX_CLASS (code))
5826     {
5827     case RTX_UNARY:
5828       return simplify_unary_operation (code, mode,
5829 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5830     case RTX_COMM_ARITH:
5831       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5832 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5833 
5834       /* Fall through....  */
5835 
5836     case RTX_BIN_ARITH:
5837       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5838 
5839     case RTX_TERNARY:
5840     case RTX_BITFIELD_OPS:
5841       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5842 					 XEXP (x, 0), XEXP (x, 1),
5843 					 XEXP (x, 2));
5844 
5845     case RTX_COMPARE:
5846     case RTX_COMM_COMPARE:
5847       return simplify_relational_operation (code, mode,
5848                                             ((GET_MODE (XEXP (x, 0))
5849                                              != VOIDmode)
5850                                             ? GET_MODE (XEXP (x, 0))
5851                                             : GET_MODE (XEXP (x, 1))),
5852                                             XEXP (x, 0),
5853                                             XEXP (x, 1));
5854 
5855     case RTX_EXTRA:
5856       if (code == SUBREG)
5857 	return simplify_subreg (mode, SUBREG_REG (x),
5858 				GET_MODE (SUBREG_REG (x)),
5859 				SUBREG_BYTE (x));
5860       break;
5861 
5862     case RTX_OBJ:
5863       if (code == LO_SUM)
5864 	{
5865 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
5866 	  if (GET_CODE (XEXP (x, 0)) == HIGH
5867 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
5869 	}
5870       break;
5871 
5872     default:
5873       break;
5874     }
5875   return NULL;
5876 }
5877