1 /* RTL simplification functions for GNU compiler.
2    Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3    1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4    2011, 2012  Free Software Foundation, Inc.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "diagnostic-core.h"
38 #include "output.h"
39 #include "ggc.h"
40 #include "target.h"
41 
42 /* Simplification and canonicalization of RTL.  */
43 
44 /* Much code operates on (low, high) pairs; the low value is an
45    unsigned wide int, the high value a signed wide int.  We
46    occasionally need to sign extend from low to high as if low were a
47    signed wide int.  */
48 #define HWI_SIGN_EXTEND(low) \
49  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
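/* For example, HWI_SIGN_EXTEND (5) is 0, while HWI_SIGN_EXTEND of a low word
   whose top bit is set is -1; the result is the high half of LOW sign-extended
   to a double-width (low, high) pair.  */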
50 
51 static rtx neg_const_int (enum machine_mode, const_rtx);
52 static bool plus_minus_operand_p (const_rtx);
53 static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
55 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
56 				  unsigned int);
57 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
58 					   rtx, rtx);
59 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
60 					    enum machine_mode, rtx, rtx);
61 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
62 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
63 					rtx, rtx, rtx, rtx);
64 
65 /* Negate a CONST_INT rtx, truncating (because a conversion from a
66    maximally negative number can overflow).  */
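/* For instance, in QImode the most negative value is (const_int -128);
   mathematically its negation is 128, which QImode cannot represent, so
   gen_int_mode wraps the result back to -128.  */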
67 static rtx
68 neg_const_int (enum machine_mode mode, const_rtx i)
69 {
70   return gen_int_mode (- INTVAL (i), mode);
71 }
72 
/* Test whether expression X is an immediate constant that represents
   the most significant bit of machine mode MODE.  */
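/* For example, in SImode this accepts (const_int -2147483648), i.e. the
   value 0x80000000 after masking to 32 bits.  */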
75 
76 bool
77 mode_signbit_p (enum machine_mode mode, const_rtx x)
78 {
79   unsigned HOST_WIDE_INT val;
80   unsigned int width;
81 
82   if (GET_MODE_CLASS (mode) != MODE_INT)
83     return false;
84 
85   width = GET_MODE_PRECISION (mode);
86   if (width == 0)
87     return false;
88 
89   if (width <= HOST_BITS_PER_WIDE_INT
90       && CONST_INT_P (x))
91     val = INTVAL (x);
92   else if (width <= 2 * HOST_BITS_PER_WIDE_INT
93 	   && GET_CODE (x) == CONST_DOUBLE
94 	   && CONST_DOUBLE_LOW (x) == 0)
95     {
96       val = CONST_DOUBLE_HIGH (x);
97       width -= HOST_BITS_PER_WIDE_INT;
98     }
99   else
100     return false;
101 
102   if (width < HOST_BITS_PER_WIDE_INT)
103     val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
104   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 }
106 
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108    (after masking with the mode mask of MODE).  Returns false if the
109    precision of MODE is too large to handle.  */
110 
111 bool
112 val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
113 {
114   unsigned int width;
115 
116   if (GET_MODE_CLASS (mode) != MODE_INT)
117     return false;
118 
119   width = GET_MODE_PRECISION (mode);
120   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
121     return false;
122 
123   val &= GET_MODE_MASK (mode);
124   return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
125 }
126 
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128    Returns false if the precision of MODE is too large to handle.  */
129 bool
130 val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
131 {
132   unsigned int width;
133 
134   if (GET_MODE_CLASS (mode) != MODE_INT)
135     return false;
136 
137   width = GET_MODE_PRECISION (mode);
138   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
139     return false;
140 
141   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
142   return val != 0;
143 }
144 
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146    Returns false if the precision of MODE is too large to handle.  */
147 bool
148 val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
149 {
150   unsigned int width;
151 
152   if (GET_MODE_CLASS (mode) != MODE_INT)
153     return false;
154 
155   width = GET_MODE_PRECISION (mode);
156   if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
157     return false;
158 
159   val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
160   return val == 0;
161 }
162 
163 /* Make a binary operation by properly ordering the operands and
164    seeing if the expression folds.  */
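/* For example, (plus:SI (reg R) (const_int 0)) folds to (reg R), while
   (plus:SI (const_int 3) (reg R)) does not fold and is merely canonicalized
   to (plus:SI (reg R) (const_int 3)).  */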
165 
166 rtx
167 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
168 		     rtx op1)
169 {
170   rtx tem;
171 
172   /* If this simplifies, do it.  */
173   tem = simplify_binary_operation (code, mode, op0, op1);
174   if (tem)
175     return tem;
176 
177   /* Put complex operands first and constants second if commutative.  */
178   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
179       && swap_commutative_operands_p (op0, op1))
180     tem = op0, op0 = op1, op1 = tem;
181 
182   return gen_rtx_fmt_ee (code, mode, op0, op1);
183 }
184 
185 /* If X is a MEM referencing the constant pool, return the real value.
186    Otherwise return X.  */
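/* For instance, a (mem:SF ...) whose address is a constant-pool SYMBOL_REF
   for the value 1.0 is replaced by the corresponding (const_double:SF ...),
   giving later simplifications a chance to fold it.  */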
187 rtx
188 avoid_constant_pool_reference (rtx x)
189 {
190   rtx c, tmp, addr;
191   enum machine_mode cmode;
192   HOST_WIDE_INT offset = 0;
193 
194   switch (GET_CODE (x))
195     {
196     case MEM:
197       break;
198 
199     case FLOAT_EXTEND:
200       /* Handle float extensions of constant pool references.  */
201       tmp = XEXP (x, 0);
202       c = avoid_constant_pool_reference (tmp);
203       if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
204 	{
205 	  REAL_VALUE_TYPE d;
206 
207 	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
208 	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
209 	}
210       return x;
211 
212     default:
213       return x;
214     }
215 
216   if (GET_MODE (x) == BLKmode)
217     return x;
218 
219   addr = XEXP (x, 0);
220 
221   /* Call target hook to avoid the effects of -fpic etc....  */
222   addr = targetm.delegitimize_address (addr);
223 
224   /* Split the address into a base and integer offset.  */
225   if (GET_CODE (addr) == CONST
226       && GET_CODE (XEXP (addr, 0)) == PLUS
227       && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
228     {
229       offset = INTVAL (XEXP (XEXP (addr, 0), 1));
230       addr = XEXP (XEXP (addr, 0), 0);
231     }
232 
233   if (GET_CODE (addr) == LO_SUM)
234     addr = XEXP (addr, 1);
235 
236   /* If this is a constant pool reference, we can turn it into its
237      constant and hope that simplifications happen.  */
238   if (GET_CODE (addr) == SYMBOL_REF
239       && CONSTANT_POOL_ADDRESS_P (addr))
240     {
241       c = get_pool_constant (addr);
242       cmode = get_pool_mode (addr);
243 
244       /* If we're accessing the constant in a different mode than it was
245          originally stored, attempt to fix that up via subreg simplifications.
246          If that fails we have no choice but to return the original memory.  */
247       if ((offset != 0 || cmode != GET_MODE (x))
248 	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
249         {
250           rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251           if (tem && CONSTANT_P (tem))
252             return tem;
253         }
254       else
255         return c;
256     }
257 
258   return x;
259 }
260 
261 /* Simplify a MEM based on its attributes.  This is the default
262    delegitimize_address target hook, and it's recommended that every
263    overrider call it.  */
264 
265 rtx
266 delegitimize_mem_from_attrs (rtx x)
267 {
268   /* MEMs without MEM_OFFSETs may have been offset, so we can't just
269      use their base addresses as equivalent.  */
270   if (MEM_P (x)
271       && MEM_EXPR (x)
272       && MEM_OFFSET_KNOWN_P (x))
273     {
274       tree decl = MEM_EXPR (x);
275       enum machine_mode mode = GET_MODE (x);
276       HOST_WIDE_INT offset = 0;
277 
278       switch (TREE_CODE (decl))
279 	{
280 	default:
281 	  decl = NULL;
282 	  break;
283 
284 	case VAR_DECL:
285 	  break;
286 
287 	case ARRAY_REF:
288 	case ARRAY_RANGE_REF:
289 	case COMPONENT_REF:
290 	case BIT_FIELD_REF:
291 	case REALPART_EXPR:
292 	case IMAGPART_EXPR:
293 	case VIEW_CONVERT_EXPR:
294 	  {
295 	    HOST_WIDE_INT bitsize, bitpos;
296 	    tree toffset;
297 	    int unsignedp = 0, volatilep = 0;
298 
299 	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
300 					&mode, &unsignedp, &volatilep, false);
301 	    if (bitsize != GET_MODE_BITSIZE (mode)
302 		|| (bitpos % BITS_PER_UNIT)
303 		|| (toffset && !host_integerp (toffset, 0)))
304 	      decl = NULL;
305 	    else
306 	      {
307 		offset += bitpos / BITS_PER_UNIT;
308 		if (toffset)
309 		  offset += TREE_INT_CST_LOW (toffset);
310 	      }
311 	    break;
312 	  }
313 	}
314 
315       if (decl
316 	  && mode == GET_MODE (x)
317 	  && TREE_CODE (decl) == VAR_DECL
318 	  && (TREE_STATIC (decl)
319 	      || DECL_THREAD_LOCAL_P (decl))
320 	  && DECL_RTL_SET_P (decl)
321 	  && MEM_P (DECL_RTL (decl)))
322 	{
323 	  rtx newx;
324 
325 	  offset += MEM_OFFSET (x);
326 
327 	  newx = DECL_RTL (decl);
328 
329 	  if (MEM_P (newx))
330 	    {
331 	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);
332 
333 	      /* Avoid creating a new MEM needlessly if we already had
334 		 the same address.  We do if there's no OFFSET and the
335 		 old address X is identical to NEWX, or if X is of the
336 		 form (plus NEWX OFFSET), or the NEWX is of the form
337 		 (plus Y (const_int Z)) and X is that with the offset
338 		 added: (plus Y (const_int Z+OFFSET)).  */
339 	      if (!((offset == 0
340 		     || (GET_CODE (o) == PLUS
341 			 && GET_CODE (XEXP (o, 1)) == CONST_INT
342 			 && (offset == INTVAL (XEXP (o, 1))
343 			     || (GET_CODE (n) == PLUS
344 				 && GET_CODE (XEXP (n, 1)) == CONST_INT
345 				 && (INTVAL (XEXP (n, 1)) + offset
346 				     == INTVAL (XEXP (o, 1)))
347 				 && (n = XEXP (n, 0))))
348 			 && (o = XEXP (o, 0))))
349 		    && rtx_equal_p (o, n)))
350 		x = adjust_address_nv (newx, mode, offset);
351 	    }
352 	  else if (GET_MODE (x) == GET_MODE (newx)
353 		   && offset == 0)
354 	    x = newx;
355 	}
356     }
357 
358   return x;
359 }
360 
361 /* Make a unary operation by first seeing if it folds and otherwise making
362    the specified operation.  */
363 
364 rtx
365 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
366 		    enum machine_mode op_mode)
367 {
368   rtx tem;
369 
370   /* If this simplifies, use it.  */
371   if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
372     return tem;
373 
374   return gen_rtx_fmt_e (code, mode, op);
375 }
376 
377 /* Likewise for ternary operations.  */
378 
379 rtx
380 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
381 		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
382 {
383   rtx tem;
384 
385   /* If this simplifies, use it.  */
386   if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
387 					      op0, op1, op2)))
388     return tem;
389 
390   return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
391 }
392 
393 /* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */
395 
396 rtx
397 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
398 			 enum machine_mode cmp_mode, rtx op0, rtx op1)
399 {
400   rtx tem;
401 
402   if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
403 						 op0, op1)))
404     return tem;
405 
406   return gen_rtx_fmt_ee (code, mode, op0, op1);
407 }
408 
409 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call it on each subexpression;
   if it returns non-NULL, replace that subexpression with the returned value
   and simplify the result.  */
413 
414 rtx
415 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
416 			 rtx (*fn) (rtx, const_rtx, void *), void *data)
417 {
418   enum rtx_code code = GET_CODE (x);
419   enum machine_mode mode = GET_MODE (x);
420   enum machine_mode op_mode;
421   const char *fmt;
422   rtx op0, op1, op2, newx, op;
423   rtvec vec, newvec;
424   int i, j;
425 
426   if (__builtin_expect (fn != NULL, 0))
427     {
428       newx = fn (x, old_rtx, data);
429       if (newx)
430 	return newx;
431     }
432   else if (rtx_equal_p (x, old_rtx))
433     return copy_rtx ((rtx) data);
434 
435   switch (GET_RTX_CLASS (code))
436     {
437     case RTX_UNARY:
438       op0 = XEXP (x, 0);
439       op_mode = GET_MODE (op0);
440       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
441       if (op0 == XEXP (x, 0))
442 	return x;
443       return simplify_gen_unary (code, mode, op0, op_mode);
444 
445     case RTX_BIN_ARITH:
446     case RTX_COMM_ARITH:
447       op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
448       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
449       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
450 	return x;
451       return simplify_gen_binary (code, mode, op0, op1);
452 
453     case RTX_COMPARE:
454     case RTX_COMM_COMPARE:
455       op0 = XEXP (x, 0);
456       op1 = XEXP (x, 1);
457       op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
458       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
459       op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
460       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
461 	return x;
462       return simplify_gen_relational (code, mode, op_mode, op0, op1);
463 
464     case RTX_TERNARY:
465     case RTX_BITFIELD_OPS:
466       op0 = XEXP (x, 0);
467       op_mode = GET_MODE (op0);
468       op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
469       op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
470       op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
471       if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
472 	return x;
473       if (op_mode == VOIDmode)
474 	op_mode = GET_MODE (op0);
475       return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
476 
477     case RTX_EXTRA:
478       if (code == SUBREG)
479 	{
480 	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
481 	  if (op0 == SUBREG_REG (x))
482 	    return x;
483 	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
484 				     GET_MODE (SUBREG_REG (x)),
485 				     SUBREG_BYTE (x));
486 	  return op0 ? op0 : x;
487 	}
488       break;
489 
490     case RTX_OBJ:
491       if (code == MEM)
492 	{
493 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
494 	  if (op0 == XEXP (x, 0))
495 	    return x;
496 	  return replace_equiv_address_nv (x, op0);
497 	}
498       else if (code == LO_SUM)
499 	{
500 	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
502 
503 	  /* (lo_sum (high x) x) -> x  */
504 	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
505 	    return op1;
506 
507 	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
508 	    return x;
509 	  return gen_rtx_LO_SUM (mode, op0, op1);
510 	}
511       break;
512 
513     default:
514       break;
515     }
516 
517   newx = x;
518   fmt = GET_RTX_FORMAT (code);
519   for (i = 0; fmt[i]; i++)
520     switch (fmt[i])
521       {
522       case 'E':
523 	vec = XVEC (x, i);
524 	newvec = XVEC (newx, i);
525 	for (j = 0; j < GET_NUM_ELEM (vec); j++)
526 	  {
527 	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
528 					  old_rtx, fn, data);
529 	    if (op != RTVEC_ELT (vec, j))
530 	      {
531 		if (newvec == vec)
532 		  {
533 		    newvec = shallow_copy_rtvec (vec);
534 		    if (x == newx)
535 		      newx = shallow_copy_rtx (x);
536 		    XVEC (newx, i) = newvec;
537 		  }
538 		RTVEC_ELT (newvec, j) = op;
539 	      }
540 	  }
541 	break;
542 
543       case 'e':
544 	if (XEXP (x, i))
545 	  {
546 	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
547 	    if (op != XEXP (x, i))
548 	      {
549 		if (x == newx)
550 		  newx = shallow_copy_rtx (x);
551 		XEXP (newx, i) = op;
552 	      }
553 	  }
554 	break;
555       }
556   return newx;
557 }
558 
559 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
560    resulting RTX.  Return a new RTX which is as simplified as possible.  */
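/* For example, replacing (reg A) with (const_int 8) in
   (plus:SI (reg A) (const_int 4)) yields (const_int 12), since the
   substituted expression is re-simplified on the way back up.  */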
561 
562 rtx
563 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
564 {
565   return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
566 }
567 
568 /* Try to simplify a unary operation CODE whose output mode is to be
569    MODE with input operand OP whose mode was originally OP_MODE.
570    Return zero if no simplification can be made.  */
571 rtx
572 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
573 			  rtx op, enum machine_mode op_mode)
574 {
575   rtx trueop, tem;
576 
577   trueop = avoid_constant_pool_reference (op);
578 
579   tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
580   if (tem)
581     return tem;
582 
583   return simplify_unary_operation_1 (code, mode, op);
584 }
585 
586 /* Perform some simplifications we can do even if the operands
587    aren't constant.  */
588 static rtx
589 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
590 {
591   enum rtx_code reversed;
592   rtx temp;
593 
594   switch (code)
595     {
596     case NOT:
597       /* (not (not X)) == X.  */
598       if (GET_CODE (op) == NOT)
599 	return XEXP (op, 0);
600 
601       /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
602 	 comparison is all ones.   */
603       if (COMPARISON_P (op)
604 	  && (mode == BImode || STORE_FLAG_VALUE == -1)
605 	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
606 	return simplify_gen_relational (reversed, mode, VOIDmode,
607 					XEXP (op, 0), XEXP (op, 1));
608 
609       /* (not (plus X -1)) can become (neg X).  */
610       if (GET_CODE (op) == PLUS
611 	  && XEXP (op, 1) == constm1_rtx)
612 	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
613 
614       /* Similarly, (not (neg X)) is (plus X -1).  */
615       if (GET_CODE (op) == NEG)
616 	return plus_constant (XEXP (op, 0), -1);
617 
618       /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
619       if (GET_CODE (op) == XOR
620 	  && CONST_INT_P (XEXP (op, 1))
621 	  && (temp = simplify_unary_operation (NOT, mode,
622 					       XEXP (op, 1), mode)) != 0)
623 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
624 
625       /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
626       if (GET_CODE (op) == PLUS
627 	  && CONST_INT_P (XEXP (op, 1))
628 	  && mode_signbit_p (mode, XEXP (op, 1))
629 	  && (temp = simplify_unary_operation (NOT, mode,
630 					       XEXP (op, 1), mode)) != 0)
631 	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
632 
633 
634       /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
635 	 operands other than 1, but that is not valid.  We could do a
636 	 similar simplification for (not (lshiftrt C X)) where C is
637 	 just the sign bit, but this doesn't seem common enough to
638 	 bother with.  */
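      /* For example, in SImode ~(1 << X) is a mask with only bit X clear;
	 rotating ~1, i.e. (const_int -2), left by X produces the same mask.  */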
639       if (GET_CODE (op) == ASHIFT
640 	  && XEXP (op, 0) == const1_rtx)
641 	{
642 	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
643 	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
644 	}
645 
646       /* (not (ashiftrt foo C)) where C is the number of bits in FOO
647 	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
648 	 so we can perform the above simplification.  */
649 
650       if (STORE_FLAG_VALUE == -1
651 	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
653 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
654 	return simplify_gen_relational (GE, mode, VOIDmode,
655 					XEXP (op, 0), const0_rtx);
656 
657 
658       if (GET_CODE (op) == SUBREG
659 	  && subreg_lowpart_p (op)
660 	  && (GET_MODE_SIZE (GET_MODE (op))
661 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
662 	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
663 	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
664 	{
665 	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
666 	  rtx x;
667 
668 	  x = gen_rtx_ROTATE (inner_mode,
669 			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
670 						  inner_mode),
671 			      XEXP (SUBREG_REG (op), 1));
672 	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
673 	}
674 
675       /* Apply De Morgan's laws to reduce number of patterns for machines
676 	 with negating logical insns (and-not, nand, etc.).  If result has
677 	 only one NOT, put it first, since that is how the patterns are
678 	 coded.  */
679 
680       if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
681 	{
682 	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
683 	  enum machine_mode op_mode;
684 
685 	  op_mode = GET_MODE (in1);
686 	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
687 
688 	  op_mode = GET_MODE (in2);
689 	  if (op_mode == VOIDmode)
690 	    op_mode = mode;
691 	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
692 
693 	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
694 	    {
695 	      rtx tem = in2;
696 	      in2 = in1; in1 = tem;
697 	    }
698 
699 	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
700 				 mode, in1, in2);
701 	}
702       break;
703 
704     case NEG:
705       /* (neg (neg X)) == X.  */
706       if (GET_CODE (op) == NEG)
707 	return XEXP (op, 0);
708 
709       /* (neg (plus X 1)) can become (not X).  */
710       if (GET_CODE (op) == PLUS
711 	  && XEXP (op, 1) == const1_rtx)
712 	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
713 
714       /* Similarly, (neg (not X)) is (plus X 1).  */
715       if (GET_CODE (op) == NOT)
716 	return plus_constant (XEXP (op, 0), 1);
717 
718       /* (neg (minus X Y)) can become (minus Y X).  This transformation
719 	 isn't safe for modes with signed zeros, since if X and Y are
720 	 both +0, (minus Y X) is the same as (minus X Y).  If the
721 	 rounding mode is towards +infinity (or -infinity) then the two
722 	 expressions will be rounded differently.  */
723       if (GET_CODE (op) == MINUS
724 	  && !HONOR_SIGNED_ZEROS (mode)
725 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
726 	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
727 
728       if (GET_CODE (op) == PLUS
729 	  && !HONOR_SIGNED_ZEROS (mode)
730 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
731 	{
732 	  /* (neg (plus A C)) is simplified to (minus -C A).  */
733 	  if (CONST_INT_P (XEXP (op, 1))
734 	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
735 	    {
736 	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
737 	      if (temp)
738 		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
739 	    }
740 
741 	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
742 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
743 	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
744 	}
745 
746       /* (neg (mult A B)) becomes (mult A (neg B)).
747 	 This works even for floating-point values.  */
748       if (GET_CODE (op) == MULT
749 	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
750 	{
751 	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
752 	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
753 	}
754 
755       /* NEG commutes with ASHIFT since it is multiplication.  Only do
756 	 this if we can then eliminate the NEG (e.g., if the operand
757 	 is a constant).  */
758       if (GET_CODE (op) == ASHIFT)
759 	{
760 	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
761 	  if (temp)
762 	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
763 	}
764 
765       /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
766 	 C is equal to the width of MODE minus 1.  */
767       if (GET_CODE (op) == ASHIFTRT
768 	  && CONST_INT_P (XEXP (op, 1))
769 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
770 	return simplify_gen_binary (LSHIFTRT, mode,
771 				    XEXP (op, 0), XEXP (op, 1));
772 
773       /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
774 	 C is equal to the width of MODE minus 1.  */
775       if (GET_CODE (op) == LSHIFTRT
776 	  && CONST_INT_P (XEXP (op, 1))
777 	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
778 	return simplify_gen_binary (ASHIFTRT, mode,
779 				    XEXP (op, 0), XEXP (op, 1));
780 
781       /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
782       if (GET_CODE (op) == XOR
783 	  && XEXP (op, 1) == const1_rtx
784 	  && nonzero_bits (XEXP (op, 0), mode) == 1)
785 	return plus_constant (XEXP (op, 0), -1);
786 
787       /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
788       /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
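      /* For instance, with STORE_FLAG_VALUE == 1,
	 (neg:SI (lt:SI X (const_int 0))) becomes
	 (ashiftrt:SI X (const_int 31)), which is -1 when X is negative
	 and 0 otherwise.  */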
789       if (GET_CODE (op) == LT
790 	  && XEXP (op, 1) == const0_rtx
791 	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
792 	{
793 	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
794 	  int isize = GET_MODE_PRECISION (inner);
795 	  if (STORE_FLAG_VALUE == 1)
796 	    {
797 	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
798 					  GEN_INT (isize - 1));
799 	      if (mode == inner)
800 		return temp;
801 	      if (GET_MODE_PRECISION (mode) > isize)
802 		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
803 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
804 	    }
805 	  else if (STORE_FLAG_VALUE == -1)
806 	    {
807 	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
808 					  GEN_INT (isize - 1));
809 	      if (mode == inner)
810 		return temp;
811 	      if (GET_MODE_PRECISION (mode) > isize)
812 		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
813 	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
814 	    }
815 	}
816       break;
817 
818     case TRUNCATE:
819       /* We can't handle truncation to a partial integer mode here
820          because we don't know the real bitsize of the partial
821          integer mode.  */
822       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
823         break;
824 
825       /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
826       if ((GET_CODE (op) == SIGN_EXTEND
827 	   || GET_CODE (op) == ZERO_EXTEND)
828 	  && GET_MODE (XEXP (op, 0)) == mode)
829 	return XEXP (op, 0);
830 
831       /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
832 	 (OP:SI foo:SI) if OP is NEG or ABS.  */
833       if ((GET_CODE (op) == ABS
834 	   || GET_CODE (op) == NEG)
835 	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
836 	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
837 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
838 	return simplify_gen_unary (GET_CODE (op), mode,
839 				   XEXP (XEXP (op, 0), 0), mode);
840 
841       /* (truncate:A (subreg:B (truncate:C X) 0)) is
842 	 (truncate:A X).  */
843       if (GET_CODE (op) == SUBREG
844 	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
845 	  && subreg_lowpart_p (op))
846 	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
847 				   GET_MODE (XEXP (SUBREG_REG (op), 0)));
848 
849       /* If we know that the value is already truncated, we can
850          replace the TRUNCATE with a SUBREG.  Note that this is also
851          valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition for
853          truncation.  But don't do this for an (LSHIFTRT (MULT ...))
854          since this will cause problems with the umulXi3_highpart
855          patterns.  */
856       if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
857 	   ? (num_sign_bit_copies (op, GET_MODE (op))
858 	      > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
859 				- GET_MODE_PRECISION (mode)))
860 	   : truncated_to_mode (mode, op))
861 	  && ! (GET_CODE (op) == LSHIFTRT
862 		&& GET_CODE (XEXP (op, 0)) == MULT))
863 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
864 
865       /* A truncate of a comparison can be replaced with a subreg if
866          STORE_FLAG_VALUE permits.  This is like the previous test,
867          but it works even if the comparison is done in a mode larger
868          than HOST_BITS_PER_WIDE_INT.  */
869       if (HWI_COMPUTABLE_MODE_P (mode)
870 	  && COMPARISON_P (op)
871 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
872 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
873       break;
874 
875     case FLOAT_TRUNCATE:
876       if (DECIMAL_FLOAT_MODE_P (mode))
877 	break;
878 
879       /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
880       if (GET_CODE (op) == FLOAT_EXTEND
881 	  && GET_MODE (XEXP (op, 0)) == mode)
882 	return XEXP (op, 0);
883 
884       /* (float_truncate:SF (float_truncate:DF foo:XF))
885          = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe and is done
	 only when -funsafe-math-optimizations is in effect.
887 
888          (float_truncate:SF (float_extend:XF foo:DF))
889          = (float_truncate:SF foo:DF).
890 
891          (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
893       if ((GET_CODE (op) == FLOAT_TRUNCATE
894 	   && flag_unsafe_math_optimizations)
895 	  || GET_CODE (op) == FLOAT_EXTEND)
896 	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
897 							    0)))
898 				   > GET_MODE_SIZE (mode)
899 				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
900 				   mode,
901 				   XEXP (op, 0), mode);
902 
903       /*  (float_truncate (float x)) is (float x)  */
904       if (GET_CODE (op) == FLOAT
905 	  && (flag_unsafe_math_optimizations
906 	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
907 		  && ((unsigned)significand_size (GET_MODE (op))
908 		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
909 			  - num_sign_bit_copies (XEXP (op, 0),
910 						 GET_MODE (XEXP (op, 0))))))))
911 	return simplify_gen_unary (FLOAT, mode,
912 				   XEXP (op, 0),
913 				   GET_MODE (XEXP (op, 0)));
914 
915       /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
916 	 (OP:SF foo:SF) if OP is NEG or ABS.  */
917       if ((GET_CODE (op) == ABS
918 	   || GET_CODE (op) == NEG)
919 	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
920 	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
921 	return simplify_gen_unary (GET_CODE (op), mode,
922 				   XEXP (XEXP (op, 0), 0), mode);
923 
924       /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
925 	 is (float_truncate:SF x).  */
926       if (GET_CODE (op) == SUBREG
927 	  && subreg_lowpart_p (op)
928 	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
929 	return SUBREG_REG (op);
930       break;
931 
932     case FLOAT_EXTEND:
933       if (DECIMAL_FLOAT_MODE_P (mode))
934 	break;
935 
936       /*  (float_extend (float_extend x)) is (float_extend x)
937 
	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.  */
941       if (GET_CODE (op) == FLOAT_EXTEND
942 	  || (GET_CODE (op) == FLOAT
943 	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
944 	      && ((unsigned)significand_size (GET_MODE (op))
945 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
946 		      - num_sign_bit_copies (XEXP (op, 0),
947 					     GET_MODE (XEXP (op, 0)))))))
948 	return simplify_gen_unary (GET_CODE (op), mode,
949 				   XEXP (op, 0),
950 				   GET_MODE (XEXP (op, 0)));
951 
952       break;
953 
954     case ABS:
955       /* (abs (neg <foo>)) -> (abs <foo>) */
956       if (GET_CODE (op) == NEG)
957 	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
958 				   GET_MODE (XEXP (op, 0)));
959 
960       /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
961          do nothing.  */
962       if (GET_MODE (op) == VOIDmode)
963 	break;
964 
965       /* If operand is something known to be positive, ignore the ABS.  */
966       if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
967 	  || val_signbit_known_clear_p (GET_MODE (op),
968 					nonzero_bits (op, GET_MODE (op))))
969 	return op;
970 
971       /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
972       if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
973 	return gen_rtx_NEG (mode, op);
974 
975       break;
976 
977     case FFS:
978       /* (ffs (*_extend <X>)) = (ffs <X>) */
979       if (GET_CODE (op) == SIGN_EXTEND
980 	  || GET_CODE (op) == ZERO_EXTEND)
981 	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
982 				   GET_MODE (XEXP (op, 0)));
983       break;
984 
985     case POPCOUNT:
986       switch (GET_CODE (op))
987 	{
988 	case BSWAP:
989 	case ZERO_EXTEND:
990 	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
991 	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
992 				     GET_MODE (XEXP (op, 0)));
993 
994 	case ROTATE:
995 	case ROTATERT:
996 	  /* Rotations don't affect popcount.  */
997 	  if (!side_effects_p (XEXP (op, 1)))
998 	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
999 				       GET_MODE (XEXP (op, 0)));
1000 	  break;
1001 
1002 	default:
1003 	  break;
1004 	}
1005       break;
1006 
1007     case PARITY:
1008       switch (GET_CODE (op))
1009 	{
1010 	case NOT:
1011 	case BSWAP:
1012 	case ZERO_EXTEND:
1013 	case SIGN_EXTEND:
1014 	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1015 				     GET_MODE (XEXP (op, 0)));
1016 
1017 	case ROTATE:
1018 	case ROTATERT:
1019 	  /* Rotations don't affect parity.  */
1020 	  if (!side_effects_p (XEXP (op, 1)))
1021 	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1022 				       GET_MODE (XEXP (op, 0)));
1023 	  break;
1024 
1025 	default:
1026 	  break;
1027 	}
1028       break;
1029 
1030     case BSWAP:
1031       /* (bswap (bswap x)) -> x.  */
1032       if (GET_CODE (op) == BSWAP)
1033 	return XEXP (op, 0);
1034       break;
1035 
1036     case FLOAT:
1037       /* (float (sign_extend <X>)) = (float <X>).  */
1038       if (GET_CODE (op) == SIGN_EXTEND)
1039 	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1040 				   GET_MODE (XEXP (op, 0)));
1041       break;
1042 
1043     case SIGN_EXTEND:
1044       /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1045 	 becomes just the MINUS if its mode is MODE.  This allows
1046 	 folding switch statements on machines using casesi (such as
1047 	 the VAX).  */
1048       if (GET_CODE (op) == TRUNCATE
1049 	  && GET_MODE (XEXP (op, 0)) == mode
1050 	  && GET_CODE (XEXP (op, 0)) == MINUS
1051 	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1052 	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1053 	return XEXP (op, 0);
1054 
1055       /* Extending a widening multiplication should be canonicalized to
1056 	 a wider widening multiplication.  */
1057       if (GET_CODE (op) == MULT)
1058 	{
1059 	  rtx lhs = XEXP (op, 0);
1060 	  rtx rhs = XEXP (op, 1);
1061 	  enum rtx_code lcode = GET_CODE (lhs);
1062 	  enum rtx_code rcode = GET_CODE (rhs);
1063 
1064 	  /* Widening multiplies usually extend both operands, but sometimes
1065 	     they use a shift to extract a portion of a register.  */
1066 	  if ((lcode == SIGN_EXTEND
1067 	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1068 	      && (rcode == SIGN_EXTEND
1069 		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1070 	    {
1071 	      enum machine_mode lmode = GET_MODE (lhs);
1072 	      enum machine_mode rmode = GET_MODE (rhs);
1073 	      int bits;
1074 
1075 	      if (lcode == ASHIFTRT)
1076 		/* Number of bits not shifted off the end.  */
1077 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1078 	      else /* lcode == SIGN_EXTEND */
1079 		/* Size of inner mode.  */
1080 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1081 
1082 	      if (rcode == ASHIFTRT)
1083 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1084 	      else /* rcode == SIGN_EXTEND */
1085 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1086 
	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
1089 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1090 		return simplify_gen_binary
1091 			 (MULT, mode,
1092 			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1093 			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1094 	    }
1095 	}
1096 
1097       /* Check for a sign extension of a subreg of a promoted
1098 	 variable, where the promotion is sign-extended, and the
1099 	 target mode is the same as the variable's promotion.  */
1100       if (GET_CODE (op) == SUBREG
1101 	  && SUBREG_PROMOTED_VAR_P (op)
1102 	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1103 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1104 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
1105 
1106       /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1107 	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1108       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1109 	{
1110 	  gcc_assert (GET_MODE_BITSIZE (mode)
1111 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1112 	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1113 				     GET_MODE (XEXP (op, 0)));
1114 	}
1115 
1116       /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is a mode O with
1118 	 GET_MODE_BITSIZE (N) - I bits.
1119 	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1120 	 is similarly (zero_extend:M (subreg:O <X>)).  */
1121       if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1122 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1123 	  && CONST_INT_P (XEXP (op, 1))
1124 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1125 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1126 	{
1127 	  enum machine_mode tmode
1128 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1129 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1130 	  gcc_assert (GET_MODE_BITSIZE (mode)
1131 		      > GET_MODE_BITSIZE (GET_MODE (op)));
1132 	  if (tmode != BLKmode)
1133 	    {
1134 	      rtx inner =
1135 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1136 	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1137 					 ? SIGN_EXTEND : ZERO_EXTEND,
1138 					 mode, inner, tmode);
1139 	    }
1140 	}
1141 
1142 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
1144 	 we can do this only if the target does not support different pointer
1145 	 or address modes depending on the address space.  */
1146       if (target_default_pointer_address_modes_p ()
1147 	  && ! POINTERS_EXTEND_UNSIGNED
1148 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1149 	  && (CONSTANT_P (op)
1150 	      || (GET_CODE (op) == SUBREG
1151 		  && REG_P (SUBREG_REG (op))
1152 		  && REG_POINTER (SUBREG_REG (op))
1153 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1154 	return convert_memory_address (Pmode, op);
1155 #endif
1156       break;
1157 
1158     case ZERO_EXTEND:
1159       /* Check for a zero extension of a subreg of a promoted
1160 	 variable, where the promotion is zero-extended, and the
1161 	 target mode is the same as the variable's promotion.  */
1162       if (GET_CODE (op) == SUBREG
1163 	  && SUBREG_PROMOTED_VAR_P (op)
1164 	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1165 	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
1166 	return rtl_hooks.gen_lowpart_no_emit (mode, op);
1167 
1168       /* Extending a widening multiplication should be canonicalized to
1169 	 a wider widening multiplication.  */
1170       if (GET_CODE (op) == MULT)
1171 	{
1172 	  rtx lhs = XEXP (op, 0);
1173 	  rtx rhs = XEXP (op, 1);
1174 	  enum rtx_code lcode = GET_CODE (lhs);
1175 	  enum rtx_code rcode = GET_CODE (rhs);
1176 
1177 	  /* Widening multiplies usually extend both operands, but sometimes
1178 	     they use a shift to extract a portion of a register.  */
1179 	  if ((lcode == ZERO_EXTEND
1180 	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1181 	      && (rcode == ZERO_EXTEND
1182 		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1183 	    {
1184 	      enum machine_mode lmode = GET_MODE (lhs);
1185 	      enum machine_mode rmode = GET_MODE (rhs);
1186 	      int bits;
1187 
1188 	      if (lcode == LSHIFTRT)
1189 		/* Number of bits not shifted off the end.  */
1190 		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
1191 	      else /* lcode == ZERO_EXTEND */
1192 		/* Size of inner mode.  */
1193 		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
1194 
1195 	      if (rcode == LSHIFTRT)
1196 		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
1197 	      else /* rcode == ZERO_EXTEND */
1198 		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
1199 
	      /* We can only widen multiplies if the result is mathematically
		 equivalent, i.e. if overflow was impossible.  */
1202 	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
1203 		return simplify_gen_binary
1204 			 (MULT, mode,
1205 			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1206 			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1207 	    }
1208 	}
1209 
1210       /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
1211       if (GET_CODE (op) == ZERO_EXTEND)
1212 	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1213 				   GET_MODE (XEXP (op, 0)));
1214 
1215       /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is a mode O with
1217 	 GET_MODE_BITSIZE (N) - I bits.  */
1218       if (GET_CODE (op) == LSHIFTRT
1219 	  && GET_CODE (XEXP (op, 0)) == ASHIFT
1220 	  && CONST_INT_P (XEXP (op, 1))
1221 	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1222 	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
1223 	{
1224 	  enum machine_mode tmode
1225 	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
1226 			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
1227 	  if (tmode != BLKmode)
1228 	    {
1229 	      rtx inner =
1230 		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1231 	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
1232 	    }
1233 	}
1234 
1235 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
1237 	 we can do this only if the target does not support different pointer
1238 	 or address modes depending on the address space.  */
1239       if (target_default_pointer_address_modes_p ()
1240 	  && POINTERS_EXTEND_UNSIGNED > 0
1241 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1242 	  && (CONSTANT_P (op)
1243 	      || (GET_CODE (op) == SUBREG
1244 		  && REG_P (SUBREG_REG (op))
1245 		  && REG_POINTER (SUBREG_REG (op))
1246 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1247 	return convert_memory_address (Pmode, op);
1248 #endif
1249       break;
1250 
1251     default:
1252       break;
1253     }
1254 
1255   return 0;
1256 }
1257 
1258 /* Try to compute the value of a unary operation CODE whose output mode is to
1259    be MODE with input operand OP whose mode was originally OP_MODE.
1260    Return zero if the value cannot be computed.  */
1261 rtx
1262 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1263 				rtx op, enum machine_mode op_mode)
1264 {
1265   unsigned int width = GET_MODE_PRECISION (mode);
1266   unsigned int op_width = GET_MODE_PRECISION (op_mode);
1267 
1268   if (code == VEC_DUPLICATE)
1269     {
1270       gcc_assert (VECTOR_MODE_P (mode));
1271       if (GET_MODE (op) != VOIDmode)
1272       {
1273 	if (!VECTOR_MODE_P (GET_MODE (op)))
1274 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1275 	else
1276 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1277 						(GET_MODE (op)));
1278       }
1279       if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
1280 	  || GET_CODE (op) == CONST_VECTOR)
1281 	{
1282           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1283           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1284 	  rtvec v = rtvec_alloc (n_elts);
1285 	  unsigned int i;
1286 
1287 	  if (GET_CODE (op) != CONST_VECTOR)
1288 	    for (i = 0; i < n_elts; i++)
1289 	      RTVEC_ELT (v, i) = op;
1290 	  else
1291 	    {
1292 	      enum machine_mode inmode = GET_MODE (op);
1293               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1294               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1295 
1296 	      gcc_assert (in_n_elts < n_elts);
1297 	      gcc_assert ((n_elts % in_n_elts) == 0);
1298 	      for (i = 0; i < n_elts; i++)
1299 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1300 	    }
1301 	  return gen_rtx_CONST_VECTOR (mode, v);
1302 	}
1303     }
1304 
1305   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1306     {
1307       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1308       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1309       enum machine_mode opmode = GET_MODE (op);
1310       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1311       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1312       rtvec v = rtvec_alloc (n_elts);
1313       unsigned int i;
1314 
1315       gcc_assert (op_n_elts == n_elts);
1316       for (i = 0; i < n_elts; i++)
1317 	{
1318 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1319 					    CONST_VECTOR_ELT (op, i),
1320 					    GET_MODE_INNER (opmode));
1321 	  if (!x)
1322 	    return 0;
1323 	  RTVEC_ELT (v, i) = x;
1324 	}
1325       return gen_rtx_CONST_VECTOR (mode, v);
1326     }
1327 
1328   /* The order of these tests is critical so that, for example, we don't
1329      check the wrong mode (input vs. output) for a conversion operation,
1330      such as FIX.  At some point, this should be simplified.  */
1331 
1332   if (code == FLOAT && GET_MODE (op) == VOIDmode
1333       && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
1334     {
1335       HOST_WIDE_INT hv, lv;
1336       REAL_VALUE_TYPE d;
1337 
1338       if (CONST_INT_P (op))
1339 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1340       else
1341 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1342 
1343       REAL_VALUE_FROM_INT (d, lv, hv, mode);
1344       d = real_value_truncate (mode, d);
1345       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1346     }
1347   else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
1348 	   && (GET_CODE (op) == CONST_DOUBLE
1349 	       || CONST_INT_P (op)))
1350     {
1351       HOST_WIDE_INT hv, lv;
1352       REAL_VALUE_TYPE d;
1353 
1354       if (CONST_INT_P (op))
1355 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1356       else
1357 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1358 
1359       if (op_mode == VOIDmode)
1360 	{
1361 	  /* We don't know how to interpret negative-looking numbers in
1362 	     this case, so don't try to fold those.  */
1363 	  if (hv < 0)
1364 	    return 0;
1365 	}
1366       else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
1367 	;
1368       else
1369 	hv = 0, lv &= GET_MODE_MASK (op_mode);
1370 
1371       REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1372       d = real_value_truncate (mode, d);
1373       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1374     }
1375 
1376   if (CONST_INT_P (op)
1377       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1378     {
1379       HOST_WIDE_INT arg0 = INTVAL (op);
1380       HOST_WIDE_INT val;
1381 
1382       switch (code)
1383 	{
1384 	case NOT:
1385 	  val = ~ arg0;
1386 	  break;
1387 
1388 	case NEG:
1389 	  val = - arg0;
1390 	  break;
1391 
1392 	case ABS:
1393 	  val = (arg0 >= 0 ? arg0 : - arg0);
1394 	  break;
1395 
1396 	case FFS:
1397 	  arg0 &= GET_MODE_MASK (mode);
1398 	  val = ffs_hwi (arg0);
1399 	  break;
1400 
1401 	case CLZ:
1402 	  arg0 &= GET_MODE_MASK (mode);
1403 	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1404 	    ;
1405 	  else
1406 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1407 	  break;
1408 
1409 	case CLRSB:
1410 	  arg0 &= GET_MODE_MASK (mode);
1411 	  if (arg0 == 0)
1412 	    val = GET_MODE_PRECISION (mode) - 1;
1413 	  else if (arg0 >= 0)
1414 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1415 	  else if (arg0 < 0)
1416 	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1417 	  break;
1418 
1419 	case CTZ:
1420 	  arg0 &= GET_MODE_MASK (mode);
1421 	  if (arg0 == 0)
1422 	    {
1423 	      /* Even if the value at zero is undefined, we have to come
1424 		 up with some replacement.  Seems good enough.  */
1425 	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1426 		val = GET_MODE_PRECISION (mode);
1427 	    }
1428 	  else
1429 	    val = ctz_hwi (arg0);
1430 	  break;
1431 
1432 	case POPCOUNT:
1433 	  arg0 &= GET_MODE_MASK (mode);
1434 	  val = 0;
1435 	  while (arg0)
1436 	    val++, arg0 &= arg0 - 1;
1437 	  break;
1438 
1439 	case PARITY:
1440 	  arg0 &= GET_MODE_MASK (mode);
1441 	  val = 0;
1442 	  while (arg0)
1443 	    val++, arg0 &= arg0 - 1;
1444 	  val &= 1;
1445 	  break;
1446 
1447 	case BSWAP:
1448 	  {
1449 	    unsigned int s;
1450 
1451 	    val = 0;
1452 	    for (s = 0; s < width; s += 8)
1453 	      {
1454 		unsigned int d = width - s - 8;
1455 		unsigned HOST_WIDE_INT byte;
1456 		byte = (arg0 >> s) & 0xff;
1457 		val |= byte << d;
1458 	      }
1459 	  }
1460 	  break;
1461 
1462 	case TRUNCATE:
1463 	  val = arg0;
1464 	  break;
1465 
1466 	case ZERO_EXTEND:
1467 	  /* When zero-extending a CONST_INT, we need to know its
1468              original mode.  */
1469 	  gcc_assert (op_mode != VOIDmode);
1470 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1471 	    {
1472 	      /* If we were really extending the mode,
1473 		 we would have to distinguish between zero-extension
1474 		 and sign-extension.  */
1475 	      gcc_assert (width == op_width);
1476 	      val = arg0;
1477 	    }
1478 	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1479 	    val = arg0 & GET_MODE_MASK (op_mode);
1480 	  else
1481 	    return 0;
1482 	  break;
1483 
1484 	case SIGN_EXTEND:
1485 	  if (op_mode == VOIDmode)
1486 	    op_mode = mode;
1487 	  op_width = GET_MODE_PRECISION (op_mode);
1488 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1489 	    {
1490 	      /* If we were really extending the mode,
1491 		 we would have to distinguish between zero-extension
1492 		 and sign-extension.  */
1493 	      gcc_assert (width == op_width);
1494 	      val = arg0;
1495 	    }
1496 	  else if (op_width < HOST_BITS_PER_WIDE_INT)
1497 	    {
1498 	      val = arg0 & GET_MODE_MASK (op_mode);
1499 	      if (val_signbit_known_set_p (op_mode, val))
1500 		val |= ~GET_MODE_MASK (op_mode);
1501 	    }
1502 	  else
1503 	    return 0;
1504 	  break;
1505 
1506 	case SQRT:
1507 	case FLOAT_EXTEND:
1508 	case FLOAT_TRUNCATE:
1509 	case SS_TRUNCATE:
1510 	case US_TRUNCATE:
1511 	case SS_NEG:
1512 	case US_NEG:
1513 	case SS_ABS:
1514 	  return 0;
1515 
1516 	default:
1517 	  gcc_unreachable ();
1518 	}
1519 
1520       return gen_int_mode (val, mode);
1521     }
1522 
1523   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1524      for a DImode operation on a CONST_INT.  */
1525   else if (GET_MODE (op) == VOIDmode
1526 	   && width <= HOST_BITS_PER_WIDE_INT * 2
1527 	   && (GET_CODE (op) == CONST_DOUBLE
1528 	       || CONST_INT_P (op)))
1529     {
1530       unsigned HOST_WIDE_INT l1, lv;
1531       HOST_WIDE_INT h1, hv;
1532 
1533       if (GET_CODE (op) == CONST_DOUBLE)
1534 	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1535       else
1536 	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1537 
1538       switch (code)
1539 	{
1540 	case NOT:
1541 	  lv = ~ l1;
1542 	  hv = ~ h1;
1543 	  break;
1544 
1545 	case NEG:
1546 	  neg_double (l1, h1, &lv, &hv);
1547 	  break;
1548 
1549 	case ABS:
1550 	  if (h1 < 0)
1551 	    neg_double (l1, h1, &lv, &hv);
1552 	  else
1553 	    lv = l1, hv = h1;
1554 	  break;
1555 
1556 	case FFS:
1557 	  hv = 0;
1558 	  if (l1 != 0)
1559 	    lv = ffs_hwi (l1);
1560 	  else if (h1 != 0)
1561 	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
1562 	  else
1563 	    lv = 0;
1564 	  break;
1565 
1566 	case CLZ:
1567 	  hv = 0;
1568 	  if (h1 != 0)
1569 	    lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
1570 	      - HOST_BITS_PER_WIDE_INT;
1571 	  else if (l1 != 0)
1572 	    lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
1573 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1574 	    lv = GET_MODE_PRECISION (mode);
1575 	  break;
1576 
1577 	case CTZ:
1578 	  hv = 0;
1579 	  if (l1 != 0)
1580 	    lv = ctz_hwi (l1);
1581 	  else if (h1 != 0)
1582 	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
1583 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1584 	    lv = GET_MODE_PRECISION (mode);
1585 	  break;
1586 
1587 	case POPCOUNT:
1588 	  hv = 0;
1589 	  lv = 0;
1590 	  while (l1)
1591 	    lv++, l1 &= l1 - 1;
1592 	  while (h1)
1593 	    lv++, h1 &= h1 - 1;
1594 	  break;
1595 
1596 	case PARITY:
1597 	  hv = 0;
1598 	  lv = 0;
1599 	  while (l1)
1600 	    lv++, l1 &= l1 - 1;
1601 	  while (h1)
1602 	    lv++, h1 &= h1 - 1;
1603 	  lv &= 1;
1604 	  break;
1605 
1606 	case BSWAP:
1607 	  {
1608 	    unsigned int s;
1609 
1610 	    hv = 0;
1611 	    lv = 0;
1612 	    for (s = 0; s < width; s += 8)
1613 	      {
1614 		unsigned int d = width - s - 8;
1615 		unsigned HOST_WIDE_INT byte;
1616 
1617 		if (s < HOST_BITS_PER_WIDE_INT)
1618 		  byte = (l1 >> s) & 0xff;
1619 		else
1620 		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1621 
1622 		if (d < HOST_BITS_PER_WIDE_INT)
1623 		  lv |= byte << d;
1624 		else
1625 		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
1626 	      }
1627 	  }
1628 	  break;
1629 
1630 	case TRUNCATE:
1631 	  /* This is just a change-of-mode, so do nothing.  */
1632 	  lv = l1, hv = h1;
1633 	  break;
1634 
1635 	case ZERO_EXTEND:
1636 	  gcc_assert (op_mode != VOIDmode);
1637 
1638 	  if (op_width > HOST_BITS_PER_WIDE_INT)
1639 	    return 0;
1640 
1641 	  hv = 0;
1642 	  lv = l1 & GET_MODE_MASK (op_mode);
1643 	  break;
1644 
1645 	case SIGN_EXTEND:
1646 	  if (op_mode == VOIDmode
1647 	      || op_width > HOST_BITS_PER_WIDE_INT)
1648 	    return 0;
1649 	  else
1650 	    {
1651 	      lv = l1 & GET_MODE_MASK (op_mode);
1652 	      if (val_signbit_known_set_p (op_mode, lv))
1653 		lv |= ~GET_MODE_MASK (op_mode);
1654 
1655 	      hv = HWI_SIGN_EXTEND (lv);
1656 	    }
1657 	  break;
1658 
1659 	case SQRT:
1660 	  return 0;
1661 
1662 	default:
1663 	  return 0;
1664 	}
1665 
1666       return immed_double_const (lv, hv, mode);
1667     }
1668 
1669   else if (GET_CODE (op) == CONST_DOUBLE
1670 	   && SCALAR_FLOAT_MODE_P (mode)
1671 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1672     {
1673       REAL_VALUE_TYPE d, t;
1674       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1675 
1676       switch (code)
1677 	{
1678 	case SQRT:
1679 	  if (HONOR_SNANS (mode) && real_isnan (&d))
1680 	    return 0;
1681 	  real_sqrt (&t, mode, &d);
1682 	  d = t;
1683 	  break;
1684 	case ABS:
1685 	  d = real_value_abs (&d);
1686 	  break;
1687 	case NEG:
1688 	  d = real_value_negate (&d);
1689 	  break;
1690 	case FLOAT_TRUNCATE:
1691 	  d = real_value_truncate (mode, d);
1692 	  break;
1693 	case FLOAT_EXTEND:
1694 	  /* This only changes the mode, unless the mode class changes,
1695 	     in which case the value must be converted.  */
1696 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1697 	    real_convert (&d, mode, &d);
1698 	  break;
1699 	case FIX:
1700 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1701 	  break;
1702 	case NOT:
1703 	  {
1704 	    long tmp[4];
1705 	    int i;
1706 
1707 	    real_to_target (tmp, &d, GET_MODE (op));
1708 	    for (i = 0; i < 4; i++)
1709 	      tmp[i] = ~tmp[i];
1710 	    real_from_target (&d, tmp, mode);
1711 	    break;
1712 	  }
1713 	default:
1714 	  gcc_unreachable ();
1715 	}
1716       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1717     }
1718 
1719   else if (GET_CODE (op) == CONST_DOUBLE
1720 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1721 	   && GET_MODE_CLASS (mode) == MODE_INT
1722 	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1723     {
1724       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1725 	 operators are intentionally left unspecified (to ease implementation
1726 	 by target backends), for consistency, this routine implements the
1727 	 same semantics for constant folding as used by the middle-end.  */
1728 
1729       /* This was formerly used only for non-IEEE float.
1730 	 eggert@twinsun.com says it is safe for IEEE also.  */
1731       HOST_WIDE_INT xh, xl, th, tl;
1732       REAL_VALUE_TYPE x, t;
1733       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1734       switch (code)
1735 	{
1736 	case FIX:
1737 	  if (REAL_VALUE_ISNAN (x))
1738 	    return const0_rtx;
1739 
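	  /* Out-of-range values saturate to the nearest representable
	     bound; e.g. for SImode anything above 0x7fffffff folds to
	     0x7fffffff.  */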
1740 	  /* Test against the signed upper bound.  */
1741 	  if (width > HOST_BITS_PER_WIDE_INT)
1742 	    {
1743 	      th = ((unsigned HOST_WIDE_INT) 1
1744 		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1745 	      tl = -1;
1746 	    }
1747 	  else
1748 	    {
1749 	      th = 0;
1750 	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1751 	    }
1752 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1753 	  if (REAL_VALUES_LESS (t, x))
1754 	    {
1755 	      xh = th;
1756 	      xl = tl;
1757 	      break;
1758 	    }
1759 
1760 	  /* Test against the signed lower bound.  */
1761 	  if (width > HOST_BITS_PER_WIDE_INT)
1762 	    {
1763 	      th = (unsigned HOST_WIDE_INT) (-1)
1764 		   << (width - HOST_BITS_PER_WIDE_INT - 1);
1765 	      tl = 0;
1766 	    }
1767 	  else
1768 	    {
1769 	      th = -1;
1770 	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
1771 	    }
1772 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1773 	  if (REAL_VALUES_LESS (x, t))
1774 	    {
1775 	      xh = th;
1776 	      xl = tl;
1777 	      break;
1778 	    }
1779 	  REAL_VALUE_TO_INT (&xl, &xh, x);
1780 	  break;
1781 
1782 	case UNSIGNED_FIX:
1783 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1784 	    return const0_rtx;
1785 
1786 	  /* Test against the unsigned upper bound.  */
1787 	  if (width == 2*HOST_BITS_PER_WIDE_INT)
1788 	    {
1789 	      th = -1;
1790 	      tl = -1;
1791 	    }
1792 	  else if (width >= HOST_BITS_PER_WIDE_INT)
1793 	    {
1794 	      th = ((unsigned HOST_WIDE_INT) 1
1795 		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1796 	      tl = -1;
1797 	    }
1798 	  else
1799 	    {
1800 	      th = 0;
1801 	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1802 	    }
1803 	  real_from_integer (&t, VOIDmode, tl, th, 1);
1804 	  if (REAL_VALUES_LESS (t, x))
1805 	    {
1806 	      xh = th;
1807 	      xl = tl;
1808 	      break;
1809 	    }
1810 
1811 	  REAL_VALUE_TO_INT (&xl, &xh, x);
1812 	  break;
1813 
1814 	default:
1815 	  gcc_unreachable ();
1816 	}
1817       return immed_double_const (xl, xh, mode);
1818     }
1819 
1820   return NULL_RTX;
1821 }
1822 
1823 /* Subroutine of simplify_binary_operation to simplify a commutative,
1824    associative binary operation CODE with result mode MODE, operating
1825    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1826    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1827    canonicalization is possible.  */
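/* For example, (plus (plus A B) (plus C D)) is first linearized to
   (plus (plus (plus A B) C) D); a constant operand is then moved to
   the second operand of the outermost operation by the
   canonicalizations below.  */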
1828 
1829 static rtx
1830 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1831 				rtx op0, rtx op1)
1832 {
1833   rtx tem;
1834 
1835   /* Linearize the operator to the left.  */
1836   if (GET_CODE (op1) == code)
1837     {
1838       /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)".  */
1839       if (GET_CODE (op0) == code)
1840 	{
1841 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1842 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1843 	}
1844 
1845       /* "a op (b op c)" becomes "(b op c) op a".  */
1846       if (! swap_commutative_operands_p (op1, op0))
1847 	return simplify_gen_binary (code, mode, op1, op0);
1848 
1849       tem = op0;
1850       op0 = op1;
1851       op1 = tem;
1852     }
1853 
1854   if (GET_CODE (op0) == code)
1855     {
1856       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1857       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1858 	{
1859 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1860 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1861 	}
1862 
1863       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1864       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1865       if (tem != 0)
1866         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1867 
1868       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1869       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1870       if (tem != 0)
1871         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1872     }
1873 
1874   return 0;
1875 }
1876 
1877 
1878 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1879    and OP1.  Return 0 if no simplification is possible.
1880 
1881    Don't use this for relational operations such as EQ or LT.
1882    Use simplify_relational_operation instead.  */
1883 rtx
1884 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1885 			   rtx op0, rtx op1)
1886 {
1887   rtx trueop0, trueop1;
1888   rtx tem;
1889 
1890   /* Relational operations don't work here.  We must know the mode
1891      of the operands in order to do the comparison correctly.
1892      Assuming a full word can give incorrect results.
1893      Consider comparing 128 with -128 in QImode.  */
1894   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1895   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1896 
1897   /* Make sure the constant is second.  */
1898   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1899       && swap_commutative_operands_p (op0, op1))
1900     {
1901       tem = op0, op0 = op1, op1 = tem;
1902     }
1903 
1904   trueop0 = avoid_constant_pool_reference (op0);
1905   trueop1 = avoid_constant_pool_reference (op1);
1906 
1907   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1908   if (tem)
1909     return tem;
1910   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1911 }
1912 
1913 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
1914    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
1915    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1916    actual constants.  */
1917 
1918 static rtx
1919 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1920 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1921 {
1922   rtx tem, reversed, opleft, opright;
1923   HOST_WIDE_INT val;
1924   unsigned int width = GET_MODE_PRECISION (mode);
1925 
1926   /* Even if we can't compute a constant result,
1927      there are some cases worth simplifying.  */
1928 
1929   switch (code)
1930     {
1931     case PLUS:
1932       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
1933 	 when x is NaN, infinite, or finite and nonzero.  They aren't
1934 	 when x is -0 and the rounding mode is not towards -infinity,
1935 	 since (-0) + 0 is then 0.  */
1936       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1937 	return op0;
1938 
1939       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1940 	 transformations are safe even for IEEE.  */
1941       if (GET_CODE (op0) == NEG)
1942 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1943       else if (GET_CODE (op1) == NEG)
1944 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1945 
1946       /* (~a) + 1 -> -a */
1947       if (INTEGRAL_MODE_P (mode)
1948 	  && GET_CODE (op0) == NOT
1949 	  && trueop1 == const1_rtx)
1950 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1951 
1952       /* Handle both-operands-constant cases.  We can only add
1953 	 CONST_INTs to constants since the sum of relocatable symbols
1954 	 can't be handled by most assemblers.  Don't add CONST_INT
1955 	 to CONST_INT since overflow won't be computed properly if wider
1956 	 than HOST_BITS_PER_WIDE_INT.  */
1957 
1958       if ((GET_CODE (op0) == CONST
1959 	   || GET_CODE (op0) == SYMBOL_REF
1960 	   || GET_CODE (op0) == LABEL_REF)
1961 	  && CONST_INT_P (op1))
1962 	return plus_constant (op0, INTVAL (op1));
1963       else if ((GET_CODE (op1) == CONST
1964 		|| GET_CODE (op1) == SYMBOL_REF
1965 		|| GET_CODE (op1) == LABEL_REF)
1966 	       && CONST_INT_P (op0))
1967 	return plus_constant (op1, INTVAL (op0));
1968 
1969       /* See if this is something like X * C - X or vice versa or
1970 	 if the multiplication is written as a shift.  If so, we can
1971 	 distribute and make a new multiply, shift, or maybe just
1972 	 have X (if C is 2 in the example above).  But don't make
1973 	 something more expensive than we had before.  */
1974 
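      /* For instance, (plus (mult X 3) X) has coefficients 3 and 1 and
	 becomes (mult X 4), and (plus (ashift X 2) (neg X)) has
	 coefficients 4 and -1 and becomes (mult X 3), provided the
	 result is not more expensive than the original.  */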
1975       if (SCALAR_INT_MODE_P (mode))
1976 	{
1977 	  double_int coeff0, coeff1;
1978 	  rtx lhs = op0, rhs = op1;
1979 
1980 	  coeff0 = double_int_one;
1981 	  coeff1 = double_int_one;
1982 
1983 	  if (GET_CODE (lhs) == NEG)
1984 	    {
1985 	      coeff0 = double_int_minus_one;
1986 	      lhs = XEXP (lhs, 0);
1987 	    }
1988 	  else if (GET_CODE (lhs) == MULT
1989 		   && CONST_INT_P (XEXP (lhs, 1)))
1990 	    {
1991 	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
1992 	      lhs = XEXP (lhs, 0);
1993 	    }
1994 	  else if (GET_CODE (lhs) == ASHIFT
1995 		   && CONST_INT_P (XEXP (lhs, 1))
1996                    && INTVAL (XEXP (lhs, 1)) >= 0
1997 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1998 	    {
1999 	      coeff0 = double_int_setbit (double_int_zero,
2000 					  INTVAL (XEXP (lhs, 1)));
2001 	      lhs = XEXP (lhs, 0);
2002 	    }
2003 
2004 	  if (GET_CODE (rhs) == NEG)
2005 	    {
2006 	      coeff1 = double_int_minus_one;
2007 	      rhs = XEXP (rhs, 0);
2008 	    }
2009 	  else if (GET_CODE (rhs) == MULT
2010 		   && CONST_INT_P (XEXP (rhs, 1)))
2011 	    {
2012 	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
2013 	      rhs = XEXP (rhs, 0);
2014 	    }
2015 	  else if (GET_CODE (rhs) == ASHIFT
2016 		   && CONST_INT_P (XEXP (rhs, 1))
2017 		   && INTVAL (XEXP (rhs, 1)) >= 0
2018 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2019 	    {
2020 	      coeff1 = double_int_setbit (double_int_zero,
2021 					  INTVAL (XEXP (rhs, 1)));
2022 	      rhs = XEXP (rhs, 0);
2023 	    }
2024 
2025 	  if (rtx_equal_p (lhs, rhs))
2026 	    {
2027 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2028 	      rtx coeff;
2029 	      double_int val;
2030 	      bool speed = optimize_function_for_speed_p (cfun);
2031 
2032 	      val = double_int_add (coeff0, coeff1);
2033 	      coeff = immed_double_int_const (val, mode);
2034 
2035 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2036 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2037 		? tem : 0;
2038 	    }
2039 	}
2040 
2041       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2042       if ((CONST_INT_P (op1)
2043 	   || GET_CODE (op1) == CONST_DOUBLE)
2044 	  && GET_CODE (op0) == XOR
2045 	  && (CONST_INT_P (XEXP (op0, 1))
2046 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2047 	  && mode_signbit_p (mode, op1))
2048 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2049 				    simplify_gen_binary (XOR, mode, op1,
2050 							 XEXP (op0, 1)));
2051 
2052       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2053       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2054 	  && GET_CODE (op0) == MULT
2055 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2056 	{
2057 	  rtx in1, in2;
2058 
2059 	  in1 = XEXP (XEXP (op0, 0), 0);
2060 	  in2 = XEXP (op0, 1);
2061 	  return simplify_gen_binary (MINUS, mode, op1,
2062 				      simplify_gen_binary (MULT, mode,
2063 							   in1, in2));
2064 	}
2065 
2066       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2067 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2068 	 is 1.  */
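      /* E.g. with STORE_FLAG_VALUE == 1, (plus (eq A B) (const_int -1))
	 becomes (neg (ne A B)).  */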
2069       if (COMPARISON_P (op0)
2070 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2071 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2072 	  && (reversed = reversed_comparison (op0, mode)))
2073 	return
2074 	  simplify_gen_unary (NEG, mode, reversed, mode);
2075 
2076       /* If one of the operands is a PLUS or a MINUS, see if we can
2077 	 simplify this by the associative law.
2078 	 Don't use the associative law for floating point.
2079 	 The inaccuracy makes it nonassociative,
2080 	 and subtle programs can break if operations are associated.  */
2081 
2082       if (INTEGRAL_MODE_P (mode)
2083 	  && (plus_minus_operand_p (op0)
2084 	      || plus_minus_operand_p (op1))
2085 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2086 	return tem;
2087 
2088       /* Reassociate floating point addition only when the user
2089 	 specifies associative math operations.  */
2090       if (FLOAT_MODE_P (mode)
2091 	  && flag_associative_math)
2092 	{
2093 	  tem = simplify_associative_operation (code, mode, op0, op1);
2094 	  if (tem)
2095 	    return tem;
2096 	}
2097       break;
2098 
2099     case COMPARE:
2100       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2101       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2102 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2103 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2104 	{
2105 	  rtx xop00 = XEXP (op0, 0);
2106 	  rtx xop10 = XEXP (op1, 0);
2107 
2108 #ifdef HAVE_cc0
2109 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2110 #else
2111 	    if (REG_P (xop00) && REG_P (xop10)
2112 		&& GET_MODE (xop00) == GET_MODE (xop10)
2113 		&& REGNO (xop00) == REGNO (xop10)
2114 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2115 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2116 #endif
2117 	      return xop00;
2118 	}
2119       break;
2120 
2121     case MINUS:
2122       /* We can't assume x-x is 0 even with non-IEEE floating point,
2123 	 but since it is zero except in very strange circumstances, we
2124 	 will treat it as zero with -ffinite-math-only.  */
2125       if (rtx_equal_p (trueop0, trueop1)
2126 	  && ! side_effects_p (op0)
2127 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2128 	return CONST0_RTX (mode);
2129 
2130       /* Change subtraction from zero into negation.  (0 - x) is the
2131 	 same as -x when x is NaN, infinite, or finite and nonzero.
2132 	 But if the mode has signed zeros, and does not round towards
2133 	 -infinity, then 0 - 0 is 0, not -0.  */
2134       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2135 	return simplify_gen_unary (NEG, mode, op1, mode);
2136 
2137       /* (-1 - a) is ~a.  */
2138       if (trueop0 == constm1_rtx)
2139 	return simplify_gen_unary (NOT, mode, op1, mode);
2140 
2141       /* Subtracting 0 has no effect unless the mode has signed zeros
2142 	 and supports rounding towards -infinity.  In such a case,
2143 	 0 - 0 is -0.  */
2144       if (!(HONOR_SIGNED_ZEROS (mode)
2145 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2146 	  && trueop1 == CONST0_RTX (mode))
2147 	return op0;
2148 
2149       /* See if this is something like X * C - X or vice versa or
2150 	 if the multiplication is written as a shift.  If so, we can
2151 	 distribute and make a new multiply, shift, or maybe just
2152 	 have X (if C is 2 in the example above).  But don't make
2153 	 something more expensive than we had before.  */
2154 
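      /* For instance, (minus (mult X 3) X) has coefficients 3 and -1
	 and becomes (mult X 2).  */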
2155       if (SCALAR_INT_MODE_P (mode))
2156 	{
2157 	  double_int coeff0, negcoeff1;
2158 	  rtx lhs = op0, rhs = op1;
2159 
2160 	  coeff0 = double_int_one;
2161 	  negcoeff1 = double_int_minus_one;
2162 
2163 	  if (GET_CODE (lhs) == NEG)
2164 	    {
2165 	      coeff0 = double_int_minus_one;
2166 	      lhs = XEXP (lhs, 0);
2167 	    }
2168 	  else if (GET_CODE (lhs) == MULT
2169 		   && CONST_INT_P (XEXP (lhs, 1)))
2170 	    {
2171 	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
2172 	      lhs = XEXP (lhs, 0);
2173 	    }
2174 	  else if (GET_CODE (lhs) == ASHIFT
2175 		   && CONST_INT_P (XEXP (lhs, 1))
2176 		   && INTVAL (XEXP (lhs, 1)) >= 0
2177 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2178 	    {
2179 	      coeff0 = double_int_setbit (double_int_zero,
2180 					  INTVAL (XEXP (lhs, 1)));
2181 	      lhs = XEXP (lhs, 0);
2182 	    }
2183 
2184 	  if (GET_CODE (rhs) == NEG)
2185 	    {
2186 	      negcoeff1 = double_int_one;
2187 	      rhs = XEXP (rhs, 0);
2188 	    }
2189 	  else if (GET_CODE (rhs) == MULT
2190 		   && CONST_INT_P (XEXP (rhs, 1)))
2191 	    {
2192 	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
2193 	      rhs = XEXP (rhs, 0);
2194 	    }
2195 	  else if (GET_CODE (rhs) == ASHIFT
2196 		   && CONST_INT_P (XEXP (rhs, 1))
2197 		   && INTVAL (XEXP (rhs, 1)) >= 0
2198 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2199 	    {
2200 	      negcoeff1 = double_int_setbit (double_int_zero,
2201 					     INTVAL (XEXP (rhs, 1)));
2202 	      negcoeff1 = double_int_neg (negcoeff1);
2203 	      rhs = XEXP (rhs, 0);
2204 	    }
2205 
2206 	  if (rtx_equal_p (lhs, rhs))
2207 	    {
2208 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2209 	      rtx coeff;
2210 	      double_int val;
2211 	      bool speed = optimize_function_for_speed_p (cfun);
2212 
2213 	      val = double_int_add (coeff0, negcoeff1);
2214 	      coeff = immed_double_int_const (val, mode);
2215 
2216 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2217 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2218 		? tem : 0;
2219 	    }
2220 	}
2221 
2222       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2223       if (GET_CODE (op1) == NEG)
2224 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2225 
2226       /* (-x - c) may be simplified as (-c - x).  */
2227       if (GET_CODE (op0) == NEG
2228 	  && (CONST_INT_P (op1)
2229 	      || GET_CODE (op1) == CONST_DOUBLE))
2230 	{
2231 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2232 	  if (tem)
2233 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2234 	}
2235 
2236       /* Don't let a relocatable value get a negative coeff.  */
2237       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2238 	return simplify_gen_binary (PLUS, mode,
2239 				    op0,
2240 				    neg_const_int (mode, op1));
2241 
2242       /* (x - (x & y)) -> (x & ~y) */
2243       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2244 	{
2245 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2246 	    {
2247 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2248 					GET_MODE (XEXP (op1, 1)));
2249 	      return simplify_gen_binary (AND, mode, op0, tem);
2250 	    }
2251 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2252 	    {
2253 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2254 					GET_MODE (XEXP (op1, 0)));
2255 	      return simplify_gen_binary (AND, mode, op0, tem);
2256 	    }
2257 	}
2258 
2259       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2260 	 by reversing the comparison code if valid.  */
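      /* E.g. (minus (const_int 1) (eq A B)) becomes (ne A B).  */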
2261       if (STORE_FLAG_VALUE == 1
2262 	  && trueop0 == const1_rtx
2263 	  && COMPARISON_P (op1)
2264 	  && (reversed = reversed_comparison (op1, mode)))
2265 	return reversed;
2266 
2267       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2268       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2269 	  && GET_CODE (op1) == MULT
2270 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2271 	{
2272 	  rtx in1, in2;
2273 
2274 	  in1 = XEXP (XEXP (op1, 0), 0);
2275 	  in2 = XEXP (op1, 1);
2276 	  return simplify_gen_binary (PLUS, mode,
2277 				      simplify_gen_binary (MULT, mode,
2278 							   in1, in2),
2279 				      op0);
2280 	}
2281 
2282       /* Canonicalize (minus (neg A) (mult B C)) to
2283 	 (minus (mult (neg B) C) A).  */
2284       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2285 	  && GET_CODE (op1) == MULT
2286 	  && GET_CODE (op0) == NEG)
2287 	{
2288 	  rtx in1, in2;
2289 
2290 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2291 	  in2 = XEXP (op1, 1);
2292 	  return simplify_gen_binary (MINUS, mode,
2293 				      simplify_gen_binary (MULT, mode,
2294 							   in1, in2),
2295 				      XEXP (op0, 0));
2296 	}
2297 
2298       /* If one of the operands is a PLUS or a MINUS, see if we can
2299 	 simplify this by the associative law.  This will, for example,
2300          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2301 	 Don't use the associative law for floating point.
2302 	 The inaccuracy makes it nonassociative,
2303 	 and subtle programs can break if operations are associated.  */
2304 
2305       if (INTEGRAL_MODE_P (mode)
2306 	  && (plus_minus_operand_p (op0)
2307 	      || plus_minus_operand_p (op1))
2308 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2309 	return tem;
2310       break;
2311 
2312     case MULT:
2313       if (trueop1 == constm1_rtx)
2314 	return simplify_gen_unary (NEG, mode, op0, mode);
2315 
2316       if (GET_CODE (op0) == NEG)
2317 	{
2318 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2319 	  /* If op1 is also a MULT and simplify_unary_operation just moved
2320 	     the NEG to its second operand, simplify_gen_binary below could,
2321 	     via simplify_associative_operation, move the NEG around again
2322 	     and recurse endlessly.  */
2323 	  if (temp
2324 	      && GET_CODE (op1) == MULT
2325 	      && GET_CODE (temp) == MULT
2326 	      && XEXP (op1, 0) == XEXP (temp, 0)
2327 	      && GET_CODE (XEXP (temp, 1)) == NEG
2328 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2329 	    temp = NULL_RTX;
2330 	  if (temp)
2331 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2332 	}
2333       if (GET_CODE (op1) == NEG)
2334 	{
2335 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2336 	  /* If op0 is also a MULT and simplify_unary_operation just moved
2337 	     the NEG to its second operand, simplify_gen_binary below could,
2338 	     via simplify_associative_operation, move the NEG around again
2339 	     and recurse endlessly.  */
2340 	  if (temp
2341 	      && GET_CODE (op0) == MULT
2342 	      && GET_CODE (temp) == MULT
2343 	      && XEXP (op0, 0) == XEXP (temp, 0)
2344 	      && GET_CODE (XEXP (temp, 1)) == NEG
2345 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2346 	    temp = NULL_RTX;
2347 	  if (temp)
2348 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2349 	}
2350 
2351       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2352 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2353 	 when the mode has signed zeros, since multiplying a negative
2354 	 number by 0 will give -0, not 0.  */
2355       if (!HONOR_NANS (mode)
2356 	  && !HONOR_SIGNED_ZEROS (mode)
2357 	  && trueop1 == CONST0_RTX (mode)
2358 	  && ! side_effects_p (op0))
2359 	return op1;
2360 
2361       /* In IEEE floating point, x*1 is not equivalent to x for
2362 	 signalling NaNs.  */
2363       if (!HONOR_SNANS (mode)
2364 	  && trueop1 == CONST1_RTX (mode))
2365 	return op0;
2366 
2367       /* Convert a multiply by a constant power of two into a shift;
2368 	 e.g. x * 8 becomes x << 3.  */
2369       if (CONST_INT_P (trueop1)
2370 	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2371 	  /* If the mode is larger than the host word size, and the
2372 	     uppermost bit is set, then this isn't a power of two due
2373 	     to implicit sign extension.  */
2374 	  && (width <= HOST_BITS_PER_WIDE_INT
2375 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2376 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2377 
2378       /* Likewise for multipliers wider than a word.  */
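      /* E.g. with 64-bit HOST_WIDE_INTs, a CONST_DOUBLE multiplier whose
	 low word is 0 and whose high word is 1 (i.e. 1 << 64) becomes an
	 ASHIFT by 64.  */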
2379       if (GET_CODE (trueop1) == CONST_DOUBLE
2380 	  && (GET_MODE (trueop1) == VOIDmode
2381 	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
2382 	  && GET_MODE (op0) == mode
2383 	  && CONST_DOUBLE_LOW (trueop1) == 0
2384 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
2385 	return simplify_gen_binary (ASHIFT, mode, op0,
2386 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2387 
2388       /* x*2 is x+x and x*(-1) is -x */
2389       if (GET_CODE (trueop1) == CONST_DOUBLE
2390 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2391 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2392 	  && GET_MODE (op0) == mode)
2393 	{
2394 	  REAL_VALUE_TYPE d;
2395 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2396 
2397 	  if (REAL_VALUES_EQUAL (d, dconst2))
2398 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2399 
2400 	  if (!HONOR_SNANS (mode)
2401 	      && REAL_VALUES_EQUAL (d, dconstm1))
2402 	    return simplify_gen_unary (NEG, mode, op0, mode);
2403 	}
2404 
2405       /* Optimize -x * -x as x * x.  */
2406       if (FLOAT_MODE_P (mode)
2407 	  && GET_CODE (op0) == NEG
2408 	  && GET_CODE (op1) == NEG
2409 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2410 	  && !side_effects_p (XEXP (op0, 0)))
2411 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2412 
2413       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2414       if (SCALAR_FLOAT_MODE_P (mode)
2415 	  && GET_CODE (op0) == ABS
2416 	  && GET_CODE (op1) == ABS
2417 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2418 	  && !side_effects_p (XEXP (op0, 0)))
2419 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2420 
2421       /* Reassociate multiplication, but for floating point MULTs
2422 	 only when the user specifies unsafe math optimizations.  */
2423       if (! FLOAT_MODE_P (mode)
2424 	  || flag_unsafe_math_optimizations)
2425 	{
2426 	  tem = simplify_associative_operation (code, mode, op0, op1);
2427 	  if (tem)
2428 	    return tem;
2429 	}
2430       break;
2431 
2432     case IOR:
2433       if (trueop1 == CONST0_RTX (mode))
2434 	return op0;
2435       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2436 	return op1;
2437       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2438 	return op0;
2439       /* A | (~A) -> -1 */
2440       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2441 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2442 	  && ! side_effects_p (op0)
2443 	  && SCALAR_INT_MODE_P (mode))
2444 	return constm1_rtx;
2445 
2446       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2447       if (CONST_INT_P (op1)
2448 	  && HWI_COMPUTABLE_MODE_P (mode)
2449 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
2450 	return op1;
2451 
2452       /* Canonicalize (X & C1) | C2.  */
2453       if (GET_CODE (op0) == AND
2454 	  && CONST_INT_P (trueop1)
2455 	  && CONST_INT_P (XEXP (op0, 1)))
2456 	{
2457 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2458 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2459 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2460 
2461 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
2462 	  if ((c1 & c2) == c1
2463 	      && !side_effects_p (XEXP (op0, 0)))
2464 	    return trueop1;
2465 
2466 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2467 	  if (((c1|c2) & mask) == mask)
2468 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2469 
2470 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2471 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2472 	    {
2473 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2474 					 gen_int_mode (c1 & ~c2, mode));
2475 	      return simplify_gen_binary (IOR, mode, tem, op1);
2476 	    }
2477 	}
2478 
2479       /* Convert (A & B) | A to A.  */
2480       if (GET_CODE (op0) == AND
2481 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2482 	      || rtx_equal_p (XEXP (op0, 1), op1))
2483 	  && ! side_effects_p (XEXP (op0, 0))
2484 	  && ! side_effects_p (XEXP (op0, 1)))
2485 	return op1;
2486 
2487       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2488          mode size to (rotate A CX).  */
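      /* E.g. (ior (ashift A 24) (lshiftrt A 8)) becomes (rotate A 24)
	 when the mode precision is 32.  */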
2489 
2490       if (GET_CODE (op1) == ASHIFT
2491           || GET_CODE (op1) == SUBREG)
2492         {
2493 	  opleft = op1;
2494 	  opright = op0;
2495 	}
2496       else
2497         {
2498 	  opright = op1;
2499 	  opleft = op0;
2500 	}
2501 
2502       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2503           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2504           && CONST_INT_P (XEXP (opleft, 1))
2505           && CONST_INT_P (XEXP (opright, 1))
2506           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2507               == GET_MODE_PRECISION (mode)))
2508         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2509 
2510       /* Same, but for ashift that has been "simplified" to a wider mode
2511         by simplify_shift_const.  */
2512 
2513       if (GET_CODE (opleft) == SUBREG
2514           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2515           && GET_CODE (opright) == LSHIFTRT
2516           && GET_CODE (XEXP (opright, 0)) == SUBREG
2517           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2518           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2519           && (GET_MODE_SIZE (GET_MODE (opleft))
2520               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2521           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2522                           SUBREG_REG (XEXP (opright, 0)))
2523           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2524           && CONST_INT_P (XEXP (opright, 1))
2525           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2526               == GET_MODE_PRECISION (mode)))
2527         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2528                                XEXP (SUBREG_REG (opleft), 1));
2529 
2530       /* If we have (ior (and X C1) C2), simplify this by making
2531 	 C1 as small as possible if C1 actually changes.  */
2532       if (CONST_INT_P (op1)
2533 	  && (HWI_COMPUTABLE_MODE_P (mode)
2534 	      || INTVAL (op1) > 0)
2535 	  && GET_CODE (op0) == AND
2536 	  && CONST_INT_P (XEXP (op0, 1))
2537 	  && CONST_INT_P (op1)
2538 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2539 	return simplify_gen_binary (IOR, mode,
2540 				    simplify_gen_binary
2541 					  (AND, mode, XEXP (op0, 0),
2542 					   GEN_INT (UINTVAL (XEXP (op0, 1))
2543 						    & ~UINTVAL (op1))),
2544 				    op1);
2545 
2546       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2547          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2548 	 the PLUS does not affect any of the bits in OP1: then we can do
2549 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2550          can be safely shifted left C bits.  */
2551       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2552           && GET_CODE (XEXP (op0, 0)) == PLUS
2553           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2554           && CONST_INT_P (XEXP (op0, 1))
2555           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2556         {
2557           int count = INTVAL (XEXP (op0, 1));
2558           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2559 
2560           if (mask >> count == INTVAL (trueop1)
2561 	      && trunc_int_for_mode (mask, mode) == mask
2562               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2563 	    return simplify_gen_binary (ASHIFTRT, mode,
2564 					plus_constant (XEXP (op0, 0), mask),
2565 					XEXP (op0, 1));
2566         }
2567 
2568       tem = simplify_associative_operation (code, mode, op0, op1);
2569       if (tem)
2570 	return tem;
2571       break;
2572 
2573     case XOR:
2574       if (trueop1 == CONST0_RTX (mode))
2575 	return op0;
2576       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2577 	return simplify_gen_unary (NOT, mode, op0, mode);
2578       if (rtx_equal_p (trueop0, trueop1)
2579 	  && ! side_effects_p (op0)
2580 	  && GET_MODE_CLASS (mode) != MODE_CC)
2581 	 return CONST0_RTX (mode);
2582 
2583       /* Canonicalize XOR of the most significant bit to PLUS.  */
2584       if ((CONST_INT_P (op1)
2585 	   || GET_CODE (op1) == CONST_DOUBLE)
2586 	  && mode_signbit_p (mode, op1))
2587 	return simplify_gen_binary (PLUS, mode, op0, op1);
2588       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2589       if ((CONST_INT_P (op1)
2590 	   || GET_CODE (op1) == CONST_DOUBLE)
2591 	  && GET_CODE (op0) == PLUS
2592 	  && (CONST_INT_P (XEXP (op0, 1))
2593 	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2594 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2595 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2596 				    simplify_gen_binary (XOR, mode, op1,
2597 							 XEXP (op0, 1)));
2598 
2599       /* If we are XORing two things that have no bits in common,
2600 	 convert them into an IOR.  This helps to detect rotation encoded
2601 	 using those methods and possibly other simplifications.  */
2602 
2603       if (HWI_COMPUTABLE_MODE_P (mode)
2604 	  && (nonzero_bits (op0, mode)
2605 	      & nonzero_bits (op1, mode)) == 0)
2606 	return (simplify_gen_binary (IOR, mode, op0, op1));
2607 
2608       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2609 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2610 	 (NOT y).  */
2611       {
2612 	int num_negated = 0;
2613 
2614 	if (GET_CODE (op0) == NOT)
2615 	  num_negated++, op0 = XEXP (op0, 0);
2616 	if (GET_CODE (op1) == NOT)
2617 	  num_negated++, op1 = XEXP (op1, 0);
2618 
2619 	if (num_negated == 2)
2620 	  return simplify_gen_binary (XOR, mode, op0, op1);
2621 	else if (num_negated == 1)
2622 	  return simplify_gen_unary (NOT, mode,
2623 				     simplify_gen_binary (XOR, mode, op0, op1),
2624 				     mode);
2625       }
2626 
2627       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2628 	 correspond to a machine insn or result in further simplifications
2629 	 if B is a constant.  */
2630 
2631       if (GET_CODE (op0) == AND
2632 	  && rtx_equal_p (XEXP (op0, 1), op1)
2633 	  && ! side_effects_p (op1))
2634 	return simplify_gen_binary (AND, mode,
2635 				    simplify_gen_unary (NOT, mode,
2636 							XEXP (op0, 0), mode),
2637 				    op1);
2638 
2639       else if (GET_CODE (op0) == AND
2640 	       && rtx_equal_p (XEXP (op0, 0), op1)
2641 	       && ! side_effects_p (op1))
2642 	return simplify_gen_binary (AND, mode,
2643 				    simplify_gen_unary (NOT, mode,
2644 							XEXP (op0, 1), mode),
2645 				    op1);
2646 
2647       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2648 	 we can transform like this:
2649             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2650                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2651                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2652 	 Attempt a few simplifications when B and C are both constants.  */
2653       if (GET_CODE (op0) == AND
2654 	  && CONST_INT_P (op1)
2655 	  && CONST_INT_P (XEXP (op0, 1)))
2656 	{
2657 	  rtx a = XEXP (op0, 0);
2658 	  rtx b = XEXP (op0, 1);
2659 	  rtx c = op1;
2660 	  HOST_WIDE_INT bval = INTVAL (b);
2661 	  HOST_WIDE_INT cval = INTVAL (c);
2662 
2663 	  rtx na_c
2664 	    = simplify_binary_operation (AND, mode,
2665 					 simplify_gen_unary (NOT, mode, a, mode),
2666 					 c);
2667 	  if ((~cval & bval) == 0)
2668 	    {
2669 	      /* Try to simplify ~A&C | ~B&C.  */
2670 	      if (na_c != NULL_RTX)
2671 		return simplify_gen_binary (IOR, mode, na_c,
2672 					    GEN_INT (~bval & cval));
2673 	    }
2674 	  else
2675 	    {
2676 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2677 	      if (na_c == const0_rtx)
2678 		{
2679 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2680 						    GEN_INT (~cval & bval));
2681 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2682 					      GEN_INT (~bval & cval));
2683 		}
2684 	    }
2685 	}
2686 
2687       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2688 	 comparison if STORE_FLAG_VALUE is 1.  */
2689       if (STORE_FLAG_VALUE == 1
2690 	  && trueop1 == const1_rtx
2691 	  && COMPARISON_P (op0)
2692 	  && (reversed = reversed_comparison (op0, mode)))
2693 	return reversed;
2694 
2695       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2696 	 is (lt foo (const_int 0)), so we can perform the above
2697 	 simplification if STORE_FLAG_VALUE is 1.  */
2698 
2699       if (STORE_FLAG_VALUE == 1
2700 	  && trueop1 == const1_rtx
2701 	  && GET_CODE (op0) == LSHIFTRT
2702 	  && CONST_INT_P (XEXP (op0, 1))
2703 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2704 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2705 
2706       /* (xor (comparison foo bar) (const_int sign-bit)) can become the
2707 	 reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
2708       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2709 	  && trueop1 == const_true_rtx
2710 	  && COMPARISON_P (op0)
2711 	  && (reversed = reversed_comparison (op0, mode)))
2712 	return reversed;
2713 
2714       tem = simplify_associative_operation (code, mode, op0, op1);
2715       if (tem)
2716 	return tem;
2717       break;
2718 
2719     case AND:
2720       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2721 	return trueop1;
2722       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2723 	return op0;
2724       if (HWI_COMPUTABLE_MODE_P (mode))
2725 	{
2726 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2727 	  HOST_WIDE_INT nzop1;
2728 	  if (CONST_INT_P (trueop1))
2729 	    {
2730 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2731 	      /* If we are turning off bits already known off in OP0, we need
2732 		 not do an AND.  */
2733 	      if ((nzop0 & ~val1) == 0)
2734 		return op0;
2735 	    }
2736 	  nzop1 = nonzero_bits (trueop1, mode);
2737 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2738 	  if ((nzop1 & nzop0) == 0
2739 	      && !side_effects_p (op0) && !side_effects_p (op1))
2740 	    return CONST0_RTX (mode);
2741 	}
2742       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2743 	  && GET_MODE_CLASS (mode) != MODE_CC)
2744 	return op0;
2745       /* A & (~A) -> 0 */
2746       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2747 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2748 	  && ! side_effects_p (op0)
2749 	  && GET_MODE_CLASS (mode) != MODE_CC)
2750 	return CONST0_RTX (mode);
2751 
2752       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2753 	 there are no nonzero bits of C outside of X's mode.  */
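      /* E.g. (and (sign_extend:SI X) (const_int 0x7f)), where X has
	 QImode, becomes (zero_extend:SI (and:QI X (const_int 0x7f))).  */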
2754       if ((GET_CODE (op0) == SIGN_EXTEND
2755 	   || GET_CODE (op0) == ZERO_EXTEND)
2756 	  && CONST_INT_P (trueop1)
2757 	  && HWI_COMPUTABLE_MODE_P (mode)
2758 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2759 	      & UINTVAL (trueop1)) == 0)
2760 	{
2761 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2762 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2763 				     gen_int_mode (INTVAL (trueop1),
2764 						   imode));
2765 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2766 	}
2767 
2768       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2769 	 we might be able to further simplify the AND with X and potentially
2770 	 remove the truncation altogether.  */
2771       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2772 	{
2773 	  rtx x = XEXP (op0, 0);
2774 	  enum machine_mode xmode = GET_MODE (x);
2775 	  tem = simplify_gen_binary (AND, xmode, x,
2776 				     gen_int_mode (INTVAL (trueop1), xmode));
2777 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2778 	}
2779 
2780       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
2781       if (GET_CODE (op0) == IOR
2782 	  && CONST_INT_P (trueop1)
2783 	  && CONST_INT_P (XEXP (op0, 1)))
2784 	{
2785 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2786 	  return simplify_gen_binary (IOR, mode,
2787 				      simplify_gen_binary (AND, mode,
2788 							   XEXP (op0, 0), op1),
2789 				      gen_int_mode (tmp, mode));
2790 	}
2791 
2792       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2793 	 insn (and may simplify more).  */
2794       if (GET_CODE (op0) == XOR
2795 	  && rtx_equal_p (XEXP (op0, 0), op1)
2796 	  && ! side_effects_p (op1))
2797 	return simplify_gen_binary (AND, mode,
2798 				    simplify_gen_unary (NOT, mode,
2799 							XEXP (op0, 1), mode),
2800 				    op1);
2801 
2802       if (GET_CODE (op0) == XOR
2803 	  && rtx_equal_p (XEXP (op0, 1), op1)
2804 	  && ! side_effects_p (op1))
2805 	return simplify_gen_binary (AND, mode,
2806 				    simplify_gen_unary (NOT, mode,
2807 							XEXP (op0, 0), mode),
2808 				    op1);
2809 
2810       /* Similarly for (~(A ^ B)) & A.  */
2811       if (GET_CODE (op0) == NOT
2812 	  && GET_CODE (XEXP (op0, 0)) == XOR
2813 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2814 	  && ! side_effects_p (op1))
2815 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2816 
2817       if (GET_CODE (op0) == NOT
2818 	  && GET_CODE (XEXP (op0, 0)) == XOR
2819 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2820 	  && ! side_effects_p (op1))
2821 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2822 
2823       /* Convert (A | B) & A to A.  */
2824       if (GET_CODE (op0) == IOR
2825 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2826 	      || rtx_equal_p (XEXP (op0, 1), op1))
2827 	  && ! side_effects_p (XEXP (op0, 0))
2828 	  && ! side_effects_p (XEXP (op0, 1)))
2829 	return op1;
2830 
2831       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2832 	 ((A & N) + B) & M -> (A + B) & M
2833 	 Similarly if (N & M) == 0,
2834 	 ((A | N) + B) & M -> (A + B) & M
2835 	 and for - instead of + and/or ^ instead of |.
2836          Also, if (N & M) == 0, then
2837 	 (A +- N) & M -> A & M.  */
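      /* E.g. ((X & 0xffff) + Y) & 0xff becomes (X + Y) & 0xff, and
	 (X - 0x100) & 0xff becomes X & 0xff.  */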
2838       if (CONST_INT_P (trueop1)
2839 	  && HWI_COMPUTABLE_MODE_P (mode)
2840 	  && ~UINTVAL (trueop1)
2841 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
2842 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2843 	{
2844 	  rtx pmop[2];
2845 	  int which;
2846 
2847 	  pmop[0] = XEXP (op0, 0);
2848 	  pmop[1] = XEXP (op0, 1);
2849 
2850 	  if (CONST_INT_P (pmop[1])
2851 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
2852 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
2853 
2854 	  for (which = 0; which < 2; which++)
2855 	    {
2856 	      tem = pmop[which];
2857 	      switch (GET_CODE (tem))
2858 		{
2859 		case AND:
2860 		  if (CONST_INT_P (XEXP (tem, 1))
2861 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
2862 		      == UINTVAL (trueop1))
2863 		    pmop[which] = XEXP (tem, 0);
2864 		  break;
2865 		case IOR:
2866 		case XOR:
2867 		  if (CONST_INT_P (XEXP (tem, 1))
2868 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
2869 		    pmop[which] = XEXP (tem, 0);
2870 		  break;
2871 		default:
2872 		  break;
2873 		}
2874 	    }
2875 
2876 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2877 	    {
2878 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
2879 					 pmop[0], pmop[1]);
2880 	      return simplify_gen_binary (code, mode, tem, op1);
2881 	    }
2882 	}
2883 
2884       /* (and X (ior (not X) Y)) -> (and X Y) */
2885       if (GET_CODE (op1) == IOR
2886 	  && GET_CODE (XEXP (op1, 0)) == NOT
2887 	  && op0 == XEXP (XEXP (op1, 0), 0))
2888        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2889 
2890       /* (and (ior (not X) Y) X) -> (and X Y) */
2891       if (GET_CODE (op0) == IOR
2892 	  && GET_CODE (XEXP (op0, 0)) == NOT
2893 	  && op1 == XEXP (XEXP (op0, 0), 0))
2894 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2895 
2896       tem = simplify_associative_operation (code, mode, op0, op1);
2897       if (tem)
2898 	return tem;
2899       break;
2900 
2901     case UDIV:
2902       /* 0/x is 0 (or x&0 if x has side-effects).  */
2903       if (trueop0 == CONST0_RTX (mode))
2904 	{
2905 	  if (side_effects_p (op1))
2906 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2907 	  return trueop0;
2908 	}
2909       /* x/1 is x.  */
2910       if (trueop1 == CONST1_RTX (mode))
2911 	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2912       /* Convert divide by power of two into shift.  */
2913       if (CONST_INT_P (trueop1)
2914 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
2915 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2916       break;
2917 
2918     case DIV:
2919       /* Handle floating point and integers separately.  */
2920       if (SCALAR_FLOAT_MODE_P (mode))
2921 	{
2922 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
2923 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
2924 	     NaN rather than 0.0.  Nor is it safe for modes with signed
2925 	     zeros, since dividing 0 by a negative number gives -0.0.  */
2926 	  if (trueop0 == CONST0_RTX (mode)
2927 	      && !HONOR_NANS (mode)
2928 	      && !HONOR_SIGNED_ZEROS (mode)
2929 	      && ! side_effects_p (op1))
2930 	    return op0;
2931 	  /* x/1.0 is x.  */
2932 	  if (trueop1 == CONST1_RTX (mode)
2933 	      && !HONOR_SNANS (mode))
2934 	    return op0;
2935 
2936 	  if (GET_CODE (trueop1) == CONST_DOUBLE
2937 	      && trueop1 != CONST0_RTX (mode))
2938 	    {
2939 	      REAL_VALUE_TYPE d;
2940 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2941 
2942 	      /* x/-1.0 is -x.  */
2943 	      if (REAL_VALUES_EQUAL (d, dconstm1)
2944 		  && !HONOR_SNANS (mode))
2945 		return simplify_gen_unary (NEG, mode, op0, mode);
2946 
2947 	      /* Change FP division by a constant into multiplication.
2948 		 Only do this with -freciprocal-math.  */
2949 	      if (flag_reciprocal_math
2950 		  && !REAL_VALUES_EQUAL (d, dconst0))
2951 		{
2952 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2953 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2954 		  return simplify_gen_binary (MULT, mode, op0, tem);
2955 		}
2956 	    }
2957 	}
2958       else if (SCALAR_INT_MODE_P (mode))
2959 	{
2960 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
2961 	  if (trueop0 == CONST0_RTX (mode)
2962 	      && !cfun->can_throw_non_call_exceptions)
2963 	    {
2964 	      if (side_effects_p (op1))
2965 		return simplify_gen_binary (AND, mode, op1, trueop0);
2966 	      return trueop0;
2967 	    }
2968 	  /* x/1 is x.  */
2969 	  if (trueop1 == CONST1_RTX (mode))
2970 	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2971 	  /* x/-1 is -x.  */
2972 	  if (trueop1 == constm1_rtx)
2973 	    {
2974 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2975 	      return simplify_gen_unary (NEG, mode, x, mode);
2976 	    }
2977 	}
2978       break;
2979 
2980     case UMOD:
2981       /* 0%x is 0 (or x&0 if x has side-effects).  */
2982       if (trueop0 == CONST0_RTX (mode))
2983 	{
2984 	  if (side_effects_p (op1))
2985 	    return simplify_gen_binary (AND, mode, op1, trueop0);
2986 	  return trueop0;
2987 	}
2988       /* x%1 is 0 (or x&0 if x has side-effects).  */
2989       if (trueop1 == CONST1_RTX (mode))
2990 	{
2991 	  if (side_effects_p (op0))
2992 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2993 	  return CONST0_RTX (mode);
2994 	}
2995       /* Implement modulus by power of two as AND.  */
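      /* E.g. x % 16 becomes x & 15.  */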
2996       if (CONST_INT_P (trueop1)
2997 	  && exact_log2 (UINTVAL (trueop1)) > 0)
2998 	return simplify_gen_binary (AND, mode, op0,
2999 				    GEN_INT (INTVAL (op1) - 1));
3000       break;
3001 
3002     case MOD:
3003       /* 0%x is 0 (or x&0 if x has side-effects).  */
3004       if (trueop0 == CONST0_RTX (mode))
3005 	{
3006 	  if (side_effects_p (op1))
3007 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3008 	  return trueop0;
3009 	}
3010       /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3011       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3012 	{
3013 	  if (side_effects_p (op0))
3014 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3015 	  return CONST0_RTX (mode);
3016 	}
3017       break;
3018 
3019     case ROTATERT:
3020     case ROTATE:
3021     case ASHIFTRT:
3022       if (trueop1 == CONST0_RTX (mode))
3023 	return op0;
3024       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3025 	return op0;
3026       /* Rotating or arithmetically right-shifting ~0 always results in ~0.  */
3027       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3028 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3029 	  && ! side_effects_p (op1))
3030 	return op0;
3031     canonicalize_shift:
3032       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3033 	{
3034 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3035 	  if (val != INTVAL (op1))
3036 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3037 	}
3038       break;
3039 
3040     case ASHIFT:
3041     case SS_ASHIFT:
3042     case US_ASHIFT:
3043       if (trueop1 == CONST0_RTX (mode))
3044 	return op0;
3045       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3046 	return op0;
3047       goto canonicalize_shift;
3048 
3049     case LSHIFTRT:
3050       if (trueop1 == CONST0_RTX (mode))
3051 	return op0;
3052       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3053 	return op0;
3054       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
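      /* When CLZ of zero is defined as the operand's precision and C is
	 log2 of that precision, the shifted value is 1 exactly when X is
	 zero.  */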
3055       if (GET_CODE (op0) == CLZ
3056 	  && CONST_INT_P (trueop1)
3057 	  && STORE_FLAG_VALUE == 1
3058 	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
3059 	{
3060 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3061 	  unsigned HOST_WIDE_INT zero_val = 0;
3062 
3063 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3064 	      && zero_val == GET_MODE_PRECISION (imode)
3065 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3066 	    return simplify_gen_relational (EQ, mode, imode,
3067 					    XEXP (op0, 0), const0_rtx);
3068 	}
3069       goto canonicalize_shift;
3070 
3071     case SMIN:
3072       if (width <= HOST_BITS_PER_WIDE_INT
3073 	  && mode_signbit_p (mode, trueop1)
3074 	  && ! side_effects_p (op0))
3075 	return op1;
3076       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3077 	return op0;
3078       tem = simplify_associative_operation (code, mode, op0, op1);
3079       if (tem)
3080 	return tem;
3081       break;
3082 
3083     case SMAX:
3084       if (width <= HOST_BITS_PER_WIDE_INT
3085 	  && CONST_INT_P (trueop1)
3086 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3087 	  && ! side_effects_p (op0))
3088 	return op1;
3089       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3090 	return op0;
3091       tem = simplify_associative_operation (code, mode, op0, op1);
3092       if (tem)
3093 	return tem;
3094       break;
3095 
3096     case UMIN:
3097       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3098 	return op1;
3099       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3100 	return op0;
3101       tem = simplify_associative_operation (code, mode, op0, op1);
3102       if (tem)
3103 	return tem;
3104       break;
3105 
3106     case UMAX:
3107       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3108 	return op1;
3109       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3110 	return op0;
3111       tem = simplify_associative_operation (code, mode, op0, op1);
3112       if (tem)
3113 	return tem;
3114       break;
3115 
3116     case SS_PLUS:
3117     case US_PLUS:
3118     case SS_MINUS:
3119     case US_MINUS:
3120     case SS_MULT:
3121     case US_MULT:
3122     case SS_DIV:
3123     case US_DIV:
3124       /* ??? There are simplifications that can be done.  */
3125       return 0;
3126 
3127     case VEC_SELECT:
3128       if (!VECTOR_MODE_P (mode))
3129 	{
3130 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3131 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3132 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3133 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3134 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3135 
3136 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3137 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3138 						      (trueop1, 0, 0)));
3139 
3140 	  /* Extract a scalar element from a nested VEC_SELECT expression
3141 	     (with an optional nested VEC_CONCAT expression).  Some targets
3142 	     (i386) extract a scalar element from a vector using a chain of
3143 	     nested VEC_SELECT expressions.  When the input operand is a
3144 	     memory operand, this operation can be simplified to a simple
3145 	     scalar load from an offset memory address.  */
3146 	  if (GET_CODE (trueop0) == VEC_SELECT)
3147 	    {
3148 	      rtx op0 = XEXP (trueop0, 0);
3149 	      rtx op1 = XEXP (trueop0, 1);
3150 
3151 	      enum machine_mode opmode = GET_MODE (op0);
3152 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3153 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3154 
3155 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3156 	      int elem;
3157 
3158 	      rtvec vec;
3159 	      rtx tmp_op, tmp;
3160 
3161 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3162 	      gcc_assert (i < n_elts);
3163 
3164 	      /* Select the element pointed to by the nested selector.  */
3165 	      elem = INTVAL (XVECEXP (op1, 0, i));
3166 
3167 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3168 	      if (GET_CODE (op0) == VEC_CONCAT)
3169 		{
3170 		  rtx op00 = XEXP (op0, 0);
3171 		  rtx op01 = XEXP (op0, 1);
3172 
3173 		  enum machine_mode mode00, mode01;
3174 		  int n_elts00, n_elts01;
3175 
3176 		  mode00 = GET_MODE (op00);
3177 		  mode01 = GET_MODE (op01);
3178 
3179 		  /* Find out number of elements of each operand.  */
3180 		  if (VECTOR_MODE_P (mode00))
3181 		    {
3182 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3183 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3184 		    }
3185 		  else
3186 		    n_elts00 = 1;
3187 
3188 		  if (VECTOR_MODE_P (mode01))
3189 		    {
3190 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3191 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3192 		    }
3193 		  else
3194 		    n_elts01 = 1;
3195 
3196 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3197 
3198 		  /* Select the correct operand of the VEC_CONCAT
3199 		     and adjust the selector.  */
3200 		  if (elem < n_elts00)
3201 		    tmp_op = op00;
3202 		  else
3203 		    {
3204 		      tmp_op = op01;
3205 		      elem -= n_elts00;
3206 		    }
3207 		}
3208 	      else
3209 		tmp_op = op0;
3210 
3211 	      vec = rtvec_alloc (1);
3212 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3213 
3214 	      tmp = gen_rtx_fmt_ee (code, mode,
3215 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3216 	      return tmp;
3217 	    }
3218 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3219 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3220 	    return XEXP (trueop0, 0);
3221 	}
3222       else
3223 	{
3224 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3225 	  gcc_assert (GET_MODE_INNER (mode)
3226 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3227 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3228 
3229 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3230 	    {
3231 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3232 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3233 	      rtvec v = rtvec_alloc (n_elts);
3234 	      unsigned int i;
3235 
3236 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3237 	      for (i = 0; i < n_elts; i++)
3238 		{
3239 		  rtx x = XVECEXP (trueop1, 0, i);
3240 
3241 		  gcc_assert (CONST_INT_P (x));
3242 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3243 						       INTVAL (x));
3244 		}
3245 
3246 	      return gen_rtx_CONST_VECTOR (mode, v);
3247 	    }
3248 	}
3249 
3250       if (XVECLEN (trueop1, 0) == 1
3251 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3252 	  && GET_CODE (trueop0) == VEC_CONCAT)
3253 	{
3254 	  rtx vec = trueop0;
3255 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3256 
3257 	  /* Try to find the element in the VEC_CONCAT.  */
3258 	  while (GET_MODE (vec) != mode
3259 		 && GET_CODE (vec) == VEC_CONCAT)
3260 	    {
3261 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3262 	      if (offset < vec_size)
3263 		vec = XEXP (vec, 0);
3264 	      else
3265 		{
3266 		  offset -= vec_size;
3267 		  vec = XEXP (vec, 1);
3268 		}
3269 	      vec = avoid_constant_pool_reference (vec);
3270 	    }
3271 
3272 	  if (GET_MODE (vec) == mode)
3273 	    return vec;
3274 	}
3275 
3276       return 0;
3277     case VEC_CONCAT:
3278       {
3279 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3280 				      ? GET_MODE (trueop0)
3281 				      : GET_MODE_INNER (mode));
3282 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3283 				      ? GET_MODE (trueop1)
3284 				      : GET_MODE_INNER (mode));
3285 
3286 	gcc_assert (VECTOR_MODE_P (mode));
3287 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3288 		    == GET_MODE_SIZE (mode));
3289 
3290 	if (VECTOR_MODE_P (op0_mode))
3291 	  gcc_assert (GET_MODE_INNER (mode)
3292 		      == GET_MODE_INNER (op0_mode));
3293 	else
3294 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3295 
3296 	if (VECTOR_MODE_P (op1_mode))
3297 	  gcc_assert (GET_MODE_INNER (mode)
3298 		      == GET_MODE_INNER (op1_mode));
3299 	else
3300 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3301 
3302 	if ((GET_CODE (trueop0) == CONST_VECTOR
3303 	     || CONST_INT_P (trueop0)
3304 	     || GET_CODE (trueop0) == CONST_DOUBLE)
3305 	    && (GET_CODE (trueop1) == CONST_VECTOR
3306 		|| CONST_INT_P (trueop1)
3307 		|| GET_CODE (trueop1) == CONST_DOUBLE))
3308 	  {
3309 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3310 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3311 	    rtvec v = rtvec_alloc (n_elts);
3312 	    unsigned int i;
3313 	    unsigned in_n_elts = 1;
3314 
3315 	    if (VECTOR_MODE_P (op0_mode))
3316 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3317 	    for (i = 0; i < n_elts; i++)
3318 	      {
3319 		if (i < in_n_elts)
3320 		  {
3321 		    if (!VECTOR_MODE_P (op0_mode))
3322 		      RTVEC_ELT (v, i) = trueop0;
3323 		    else
3324 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3325 		  }
3326 		else
3327 		  {
3328 		    if (!VECTOR_MODE_P (op1_mode))
3329 		      RTVEC_ELT (v, i) = trueop1;
3330 		    else
3331 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3332 							   i - in_n_elts);
3333 		  }
3334 	      }
3335 
3336 	    return gen_rtx_CONST_VECTOR (mode, v);
3337 	  }
3338       }
3339       return 0;
3340 
3341     default:
3342       gcc_unreachable ();
3343     }
3344 
3345   return 0;
3346 }
3347 
3348 rtx
3349 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3350 				 rtx op0, rtx op1)
3351 {
3352   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3353   HOST_WIDE_INT val;
3354   unsigned int width = GET_MODE_PRECISION (mode);
3355 
3356   if (VECTOR_MODE_P (mode)
3357       && code != VEC_CONCAT
3358       && GET_CODE (op0) == CONST_VECTOR
3359       && GET_CODE (op1) == CONST_VECTOR)
3360     {
3361       unsigned n_elts = GET_MODE_NUNITS (mode);
3362       enum machine_mode op0mode = GET_MODE (op0);
3363       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3364       enum machine_mode op1mode = GET_MODE (op1);
3365       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3366       rtvec v = rtvec_alloc (n_elts);
3367       unsigned int i;
3368 
3369       gcc_assert (op0_n_elts == n_elts);
3370       gcc_assert (op1_n_elts == n_elts);
3371       for (i = 0; i < n_elts; i++)
3372 	{
3373 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3374 					     CONST_VECTOR_ELT (op0, i),
3375 					     CONST_VECTOR_ELT (op1, i));
3376 	  if (!x)
3377 	    return 0;
3378 	  RTVEC_ELT (v, i) = x;
3379 	}
3380 
3381       return gen_rtx_CONST_VECTOR (mode, v);
3382     }
3383 
3384   if (VECTOR_MODE_P (mode)
3385       && code == VEC_CONCAT
3386       && (CONST_INT_P (op0)
3387 	  || GET_CODE (op0) == CONST_DOUBLE
3388 	  || GET_CODE (op0) == CONST_FIXED)
3389       && (CONST_INT_P (op1)
3390 	  || GET_CODE (op1) == CONST_DOUBLE
3391 	  || GET_CODE (op1) == CONST_FIXED))
3392     {
3393       unsigned n_elts = GET_MODE_NUNITS (mode);
3394       rtvec v = rtvec_alloc (n_elts);
3395 
3396       gcc_assert (n_elts >= 2);
3397       if (n_elts == 2)
3398 	{
3399 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3400 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3401 
3402 	  RTVEC_ELT (v, 0) = op0;
3403 	  RTVEC_ELT (v, 1) = op1;
3404 	}
3405       else
3406 	{
3407 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3408 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3409 	  unsigned i;
3410 
3411 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3412 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3413 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3414 
3415 	  for (i = 0; i < op0_n_elts; ++i)
3416 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3417 	  for (i = 0; i < op1_n_elts; ++i)
3418 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3419 	}
3420 
3421       return gen_rtx_CONST_VECTOR (mode, v);
3422     }
3423 
3424   if (SCALAR_FLOAT_MODE_P (mode)
3425       && GET_CODE (op0) == CONST_DOUBLE
3426       && GET_CODE (op1) == CONST_DOUBLE
3427       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3428     {
3429       if (code == AND
3430 	  || code == IOR
3431 	  || code == XOR)
3432 	{
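	  /* Bitwise operations on floating-point constants typically come
	     from sign-bit manipulation (fabs, negation or copysign expanded
	     as AND/IOR/XOR with a sign-bit mask), so fold them on the
	     target bit patterns of the two values.  */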
3433 	  long tmp0[4];
3434 	  long tmp1[4];
3435 	  REAL_VALUE_TYPE r;
3436 	  int i;
3437 
3438 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3439 			  GET_MODE (op0));
3440 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3441 			  GET_MODE (op1));
3442 	  for (i = 0; i < 4; i++)
3443 	    {
3444 	      switch (code)
3445 	      {
3446 	      case AND:
3447 		tmp0[i] &= tmp1[i];
3448 		break;
3449 	      case IOR:
3450 		tmp0[i] |= tmp1[i];
3451 		break;
3452 	      case XOR:
3453 		tmp0[i] ^= tmp1[i];
3454 		break;
3455 	      default:
3456 		gcc_unreachable ();
3457 	      }
3458 	    }
3459 	   real_from_target (&r, tmp0, mode);
3460 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3461 	}
3462       else
3463 	{
3464 	  REAL_VALUE_TYPE f0, f1, value, result;
3465 	  bool inexact;
3466 
3467 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3468 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3469 	  real_convert (&f0, mode, &f0);
3470 	  real_convert (&f1, mode, &f1);
3471 
3472 	  if (HONOR_SNANS (mode)
3473 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3474 	    return 0;
3475 
3476 	  if (code == DIV
3477 	      && REAL_VALUES_EQUAL (f1, dconst0)
3478 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3479 	    return 0;
3480 
3481 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3482 	      && flag_trapping_math
3483 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3484 	    {
3485 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3486 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3487 
3488 	      switch (code)
3489 		{
3490 		case PLUS:
3491 		  /* Inf + -Inf = NaN plus exception.  */
3492 		  if (s0 != s1)
3493 		    return 0;
3494 		  break;
3495 		case MINUS:
3496 		  /* Inf - Inf = NaN plus exception.  */
3497 		  if (s0 == s1)
3498 		    return 0;
3499 		  break;
3500 		case DIV:
3501 		  /* Inf / Inf = NaN plus exception.  */
3502 		  return 0;
3503 		default:
3504 		  break;
3505 		}
3506 	    }
3507 
3508 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3509 	      && flag_trapping_math
3510 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3511 		  || (REAL_VALUE_ISINF (f1)
3512 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3513 	    /* Inf * 0 = NaN plus exception.  */
3514 	    return 0;
3515 
3516 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3517 				     &f0, &f1);
3518 	  real_convert (&result, mode, &value);
3519 
3520 	  /* Don't constant fold this floating point operation if the
3521 	     result has overflowed and flag_trapping_math is set.  */
3522 
3523 	  if (flag_trapping_math
3524 	      && MODE_HAS_INFINITIES (mode)
3525 	      && REAL_VALUE_ISINF (result)
3526 	      && !REAL_VALUE_ISINF (f0)
3527 	      && !REAL_VALUE_ISINF (f1))
3528 	    /* Overflow plus exception.  */
3529 	    return 0;
3530 
3531 	  /* Don't constant fold this floating point operation if the
3532 	     result may depend upon the run-time rounding mode and
3533 	     flag_rounding_math is set, or if GCC's software emulation
3534 	     is unable to accurately represent the result.  */
3535 
3536 	  if ((flag_rounding_math
3537 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3538 	      && (inexact || !real_identical (&result, &value)))
3539 	    return NULL_RTX;
3540 
3541 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3542 	}
3543     }
3544 
3545   /* We can fold some multi-word operations.  */
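  /* For instance, on a host with 64-bit HOST_WIDE_INT this covers TImode
     constants, whose precision is HOST_BITS_PER_DOUBLE_INT (twice
     HOST_BITS_PER_WIDE_INT); the values are handled as double_ints.  */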
3546   if (GET_MODE_CLASS (mode) == MODE_INT
3547       && width == HOST_BITS_PER_DOUBLE_INT
3548       && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3549       && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3550     {
3551       double_int o0, o1, res, tmp;
3552 
3553       o0 = rtx_to_double_int (op0);
3554       o1 = rtx_to_double_int (op1);
3555 
3556       switch (code)
3557 	{
3558 	case MINUS:
3559 	  /* A - B == A + (-B).  */
3560 	  o1 = double_int_neg (o1);
3561 
3562 	  /* Fall through....  */
3563 
3564 	case PLUS:
3565 	  res = double_int_add (o0, o1);
3566 	  break;
3567 
3568 	case MULT:
3569 	  res = double_int_mul (o0, o1);
3570 	  break;
3571 
3572 	case DIV:
3573 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3574 				    o0.low, o0.high, o1.low, o1.high,
3575 				    &res.low, &res.high,
3576 				    &tmp.low, &tmp.high))
3577 	    return 0;
3578 	  break;
3579 
3580 	case MOD:
3581 	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3582 				    o0.low, o0.high, o1.low, o1.high,
3583 				    &tmp.low, &tmp.high,
3584 				    &res.low, &res.high))
3585 	    return 0;
3586 	  break;
3587 
3588 	case UDIV:
3589 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3590 				    o0.low, o0.high, o1.low, o1.high,
3591 				    &res.low, &res.high,
3592 				    &tmp.low, &tmp.high))
3593 	    return 0;
3594 	  break;
3595 
3596 	case UMOD:
3597 	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3598 				    o0.low, o0.high, o1.low, o1.high,
3599 				    &tmp.low, &tmp.high,
3600 				    &res.low, &res.high))
3601 	    return 0;
3602 	  break;
3603 
3604 	case AND:
3605 	  res = double_int_and (o0, o1);
3606 	  break;
3607 
3608 	case IOR:
3609 	  res = double_int_ior (o0, o1);
3610 	  break;
3611 
3612 	case XOR:
3613 	  res = double_int_xor (o0, o1);
3614 	  break;
3615 
3616 	case SMIN:
3617 	  res = double_int_smin (o0, o1);
3618 	  break;
3619 
3620 	case SMAX:
3621 	  res = double_int_smax (o0, o1);
3622 	  break;
3623 
3624 	case UMIN:
3625 	  res = double_int_umin (o0, o1);
3626 	  break;
3627 
3628 	case UMAX:
3629 	  res = double_int_umax (o0, o1);
3630 	  break;
3631 
3632 	case LSHIFTRT:   case ASHIFTRT:
3633 	case ASHIFT:
3634 	case ROTATE:     case ROTATERT:
3635 	  {
3636 	    unsigned HOST_WIDE_INT cnt;
3637 
3638 	    if (SHIFT_COUNT_TRUNCATED)
3639 	      o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
3640 
3641 	    if (!double_int_fits_in_uhwi_p (o1)
3642 	        || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
3643 	      return 0;
3644 
3645 	    cnt = double_int_to_uhwi (o1);
3646 
3647 	    if (code == LSHIFTRT || code == ASHIFTRT)
3648 	      res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
3649 				       code == ASHIFTRT);
3650 	    else if (code == ASHIFT)
3651 	      res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
3652 				       true);
3653 	    else if (code == ROTATE)
3654 	      res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
3655 	    else /* code == ROTATERT */
3656 	      res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
3657 	  }
3658 	  break;
3659 
3660 	default:
3661 	  return 0;
3662 	}
3663 
3664       return immed_double_int_const (res, mode);
3665     }
3666 
3667   if (CONST_INT_P (op0) && CONST_INT_P (op1)
3668       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3669     {
3670       /* Get the integer argument values in two forms:
3671          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
3672 
3673       arg0 = INTVAL (op0);
3674       arg1 = INTVAL (op1);
3675 
3676       if (width < HOST_BITS_PER_WIDE_INT)
3677         {
3678           arg0 &= GET_MODE_MASK (mode);
3679           arg1 &= GET_MODE_MASK (mode);
3680 
3681           arg0s = arg0;
3682 	  if (val_signbit_known_set_p (mode, arg0s))
3683 	    arg0s |= ~GET_MODE_MASK (mode);
3684 
3685           arg1s = arg1;
3686 	  if (val_signbit_known_set_p (mode, arg1s))
3687 	    arg1s |= ~GET_MODE_MASK (mode);
3688 	}
3689       else
3690 	{
3691 	  arg0s = arg0;
3692 	  arg1s = arg1;
3693 	}
3694 
3695       /* Compute the value of the arithmetic.  */
3696 
3697       switch (code)
3698 	{
3699 	case PLUS:
3700 	  val = arg0s + arg1s;
3701 	  break;
3702 
3703 	case MINUS:
3704 	  val = arg0s - arg1s;
3705 	  break;
3706 
3707 	case MULT:
3708 	  val = arg0s * arg1s;
3709 	  break;
3710 
3711 	case DIV:
3712 	  if (arg1s == 0
3713 	      || ((unsigned HOST_WIDE_INT) arg0s
3714 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3715 		  && arg1s == -1))
3716 	    return 0;
3717 	  val = arg0s / arg1s;
3718 	  break;
3719 
3720 	case MOD:
3721 	  if (arg1s == 0
3722 	      || ((unsigned HOST_WIDE_INT) arg0s
3723 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3724 		  && arg1s == -1))
3725 	    return 0;
3726 	  val = arg0s % arg1s;
3727 	  break;
3728 
3729 	case UDIV:
3730 	  if (arg1 == 0
3731 	      || ((unsigned HOST_WIDE_INT) arg0s
3732 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3733 		  && arg1s == -1))
3734 	    return 0;
3735 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3736 	  break;
3737 
3738 	case UMOD:
3739 	  if (arg1 == 0
3740 	      || ((unsigned HOST_WIDE_INT) arg0s
3741 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3742 		  && arg1s == -1))
3743 	    return 0;
3744 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3745 	  break;
3746 
3747 	case AND:
3748 	  val = arg0 & arg1;
3749 	  break;
3750 
3751 	case IOR:
3752 	  val = arg0 | arg1;
3753 	  break;
3754 
3755 	case XOR:
3756 	  val = arg0 ^ arg1;
3757 	  break;
3758 
3759 	case LSHIFTRT:
3760 	case ASHIFT:
3761 	case ASHIFTRT:
3762 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3763 	     the value is in range.  We can't return any old value for
3764 	     out-of-range arguments because either the middle-end (via
3765 	     shift_truncation_mask) or the back-end might be relying on
3766 	     target-specific knowledge.  Nor can we rely on
3767 	     shift_truncation_mask, since the shift might not be part of an
3768 	     ashlM3, lshrM3 or ashrM3 instruction.  */
3769 	  if (SHIFT_COUNT_TRUNCATED)
3770 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3771 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3772 	    return 0;
3773 
3774 	  val = (code == ASHIFT
3775 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3776 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3777 
3778 	  /* Sign-extend the result for arithmetic right shifts.  */
3779 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3780 	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
3781 	  break;
3782 
3783 	case ROTATERT:
3784 	  if (arg1 < 0)
3785 	    return 0;
3786 
3787 	  arg1 %= width;
3788 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3789 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3790 	  break;
3791 
3792 	case ROTATE:
3793 	  if (arg1 < 0)
3794 	    return 0;
3795 
3796 	  arg1 %= width;
3797 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3798 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3799 	  break;
3800 
3801 	case COMPARE:
3802 	  /* Do nothing here.  */
3803 	  return 0;
3804 
3805 	case SMIN:
3806 	  val = arg0s <= arg1s ? arg0s : arg1s;
3807 	  break;
3808 
3809 	case UMIN:
3810 	  val = ((unsigned HOST_WIDE_INT) arg0
3811 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3812 	  break;
3813 
3814 	case SMAX:
3815 	  val = arg0s > arg1s ? arg0s : arg1s;
3816 	  break;
3817 
3818 	case UMAX:
3819 	  val = ((unsigned HOST_WIDE_INT) arg0
3820 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3821 	  break;
3822 
3823 	case SS_PLUS:
3824 	case US_PLUS:
3825 	case SS_MINUS:
3826 	case US_MINUS:
3827 	case SS_MULT:
3828 	case US_MULT:
3829 	case SS_DIV:
3830 	case US_DIV:
3831 	case SS_ASHIFT:
3832 	case US_ASHIFT:
3833 	  /* ??? There are simplifications that can be done.  */
3834 	  return 0;
3835 
3836 	default:
3837 	  gcc_unreachable ();
3838 	}
3839 
3840       return gen_int_mode (val, mode);
3841     }
3842 
3843   return NULL_RTX;
3844 }
3845 
3846 
3847 
3848 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3849    PLUS or MINUS.
3850 
3851    Rather than test for specific cases, we do this by a brute-force method
3852    and do all possible simplifications until no more changes occur.  Then
3853    we rebuild the operation.  */
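
/* For example (illustrative): simplifying (MINUS (PLUS A B) B) collects
   the operand list {+A, +B, -B}; the B terms cancel in the combination
   loop below and the routine returns just A.  */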
3854 
3855 struct simplify_plus_minus_op_data
3856 {
3857   rtx op;
3858   short neg;
3859 };
3860 
3861 static bool
3862 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3863 {
3864   int result;
3865 
3866   result = (commutative_operand_precedence (y)
3867 	    - commutative_operand_precedence (x));
3868   if (result)
3869     return result > 0;
3870 
3871   /* Group together equal REGs to do more simplification.  */
3872   if (REG_P (x) && REG_P (y))
3873     return REGNO (x) > REGNO (y);
3874   else
3875     return false;
3876 }
3877 
3878 static rtx
3879 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3880 		     rtx op1)
3881 {
3882   struct simplify_plus_minus_op_data ops[8];
3883   rtx result, tem;
3884   int n_ops = 2, input_ops = 2;
3885   int changed, n_constants = 0, canonicalized = 0;
3886   int i, j;
3887 
3888   memset (ops, 0, sizeof ops);
3889 
3890   /* Set up the two operands and then expand them until nothing has been
3891      changed.  If we run out of room in our array, give up; this should
3892      almost never happen.  */
3893 
3894   ops[0].op = op0;
3895   ops[0].neg = 0;
3896   ops[1].op = op1;
3897   ops[1].neg = (code == MINUS);
3898 
3899   do
3900     {
3901       changed = 0;
3902 
3903       for (i = 0; i < n_ops; i++)
3904 	{
3905 	  rtx this_op = ops[i].op;
3906 	  int this_neg = ops[i].neg;
3907 	  enum rtx_code this_code = GET_CODE (this_op);
3908 
3909 	  switch (this_code)
3910 	    {
3911 	    case PLUS:
3912 	    case MINUS:
3913 	      if (n_ops == 7)
3914 		return NULL_RTX;
3915 
3916 	      ops[n_ops].op = XEXP (this_op, 1);
3917 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3918 	      n_ops++;
3919 
3920 	      ops[i].op = XEXP (this_op, 0);
3921 	      input_ops++;
3922 	      changed = 1;
3923 	      canonicalized |= this_neg;
3924 	      break;
3925 
3926 	    case NEG:
3927 	      ops[i].op = XEXP (this_op, 0);
3928 	      ops[i].neg = ! this_neg;
3929 	      changed = 1;
3930 	      canonicalized = 1;
3931 	      break;
3932 
3933 	    case CONST:
3934 	      if (n_ops < 7
3935 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
3936 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3937 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3938 		{
3939 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
3940 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3941 		  ops[n_ops].neg = this_neg;
3942 		  n_ops++;
3943 		  changed = 1;
3944 	          canonicalized = 1;
3945 		}
3946 	      break;
3947 
3948 	    case NOT:
3949 	      /* ~a -> (-a - 1) */
3950 	      if (n_ops != 7)
3951 		{
3952 		  ops[n_ops].op = CONSTM1_RTX (mode);
3953 		  ops[n_ops++].neg = this_neg;
3954 		  ops[i].op = XEXP (this_op, 0);
3955 		  ops[i].neg = !this_neg;
3956 		  changed = 1;
3957 	          canonicalized = 1;
3958 		}
3959 	      break;
3960 
3961 	    case CONST_INT:
3962 	      n_constants++;
3963 	      if (this_neg)
3964 		{
3965 		  ops[i].op = neg_const_int (mode, this_op);
3966 		  ops[i].neg = 0;
3967 		  changed = 1;
3968 	          canonicalized = 1;
3969 		}
3970 	      break;
3971 
3972 	    default:
3973 	      break;
3974 	    }
3975 	}
3976     }
3977   while (changed);
3978 
3979   if (n_constants > 1)
3980     canonicalized = 1;
3981 
3982   gcc_assert (n_ops >= 2);
3983 
3984   /* If we only have two operands, we can avoid the loops.  */
3985   if (n_ops == 2)
3986     {
3987       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3988       rtx lhs, rhs;
3989 
3990       /* Get the two operands.  Be careful with the order, especially for
3991 	 the cases where code == MINUS.  */
3992       if (ops[0].neg && ops[1].neg)
3993 	{
3994 	  lhs = gen_rtx_NEG (mode, ops[0].op);
3995 	  rhs = ops[1].op;
3996 	}
3997       else if (ops[0].neg)
3998 	{
3999 	  lhs = ops[1].op;
4000 	  rhs = ops[0].op;
4001 	}
4002       else
4003 	{
4004 	  lhs = ops[0].op;
4005 	  rhs = ops[1].op;
4006 	}
4007 
4008       return simplify_const_binary_operation (code, mode, lhs, rhs);
4009     }
4010 
4011   /* Now simplify each pair of operands until nothing changes.  */
4012   do
4013     {
4014       /* Insertion sort is good enough for an eight-element array.  */
4015       for (i = 1; i < n_ops; i++)
4016         {
4017           struct simplify_plus_minus_op_data save;
4018           j = i - 1;
4019           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4020 	    continue;
4021 
4022           canonicalized = 1;
4023           save = ops[i];
4024           do
4025 	    ops[j + 1] = ops[j];
4026           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4027           ops[j + 1] = save;
4028         }
4029 
4030       changed = 0;
4031       for (i = n_ops - 1; i > 0; i--)
4032 	for (j = i - 1; j >= 0; j--)
4033 	  {
4034 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4035 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4036 
4037 	    if (lhs != 0 && rhs != 0)
4038 	      {
4039 		enum rtx_code ncode = PLUS;
4040 
4041 		if (lneg != rneg)
4042 		  {
4043 		    ncode = MINUS;
4044 		    if (lneg)
4045 		      tem = lhs, lhs = rhs, rhs = tem;
4046 		  }
4047 		else if (swap_commutative_operands_p (lhs, rhs))
4048 		  tem = lhs, lhs = rhs, rhs = tem;
4049 
4050 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4051 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4052 		  {
4053 		    rtx tem_lhs, tem_rhs;
4054 
4055 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4056 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4057 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4058 
4059 		    if (tem && !CONSTANT_P (tem))
4060 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4061 		  }
4062 		else
4063 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4064 
4065 		/* Reject "simplifications" that just wrap the two
4066 		   arguments in a CONST.  Failure to do so can result
4067 		   in infinite recursion with simplify_binary_operation
4068 		   when it calls us to simplify CONST operations.  */
4069 		if (tem
4070 		    && ! (GET_CODE (tem) == CONST
4071 			  && GET_CODE (XEXP (tem, 0)) == ncode
4072 			  && XEXP (XEXP (tem, 0), 0) == lhs
4073 			  && XEXP (XEXP (tem, 0), 1) == rhs))
4074 		  {
4075 		    lneg &= rneg;
4076 		    if (GET_CODE (tem) == NEG)
4077 		      tem = XEXP (tem, 0), lneg = !lneg;
4078 		    if (CONST_INT_P (tem) && lneg)
4079 		      tem = neg_const_int (mode, tem), lneg = 0;
4080 
4081 		    ops[i].op = tem;
4082 		    ops[i].neg = lneg;
4083 		    ops[j].op = NULL_RTX;
4084 		    changed = 1;
4085 		    canonicalized = 1;
4086 		  }
4087 	      }
4088 	  }
4089 
4090       /* If nothing changed, fail.  */
4091       if (!canonicalized)
4092         return NULL_RTX;
4093 
4094       /* Pack all the operands to the lower-numbered entries.  */
4095       for (i = 0, j = 0; j < n_ops; j++)
4096         if (ops[j].op)
4097           {
4098 	    ops[i] = ops[j];
4099 	    i++;
4100           }
4101       n_ops = i;
4102     }
4103   while (changed);
4104 
4105   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4106   if (n_ops == 2
4107       && CONST_INT_P (ops[1].op)
4108       && CONSTANT_P (ops[0].op)
4109       && ops[0].neg)
4110     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4111 
4112   /* We suppressed creation of trivial CONST expressions in the
4113      combination loop to avoid recursion.  Create one manually now.
4114      The combination loop should have ensured that there is exactly
4115      one CONST_INT, and the sort will have ensured that it is last
4116      in the array and that any other constant will be next-to-last.  */
4117 
4118   if (n_ops > 1
4119       && CONST_INT_P (ops[n_ops - 1].op)
4120       && CONSTANT_P (ops[n_ops - 2].op))
4121     {
4122       rtx value = ops[n_ops - 1].op;
4123       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4124 	value = neg_const_int (mode, value);
4125       ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
4126       n_ops--;
4127     }
4128 
4129   /* Put a non-negated operand first, if possible.  */
4130 
4131   for (i = 0; i < n_ops && ops[i].neg; i++)
4132     continue;
4133   if (i == n_ops)
4134     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4135   else if (i != 0)
4136     {
4137       tem = ops[0].op;
4138       ops[0] = ops[i];
4139       ops[i].op = tem;
4140       ops[i].neg = 1;
4141     }
4142 
4143   /* Now make the result by performing the requested operations.  */
4144   result = ops[0].op;
4145   for (i = 1; i < n_ops; i++)
4146     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4147 			     mode, result, ops[i].op);
4148 
4149   return result;
4150 }
4151 
4152 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4153 static bool
4154 plus_minus_operand_p (const_rtx x)
4155 {
4156   return GET_CODE (x) == PLUS
4157          || GET_CODE (x) == MINUS
4158 	 || (GET_CODE (x) == CONST
4159 	     && GET_CODE (XEXP (x, 0)) == PLUS
4160 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4161 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4162 }
4163 
4164 /* Like simplify_binary_operation except used for relational operators.
4165    MODE is the mode of the result.  If MODE is VOIDmode, the operands must
4166    not both be VOIDmode.
4167 
4168    CMP_MODE specifies the mode in which the comparison is done, so it is
4169    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4170    the operands or, if both are VOIDmode, the operands are compared in
4171    "infinite precision".  */
4172 rtx
4173 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4174 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
4175 {
4176   rtx tem, trueop0, trueop1;
4177 
4178   if (cmp_mode == VOIDmode)
4179     cmp_mode = GET_MODE (op0);
4180   if (cmp_mode == VOIDmode)
4181     cmp_mode = GET_MODE (op1);
4182 
4183   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4184   if (tem)
4185     {
4186       if (SCALAR_FLOAT_MODE_P (mode))
4187 	{
4188           if (tem == const0_rtx)
4189             return CONST0_RTX (mode);
4190 #ifdef FLOAT_STORE_FLAG_VALUE
4191 	  {
4192 	    REAL_VALUE_TYPE val;
4193 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4194 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4195 	  }
4196 #else
4197 	  return NULL_RTX;
4198 #endif
4199 	}
4200       if (VECTOR_MODE_P (mode))
4201 	{
4202 	  if (tem == const0_rtx)
4203 	    return CONST0_RTX (mode);
4204 #ifdef VECTOR_STORE_FLAG_VALUE
4205 	  {
4206 	    int i, units;
4207 	    rtvec v;
4208 
4209 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4210 	    if (val == NULL_RTX)
4211 	      return NULL_RTX;
4212 	    if (val == const1_rtx)
4213 	      return CONST1_RTX (mode);
4214 
4215 	    units = GET_MODE_NUNITS (mode);
4216 	    v = rtvec_alloc (units);
4217 	    for (i = 0; i < units; i++)
4218 	      RTVEC_ELT (v, i) = val;
4219 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4220 	  }
4221 #else
4222 	  return NULL_RTX;
4223 #endif
4224 	}
4225 
4226       return tem;
4227     }
4228 
4229   /* For the following tests, ensure const0_rtx is op1.  */
4230   if (swap_commutative_operands_p (op0, op1)
4231       || (op0 == const0_rtx && op1 != const0_rtx))
4232     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4233 
4234   /* If op0 is a compare, extract the comparison arguments from it.  */
4235   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4236     return simplify_gen_relational (code, mode, VOIDmode,
4237 				    XEXP (op0, 0), XEXP (op0, 1));
4238 
4239   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4240       || CC0_P (op0))
4241     return NULL_RTX;
4242 
4243   trueop0 = avoid_constant_pool_reference (op0);
4244   trueop1 = avoid_constant_pool_reference (op1);
4245   return simplify_relational_operation_1 (code, mode, cmp_mode,
4246 		  			  trueop0, trueop1);
4247 }
4248 
4249 /* This part of simplify_relational_operation is only used when CMP_MODE
4250    is not in class MODE_CC (i.e. it is a real comparison).
4251 
4252    MODE is the mode of the result, while CMP_MODE specifies the mode in
4253    which the comparison is done, so it is the mode of the operands.  */
4254 
4255 static rtx
4256 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4257 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4258 {
4259   enum rtx_code op0code = GET_CODE (op0);
4260 
4261   if (op1 == const0_rtx && COMPARISON_P (op0))
4262     {
4263       /* If op0 is a comparison, extract the comparison arguments
4264          from it.  */
4265       if (code == NE)
4266 	{
4267 	  if (GET_MODE (op0) == mode)
4268 	    return simplify_rtx (op0);
4269 	  else
4270 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4271 					    XEXP (op0, 0), XEXP (op0, 1));
4272 	}
4273       else if (code == EQ)
4274 	{
4275 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4276 	  if (new_code != UNKNOWN)
4277 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4278 					    XEXP (op0, 0), XEXP (op0, 1));
4279 	}
4280     }
4281 
4282   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4283      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
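  /* For instance, (ltu (plus a (const_int 4)) (const_int 4)) becomes
     (geu a (const_int -4)); both test whether the addition wrapped.  */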
4284   if ((code == LTU || code == GEU)
4285       && GET_CODE (op0) == PLUS
4286       && CONST_INT_P (XEXP (op0, 1))
4287       && (rtx_equal_p (op1, XEXP (op0, 0))
4288 	  || rtx_equal_p (op1, XEXP (op0, 1))))
4289     {
4290       rtx new_cmp
4291 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4292       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4293 				      cmp_mode, XEXP (op0, 0), new_cmp);
4294     }
4295 
4296   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4297   if ((code == LTU || code == GEU)
4298       && GET_CODE (op0) == PLUS
4299       && rtx_equal_p (op1, XEXP (op0, 1))
4300       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4301       && !rtx_equal_p (op1, XEXP (op0, 0)))
4302     return simplify_gen_relational (code, mode, cmp_mode, op0,
4303 				    copy_rtx (XEXP (op0, 0)));
4304 
4305   if (op1 == const0_rtx)
4306     {
4307       /* Canonicalize (GTU x 0) as (NE x 0).  */
4308       if (code == GTU)
4309         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4310       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4311       if (code == LEU)
4312         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4313     }
4314   else if (op1 == const1_rtx)
4315     {
4316       switch (code)
4317         {
4318         case GE:
4319 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4320 	  return simplify_gen_relational (GT, mode, cmp_mode,
4321 					  op0, const0_rtx);
4322 	case GEU:
4323 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4324 	  return simplify_gen_relational (NE, mode, cmp_mode,
4325 					  op0, const0_rtx);
4326 	case LT:
4327 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4328 	  return simplify_gen_relational (LE, mode, cmp_mode,
4329 					  op0, const0_rtx);
4330 	case LTU:
4331 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4332 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4333 					  op0, const0_rtx);
4334 	default:
4335 	  break;
4336 	}
4337     }
4338   else if (op1 == constm1_rtx)
4339     {
4340       /* Canonicalize (LE x -1) as (LT x 0).  */
4341       if (code == LE)
4342         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4343       /* Canonicalize (GT x -1) as (GE x 0).  */
4344       if (code == GT)
4345         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4346     }
4347 
4348   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
4349   if ((code == EQ || code == NE)
4350       && (op0code == PLUS || op0code == MINUS)
4351       && CONSTANT_P (op1)
4352       && CONSTANT_P (XEXP (op0, 1))
4353       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4354     {
4355       rtx x = XEXP (op0, 0);
4356       rtx c = XEXP (op0, 1);
4357       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4358       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4359 
4360       /* Detect an infinite recursive condition, where we oscillate at this
4361 	 simplification case between:
4362 	    A + B == C  <--->  C - B == A,
4363 	 where A, B, and C are all non-simplifiable constant expressions,
4364 	 usually SYMBOL_REFs.  */
4365       if (GET_CODE (tem) == invcode
4366 	  && CONSTANT_P (x)
4367 	  && rtx_equal_p (c, XEXP (tem, 1)))
4368 	return NULL_RTX;
4369 
4370       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4371     }
4372 
4373   /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4374      the same as (zero_extract:SI FOO (const_int 1) BAR).  */
4375   if (code == NE
4376       && op1 == const0_rtx
4377       && GET_MODE_CLASS (mode) == MODE_INT
4378       && cmp_mode != VOIDmode
4379       /* ??? Work-around BImode bugs in the ia64 backend.  */
4380       && mode != BImode
4381       && cmp_mode != BImode
4382       && nonzero_bits (op0, cmp_mode) == 1
4383       && STORE_FLAG_VALUE == 1)
4384     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4385 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4386 	   : lowpart_subreg (mode, op0, cmp_mode);
4387 
4388   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4389   if ((code == EQ || code == NE)
4390       && op1 == const0_rtx
4391       && op0code == XOR)
4392     return simplify_gen_relational (code, mode, cmp_mode,
4393 				    XEXP (op0, 0), XEXP (op0, 1));
4394 
4395   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4396   if ((code == EQ || code == NE)
4397       && op0code == XOR
4398       && rtx_equal_p (XEXP (op0, 0), op1)
4399       && !side_effects_p (XEXP (op0, 0)))
4400     return simplify_gen_relational (code, mode, cmp_mode,
4401 				    XEXP (op0, 1), const0_rtx);
4402 
4403   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4404   if ((code == EQ || code == NE)
4405       && op0code == XOR
4406       && rtx_equal_p (XEXP (op0, 1), op1)
4407       && !side_effects_p (XEXP (op0, 1)))
4408     return simplify_gen_relational (code, mode, cmp_mode,
4409 				    XEXP (op0, 0), const0_rtx);
4410 
4411   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4412   if ((code == EQ || code == NE)
4413       && op0code == XOR
4414       && (CONST_INT_P (op1)
4415 	  || GET_CODE (op1) == CONST_DOUBLE)
4416       && (CONST_INT_P (XEXP (op0, 1))
4417 	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4418     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4419 				    simplify_gen_binary (XOR, cmp_mode,
4420 							 XEXP (op0, 1), op1));
4421 
4422   if (op0code == POPCOUNT && op1 == const0_rtx)
4423     switch (code)
4424       {
4425       case EQ:
4426       case LE:
4427       case LEU:
4428 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4429 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4430 					XEXP (op0, 0), const0_rtx);
4431 
4432       case NE:
4433       case GT:
4434       case GTU:
4435 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4436 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4437 					XEXP (op0, 0), const0_rtx);
4438 
4439       default:
4440 	break;
4441       }
4442 
4443   return NULL_RTX;
4444 }
4445 
4446 enum
4447 {
4448   CMP_EQ = 1,
4449   CMP_LT = 2,
4450   CMP_GT = 4,
4451   CMP_LTU = 8,
4452   CMP_GTU = 16
4453 };
4454 
4455 
4456 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4457    KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4458    For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4459    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4460    For floating-point comparisons, assume that the operands were ordered.  */
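
/* For example (illustrative): KNOWN_RESULTS == (CMP_LT | CMP_GTU) means
   OP0 is smaller than OP1 as a signed value but larger as an unsigned
   value, as when comparing -1 with 0.  */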
4461 
4462 static rtx
4463 comparison_result (enum rtx_code code, int known_results)
4464 {
4465   switch (code)
4466     {
4467     case EQ:
4468     case UNEQ:
4469       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4470     case NE:
4471     case LTGT:
4472       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4473 
4474     case LT:
4475     case UNLT:
4476       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4477     case GE:
4478     case UNGE:
4479       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4480 
4481     case GT:
4482     case UNGT:
4483       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4484     case LE:
4485     case UNLE:
4486       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4487 
4488     case LTU:
4489       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4490     case GEU:
4491       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4492 
4493     case GTU:
4494       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4495     case LEU:
4496       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4497 
4498     case ORDERED:
4499       return const_true_rtx;
4500     case UNORDERED:
4501       return const0_rtx;
4502     default:
4503       gcc_unreachable ();
4504     }
4505 }
4506 
4507 /* Check if the given comparison (done in the given MODE) is actually a
4508    tautology or a contradiction.
4509    If no simplification is possible, this function returns zero.
4510    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4511 
4512 rtx
4513 simplify_const_relational_operation (enum rtx_code code,
4514 				     enum machine_mode mode,
4515 				     rtx op0, rtx op1)
4516 {
4517   rtx tem;
4518   rtx trueop0;
4519   rtx trueop1;
4520 
4521   gcc_assert (mode != VOIDmode
4522 	      || (GET_MODE (op0) == VOIDmode
4523 		  && GET_MODE (op1) == VOIDmode));
4524 
4525   /* If op0 is a compare, extract the comparison arguments from it.  */
4526   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4527     {
4528       op1 = XEXP (op0, 1);
4529       op0 = XEXP (op0, 0);
4530 
4531       if (GET_MODE (op0) != VOIDmode)
4532 	mode = GET_MODE (op0);
4533       else if (GET_MODE (op1) != VOIDmode)
4534 	mode = GET_MODE (op1);
4535       else
4536 	return 0;
4537     }
4538 
4539   /* We can't simplify MODE_CC values since we don't know what the
4540      actual comparison is.  */
4541   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4542     return 0;
4543 
4544   /* Make sure the constant is second.  */
4545   if (swap_commutative_operands_p (op0, op1))
4546     {
4547       tem = op0, op0 = op1, op1 = tem;
4548       code = swap_condition (code);
4549     }
4550 
4551   trueop0 = avoid_constant_pool_reference (op0);
4552   trueop1 = avoid_constant_pool_reference (op1);
4553 
4554   /* For integer comparisons of A and B maybe we can simplify A - B and can
4555      then simplify a comparison of that with zero.  If A and B are both either
4556      a register or a CONST_INT, this can't help; testing for these cases will
4557      prevent infinite recursion here and speed things up.
4558 
4559      We can only do this for EQ and NE comparisons as otherwise we may
4560      lose or introduce overflow, which we cannot disregard as undefined,
4561      since we do not know the signedness of the operation on either the
4562      left- or the right-hand side of the comparison.  */
4563 
4564   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4565       && (code == EQ || code == NE)
4566       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4567 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4568       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4569       /* We cannot do this if tem is a nonzero address.  */
4570       && ! nonzero_address_p (tem))
4571     return simplify_const_relational_operation (signed_condition (code),
4572 						mode, tem, const0_rtx);
4573 
4574   if (! HONOR_NANS (mode) && code == ORDERED)
4575     return const_true_rtx;
4576 
4577   if (! HONOR_NANS (mode) && code == UNORDERED)
4578     return const0_rtx;
4579 
4580   /* For modes without NaNs, if the two operands are equal, we know the
4581      result except if they have side-effects.  Even with NaNs we know
4582      the result of unordered comparisons and, if signaling NaNs are
4583      irrelevant, also the result of LT/GT/LTGT.  */
4584   if ((! HONOR_NANS (GET_MODE (trueop0))
4585        || code == UNEQ || code == UNLE || code == UNGE
4586        || ((code == LT || code == GT || code == LTGT)
4587 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
4588       && rtx_equal_p (trueop0, trueop1)
4589       && ! side_effects_p (trueop0))
4590     return comparison_result (code, CMP_EQ);
4591 
4592   /* If the operands are floating-point constants, see if we can fold
4593      the result.  */
4594   if (GET_CODE (trueop0) == CONST_DOUBLE
4595       && GET_CODE (trueop1) == CONST_DOUBLE
4596       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4597     {
4598       REAL_VALUE_TYPE d0, d1;
4599 
4600       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4601       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4602 
4603       /* Comparisons are unordered iff at least one of the values is NaN.  */
4604       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4605 	switch (code)
4606 	  {
4607 	  case UNEQ:
4608 	  case UNLT:
4609 	  case UNGT:
4610 	  case UNLE:
4611 	  case UNGE:
4612 	  case NE:
4613 	  case UNORDERED:
4614 	    return const_true_rtx;
4615 	  case EQ:
4616 	  case LT:
4617 	  case GT:
4618 	  case LE:
4619 	  case GE:
4620 	  case LTGT:
4621 	  case ORDERED:
4622 	    return const0_rtx;
4623 	  default:
4624 	    return 0;
4625 	  }
4626 
4627       return comparison_result (code,
4628 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4629 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4630     }
4631 
4632   /* Otherwise, see if the operands are both integers.  */
4633   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4634        && (GET_CODE (trueop0) == CONST_DOUBLE
4635 	   || CONST_INT_P (trueop0))
4636        && (GET_CODE (trueop1) == CONST_DOUBLE
4637 	   || CONST_INT_P (trueop1)))
4638     {
4639       int width = GET_MODE_PRECISION (mode);
4640       HOST_WIDE_INT l0s, h0s, l1s, h1s;
4641       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4642 
4643       /* Get the two words comprising each integer constant.  */
4644       if (GET_CODE (trueop0) == CONST_DOUBLE)
4645 	{
4646 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4647 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4648 	}
4649       else
4650 	{
4651 	  l0u = l0s = INTVAL (trueop0);
4652 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
4653 	}
4654 
4655       if (GET_CODE (trueop1) == CONST_DOUBLE)
4656 	{
4657 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4658 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4659 	}
4660       else
4661 	{
4662 	  l1u = l1s = INTVAL (trueop1);
4663 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
4664 	}
4665 
4666       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4667 	 we have to sign or zero-extend the values.  */
4668       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4669 	{
4670 	  l0u &= GET_MODE_MASK (mode);
4671 	  l1u &= GET_MODE_MASK (mode);
4672 
4673 	  if (val_signbit_known_set_p (mode, l0s))
4674 	    l0s |= ~GET_MODE_MASK (mode);
4675 
4676 	  if (val_signbit_known_set_p (mode, l1s))
4677 	    l1s |= ~GET_MODE_MASK (mode);
4678 	}
4679       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4680 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4681 
4682       if (h0u == h1u && l0u == l1u)
4683 	return comparison_result (code, CMP_EQ);
4684       else
4685 	{
4686 	  int cr;
4687 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4688 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4689 	  return comparison_result (code, cr);
4690 	}
4691     }
4692 
4693   /* Optimize comparisons with upper and lower bounds.  */
4694   if (HWI_COMPUTABLE_MODE_P (mode)
4695       && CONST_INT_P (trueop1))
4696     {
4697       int sign;
4698       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4699       HOST_WIDE_INT val = INTVAL (trueop1);
4700       HOST_WIDE_INT mmin, mmax;
4701 
4702       if (code == GEU
4703 	  || code == LEU
4704 	  || code == GTU
4705 	  || code == LTU)
4706 	sign = 0;
4707       else
4708 	sign = 1;
4709 
4710       /* Get a reduced range if the sign bit is zero.  */
4711       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4712 	{
4713 	  mmin = 0;
4714 	  mmax = nonzero;
4715 	}
4716       else
4717 	{
4718 	  rtx mmin_rtx, mmax_rtx;
4719 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4720 
4721 	  mmin = INTVAL (mmin_rtx);
4722 	  mmax = INTVAL (mmax_rtx);
4723 	  if (sign)
4724 	    {
4725 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4726 
4727 	      mmin >>= (sign_copies - 1);
4728 	      mmax >>= (sign_copies - 1);
4729 	    }
4730 	}
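      /* For instance, if nonzero_bits reports that only the low 8 bits of
	 TRUEOP0 can be set (mask 0xff), then mmin == 0 and mmax == 255, so
	 e.g. (gtu x (const_int 255)) folds to const0_rtx below.  */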
4731 
4732       switch (code)
4733 	{
4734 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
4735 	case GEU:
4736 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4737 	    return const_true_rtx;
4738 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4739 	    return const0_rtx;
4740 	  break;
4741 	case GE:
4742 	  if (val <= mmin)
4743 	    return const_true_rtx;
4744 	  if (val > mmax)
4745 	    return const0_rtx;
4746 	  break;
4747 
4748 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
4749 	case LEU:
4750 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4751 	    return const_true_rtx;
4752 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4753 	    return const0_rtx;
4754 	  break;
4755 	case LE:
4756 	  if (val >= mmax)
4757 	    return const_true_rtx;
4758 	  if (val < mmin)
4759 	    return const0_rtx;
4760 	  break;
4761 
4762 	case EQ:
4763 	  /* x == y is always false for y out of range.  */
4764 	  if (val < mmin || val > mmax)
4765 	    return const0_rtx;
4766 	  break;
4767 
4768 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
4769 	case GTU:
4770 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4771 	    return const0_rtx;
4772 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4773 	    return const_true_rtx;
4774 	  break;
4775 	case GT:
4776 	  if (val >= mmax)
4777 	    return const0_rtx;
4778 	  if (val < mmin)
4779 	    return const_true_rtx;
4780 	  break;
4781 
4782 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
4783 	case LTU:
4784 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4785 	    return const0_rtx;
4786 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4787 	    return const_true_rtx;
4788 	  break;
4789 	case LT:
4790 	  if (val <= mmin)
4791 	    return const0_rtx;
4792 	  if (val > mmax)
4793 	    return const_true_rtx;
4794 	  break;
4795 
4796 	case NE:
4797 	  /* x != y is always true for y out of range.  */
4798 	  if (val < mmin || val > mmax)
4799 	    return const_true_rtx;
4800 	  break;
4801 
4802 	default:
4803 	  break;
4804 	}
4805     }
4806 
4807   /* Optimize integer comparisons with zero.  */
4808   if (trueop1 == const0_rtx)
4809     {
4810       /* Some addresses are known to be nonzero.  We don't know
4811 	 their sign, but equality comparisons are known.  */
4812       if (nonzero_address_p (trueop0))
4813 	{
4814 	  if (code == EQ || code == LEU)
4815 	    return const0_rtx;
4816 	  if (code == NE || code == GTU)
4817 	    return const_true_rtx;
4818 	}
4819 
4820       /* See if the first operand is an IOR with a constant.  If so, we
4821 	 may be able to determine the result of this comparison.  */
4822       if (GET_CODE (op0) == IOR)
4823 	{
4824 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4825 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4826 	    {
4827 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
4828 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4829 			      && (UINTVAL (inner_const)
4830 				  & ((unsigned HOST_WIDE_INT) 1
4831 				     << sign_bitnum)));
4832 
4833 	      switch (code)
4834 		{
4835 		case EQ:
4836 		case LEU:
4837 		  return const0_rtx;
4838 		case NE:
4839 		case GTU:
4840 		  return const_true_rtx;
4841 		case LT:
4842 		case LE:
4843 		  if (has_sign)
4844 		    return const_true_rtx;
4845 		  break;
4846 		case GT:
4847 		case GE:
4848 		  if (has_sign)
4849 		    return const0_rtx;
4850 		  break;
4851 		default:
4852 		  break;
4853 		}
4854 	    }
4855 	}
4856     }
4857 
4858   /* Optimize comparison of ABS with zero.  */
4859   if (trueop1 == CONST0_RTX (mode)
4860       && (GET_CODE (trueop0) == ABS
4861 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
4862 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4863     {
4864       switch (code)
4865 	{
4866 	case LT:
4867 	  /* Optimize abs(x) < 0.0.  */
4868 	  if (!HONOR_SNANS (mode)
4869 	      && (!INTEGRAL_MODE_P (mode)
4870 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4871 	    {
4872 	      if (INTEGRAL_MODE_P (mode)
4873 		  && (issue_strict_overflow_warning
4874 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4875 		warning (OPT_Wstrict_overflow,
4876 			 ("assuming signed overflow does not occur when "
4877 			  "assuming abs (x) < 0 is false"));
4878 	       return const0_rtx;
4879 	    }
4880 	  break;
4881 
4882 	case GE:
4883 	  /* Optimize abs(x) >= 0.0.  */
4884 	  if (!HONOR_NANS (mode)
4885 	      && (!INTEGRAL_MODE_P (mode)
4886 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4887 	    {
4888 	      if (INTEGRAL_MODE_P (mode)
4889 	          && (issue_strict_overflow_warning
4890 	    	  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4891 	        warning (OPT_Wstrict_overflow,
4892 			 ("assuming signed overflow does not occur when "
4893 			  "assuming abs (x) >= 0 is true"));
4894 	      return const_true_rtx;
4895 	    }
4896 	  break;
4897 
4898 	case UNGE:
4899 	  /* Optimize ! (abs(x) < 0.0).  */
4900 	  return const_true_rtx;
4901 
4902 	default:
4903 	  break;
4904 	}
4905     }
4906 
4907   return 0;
4908 }
4909 
4910 /* Simplify CODE, an operation with result mode MODE and three operands,
4911    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4912    a constant.  Return 0 if no simplification is possible.  */
4913 
4914 rtx
4915 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4916 			    enum machine_mode op0_mode, rtx op0, rtx op1,
4917 			    rtx op2)
4918 {
4919   unsigned int width = GET_MODE_PRECISION (mode);
4920   bool any_change = false;
4921   rtx tem;
4922 
4923   /* VOIDmode means "infinite" precision.  */
4924   if (width == 0)
4925     width = HOST_BITS_PER_WIDE_INT;
4926 
4927   switch (code)
4928     {
4929     case FMA:
4930       /* Simplify negations around the multiplication.  */
4931       /* -a * -b + c  =>  a * b + c.  */
4932       if (GET_CODE (op0) == NEG)
4933 	{
4934 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
4935 	  if (tem)
4936 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
4937 	}
4938       else if (GET_CODE (op1) == NEG)
4939 	{
4940 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
4941 	  if (tem)
4942 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
4943 	}
4944 
4945       /* Canonicalize the two multiplication operands.  */
4946       /* a * -b + c  =>  -b * a + c.  */
4947       if (swap_commutative_operands_p (op0, op1))
4948 	tem = op0, op0 = op1, op1 = tem, any_change = true;
4949 
4950       if (any_change)
4951 	return gen_rtx_FMA (mode, op0, op1, op2);
4952       return NULL_RTX;
4953 
4954     case SIGN_EXTRACT:
4955     case ZERO_EXTRACT:
4956       if (CONST_INT_P (op0)
4957 	  && CONST_INT_P (op1)
4958 	  && CONST_INT_P (op2)
4959 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4960 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4961 	{
4962 	  /* Extracting a bit-field from a constant.  */
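	  /* For example (illustrative, BITS_BIG_ENDIAN == 0):
	     (zero_extract:SI (const_int 0x123) (const_int 4) (const_int 4))
	     shifts right by 4 and masks to the low 4 bits, giving
	     (const_int 2).  */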
4963 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
4964 	  HOST_WIDE_INT op1val = INTVAL (op1);
4965 	  HOST_WIDE_INT op2val = INTVAL (op2);
4966 	  if (BITS_BIG_ENDIAN)
4967 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
4968 	  else
4969 	    val >>= op2val;
4970 
4971 	  if (HOST_BITS_PER_WIDE_INT != op1val)
4972 	    {
4973 	      /* First zero-extend.  */
4974 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
4975 	      /* If desired, propagate sign bit.  */
4976 	      if (code == SIGN_EXTRACT
4977 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
4978 		     != 0)
4979 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
4980 	    }
4981 
4982 	  return gen_int_mode (val, mode);
4983 	}
4984       break;
4985 
4986     case IF_THEN_ELSE:
4987       if (CONST_INT_P (op0))
4988 	return op0 != const0_rtx ? op1 : op2;
4989 
4990       /* Convert c ? a : a into "a".  */
4991       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4992 	return op1;
4993 
4994       /* Convert a != b ? a : b into "a".  */
4995       if (GET_CODE (op0) == NE
4996 	  && ! side_effects_p (op0)
4997 	  && ! HONOR_NANS (mode)
4998 	  && ! HONOR_SIGNED_ZEROS (mode)
4999 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5000 	       && rtx_equal_p (XEXP (op0, 1), op2))
5001 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5002 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5003 	return op1;
5004 
5005       /* Convert a == b ? a : b into "b".  */
5006       if (GET_CODE (op0) == EQ
5007 	  && ! side_effects_p (op0)
5008 	  && ! HONOR_NANS (mode)
5009 	  && ! HONOR_SIGNED_ZEROS (mode)
5010 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5011 	       && rtx_equal_p (XEXP (op0, 1), op2))
5012 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5013 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5014 	return op2;
5015 
5016       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5017 	{
5018 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5019 					? GET_MODE (XEXP (op0, 1))
5020 					: GET_MODE (XEXP (op0, 0)));
5021 	  rtx temp;
5022 
5023 	  /* Look for happy constants in op1 and op2.  */
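	  /* For instance (assuming STORE_FLAG_VALUE == 1, as on most
	     targets), (if_then_else (lt x y) (const_int 1) (const_int 0))
	     simplifies to (lt x y), and with the arms swapped it becomes
	     the reversed comparison (ge x y).  */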
5024 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5025 	    {
5026 	      HOST_WIDE_INT t = INTVAL (op1);
5027 	      HOST_WIDE_INT f = INTVAL (op2);
5028 
5029 	      if (t == STORE_FLAG_VALUE && f == 0)
5030 	        code = GET_CODE (op0);
5031 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5032 		{
5033 		  enum rtx_code tmp;
5034 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5035 		  if (tmp == UNKNOWN)
5036 		    break;
5037 		  code = tmp;
5038 		}
5039 	      else
5040 		break;
5041 
5042 	      return simplify_gen_relational (code, mode, cmp_mode,
5043 					      XEXP (op0, 0), XEXP (op0, 1));
5044 	    }
5045 
5046 	  if (cmp_mode == VOIDmode)
5047 	    cmp_mode = op0_mode;
5048 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5049 			  			cmp_mode, XEXP (op0, 0),
5050 						XEXP (op0, 1));
5051 
5052 	  /* See if any simplifications were possible.  */
5053 	  if (temp)
5054 	    {
5055 	      if (CONST_INT_P (temp))
5056 		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5059 	    }
5060 	}
5061       break;
5062 
5063     case VEC_MERGE:
5064       gcc_assert (GET_MODE (op0) == mode);
5065       gcc_assert (GET_MODE (op1) == mode);
5066       gcc_assert (VECTOR_MODE_P (mode));
5067       op2 = avoid_constant_pool_reference (op2);
5068       if (CONST_INT_P (op2))
5069 	{
5070           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5071 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5072 	  int mask = (1 << n_elts) - 1;
5073 
5074 	  if (!(INTVAL (op2) & mask))
5075 	    return op1;
5076 	  if ((INTVAL (op2) & mask) == mask)
5077 	    return op0;
5078 
5079 	  op0 = avoid_constant_pool_reference (op0);
5080 	  op1 = avoid_constant_pool_reference (op1);
5081 	  if (GET_CODE (op0) == CONST_VECTOR
5082 	      && GET_CODE (op1) == CONST_VECTOR)
5083 	    {
5084 	      rtvec v = rtvec_alloc (n_elts);
5085 	      unsigned int i;
5086 
5087 	      for (i = 0; i < n_elts; i++)
5088 		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5089 				    ? CONST_VECTOR_ELT (op0, i)
5090 				    : CONST_VECTOR_ELT (op1, i));
5091 	      return gen_rtx_CONST_VECTOR (mode, v);
5092 	    }
5093 	}
5094       break;
5095 
5096     default:
5097       gcc_unreachable ();
5098     }
5099 
5100   return 0;
5101 }
5102 
5103 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5104    or CONST_VECTOR,
5105    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5106 
5107    Works by unpacking OP into a collection of 8-bit values
5108    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5109    and then repacking them again for OUTERMODE.  */
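/* For example, on a little-endian target, taking a SUBREG:HI at byte 0 of
   the SImode constant 0x12345678 unpacks the bytes 78 56 34 12, selects the
   first two, and repacks them as (const_int 0x5678).  (Illustrative; which
   bytes are selected depends on BYTES_BIG_ENDIAN and WORDS_BIG_ENDIAN.)  */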
5110 
5111 static rtx
5112 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5113 		       enum machine_mode innermode, unsigned int byte)
5114 {
5115   /* We support up to 512-bit values (for V8DFmode).  */
5116   enum {
5117     max_bitsize = 512,
5118     value_bit = 8,
5119     value_mask = (1 << value_bit) - 1
5120   };
5121   unsigned char value[max_bitsize / value_bit];
5122   int value_start;
5123   int i;
5124   int elem;
5125 
5126   int num_elem;
5127   rtx * elems;
5128   int elem_bitsize;
5129   rtx result_s;
5130   rtvec result_v = NULL;
5131   enum mode_class outer_class;
5132   enum machine_mode outer_submode;
5133 
5134   /* Some ports misuse CCmode.  */
5135   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5136     return op;
5137 
5138   /* We have no way to represent a complex constant at the rtl level.  */
5139   if (COMPLEX_MODE_P (outermode))
5140     return NULL_RTX;
5141 
5142   /* Unpack the value.  */
5143 
5144   if (GET_CODE (op) == CONST_VECTOR)
5145     {
5146       num_elem = CONST_VECTOR_NUNITS (op);
5147       elems = &CONST_VECTOR_ELT (op, 0);
5148       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5149     }
5150   else
5151     {
5152       num_elem = 1;
5153       elems = &op;
5154       elem_bitsize = max_bitsize;
5155     }
5156   /* If this asserts, it is too complicated; reducing value_bit may help.  */
5157   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5158   /* I don't know how to handle endianness of sub-units.  */
5159   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5160 
5161   for (elem = 0; elem < num_elem; elem++)
5162     {
5163       unsigned char * vp;
5164       rtx el = elems[elem];
5165 
5166       /* Vectors are kept in target memory order.  (This is probably
5167 	 a mistake.)  */
5168       {
5169 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5170 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5171 			  / BITS_PER_UNIT);
5172 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5173 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5174 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5175 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5176 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5177       }
5178 
5179       switch (GET_CODE (el))
5180 	{
5181 	case CONST_INT:
5182 	  for (i = 0;
5183 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5184 	       i += value_bit)
5185 	    *vp++ = INTVAL (el) >> i;
5186 	  /* CONST_INTs are always logically sign-extended.  */
5187 	  for (; i < elem_bitsize; i += value_bit)
5188 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5189 	  break;
5190 
5191 	case CONST_DOUBLE:
5192 	  if (GET_MODE (el) == VOIDmode)
5193 	    {
5194 	      /* If this triggers, someone should have generated a
5195 		 CONST_INT instead.  */
5196 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5197 
5198 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5199 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5200 	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
5201 		{
5202 		  *vp++
5203 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5204 		  i += value_bit;
5205 		}
5206 	      /* It shouldn't matter what's done here, so fill it with
5207 		 zero.  */
5208 	      for (; i < elem_bitsize; i += value_bit)
5209 		*vp++ = 0;
5210 	    }
5211 	  else
5212 	    {
5213 	      long tmp[max_bitsize / 32];
5214 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5215 
5216 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5217 	      gcc_assert (bitsize <= elem_bitsize);
5218 	      gcc_assert (bitsize % value_bit == 0);
5219 
5220 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5221 			      GET_MODE (el));
5222 
5223 	      /* real_to_target produces its result in words affected by
5224 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5225 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5226 	         of SUBREG in rtl.texi.  */
5227 	      for (i = 0; i < bitsize; i += value_bit)
5228 		{
5229 		  int ibase;
5230 		  if (WORDS_BIG_ENDIAN)
5231 		    ibase = bitsize - 1 - i;
5232 		  else
5233 		    ibase = i;
5234 		  *vp++ = tmp[ibase / 32] >> i % 32;
5235 		}
5236 
5237 	      /* It shouldn't matter what's done here, so fill it with
5238 		 zero.  */
5239 	      for (; i < elem_bitsize; i += value_bit)
5240 		*vp++ = 0;
5241 	    }
5242 	  break;
5243 
5244         case CONST_FIXED:
5245 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5246 	    {
5247 	      for (i = 0; i < elem_bitsize; i += value_bit)
5248 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5249 	    }
5250 	  else
5251 	    {
5252 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5253 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5254               for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5255 		   i += value_bit)
5256 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5257 			>> (i - HOST_BITS_PER_WIDE_INT);
5258 	      for (; i < elem_bitsize; i += value_bit)
5259 		*vp++ = 0;
5260 	    }
5261           break;
5262 
5263 	default:
5264 	  gcc_unreachable ();
5265 	}
5266     }
5267 
5268   /* Now, pick the right byte to start with.  */
5269   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5270      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5271      will already have offset 0.  */
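  /* For instance, on a target where both WORDS_BIG_ENDIAN and
     BYTES_BIG_ENDIAN hold, extracting an HImode value at byte 2 of an
     SImode inner value renumbers BYTE to 0, i.e. the least-significant
     half.  (Illustrative.)  */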
5272   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5273     {
5274       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5275 			- byte);
5276       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5277       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5278       byte = (subword_byte % UNITS_PER_WORD
5279 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5280     }
5281 
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned, so a
     computation that would have made it negative instead wraps around to a
     very large value.)  */
5284   gcc_assert (byte < GET_MODE_SIZE (innermode));
5285 
5286   /* Convert from bytes to chunks of size value_bit.  */
5287   value_start = byte * (BITS_PER_UNIT / value_bit);
5288 
5289   /* Re-pack the value.  */
5290 
5291   if (VECTOR_MODE_P (outermode))
5292     {
5293       num_elem = GET_MODE_NUNITS (outermode);
5294       result_v = rtvec_alloc (num_elem);
5295       elems = &RTVEC_ELT (result_v, 0);
5296       outer_submode = GET_MODE_INNER (outermode);
5297     }
5298   else
5299     {
5300       num_elem = 1;
5301       elems = &result_s;
5302       outer_submode = outermode;
5303     }
5304 
5305   outer_class = GET_MODE_CLASS (outer_submode);
5306   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5307 
5308   gcc_assert (elem_bitsize % value_bit == 0);
5309   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5310 
5311   for (elem = 0; elem < num_elem; elem++)
5312     {
5313       unsigned char *vp;
5314 
5315       /* Vectors are stored in target memory order.  (This is probably
5316 	 a mistake.)  */
5317       {
5318 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5319 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5320 			  / BITS_PER_UNIT);
5321 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5322 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5323 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5324 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5325 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5326       }
5327 
5328       switch (outer_class)
5329 	{
5330 	case MODE_INT:
5331 	case MODE_PARTIAL_INT:
5332 	  {
5333 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5334 
5335 	    for (i = 0;
5336 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5337 		 i += value_bit)
5338 	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5339 	    for (; i < elem_bitsize; i += value_bit)
5340 	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5341 		     << (i - HOST_BITS_PER_WIDE_INT);
5342 
5343 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5344 	       know why.  */
5345 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5346 	      elems[elem] = gen_int_mode (lo, outer_submode);
5347 	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
5348 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5349 	    else
5350 	      return NULL_RTX;
5351 	  }
5352 	  break;
5353 
5354 	case MODE_FLOAT:
5355 	case MODE_DECIMAL_FLOAT:
5356 	  {
5357 	    REAL_VALUE_TYPE r;
5358 	    long tmp[max_bitsize / 32];
5359 
5360 	    /* real_from_target wants its input in words affected by
5361 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5362 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5363 	       of SUBREG in rtl.texi.  */
5364 	    for (i = 0; i < max_bitsize / 32; i++)
5365 	      tmp[i] = 0;
5366 	    for (i = 0; i < elem_bitsize; i += value_bit)
5367 	      {
5368 		int ibase;
5369 		if (WORDS_BIG_ENDIAN)
5370 		  ibase = elem_bitsize - 1 - i;
5371 		else
5372 		  ibase = i;
5373 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5374 	      }
5375 
5376 	    real_from_target (&r, tmp, outer_submode);
5377 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5378 	  }
5379 	  break;
5380 
5381 	case MODE_FRACT:
5382 	case MODE_UFRACT:
5383 	case MODE_ACCUM:
5384 	case MODE_UACCUM:
5385 	  {
5386 	    FIXED_VALUE_TYPE f;
5387 	    f.data.low = 0;
5388 	    f.data.high = 0;
5389 	    f.mode = outer_submode;
5390 
5391 	    for (i = 0;
5392 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5393 		 i += value_bit)
5394 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5395 	    for (; i < elem_bitsize; i += value_bit)
5396 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5397 			     << (i - HOST_BITS_PER_WIDE_INT));
5398 
5399 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5400           }
5401           break;
5402 
5403 	default:
5404 	  gcc_unreachable ();
5405 	}
5406     }
5407   if (VECTOR_MODE_P (outermode))
5408     return gen_rtx_CONST_VECTOR (outermode, result_v);
5409   else
5410     return result_s;
5411 }
5412 
5413 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5414    Return 0 if no simplifications are possible.  */
5415 rtx
5416 simplify_subreg (enum machine_mode outermode, rtx op,
5417 		 enum machine_mode innermode, unsigned int byte)
5418 {
5419   /* Little bit of sanity checking.  */
5420   gcc_assert (innermode != VOIDmode);
5421   gcc_assert (outermode != VOIDmode);
5422   gcc_assert (innermode != BLKmode);
5423   gcc_assert (outermode != BLKmode);
5424 
5425   gcc_assert (GET_MODE (op) == innermode
5426 	      || GET_MODE (op) == VOIDmode);
5427 
5428   gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5429   gcc_assert (byte < GET_MODE_SIZE (innermode));
5430 
5431   if (outermode == innermode && !byte)
5432     return op;
5433 
5434   if (CONST_INT_P (op)
5435       || GET_CODE (op) == CONST_DOUBLE
5436       || GET_CODE (op) == CONST_FIXED
5437       || GET_CODE (op) == CONST_VECTOR)
5438     return simplify_immed_subreg (outermode, op, innermode, byte);
5439 
5440   /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
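  /* For example, on a little-endian target with X a pseudo register,
     (subreg:QI (subreg:HI (reg:SI x) 0) 0) becomes (subreg:QI (reg:SI x) 0),
     and (subreg:SI (subreg:HI (reg:SI x) 0) 0) collapses back to
     (reg:SI x).  (Illustrative.)  */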
5442   if (GET_CODE (op) == SUBREG)
5443     {
5444       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5445       int final_offset = byte + SUBREG_BYTE (op);
5446       rtx newx;
5447 
5448       if (outermode == innermostmode
5449 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5450 	return SUBREG_REG (op);
5451 
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0; on big-endian machines this
	 value would otherwise be negative.  For a moment, undo this
	 exception.  */
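      /* E.g., on a big-endian target with 4-byte words, a paradoxical
	 (subreg:DI (x:SI) 0) conceptually begins 4 bytes before X, so
	 4 is subtracted back out of FINAL_OFFSET here.  (Illustrative.)  */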
5456       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5457 	{
5458 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5459 	  if (WORDS_BIG_ENDIAN)
5460 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5461 	  if (BYTES_BIG_ENDIAN)
5462 	    final_offset += difference % UNITS_PER_WORD;
5463 	}
5464       if (SUBREG_BYTE (op) == 0
5465 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5466 	{
5467 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5468 	  if (WORDS_BIG_ENDIAN)
5469 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5470 	  if (BYTES_BIG_ENDIAN)
5471 	    final_offset += difference % UNITS_PER_WORD;
5472 	}
5473 
5474       /* See whether resulting subreg will be paradoxical.  */
5475       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5476 	{
5477 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5478 	  if (final_offset < 0)
5479 	    return NULL_RTX;
5480 	  /* Bail out in case resulting subreg would be incorrect.  */
5481 	  if (final_offset % GET_MODE_SIZE (outermode)
5482 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5483 	    return NULL_RTX;
5484 	}
5485       else
5486 	{
5487 	  int offset = 0;
5488 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5489 
	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5492 	  if (WORDS_BIG_ENDIAN)
5493 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5494 	  if (BYTES_BIG_ENDIAN)
5495 	    offset += difference % UNITS_PER_WORD;
5496 	  if (offset == final_offset)
5497 	    final_offset = 0;
5498 	  else
5499 	    return NULL_RTX;
5500 	}
5501 
5502       /* Recurse for further possible simplifications.  */
5503       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5504 			      final_offset);
5505       if (newx)
5506 	return newx;
5507       if (validate_subreg (outermode, innermostmode,
5508 			   SUBREG_REG (op), final_offset))
5509 	{
5510 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5511 	  if (SUBREG_PROMOTED_VAR_P (op)
5512 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5513 	      && GET_MODE_CLASS (outermode) == MODE_INT
5514 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5515 			   GET_MODE_SIZE (innermode),
5516 			   GET_MODE_SIZE (innermostmode))
5517 	      && subreg_lowpart_p (newx))
5518 	    {
5519 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5520 	      SUBREG_PROMOTED_UNSIGNED_SET
5521 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5522 	    }
5523 	  return newx;
5524 	}
5525       return NULL_RTX;
5526     }
5527 
5528   /* Merge implicit and explicit truncations.  */
5529 
5530   if (GET_CODE (op) == TRUNCATE
5531       && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5532       && subreg_lowpart_offset (outermode, innermode) == byte)
5533     return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5534 			       GET_MODE (XEXP (op, 0)));
5535 
5536   /* SUBREG of a hard register => just change the register number
5537      and/or mode.  If the hard register is not valid in that mode,
5538      suppress this simplification.  If the hard register is the stack,
5539      frame, or argument pointer, leave this as a SUBREG.  */
5540 
5541   if (REG_P (op) && HARD_REGISTER_P (op))
5542     {
5543       unsigned int regno, final_regno;
5544 
5545       regno = REGNO (op);
5546       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5547       if (HARD_REGISTER_NUM_P (final_regno))
5548 	{
5549 	  rtx x;
5550 	  int final_offset = byte;
5551 
5552 	  /* Adjust offset for paradoxical subregs.  */
5553 	  if (byte == 0
5554 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5555 	    {
5556 	      int difference = (GET_MODE_SIZE (innermode)
5557 				- GET_MODE_SIZE (outermode));
5558 	      if (WORDS_BIG_ENDIAN)
5559 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5560 	      if (BYTES_BIG_ENDIAN)
5561 		final_offset += difference % UNITS_PER_WORD;
5562 	    }
5563 
5564 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5565 
	  /* Propagate the original regno.  We don't have any way to specify
	     an offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
5570 
5571 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5572 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5573 	  return x;
5574 	}
5575     }
5576 
5577   /* If we have a SUBREG of a register that we are replacing and we are
5578      replacing it with a MEM, make a new MEM and try replacing the
5579      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5580      or if we would be widening it.  */
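  /* E.g., (subreg:QI (mem:SI A) 3) can be rewritten as a QImode reference
     at A plus 3, provided A is not a mode-dependent address.
     (Illustrative.)  */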
5581 
5582   if (MEM_P (op)
5583       && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
5586       && (! MEM_VOLATILE_P (op)
5587 	  || ! have_insn_for (SET, innermode))
5588       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5589     return adjust_address_nv (op, outermode, byte);
5590 
5591   /* Handle complex values represented as CONCAT
5592      of real and imaginary part.  */
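  /* For instance, assuming 4-byte SFmode,
     (subreg:SF (concat:SC (reg:SF re) (reg:SF im)) 4) selects the
     imaginary half and simplifies to (reg:SF im).  (Illustrative.)  */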
5593   if (GET_CODE (op) == CONCAT)
5594     {
5595       unsigned int part_size, final_offset;
5596       rtx part, res;
5597 
5598       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5599       if (byte < part_size)
5600 	{
5601 	  part = XEXP (op, 0);
5602 	  final_offset = byte;
5603 	}
5604       else
5605 	{
5606 	  part = XEXP (op, 1);
5607 	  final_offset = byte - part_size;
5608 	}
5609 
5610       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5611 	return NULL_RTX;
5612 
5613       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5614       if (res)
5615 	return res;
5616       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5617 	return gen_rtx_SUBREG (outermode, part, final_offset);
5618       return NULL_RTX;
5619     }
5620 
5621   /* Optimize SUBREG truncations of zero and sign extended values.  */
5622   if ((GET_CODE (op) == ZERO_EXTEND
5623        || GET_CODE (op) == SIGN_EXTEND)
5624       && SCALAR_INT_MODE_P (innermode)
5625       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
5626     {
5627       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5628 
5629       /* If we're requesting the lowpart of a zero or sign extension,
5630 	 there are three possibilities.  If the outermode is the same
5631 	 as the origmode, we can omit both the extension and the subreg.
5632 	 If the outermode is not larger than the origmode, we can apply
5633 	 the truncation without the extension.  Finally, if the outermode
5634 	 is larger than the origmode, but both are integer modes, we
5635 	 can just extend to the appropriate mode.  */
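      /* For example, on a little-endian target with X a pseudo register:
	 (subreg:HI (zero_extend:SI (reg:HI x)) 0) becomes (reg:HI x);
	 (subreg:QI (zero_extend:SI (reg:HI x)) 0) becomes
	 (subreg:QI (reg:HI x) 0); and
	 (subreg:SI (zero_extend:DI (reg:HI x)) 0) becomes
	 (zero_extend:SI (reg:HI x)).  (Illustrative.)  */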
5636       if (bitpos == 0)
5637 	{
5638 	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5639 	  if (outermode == origmode)
5640 	    return XEXP (op, 0);
5641 	  if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
5642 	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
5643 					subreg_lowpart_offset (outermode,
5644 							       origmode));
5645 	  if (SCALAR_INT_MODE_P (outermode))
5646 	    return simplify_gen_unary (GET_CODE (op), outermode,
5647 				       XEXP (op, 0), origmode);
5648 	}
5649 
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
5652       if (GET_CODE (op) == ZERO_EXTEND
5653 	  && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5654 	return CONST0_RTX (outermode);
5655     }
5656 
5657   /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5659      the outer subreg is effectively a truncation to the original mode.  */
5660   if ((GET_CODE (op) == LSHIFTRT
5661        || GET_CODE (op) == ASHIFTRT)
5662       && SCALAR_INT_MODE_P (outermode)
5663       && SCALAR_INT_MODE_P (innermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
5665 	 to avoid the possibility that an outer LSHIFTRT shifts by more
5666 	 than the sign extension's sign_bit_copies and introduces zeros
5667 	 into the high bits of the result.  */
5668       && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
5669       && CONST_INT_P (XEXP (op, 1))
5670       && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5671       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5672       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5673       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5674     return simplify_gen_binary (ASHIFTRT, outermode,
5675 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5676 
5677   /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5679      the outer subreg is effectively a truncation to the original mode.  */
5680   if ((GET_CODE (op) == LSHIFTRT
5681        || GET_CODE (op) == ASHIFTRT)
5682       && SCALAR_INT_MODE_P (outermode)
5683       && SCALAR_INT_MODE_P (innermode)
5684       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5685       && CONST_INT_P (XEXP (op, 1))
5686       && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5687       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5688       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5689       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5690     return simplify_gen_binary (LSHIFTRT, outermode,
5691 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5692 
5693   /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
5695      the outer subreg is effectively a truncation to the original mode.  */
5696   if (GET_CODE (op) == ASHIFT
5697       && SCALAR_INT_MODE_P (outermode)
5698       && SCALAR_INT_MODE_P (innermode)
5699       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5700       && CONST_INT_P (XEXP (op, 1))
5701       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5702 	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5703       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5704       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
5705       && subreg_lsb_1 (outermode, innermode, byte) == 0)
5706     return simplify_gen_binary (ASHIFT, outermode,
5707 				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5708 
5709   /* Recognize a word extraction from a multi-word subreg.  */
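  /* E.g., on a 32-bit little-endian target with X a pseudo register,
     (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) becomes
     (subreg:SI (reg:DI x) 4).  (Illustrative.)  */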
5710   if ((GET_CODE (op) == LSHIFTRT
5711        || GET_CODE (op) == ASHIFTRT)
5712       && SCALAR_INT_MODE_P (innermode)
5713       && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
5714       && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
5715       && CONST_INT_P (XEXP (op, 1))
5716       && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
5717       && INTVAL (XEXP (op, 1)) >= 0
5718       && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
5719       && byte == subreg_lowpart_offset (outermode, innermode))
5720     {
5721       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5722       return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
5723 				  (WORDS_BIG_ENDIAN
5724 				   ? byte - shifted_bytes
5725 				   : byte + shifted_bytes));
5726     }
5727 
5728   /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
5729      and try replacing the SUBREG and shift with it.  Don't do this if
5730      the MEM has a mode-dependent address or if we would be widening it.  */
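  /* E.g., on a 32-bit little-endian target,
     (subreg:SI (lshiftrt:DI (mem:DI A) (const_int 32)) 0) can be rewritten
     as an SImode load from A plus 4, provided A is not a mode-dependent
     address and the reference is not volatile.  (Illustrative.)  */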
5731 
5732   if ((GET_CODE (op) == LSHIFTRT
5733        || GET_CODE (op) == ASHIFTRT)
5734       && SCALAR_INT_MODE_P (innermode)
5735       && MEM_P (XEXP (op, 0))
5736       && CONST_INT_P (XEXP (op, 1))
5737       && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
5738       && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
5739       && INTVAL (XEXP (op, 1)) > 0
5740       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5741       && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
5742       && ! MEM_VOLATILE_P (XEXP (op, 0))
5743       && byte == subreg_lowpart_offset (outermode, innermode)
5744       && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
5745 	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
5746     {
5747       int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
5748       return adjust_address_nv (XEXP (op, 0), outermode,
5749 				(WORDS_BIG_ENDIAN
5750 				 ? byte - shifted_bytes
5751 				 : byte + shifted_bytes));
5752     }
5753 
5754   return NULL_RTX;
5755 }
5756 
5757 /* Make a SUBREG operation or equivalent if it folds.  */
5758 
5759 rtx
5760 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5761 		     enum machine_mode innermode, unsigned int byte)
5762 {
5763   rtx newx;
5764 
5765   newx = simplify_subreg (outermode, op, innermode, byte);
5766   if (newx)
5767     return newx;
5768 
5769   if (GET_CODE (op) == SUBREG
5770       || GET_CODE (op) == CONCAT
5771       || GET_MODE (op) == VOIDmode)
5772     return NULL_RTX;
5773 
5774   if (validate_subreg (outermode, innermode, op, byte))
5775     return gen_rtx_SUBREG (outermode, op, byte);
5776 
5777   return NULL_RTX;
5778 }
5779 
5780 /* Simplify X, an rtx expression.
5781 
5782    Return the simplified expression or NULL if no simplifications
5783    were possible.
5784 
5785    This is the preferred entry point into the simplification routines;
5786    however, we still allow passes to call the more specific routines.
5787 
5788    Right now GCC has three (yes, three) major bodies of RTL simplification
5789    code that need to be unified.
5790 
5791 	1. fold_rtx in cse.c.  This code uses various CSE specific
5792 	   information to aid in RTL simplification.
5793 
5794 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5795 	   it uses combine specific information to aid in RTL
5796 	   simplification.
5797 
5798 	3. The routines in this file.
5799 
5800 
5801    Long term we want to only have one body of simplification code; to
5802    get to that state I recommend the following steps:
5803 
	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.
5806 
5807 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
5808 	   use this routine whenever possible.
5809 
5810 	3. Allow for pass dependent state to be provided to these
5811 	   routines and add simplifications based on the pass dependent
5812 	   state.  Remove code from cse.c & combine.c that becomes
5813 	   redundant/dead.
5814 
5815     It will take time, but ultimately the compiler will be easier to
5816     maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added in 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
5819 
5820 rtx
5821 simplify_rtx (const_rtx x)
5822 {
5823   const enum rtx_code code = GET_CODE (x);
5824   const enum machine_mode mode = GET_MODE (x);
5825 
5826   switch (GET_RTX_CLASS (code))
5827     {
5828     case RTX_UNARY:
5829       return simplify_unary_operation (code, mode,
5830 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5831     case RTX_COMM_ARITH:
5832       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5833 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
5834 
5835       /* Fall through....  */
5836 
5837     case RTX_BIN_ARITH:
5838       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5839 
5840     case RTX_TERNARY:
5841     case RTX_BITFIELD_OPS:
5842       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
5843 					 XEXP (x, 0), XEXP (x, 1),
5844 					 XEXP (x, 2));
5845 
5846     case RTX_COMPARE:
5847     case RTX_COMM_COMPARE:
5848       return simplify_relational_operation (code, mode,
5849                                             ((GET_MODE (XEXP (x, 0))
5850                                              != VOIDmode)
5851                                             ? GET_MODE (XEXP (x, 0))
5852                                             : GET_MODE (XEXP (x, 1))),
5853                                             XEXP (x, 0),
5854                                             XEXP (x, 1));
5855 
5856     case RTX_EXTRA:
5857       if (code == SUBREG)
5858 	return simplify_subreg (mode, SUBREG_REG (x),
5859 				GET_MODE (SUBREG_REG (x)),
5860 				SUBREG_BYTE (x));
5861       break;
5862 
5863     case RTX_OBJ:
5864       if (code == LO_SUM)
5865 	{
5866 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
5867 	  if (GET_CODE (XEXP (x, 0)) == HIGH
5868 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
5870 	}
5871       break;
5872 
5873     default:
5874       break;
5875     }
5876   return NULL;
5877 }
5878