/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
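
/* Illustrative example (editor's comment, not in the original source):
   with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is
   -1 and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 5) is 0, so the pair
   (low, high) = (-5, HWI_SIGN_EXTEND (-5)) represents -5 as a
   double-width integer.  */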

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
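
/* Illustrative example (editor's comment, not in the original source):
   in QImode only a CONST_INT whose low 8 bits are 0x80 qualifies, e.g.
   (const_int 128) or its sign-extended form (const_int -128); the
   masking above discards the bits outside the mode's precision before
   the comparison.  */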

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */
bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
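
/* Illustrative example (editor's comment, not in the original source):
   in QImode, where GET_MODE_MASK is 0xff, val_signbit_p accepts only a
   value whose low byte is exactly 0x80, whereas
   val_signbit_known_set_p (QImode, 0x90) is true and
   val_signbit_known_clear_p (QImode, 0x90) is false, since the latter
   two examine only bit 7.  */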

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
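
/* Example usage (editor's comment, not in the original source):

     simplify_gen_binary (PLUS, SImode, const1_rtx, reg)

   first tries to fold the addition; with a register operand nothing
   folds, so the commutative operands are reordered and the result is
   (plus:SI reg (const_int 1)).  Given two CONST_INTs instead, the fold
   succeeds and a single CONST_INT is returned.  */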

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
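
/* Illustrative example (editor's comment, not in the original source):
   if X is (mem:SI (symbol_ref C)) where C addresses a pool entry that
   holds (const_int 42) in SImode, the function returns (const_int 42).
   If the access uses a different mode or a nonzero offset, it instead
   tries simplify_subreg on the pool constant and falls back to X.  */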

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
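
/* Example usage (editor's comment, not in the original source): calling

     simplify_replace_rtx (x, old, new)

   with X = (plus:SI (reg:SI 1) (const_int 2)), OLD = (reg:SI 1) and
   NEW = (const_int 3) rebuilds the PLUS through simplify_gen_binary,
   which folds the two constants, so the result is (const_int 5).  */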

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }
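
  /* Illustrative example for the case above (editor's comment, not in
     the original source): on a 32-bit little-endian target,
     (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) extracts the
     high word and becomes (subreg:SI (reg:DI X) 4), since BYTE is 0
     for the lowpart and SHIFTED_BYTES is 32 / BITS_PER_UNIT = 4.  */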

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);
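
      /* Reasoning for the case above (editor's comment, not in the
	 original source): in SImode with C == 31, (ashiftrt:SI foo 31)
	 is -1 when FOO is negative and 0 otherwise, so its complement
	 is -1 exactly when FOO >= 0, i.e. (ge foo (const_int 0)) when
	 STORE_FLAG_VALUE is -1.  */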


      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}
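
      /* Illustrative example (editor's comment, not in the original
	 source): (neg:SI (plus:SI (reg:SI A) (const_int 5))) takes the
	 first branch above and becomes
	 (minus:SI (const_int -5) (reg:SI A)), while a PLUS with no
	 constant operand is canonicalized to (minus (neg A) B).  */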

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
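
      /* Illustrative example (editor's comment, not in the original
	 source): with STORE_FLAG_VALUE == 1 and X in SImode,
	 (neg:SI (lt:SI X (const_int 0))) is 0 or -1, which is exactly
	 (ashiftrt:SI X (const_int 31)), the sign bit smeared across
	 the whole word.  */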
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned) significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF X).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x).

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
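
      /* Illustrative example (editor's comment, not in the original
	 source): (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
	 (sign_extend:SI (reg:HI B)))) needs at most 16 + 16 = 32 bits,
	 so overflow is impossible and the result is canonicalized to
	 (mult:DI (sign_extend:DI (reg:HI A))
	 (sign_extend:DI (reg:HI B))).  */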

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}
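
      /* Illustrative example (editor's comment, not in the original
	 source): with I == 24 and N == SImode,
	 (zero_extend:DI (lshiftrt:SI (ashift:SI X (const_int 24))
	 (const_int 24))) isolates the low 32 - 24 = 8 bits, so TMODE
	 is QImode and, on a little-endian target, the result is
	 (zero_extend:DI (subreg:QI X 0)).  */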
1467 
1468 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1469       /* As we do not know which address space the pointer is referring to,
1470 	 we can do this only if the target does not support different pointer
1471 	 or address modes depending on the address space.  */
1472       if (target_default_pointer_address_modes_p ()
1473 	  && POINTERS_EXTEND_UNSIGNED > 0
1474 	  && mode == Pmode && GET_MODE (op) == ptr_mode
1475 	  && (CONSTANT_P (op)
1476 	      || (GET_CODE (op) == SUBREG
1477 		  && REG_P (SUBREG_REG (op))
1478 		  && REG_POINTER (SUBREG_REG (op))
1479 		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
1480 	return convert_memory_address (Pmode, op);
1481 #endif
1482       break;
1483 
1484     default:
1485       break;
1486     }
1487 
1488   return 0;
1489 }
1490 
1491 /* Try to compute the value of a unary operation CODE whose output mode is to
1492    be MODE with input operand OP whose mode was originally OP_MODE.
1493    Return zero if the value cannot be computed.  */
1494 rtx
simplify_const_unary_operation(enum rtx_code code,enum machine_mode mode,rtx op,enum machine_mode op_mode)1495 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
1496 				rtx op, enum machine_mode op_mode)
1497 {
1498   unsigned int width = GET_MODE_PRECISION (mode);
1499   unsigned int op_width = GET_MODE_PRECISION (op_mode);
1500 
1501   if (code == VEC_DUPLICATE)
1502     {
1503       gcc_assert (VECTOR_MODE_P (mode));
1504       if (GET_MODE (op) != VOIDmode)
1505       {
1506 	if (!VECTOR_MODE_P (GET_MODE (op)))
1507 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1508 	else
1509 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1510 						(GET_MODE (op)));
1511       }
1512       if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
1513 	  || GET_CODE (op) == CONST_VECTOR)
1514 	{
1515           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1516           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1517 	  rtvec v = rtvec_alloc (n_elts);
1518 	  unsigned int i;
1519 
1520 	  if (GET_CODE (op) != CONST_VECTOR)
1521 	    for (i = 0; i < n_elts; i++)
1522 	      RTVEC_ELT (v, i) = op;
1523 	  else
1524 	    {
1525 	      enum machine_mode inmode = GET_MODE (op);
1526               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
1527               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
1528 
1529 	      gcc_assert (in_n_elts < n_elts);
1530 	      gcc_assert ((n_elts % in_n_elts) == 0);
1531 	      for (i = 0; i < n_elts; i++)
1532 	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1533 	    }
1534 	  return gen_rtx_CONST_VECTOR (mode, v);
1535 	}
1536     }
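
  /* Worked example (illustrative, not part of the original source):
     (vec_duplicate:V4SI (const_int 5)) folds to
     (const_vector:V4SI [5 5 5 5]), and duplicating a narrower
     CONST_VECTOR repeats its elements:
     (vec_duplicate:V4SI (const_vector:V2SI [1 2])) becomes
     (const_vector:V4SI [1 2 1 2]).  */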
1537 
1538   if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1539     {
1540       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1541       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1542       enum machine_mode opmode = GET_MODE (op);
1543       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
1544       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1545       rtvec v = rtvec_alloc (n_elts);
1546       unsigned int i;
1547 
1548       gcc_assert (op_n_elts == n_elts);
1549       for (i = 0; i < n_elts; i++)
1550 	{
1551 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1552 					    CONST_VECTOR_ELT (op, i),
1553 					    GET_MODE_INNER (opmode));
1554 	  if (!x)
1555 	    return 0;
1556 	  RTVEC_ELT (v, i) = x;
1557 	}
1558       return gen_rtx_CONST_VECTOR (mode, v);
1559     }
1560 
1561   /* The order of these tests is critical so that, for example, we don't
1562      check the wrong mode (input vs. output) for a conversion operation,
1563      such as FIX.  At some point, this should be simplified.  */
1564 
1565   if (code == FLOAT && CONST_SCALAR_INT_P (op))
1566     {
1567       HOST_WIDE_INT hv, lv;
1568       REAL_VALUE_TYPE d;
1569 
1570       if (CONST_INT_P (op))
1571 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1572       else
1573 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1574 
1575       REAL_VALUE_FROM_INT (d, lv, hv, mode);
1576       d = real_value_truncate (mode, d);
1577       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1578     }
1579   else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1580     {
1581       HOST_WIDE_INT hv, lv;
1582       REAL_VALUE_TYPE d;
1583 
1584       if (CONST_INT_P (op))
1585 	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
1586       else
1587 	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
1588 
1589       if (op_mode == VOIDmode
1590 	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
1591 	/* We should never get a negative number.  */
1592 	gcc_assert (hv >= 0);
1593       else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
1594 	hv = 0, lv &= GET_MODE_MASK (op_mode);
1595 
1596       REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
1597       d = real_value_truncate (mode, d);
1598       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1599     }
1600 
1601   if (CONST_INT_P (op)
1602       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
1603     {
1604       HOST_WIDE_INT arg0 = INTVAL (op);
1605       HOST_WIDE_INT val;
1606 
1607       switch (code)
1608 	{
1609 	case NOT:
1610 	  val = ~ arg0;
1611 	  break;
1612 
1613 	case NEG:
1614 	  val = - arg0;
1615 	  break;
1616 
1617 	case ABS:
1618 	  val = (arg0 >= 0 ? arg0 : - arg0);
1619 	  break;
1620 
1621 	case FFS:
1622 	  arg0 &= GET_MODE_MASK (mode);
1623 	  val = ffs_hwi (arg0);
1624 	  break;
1625 
1626 	case CLZ:
1627 	  arg0 &= GET_MODE_MASK (mode);
1628 	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1629 	    ;
1630 	  else
1631 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
1632 	  break;
1633 
1634 	case CLRSB:
1635 	  arg0 &= GET_MODE_MASK (mode);
1636 	  if (arg0 == 0)
1637 	    val = GET_MODE_PRECISION (mode) - 1;
1638 	  else if (arg0 >= 0)
1639 	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
1640 	  else if (arg0 < 0)
1641 	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
1642 	  break;
1643 
1644 	case CTZ:
1645 	  arg0 &= GET_MODE_MASK (mode);
1646 	  if (arg0 == 0)
1647 	    {
1648 	      /* Even if the value at zero is undefined, we have to come
1649 		 up with some replacement.  Seems good enough.  */
1650 	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1651 		val = GET_MODE_PRECISION (mode);
1652 	    }
1653 	  else
1654 	    val = ctz_hwi (arg0);
1655 	  break;
1656 
1657 	case POPCOUNT:
1658 	  arg0 &= GET_MODE_MASK (mode);
1659 	  val = 0;
1660 	  while (arg0)
1661 	    val++, arg0 &= arg0 - 1;
1662 	  break;
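
	/* Worked example (illustrative): the "arg0 &= arg0 - 1" step
	   above clears the lowest set bit on each iteration, so for
	   arg0 = 0b1011 the loop sees 0b1011 -> 0b1010 -> 0b1000 -> 0
	   and produces val = 3.  */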
1663 
1664 	case PARITY:
1665 	  arg0 &= GET_MODE_MASK (mode);
1666 	  val = 0;
1667 	  while (arg0)
1668 	    val++, arg0 &= arg0 - 1;
1669 	  val &= 1;
1670 	  break;
1671 
1672 	case BSWAP:
1673 	  {
1674 	    unsigned int s;
1675 
1676 	    val = 0;
1677 	    for (s = 0; s < width; s += 8)
1678 	      {
1679 		unsigned int d = width - s - 8;
1680 		unsigned HOST_WIDE_INT byte;
1681 		byte = (arg0 >> s) & 0xff;
1682 		val |= byte << d;
1683 	      }
1684 	  }
1685 	  break;
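
	/* Worked example (illustrative): for a 32-bit mode and
	   arg0 = 0x12345678 the loop extracts bytes from the low end
	   and deposits them at mirrored positions:
	     s =  0: byte 0x78 -> d = 24
	     s =  8: byte 0x56 -> d = 16
	     s = 16: byte 0x34 -> d =  8
	     s = 24: byte 0x12 -> d =  0
	   giving val = 0x78563412.  */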
1686 
1687 	case TRUNCATE:
1688 	  val = arg0;
1689 	  break;
1690 
1691 	case ZERO_EXTEND:
1692 	  /* When zero-extending a CONST_INT, we need to know its
1693              original mode.  */
1694 	  gcc_assert (op_mode != VOIDmode);
1695 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1696 	    {
1697 	      /* If we were really extending the mode,
1698 		 we would have to distinguish between zero-extension
1699 		 and sign-extension.  */
1700 	      gcc_assert (width == op_width);
1701 	      val = arg0;
1702 	    }
1703 	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1704 	    val = arg0 & GET_MODE_MASK (op_mode);
1705 	  else
1706 	    return 0;
1707 	  break;
1708 
1709 	case SIGN_EXTEND:
1710 	  if (op_mode == VOIDmode)
1711 	    op_mode = mode;
1712 	  op_width = GET_MODE_PRECISION (op_mode);
1713 	  if (op_width == HOST_BITS_PER_WIDE_INT)
1714 	    {
1715 	      /* If we were really extending the mode,
1716 		 we would have to distinguish between zero-extension
1717 		 and sign-extension.  */
1718 	      gcc_assert (width == op_width);
1719 	      val = arg0;
1720 	    }
1721 	  else if (op_width < HOST_BITS_PER_WIDE_INT)
1722 	    {
1723 	      val = arg0 & GET_MODE_MASK (op_mode);
1724 	      if (val_signbit_known_set_p (op_mode, val))
1725 		val |= ~GET_MODE_MASK (op_mode);
1726 	    }
1727 	  else
1728 	    return 0;
1729 	  break;
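
	/* Worked example (illustrative): sign-extending arg0 = 0x80
	   from QImode keeps the low byte and replicates its sign bit,
	   so val = 0x80 | ~0xff, i.e. the HOST_WIDE_INT value -128.  */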
1730 
1731 	case SQRT:
1732 	case FLOAT_EXTEND:
1733 	case FLOAT_TRUNCATE:
1734 	case SS_TRUNCATE:
1735 	case US_TRUNCATE:
1736 	case SS_NEG:
1737 	case US_NEG:
1738 	case SS_ABS:
1739 	  return 0;
1740 
1741 	default:
1742 	  gcc_unreachable ();
1743 	}
1744 
1745       return gen_int_mode (val, mode);
1746     }
1747 
1748   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1749      for a DImode operation on a CONST_INT.  */
1750   else if (width <= HOST_BITS_PER_DOUBLE_INT
1751 	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
1752     {
1753       double_int first, value;
1754 
1755       if (CONST_DOUBLE_AS_INT_P (op))
1756 	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
1757 				       CONST_DOUBLE_LOW (op));
1758       else
1759 	first = double_int::from_shwi (INTVAL (op));
1760 
1761       switch (code)
1762 	{
1763 	case NOT:
1764 	  value = ~first;
1765 	  break;
1766 
1767 	case NEG:
1768 	  value = -first;
1769 	  break;
1770 
1771 	case ABS:
1772 	  if (first.is_negative ())
1773 	    value = -first;
1774 	  else
1775 	    value = first;
1776 	  break;
1777 
1778 	case FFS:
1779 	  value.high = 0;
1780 	  if (first.low != 0)
1781 	    value.low = ffs_hwi (first.low);
1782 	  else if (first.high != 0)
1783 	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
1784 	  else
1785 	    value.low = 0;
1786 	  break;
1787 
1788 	case CLZ:
1789 	  value.high = 0;
1790 	  if (first.high != 0)
1791 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
1792 	              - HOST_BITS_PER_WIDE_INT;
1793 	  else if (first.low != 0)
1794 	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
1795 	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1796 	    value.low = GET_MODE_PRECISION (mode);
1797 	  break;
1798 
1799 	case CTZ:
1800 	  value.high = 0;
1801 	  if (first.low != 0)
1802 	    value.low = ctz_hwi (first.low);
1803 	  else if (first.high != 0)
1804 	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
1805 	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
1806 	    value.low = GET_MODE_PRECISION (mode);
1807 	  break;
1808 
1809 	case POPCOUNT:
1810 	  value = double_int_zero;
1811 	  while (first.low)
1812 	    {
1813 	      value.low++;
1814 	      first.low &= first.low - 1;
1815 	    }
1816 	  while (first.high)
1817 	    {
1818 	      value.low++;
1819 	      first.high &= first.high - 1;
1820 	    }
1821 	  break;
1822 
1823 	case PARITY:
1824 	  value = double_int_zero;
1825 	  while (first.low)
1826 	    {
1827 	      value.low++;
1828 	      first.low &= first.low - 1;
1829 	    }
1830 	  while (first.high)
1831 	    {
1832 	      value.low++;
1833 	      first.high &= first.high - 1;
1834 	    }
1835 	  value.low &= 1;
1836 	  break;
1837 
1838 	case BSWAP:
1839 	  {
1840 	    unsigned int s;
1841 
1842 	    value = double_int_zero;
1843 	    for (s = 0; s < width; s += 8)
1844 	      {
1845 		unsigned int d = width - s - 8;
1846 		unsigned HOST_WIDE_INT byte;
1847 
1848 		if (s < HOST_BITS_PER_WIDE_INT)
1849 		  byte = (first.low >> s) & 0xff;
1850 		else
1851 		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
1852 
1853 		if (d < HOST_BITS_PER_WIDE_INT)
1854 		  value.low |= byte << d;
1855 		else
1856 		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
1857 	      }
1858 	  }
1859 	  break;
1860 
1861 	case TRUNCATE:
1862 	  /* This is just a change-of-mode, so do nothing.  */
1863 	  value = first;
1864 	  break;
1865 
1866 	case ZERO_EXTEND:
1867 	  gcc_assert (op_mode != VOIDmode);
1868 
1869 	  if (op_width > HOST_BITS_PER_WIDE_INT)
1870 	    return 0;
1871 
1872 	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
1873 	  break;
1874 
1875 	case SIGN_EXTEND:
1876 	  if (op_mode == VOIDmode
1877 	      || op_width > HOST_BITS_PER_WIDE_INT)
1878 	    return 0;
1879 	  else
1880 	    {
1881 	      value.low = first.low & GET_MODE_MASK (op_mode);
1882 	      if (val_signbit_known_set_p (op_mode, value.low))
1883 		value.low |= ~GET_MODE_MASK (op_mode);
1884 
1885 	      value.high = HWI_SIGN_EXTEND (value.low);
1886 	    }
1887 	  break;
1888 
1889 	case SQRT:
1890 	  return 0;
1891 
1892 	default:
1893 	  return 0;
1894 	}
1895 
1896       return immed_double_int_const (value, mode);
1897     }
1898 
1899   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1900 	   && SCALAR_FLOAT_MODE_P (mode)
1901 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1902     {
1903       REAL_VALUE_TYPE d, t;
1904       REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1905 
1906       switch (code)
1907 	{
1908 	case SQRT:
1909 	  if (HONOR_SNANS (mode) && real_isnan (&d))
1910 	    return 0;
1911 	  real_sqrt (&t, mode, &d);
1912 	  d = t;
1913 	  break;
1914 	case ABS:
1915 	  d = real_value_abs (&d);
1916 	  break;
1917 	case NEG:
1918 	  d = real_value_negate (&d);
1919 	  break;
1920 	case FLOAT_TRUNCATE:
1921 	  d = real_value_truncate (mode, d);
1922 	  break;
1923 	case FLOAT_EXTEND:
1924 	  /* All this does is change the mode, unless changing
1925 	     mode class.  */
1926 	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1927 	    real_convert (&d, mode, &d);
1928 	  break;
1929 	case FIX:
1930 	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1931 	  break;
1932 	case NOT:
1933 	  {
1934 	    long tmp[4];
1935 	    int i;
1936 
1937 	    real_to_target (tmp, &d, GET_MODE (op));
1938 	    for (i = 0; i < 4; i++)
1939 	      tmp[i] = ~tmp[i];
1940 	    real_from_target (&d, tmp, mode);
1941 	    break;
1942 	  }
1943 	default:
1944 	  gcc_unreachable ();
1945 	}
1946       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1947     }
1948 
1949   else if (CONST_DOUBLE_AS_FLOAT_P (op)
1950 	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1951 	   && GET_MODE_CLASS (mode) == MODE_INT
1952 	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
1953     {
1954       /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1955 	 operators are intentionally left unspecified (to ease implementation
1956 	 by target backends), for consistency, this routine implements the
1957 	 same semantics for constant folding as used by the middle-end.  */
1958 
1959       /* This was formerly used only for non-IEEE float.
1960 	 eggert@twinsun.com says it is safe for IEEE also.  */
1961       HOST_WIDE_INT xh, xl, th, tl;
1962       REAL_VALUE_TYPE x, t;
1963       REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1964       switch (code)
1965 	{
1966 	case FIX:
1967 	  if (REAL_VALUE_ISNAN (x))
1968 	    return const0_rtx;
1969 
1970 	  /* Test against the signed upper bound.  */
1971 	  if (width > HOST_BITS_PER_WIDE_INT)
1972 	    {
1973 	      th = ((unsigned HOST_WIDE_INT) 1
1974 		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1975 	      tl = -1;
1976 	    }
1977 	  else
1978 	    {
1979 	      th = 0;
1980 	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1981 	    }
1982 	  real_from_integer (&t, VOIDmode, tl, th, 0);
1983 	  if (REAL_VALUES_LESS (t, x))
1984 	    {
1985 	      xh = th;
1986 	      xl = tl;
1987 	      break;
1988 	    }
1989 
1990 	  /* Test against the signed lower bound.  */
1991 	  if (width > HOST_BITS_PER_WIDE_INT)
1992 	    {
1993 	      th = (unsigned HOST_WIDE_INT) (-1)
1994 		   << (width - HOST_BITS_PER_WIDE_INT - 1);
1995 	      tl = 0;
1996 	    }
1997 	  else
1998 	    {
1999 	      th = -1;
2000 	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
2001 	    }
2002 	  real_from_integer (&t, VOIDmode, tl, th, 0);
2003 	  if (REAL_VALUES_LESS (x, t))
2004 	    {
2005 	      xh = th;
2006 	      xl = tl;
2007 	      break;
2008 	    }
2009 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2010 	  break;
2011 
2012 	case UNSIGNED_FIX:
2013 	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
2014 	    return const0_rtx;
2015 
2016 	  /* Test against the unsigned upper bound.  */
2017 	  if (width == HOST_BITS_PER_DOUBLE_INT)
2018 	    {
2019 	      th = -1;
2020 	      tl = -1;
2021 	    }
2022 	  else if (width >= HOST_BITS_PER_WIDE_INT)
2023 	    {
2024 	      th = ((unsigned HOST_WIDE_INT) 1
2025 		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
2026 	      tl = -1;
2027 	    }
2028 	  else
2029 	    {
2030 	      th = 0;
2031 	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
2032 	    }
2033 	  real_from_integer (&t, VOIDmode, tl, th, 1);
2034 	  if (REAL_VALUES_LESS (t, x))
2035 	    {
2036 	      xh = th;
2037 	      xl = tl;
2038 	      break;
2039 	    }
2040 
2041 	  REAL_VALUE_TO_INT (&xl, &xh, x);
2042 	  break;
2043 
2044 	default:
2045 	  gcc_unreachable ();
2046 	}
2047       return immed_double_const (xl, xh, mode);
2048     }
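
  /* Illustrative example (not part of the original source): folding
     (fix:SI (const_double:DF 3.0e9)) clamps to the SImode signed
     maximum, so the bound test above yields 0x7fffffff rather than a
     wrapped-around negative value, while (unsigned_fix:SI ...) of the
     same input yields 3000000000, since that fits in 32 unsigned
     bits.  */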
2049 
2050   return NULL_RTX;
2051 }
2052 
2053 /* Subroutine of simplify_binary_operation to simplify a commutative,
2054    associative binary operation CODE with result mode MODE, operating
2055    on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2056    SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
2057    canonicalization is possible.  */
2058 
2059 static rtx
2060 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
2061 				rtx op0, rtx op1)
2062 {
2063   rtx tem;
2064 
2065   /* Linearize the operator to the left.  */
2066   if (GET_CODE (op1) == code)
2067     {
2068       /* "(a op b) op (c op d)" becomes "(((a op b) op c) op d)".  */
2069       if (GET_CODE (op0) == code)
2070 	{
2071 	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2072 	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2073 	}
2074 
2075       /* "a op (b op c)" becomes "(b op c) op a".  */
2076       if (! swap_commutative_operands_p (op1, op0))
2077 	return simplify_gen_binary (code, mode, op1, op0);
2078 
2079       tem = op0;
2080       op0 = op1;
2081       op1 = tem;
2082     }
2083 
2084   if (GET_CODE (op0) == code)
2085     {
2086       /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
2087       if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2088 	{
2089 	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2090 	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2091 	}
2092 
2093       /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
2094       tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2095       if (tem != 0)
2096         return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2097 
2098       /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
2099       tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2100       if (tem != 0)
2101         return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2102     }
2103 
2104   return 0;
2105 }
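
/* Illustrative example (not part of the original source): with
   CODE == AND, OP0 == (and X (const_int 255)) and OP1 == Y, the
   "(x op c) op y" rule above fires because the constant should come
   last, yielding (and (and X Y) (const_int 255)).  */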
2106 
2107 
2108 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2109    and OP1.  Return 0 if no simplification is possible.
2110 
2111    Don't use this for relational operations such as EQ or LT.
2112    Use simplify_relational_operation instead.  */
2113 rtx
2114 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
2115 			   rtx op0, rtx op1)
2116 {
2117   rtx trueop0, trueop1;
2118   rtx tem;
2119 
2120   /* Relational operations don't work here.  We must know the mode
2121      of the operands in order to do the comparison correctly.
2122      Assuming a full word can give incorrect results.
2123      Consider comparing 128 with -128 in QImode.  */
2124   gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2125   gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2126 
2127   /* Make sure the constant is second.  */
2128   if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2129       && swap_commutative_operands_p (op0, op1))
2130     {
2131       tem = op0, op0 = op1, op1 = tem;
2132     }
2133 
2134   trueop0 = avoid_constant_pool_reference (op0);
2135   trueop1 = avoid_constant_pool_reference (op1);
2136 
2137   tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2138   if (tem)
2139     return tem;
2140   return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2141 }
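
/* Usage sketch (illustrative, not part of the original source):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx sum = simplify_binary_operation (PLUS, SImode, reg, const0_rtx);

   Here SUM comes back as REG itself, because x + 0 folds in an
   integer mode; a NULL return means no simplification applied and the
   caller must emit the operation as written.  */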
2142 
2143 /* Subroutine of simplify_binary_operation.  Simplify a binary operation
2144    CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
2145    OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2146    actual constants.  */
2147 
2148 static rtx
2149 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
2150 			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2151 {
2152   rtx tem, reversed, opleft, opright;
2153   HOST_WIDE_INT val;
2154   unsigned int width = GET_MODE_PRECISION (mode);
2155 
2156   /* Even if we can't compute a constant result,
2157      there are some cases worth simplifying.  */
2158 
2159   switch (code)
2160     {
2161     case PLUS:
2162       /* Maybe simplify x + 0 to x.  The two expressions are equivalent
2163 	 when x is NaN, infinite, or finite and nonzero.  They aren't
2164 	 when x is -0 and the rounding mode is not towards -infinity,
2165 	 since (-0) + 0 is then 0.  */
2166       if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2167 	return op0;
2168 
2169       /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
2170 	 transformations are safe even for IEEE.  */
2171       if (GET_CODE (op0) == NEG)
2172 	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2173       else if (GET_CODE (op1) == NEG)
2174 	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2175 
2176       /* (~a) + 1 -> -a */
2177       if (INTEGRAL_MODE_P (mode)
2178 	  && GET_CODE (op0) == NOT
2179 	  && trueop1 == const1_rtx)
2180 	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2181 
2182       /* Handle both-operands-constant cases.  We can only add
2183 	 CONST_INTs to constants since the sum of relocatable symbols
2184 	 can't be handled by most assemblers.  Don't add CONST_INT
2185 	 to CONST_INT since overflow won't be computed properly if wider
2186 	 than HOST_BITS_PER_WIDE_INT.  */
2187 
2188       if ((GET_CODE (op0) == CONST
2189 	   || GET_CODE (op0) == SYMBOL_REF
2190 	   || GET_CODE (op0) == LABEL_REF)
2191 	  && CONST_INT_P (op1))
2192 	return plus_constant (mode, op0, INTVAL (op1));
2193       else if ((GET_CODE (op1) == CONST
2194 		|| GET_CODE (op1) == SYMBOL_REF
2195 		|| GET_CODE (op1) == LABEL_REF)
2196 	       && CONST_INT_P (op0))
2197 	return plus_constant (mode, op1, INTVAL (op0));
2198 
2199       /* See if this is something like X * C + X or vice versa or
2200 	 if the multiplication is written as a shift.  If so, we can
2201 	 distribute and make a new multiply or shift that might be
2202 	 cheaper (e.g. X * 3 + X becomes X * 4).  But don't make
2203 	 something more expensive than we had before.  */
2204 
2205       if (SCALAR_INT_MODE_P (mode))
2206 	{
2207 	  double_int coeff0, coeff1;
2208 	  rtx lhs = op0, rhs = op1;
2209 
2210 	  coeff0 = double_int_one;
2211 	  coeff1 = double_int_one;
2212 
2213 	  if (GET_CODE (lhs) == NEG)
2214 	    {
2215 	      coeff0 = double_int_minus_one;
2216 	      lhs = XEXP (lhs, 0);
2217 	    }
2218 	  else if (GET_CODE (lhs) == MULT
2219 		   && CONST_INT_P (XEXP (lhs, 1)))
2220 	    {
2221 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2222 	      lhs = XEXP (lhs, 0);
2223 	    }
2224 	  else if (GET_CODE (lhs) == ASHIFT
2225 		   && CONST_INT_P (XEXP (lhs, 1))
2226                    && INTVAL (XEXP (lhs, 1)) >= 0
2227 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2228 	    {
2229 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2230 	      lhs = XEXP (lhs, 0);
2231 	    }
2232 
2233 	  if (GET_CODE (rhs) == NEG)
2234 	    {
2235 	      coeff1 = double_int_minus_one;
2236 	      rhs = XEXP (rhs, 0);
2237 	    }
2238 	  else if (GET_CODE (rhs) == MULT
2239 		   && CONST_INT_P (XEXP (rhs, 1)))
2240 	    {
2241 	      coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
2242 	      rhs = XEXP (rhs, 0);
2243 	    }
2244 	  else if (GET_CODE (rhs) == ASHIFT
2245 		   && CONST_INT_P (XEXP (rhs, 1))
2246 		   && INTVAL (XEXP (rhs, 1)) >= 0
2247 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2248 	    {
2249 	      coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2250 	      rhs = XEXP (rhs, 0);
2251 	    }
2252 
2253 	  if (rtx_equal_p (lhs, rhs))
2254 	    {
2255 	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
2256 	      rtx coeff;
2257 	      double_int val;
2258 	      bool speed = optimize_function_for_speed_p (cfun);
2259 
2260 	      val = coeff0 + coeff1;
2261 	      coeff = immed_double_int_const (val, mode);
2262 
2263 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2264 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2265 		? tem : 0;
2266 	    }
2267 	}
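
      /* Illustrative example (not part of the original source): for
	 (plus (mult X (const_int 3)) X) the coefficients are 3 and 1,
	 so the result is (mult X (const_int 4)); likewise
	 (plus (ashift X (const_int 2)) X) has coefficients 4 and 1 and
	 becomes (mult X (const_int 5)), kept only if it is no costlier
	 than the original.  */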
2268 
2269       /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
2270       if (CONST_SCALAR_INT_P (op1)
2271 	  && GET_CODE (op0) == XOR
2272 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2273 	  && mode_signbit_p (mode, op1))
2274 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2275 				    simplify_gen_binary (XOR, mode, op1,
2276 							 XEXP (op0, 1)));
2277 
2278       /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
2279       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2280 	  && GET_CODE (op0) == MULT
2281 	  && GET_CODE (XEXP (op0, 0)) == NEG)
2282 	{
2283 	  rtx in1, in2;
2284 
2285 	  in1 = XEXP (XEXP (op0, 0), 0);
2286 	  in2 = XEXP (op0, 1);
2287 	  return simplify_gen_binary (MINUS, mode, op1,
2288 				      simplify_gen_binary (MULT, mode,
2289 							   in1, in2));
2290 	}
2291 
2292       /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2293 	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2294 	 is 1.  */
2295       if (COMPARISON_P (op0)
2296 	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2297 	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2298 	  && (reversed = reversed_comparison (op0, mode)))
2299 	return
2300 	  simplify_gen_unary (NEG, mode, reversed, mode);
2301 
2302       /* If one of the operands is a PLUS or a MINUS, see if we can
2303 	 simplify this by the associative law.
2304 	 Don't use the associative law for floating point.
2305 	 The inaccuracy makes it nonassociative,
2306 	 and subtle programs can break if operations are associated.  */
2307 
2308       if (INTEGRAL_MODE_P (mode)
2309 	  && (plus_minus_operand_p (op0)
2310 	      || plus_minus_operand_p (op1))
2311 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2312 	return tem;
2313 
2314       /* Reassociate floating point addition only when the user
2315 	 specifies associative math operations.  */
2316       if (FLOAT_MODE_P (mode)
2317 	  && flag_associative_math)
2318 	{
2319 	  tem = simplify_associative_operation (code, mode, op0, op1);
2320 	  if (tem)
2321 	    return tem;
2322 	}
2323       break;
2324 
2325     case COMPARE:
2326       /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
2327       if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2328 	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2329 	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2330 	{
2331 	  rtx xop00 = XEXP (op0, 0);
2332 	  rtx xop10 = XEXP (op1, 0);
2333 
2334 #ifdef HAVE_cc0
2335 	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2336 #else
2337 	    if (REG_P (xop00) && REG_P (xop10)
2338 		&& GET_MODE (xop00) == GET_MODE (xop10)
2339 		&& REGNO (xop00) == REGNO (xop10)
2340 		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
2341 		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
2342 #endif
2343 	      return xop00;
2344 	}
2345       break;
2346 
2347     case MINUS:
2348       /* We can't assume x-x is 0 even with non-IEEE floating point,
2349 	 but since it is zero except in very strange circumstances, we
2350 	 will treat it as zero with -ffinite-math-only.  */
2351       if (rtx_equal_p (trueop0, trueop1)
2352 	  && ! side_effects_p (op0)
2353 	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2354 	return CONST0_RTX (mode);
2355 
2356       /* Change subtraction from zero into negation.  (0 - x) is the
2357 	 same as -x when x is NaN, infinite, or finite and nonzero.
2358 	 But if the mode has signed zeros, and does not round towards
2359 	 -infinity, then 0 - 0 is 0, not -0.  */
2360       if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2361 	return simplify_gen_unary (NEG, mode, op1, mode);
2362 
2363       /* (-1 - a) is ~a.  */
2364       if (trueop0 == constm1_rtx)
2365 	return simplify_gen_unary (NOT, mode, op1, mode);
2366 
2367       /* Subtracting 0 has no effect unless the mode has signed zeros
2368 	 and supports rounding towards -infinity.  In such a case,
2369 	 0 - 0 is -0.  */
2370       if (!(HONOR_SIGNED_ZEROS (mode)
2371 	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2372 	  && trueop1 == CONST0_RTX (mode))
2373 	return op0;
2374 
2375       /* See if this is something like X * C - X or vice versa or
2376 	 if the multiplication is written as a shift.  If so, we can
2377 	 distribute and make a new multiply, shift, or maybe just
2378 	 have X (if C is 2 in the example above).  But don't make
2379 	 something more expensive than we had before.  */
2380 
2381       if (SCALAR_INT_MODE_P (mode))
2382 	{
2383 	  double_int coeff0, negcoeff1;
2384 	  rtx lhs = op0, rhs = op1;
2385 
2386 	  coeff0 = double_int_one;
2387 	  negcoeff1 = double_int_minus_one;
2388 
2389 	  if (GET_CODE (lhs) == NEG)
2390 	    {
2391 	      coeff0 = double_int_minus_one;
2392 	      lhs = XEXP (lhs, 0);
2393 	    }
2394 	  else if (GET_CODE (lhs) == MULT
2395 		   && CONST_INT_P (XEXP (lhs, 1)))
2396 	    {
2397 	      coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
2398 	      lhs = XEXP (lhs, 0);
2399 	    }
2400 	  else if (GET_CODE (lhs) == ASHIFT
2401 		   && CONST_INT_P (XEXP (lhs, 1))
2402 		   && INTVAL (XEXP (lhs, 1)) >= 0
2403 		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
2404 	    {
2405 	      coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
2406 	      lhs = XEXP (lhs, 0);
2407 	    }
2408 
2409 	  if (GET_CODE (rhs) == NEG)
2410 	    {
2411 	      negcoeff1 = double_int_one;
2412 	      rhs = XEXP (rhs, 0);
2413 	    }
2414 	  else if (GET_CODE (rhs) == MULT
2415 		   && CONST_INT_P (XEXP (rhs, 1)))
2416 	    {
2417 	      negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
2418 	      rhs = XEXP (rhs, 0);
2419 	    }
2420 	  else if (GET_CODE (rhs) == ASHIFT
2421 		   && CONST_INT_P (XEXP (rhs, 1))
2422 		   && INTVAL (XEXP (rhs, 1)) >= 0
2423 		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
2424 	    {
2425 	      negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
2426 	      negcoeff1 = -negcoeff1;
2427 	      rhs = XEXP (rhs, 0);
2428 	    }
2429 
2430 	  if (rtx_equal_p (lhs, rhs))
2431 	    {
2432 	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
2433 	      rtx coeff;
2434 	      double_int val;
2435 	      bool speed = optimize_function_for_speed_p (cfun);
2436 
2437 	      val = coeff0 + negcoeff1;
2438 	      coeff = immed_double_int_const (val, mode);
2439 
2440 	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
2441 	      return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
2442 		? tem : 0;
2443 	    }
2444 	}
2445 
2446       /* (a - (-b)) -> (a + b).  True even for IEEE.  */
2447       if (GET_CODE (op1) == NEG)
2448 	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2449 
2450       /* (-x - c) may be simplified as (-c - x).  */
2451       if (GET_CODE (op0) == NEG
2452 	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2453 	{
2454 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
2455 	  if (tem)
2456 	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2457 	}
2458 
2459       /* Don't let a relocatable value get a negative coeff.  */
2460       if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2461 	return simplify_gen_binary (PLUS, mode,
2462 				    op0,
2463 				    neg_const_int (mode, op1));
2464 
2465       /* (x - (x & y)) -> (x & ~y) */
2466       if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2467 	{
2468 	  if (rtx_equal_p (op0, XEXP (op1, 0)))
2469 	    {
2470 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2471 					GET_MODE (XEXP (op1, 1)));
2472 	      return simplify_gen_binary (AND, mode, op0, tem);
2473 	    }
2474 	  if (rtx_equal_p (op0, XEXP (op1, 1)))
2475 	    {
2476 	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2477 					GET_MODE (XEXP (op1, 0)));
2478 	      return simplify_gen_binary (AND, mode, op0, tem);
2479 	    }
2480 	}
2481 
2482       /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2483 	 by reversing the comparison code if valid.  */
2484       if (STORE_FLAG_VALUE == 1
2485 	  && trueop0 == const1_rtx
2486 	  && COMPARISON_P (op1)
2487 	  && (reversed = reversed_comparison (op1, mode)))
2488 	return reversed;
2489 
2490       /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
2491       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2492 	  && GET_CODE (op1) == MULT
2493 	  && GET_CODE (XEXP (op1, 0)) == NEG)
2494 	{
2495 	  rtx in1, in2;
2496 
2497 	  in1 = XEXP (XEXP (op1, 0), 0);
2498 	  in2 = XEXP (op1, 1);
2499 	  return simplify_gen_binary (PLUS, mode,
2500 				      simplify_gen_binary (MULT, mode,
2501 							   in1, in2),
2502 				      op0);
2503 	}
2504 
2505       /* Canonicalize (minus (neg A) (mult B C)) to
2506 	 (minus (mult (neg B) C) A).  */
2507       if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2508 	  && GET_CODE (op1) == MULT
2509 	  && GET_CODE (op0) == NEG)
2510 	{
2511 	  rtx in1, in2;
2512 
2513 	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2514 	  in2 = XEXP (op1, 1);
2515 	  return simplify_gen_binary (MINUS, mode,
2516 				      simplify_gen_binary (MULT, mode,
2517 							   in1, in2),
2518 				      XEXP (op0, 0));
2519 	}
2520 
2521       /* If one of the operands is a PLUS or a MINUS, see if we can
2522 	 simplify this by the associative law.  This will, for example,
2523          canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2524 	 Don't use the associative law for floating point.
2525 	 The inaccuracy makes it nonassociative,
2526 	 and subtle programs can break if operations are associated.  */
2527 
2528       if (INTEGRAL_MODE_P (mode)
2529 	  && (plus_minus_operand_p (op0)
2530 	      || plus_minus_operand_p (op1))
2531 	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2532 	return tem;
2533       break;
2534 
2535     case MULT:
2536       if (trueop1 == constm1_rtx)
2537 	return simplify_gen_unary (NEG, mode, op0, mode);
2538 
2539       if (GET_CODE (op0) == NEG)
2540 	{
2541 	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2542 	  /* If op1 is a MULT as well and simplify_unary_operation
2543 	     just moved the NEG to the second operand, simplify_gen_binary
2544 	     below could, through simplify_associative_operation, move
2545 	     the NEG around again and recurse endlessly.  */
2546 	  if (temp
2547 	      && GET_CODE (op1) == MULT
2548 	      && GET_CODE (temp) == MULT
2549 	      && XEXP (op1, 0) == XEXP (temp, 0)
2550 	      && GET_CODE (XEXP (temp, 1)) == NEG
2551 	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2552 	    temp = NULL_RTX;
2553 	  if (temp)
2554 	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2555 	}
2556       if (GET_CODE (op1) == NEG)
2557 	{
2558 	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2559 	  /* If op0 is a MULT as well and simplify_unary_operation
2560 	     just moved the NEG to the second operand, simplify_gen_binary
2561 	     below could, through simplify_associative_operation, move
2562 	     the NEG around again and recurse endlessly.  */
2563 	  if (temp
2564 	      && GET_CODE (op0) == MULT
2565 	      && GET_CODE (temp) == MULT
2566 	      && XEXP (op0, 0) == XEXP (temp, 0)
2567 	      && GET_CODE (XEXP (temp, 1)) == NEG
2568 	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2569 	    temp = NULL_RTX;
2570 	  if (temp)
2571 	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2572 	}
2573 
2574       /* Maybe simplify x * 0 to 0.  The reduction is not valid if
2575 	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
2576 	 when the mode has signed zeros, since multiplying a negative
2577 	 number by 0 will give -0, not 0.  */
2578       if (!HONOR_NANS (mode)
2579 	  && !HONOR_SIGNED_ZEROS (mode)
2580 	  && trueop1 == CONST0_RTX (mode)
2581 	  && ! side_effects_p (op0))
2582 	return op1;
2583 
2584       /* In IEEE floating point, x*1 is not equivalent to x for
2585 	 signalling NaNs.  */
2586       if (!HONOR_SNANS (mode)
2587 	  && trueop1 == CONST1_RTX (mode))
2588 	return op0;
2589 
2590       /* Convert multiply by constant power of two into shift.  */
2592       if (CONST_INT_P (trueop1)
2593 	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
2594 	  /* If the mode is larger than the host word size, and the
2595 	     uppermost bit is set, then this isn't a power of two due
2596 	     to implicit sign extension.  */
2597 	  && (width <= HOST_BITS_PER_WIDE_INT
2598 	      || val != HOST_BITS_PER_WIDE_INT - 1))
2599 	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2600 
2601       /* Likewise for multipliers wider than a word.  */
2602       if (CONST_DOUBLE_AS_INT_P (trueop1)
2603 	  && GET_MODE (op0) == mode
2604 	  && CONST_DOUBLE_LOW (trueop1) == 0
2605 	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
2606 	  && (val < HOST_BITS_PER_DOUBLE_INT - 1
2607 	      || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
2608 	return simplify_gen_binary (ASHIFT, mode, op0,
2609 				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
2610 
2611       /* x*2 is x+x and x*(-1) is -x */
2612       if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2613 	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2614 	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2615 	  && GET_MODE (op0) == mode)
2616 	{
2617 	  REAL_VALUE_TYPE d;
2618 	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2619 
2620 	  if (REAL_VALUES_EQUAL (d, dconst2))
2621 	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2622 
2623 	  if (!HONOR_SNANS (mode)
2624 	      && REAL_VALUES_EQUAL (d, dconstm1))
2625 	    return simplify_gen_unary (NEG, mode, op0, mode);
2626 	}
2627 
2628       /* Optimize -x * -x as x * x.  */
2629       if (FLOAT_MODE_P (mode)
2630 	  && GET_CODE (op0) == NEG
2631 	  && GET_CODE (op1) == NEG
2632 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2633 	  && !side_effects_p (XEXP (op0, 0)))
2634 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2635 
2636       /* Likewise, optimize abs(x) * abs(x) as x * x.  */
2637       if (SCALAR_FLOAT_MODE_P (mode)
2638 	  && GET_CODE (op0) == ABS
2639 	  && GET_CODE (op1) == ABS
2640 	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2641 	  && !side_effects_p (XEXP (op0, 0)))
2642 	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2643 
2644       /* Reassociate multiplication, but for floating point MULTs
2645 	 only when the user specifies unsafe math optimizations.  */
2646       if (! FLOAT_MODE_P (mode)
2647 	  || flag_unsafe_math_optimizations)
2648 	{
2649 	  tem = simplify_associative_operation (code, mode, op0, op1);
2650 	  if (tem)
2651 	    return tem;
2652 	}
2653       break;
2654 
2655     case IOR:
2656       if (trueop1 == CONST0_RTX (mode))
2657 	return op0;
2658       if (INTEGRAL_MODE_P (mode)
2659 	  && trueop1 == CONSTM1_RTX (mode)
2660 	  && !side_effects_p (op0))
2661 	return op1;
2662       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2663 	return op0;
2664       /* A | (~A) -> -1 */
2665       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2666 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2667 	  && ! side_effects_p (op0)
2668 	  && SCALAR_INT_MODE_P (mode))
2669 	return constm1_rtx;
2670 
2671       /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
2672       if (CONST_INT_P (op1)
2673 	  && HWI_COMPUTABLE_MODE_P (mode)
2674 	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2675 	  && !side_effects_p (op0))
2676 	return op1;
2677 
2678       /* Canonicalize (X & C1) | C2.  */
2679       if (GET_CODE (op0) == AND
2680 	  && CONST_INT_P (trueop1)
2681 	  && CONST_INT_P (XEXP (op0, 1)))
2682 	{
2683 	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2684 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2685 	  HOST_WIDE_INT c2 = INTVAL (trueop1);
2686 
2687 	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
2688 	  if ((c1 & c2) == c1
2689 	      && !side_effects_p (XEXP (op0, 0)))
2690 	    return trueop1;
2691 
2692 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
2693 	  if (((c1|c2) & mask) == mask)
2694 	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2695 
2696 	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
2697 	  if (((c1 & ~c2) & mask) != (c1 & mask))
2698 	    {
2699 	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2700 					 gen_int_mode (c1 & ~c2, mode));
2701 	      return simplify_gen_binary (IOR, mode, tem, op1);
2702 	    }
2703 	}
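
      /* Worked example (illustrative): for
	 (ior (and X (const_int 0xff)) (const_int 0x0f)) neither of the
	 first two rules applies, so C1 is shrunk to 0xff & ~0x0f and
	 the result is (ior (and X (const_int 0xf0)) (const_int 0x0f)).  */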
2704 
2705       /* Convert (A & B) | A to A.  */
2706       if (GET_CODE (op0) == AND
2707 	  && (rtx_equal_p (XEXP (op0, 0), op1)
2708 	      || rtx_equal_p (XEXP (op0, 1), op1))
2709 	  && ! side_effects_p (XEXP (op0, 0))
2710 	  && ! side_effects_p (XEXP (op0, 1)))
2711 	return op1;
2712 
2713       /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2714          mode size to (rotate A CX).  */
2715 
2716       if (GET_CODE (op1) == ASHIFT
2717           || GET_CODE (op1) == SUBREG)
2718         {
2719 	  opleft = op1;
2720 	  opright = op0;
2721 	}
2722       else
2723         {
2724 	  opright = op1;
2725 	  opleft = op0;
2726 	}
2727 
2728       if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2729           && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2730           && CONST_INT_P (XEXP (opleft, 1))
2731           && CONST_INT_P (XEXP (opright, 1))
2732           && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2733               == GET_MODE_PRECISION (mode)))
2734         return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
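
      /* Illustrative example (not part of the original source): in
	 SImode, (ior (ashift X (const_int 24)) (lshiftrt X (const_int 8)))
	 satisfies 24 + 8 == 32 and therefore becomes
	 (rotate:SI X (const_int 24)).  */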
2735 
2736       /* Same, but for ashift that has been "simplified" to a wider mode
2737         by simplify_shift_const.  */
2738 
2739       if (GET_CODE (opleft) == SUBREG
2740           && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2741           && GET_CODE (opright) == LSHIFTRT
2742           && GET_CODE (XEXP (opright, 0)) == SUBREG
2743           && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2744           && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2745           && (GET_MODE_SIZE (GET_MODE (opleft))
2746               < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2747           && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2748                           SUBREG_REG (XEXP (opright, 0)))
2749           && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2750           && CONST_INT_P (XEXP (opright, 1))
2751           && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2752               == GET_MODE_PRECISION (mode)))
2753         return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2754                                XEXP (SUBREG_REG (opleft), 1));
2755 
2756       /* If we have (ior (and X C1) C2), simplify this by making
2757 	 C1 as small as possible if C1 actually changes.  */
2758       if (CONST_INT_P (op1)
2759 	  && (HWI_COMPUTABLE_MODE_P (mode)
2760 	      || INTVAL (op1) > 0)
2761 	  && GET_CODE (op0) == AND
2762 	  && CONST_INT_P (XEXP (op0, 1))
2763 	  && CONST_INT_P (op1)
2764 	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
2765 	return simplify_gen_binary (IOR, mode,
2766 				    simplify_gen_binary
2767 					  (AND, mode, XEXP (op0, 0),
2768 					   GEN_INT (UINTVAL (XEXP (op0, 1))
2769 						    & ~UINTVAL (op1))),
2770 				    op1);
2771 
2772       /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2773          a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2774 	 the PLUS does not affect any of the bits in OP1: then we can do
2775 	 the IOR as a PLUS and we can associate.  This is valid if OP1
2776          can be safely shifted left C bits.  */
2777       if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2778           && GET_CODE (XEXP (op0, 0)) == PLUS
2779           && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2780           && CONST_INT_P (XEXP (op0, 1))
2781           && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2782         {
2783           int count = INTVAL (XEXP (op0, 1));
2784           HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2785 
2786           if (mask >> count == INTVAL (trueop1)
2787               && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2788 	    return simplify_gen_binary (ASHIFTRT, mode,
2789 					plus_constant (mode, XEXP (op0, 0),
2790 						       mask),
2791 					XEXP (op0, 1));
2792         }
2793 
2794       tem = simplify_associative_operation (code, mode, op0, op1);
2795       if (tem)
2796 	return tem;
2797       break;
2798 
2799     case XOR:
2800       if (trueop1 == CONST0_RTX (mode))
2801 	return op0;
2802       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2803 	return simplify_gen_unary (NOT, mode, op0, mode);
2804       if (rtx_equal_p (trueop0, trueop1)
2805 	  && ! side_effects_p (op0)
2806 	  && GET_MODE_CLASS (mode) != MODE_CC)
2807 	 return CONST0_RTX (mode);
2808 
2809       /* Canonicalize XOR of the most significant bit to PLUS.  */
2810       if (CONST_SCALAR_INT_P (op1)
2811 	  && mode_signbit_p (mode, op1))
2812 	return simplify_gen_binary (PLUS, mode, op0, op1);
2813       /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2814       if (CONST_SCALAR_INT_P (op1)
2815 	  && GET_CODE (op0) == PLUS
2816 	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
2817 	  && mode_signbit_p (mode, XEXP (op0, 1)))
2818 	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2819 				    simplify_gen_binary (XOR, mode, op1,
2820 							 XEXP (op0, 1)));
2821 
2822       /* If we are XORing two things that have no bits in common,
2823 	 convert them into an IOR.  This helps to detect rotation encoded
2824 	 using those methods and possibly other simplifications.  */
2825 
2826       if (HWI_COMPUTABLE_MODE_P (mode)
2827 	  && (nonzero_bits (op0, mode)
2828 	      & nonzero_bits (op1, mode)) == 0)
2829 	return (simplify_gen_binary (IOR, mode, op0, op1));
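
      /* Illustrative example (not part of the original source): if
	 nonzero_bits shows OP0 can only set bits in 0xff00 and OP1 only
	 bits in 0x00ff, then (xor OP0 OP1) == (ior OP0 OP1), which may
	 in turn expose a rotate pattern like those handled under IOR
	 above.  */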
2830 
2831       /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2832 	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2833 	 (NOT y).  */
2834       {
2835 	int num_negated = 0;
2836 
2837 	if (GET_CODE (op0) == NOT)
2838 	  num_negated++, op0 = XEXP (op0, 0);
2839 	if (GET_CODE (op1) == NOT)
2840 	  num_negated++, op1 = XEXP (op1, 0);
2841 
2842 	if (num_negated == 2)
2843 	  return simplify_gen_binary (XOR, mode, op0, op1);
2844 	else if (num_negated == 1)
2845 	  return simplify_gen_unary (NOT, mode,
2846 				     simplify_gen_binary (XOR, mode, op0, op1),
2847 				     mode);
2848       }
2849 
2850       /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2851 	 correspond to a machine insn or result in further simplifications
2852 	 if B is a constant.  */
2853 
2854       if (GET_CODE (op0) == AND
2855 	  && rtx_equal_p (XEXP (op0, 1), op1)
2856 	  && ! side_effects_p (op1))
2857 	return simplify_gen_binary (AND, mode,
2858 				    simplify_gen_unary (NOT, mode,
2859 							XEXP (op0, 0), mode),
2860 				    op1);
2861 
2862       else if (GET_CODE (op0) == AND
2863 	       && rtx_equal_p (XEXP (op0, 0), op1)
2864 	       && ! side_effects_p (op1))
2865 	return simplify_gen_binary (AND, mode,
2866 				    simplify_gen_unary (NOT, mode,
2867 							XEXP (op0, 1), mode),
2868 				    op1);
2869 
2870       /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2871 	 we can transform like this:
2872             (A&B)^C == ~(A&B)&C | ~C&(A&B)
2873                     == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
2874                     == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
2875 	 Attempt a few simplifications when B and C are both constants.  */
2876       if (GET_CODE (op0) == AND
2877 	  && CONST_INT_P (op1)
2878 	  && CONST_INT_P (XEXP (op0, 1)))
2879 	{
2880 	  rtx a = XEXP (op0, 0);
2881 	  rtx b = XEXP (op0, 1);
2882 	  rtx c = op1;
2883 	  HOST_WIDE_INT bval = INTVAL (b);
2884 	  HOST_WIDE_INT cval = INTVAL (c);
2885 
2886 	  rtx na_c
2887 	    = simplify_binary_operation (AND, mode,
2888 					 simplify_gen_unary (NOT, mode, a, mode),
2889 					 c);
2890 	  if ((~cval & bval) == 0)
2891 	    {
2892 	      /* Try to simplify ~A&C | ~B&C.  */
2893 	      if (na_c != NULL_RTX)
2894 		return simplify_gen_binary (IOR, mode, na_c,
2895 					    GEN_INT (~bval & cval));
2896 	    }
2897 	  else
2898 	    {
2899 	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
2900 	      if (na_c == const0_rtx)
2901 		{
2902 		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
2903 						    GEN_INT (~cval & bval));
2904 		  return simplify_gen_binary (IOR, mode, a_nc_b,
2905 					      GEN_INT (~bval & cval));
2906 		}
2907 	    }
2908 	}
2909 
2910       /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2911 	 comparison if STORE_FLAG_VALUE is 1.  */
2912       if (STORE_FLAG_VALUE == 1
2913 	  && trueop1 == const1_rtx
2914 	  && COMPARISON_P (op0)
2915 	  && (reversed = reversed_comparison (op0, mode)))
2916 	return reversed;
2917 
2918       /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2919 	 is (lt foo (const_int 0)), so we can perform the above
2920 	 simplification if STORE_FLAG_VALUE is 1.  */
2921 
2922       if (STORE_FLAG_VALUE == 1
2923 	  && trueop1 == const1_rtx
2924 	  && GET_CODE (op0) == LSHIFTRT
2925 	  && CONST_INT_P (XEXP (op0, 1))
2926 	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
2927 	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2928 
2929       /* (xor (comparison foo bar) (const_int sign-bit)) can become the
2930 	 reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
2931       if (val_signbit_p (mode, STORE_FLAG_VALUE)
2932 	  && trueop1 == const_true_rtx
2933 	  && COMPARISON_P (op0)
2934 	  && (reversed = reversed_comparison (op0, mode)))
2935 	return reversed;
2936 
2937       tem = simplify_associative_operation (code, mode, op0, op1);
2938       if (tem)
2939 	return tem;
2940       break;
2941 
2942     case AND:
2943       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2944 	return trueop1;
2945       if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2946 	return op0;
2947       if (HWI_COMPUTABLE_MODE_P (mode))
2948 	{
2949 	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2950 	  HOST_WIDE_INT nzop1;
2951 	  if (CONST_INT_P (trueop1))
2952 	    {
2953 	      HOST_WIDE_INT val1 = INTVAL (trueop1);
2954 	      /* If we are turning off bits already known off in OP0, we need
2955 		 not do an AND.  */
2956 	      if ((nzop0 & ~val1) == 0)
2957 		return op0;
2958 	    }
2959 	  nzop1 = nonzero_bits (trueop1, mode);
2960 	  /* If we are clearing all the nonzero bits, the result is zero.  */
2961 	  if ((nzop1 & nzop0) == 0
2962 	      && !side_effects_p (op0) && !side_effects_p (op1))
2963 	    return CONST0_RTX (mode);
2964 	}
2965       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2966 	  && GET_MODE_CLASS (mode) != MODE_CC)
2967 	return op0;
2968       /* A & (~A) -> 0 */
2969       if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2970 	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2971 	  && ! side_effects_p (op0)
2972 	  && GET_MODE_CLASS (mode) != MODE_CC)
2973 	return CONST0_RTX (mode);
2974 
2975       /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2976 	 there are no nonzero bits of C outside of X's mode.  */
2977       if ((GET_CODE (op0) == SIGN_EXTEND
2978 	   || GET_CODE (op0) == ZERO_EXTEND)
2979 	  && CONST_INT_P (trueop1)
2980 	  && HWI_COMPUTABLE_MODE_P (mode)
2981 	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2982 	      & UINTVAL (trueop1)) == 0)
2983 	{
2984 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2985 	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2986 				     gen_int_mode (INTVAL (trueop1),
2987 						   imode));
2988 	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2989 	}
2990 
2991       /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
2992 	 we might be able to further simplify the AND with X and potentially
2993 	 remove the truncation altogether.  */
2994       if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2995 	{
2996 	  rtx x = XEXP (op0, 0);
2997 	  enum machine_mode xmode = GET_MODE (x);
2998 	  tem = simplify_gen_binary (AND, xmode, x,
2999 				     gen_int_mode (INTVAL (trueop1), xmode));
3000 	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3001 	}
3002 
3003       /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
3004       if (GET_CODE (op0) == IOR
3005 	  && CONST_INT_P (trueop1)
3006 	  && CONST_INT_P (XEXP (op0, 1)))
3007 	{
3008 	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3009 	  return simplify_gen_binary (IOR, mode,
3010 				      simplify_gen_binary (AND, mode,
3011 							   XEXP (op0, 0), op1),
3012 				      gen_int_mode (tmp, mode));
3013 	}
3014 
3015       /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3016 	 insn (and may simplify more).  */
3017       if (GET_CODE (op0) == XOR
3018 	  && rtx_equal_p (XEXP (op0, 0), op1)
3019 	  && ! side_effects_p (op1))
3020 	return simplify_gen_binary (AND, mode,
3021 				    simplify_gen_unary (NOT, mode,
3022 							XEXP (op0, 1), mode),
3023 				    op1);
3024 
3025       if (GET_CODE (op0) == XOR
3026 	  && rtx_equal_p (XEXP (op0, 1), op1)
3027 	  && ! side_effects_p (op1))
3028 	return simplify_gen_binary (AND, mode,
3029 				    simplify_gen_unary (NOT, mode,
3030 							XEXP (op0, 0), mode),
3031 				    op1);
3032 
3033       /* Similarly for (~(A ^ B)) & A.  */
3034       if (GET_CODE (op0) == NOT
3035 	  && GET_CODE (XEXP (op0, 0)) == XOR
3036 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3037 	  && ! side_effects_p (op1))
3038 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3039 
3040       if (GET_CODE (op0) == NOT
3041 	  && GET_CODE (XEXP (op0, 0)) == XOR
3042 	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3043 	  && ! side_effects_p (op1))
3044 	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3045 
3046       /* Convert (A | B) & A to A.  */
3047       if (GET_CODE (op0) == IOR
3048 	  && (rtx_equal_p (XEXP (op0, 0), op1)
3049 	      || rtx_equal_p (XEXP (op0, 1), op1))
3050 	  && ! side_effects_p (XEXP (op0, 0))
3051 	  && ! side_effects_p (XEXP (op0, 1)))
3052 	return op1;
3053 
3054       /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3055 	 ((A & N) + B) & M -> (A + B) & M
3056 	 Similarly if (N & M) == 0,
3057 	 ((A | N) + B) & M -> (A + B) & M
3058 	 and for - instead of + and/or ^ instead of |.
3059          Also, if (N & M) == 0, then
3060 	 (A +- N) & M -> A & M.  */
3061       if (CONST_INT_P (trueop1)
3062 	  && HWI_COMPUTABLE_MODE_P (mode)
3063 	  && ~UINTVAL (trueop1)
3064 	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3065 	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3066 	{
3067 	  rtx pmop[2];
3068 	  int which;
3069 
3070 	  pmop[0] = XEXP (op0, 0);
3071 	  pmop[1] = XEXP (op0, 1);
3072 
3073 	  if (CONST_INT_P (pmop[1])
3074 	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3075 	    return simplify_gen_binary (AND, mode, pmop[0], op1);
3076 
3077 	  for (which = 0; which < 2; which++)
3078 	    {
3079 	      tem = pmop[which];
3080 	      switch (GET_CODE (tem))
3081 		{
3082 		case AND:
3083 		  if (CONST_INT_P (XEXP (tem, 1))
3084 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3085 		      == UINTVAL (trueop1))
3086 		    pmop[which] = XEXP (tem, 0);
3087 		  break;
3088 		case IOR:
3089 		case XOR:
3090 		  if (CONST_INT_P (XEXP (tem, 1))
3091 		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3092 		    pmop[which] = XEXP (tem, 0);
3093 		  break;
3094 		default:
3095 		  break;
3096 		}
3097 	    }
3098 
3099 	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3100 	    {
3101 	      tem = simplify_gen_binary (GET_CODE (op0), mode,
3102 					 pmop[0], pmop[1]);
3103 	      return simplify_gen_binary (code, mode, tem, op1);
3104 	    }
3105 	}
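
      /* Worked example (illustrative): with M = 0xff, the expression
	 (and (plus (and A (const_int 0xfff)) B) (const_int 0xff))
	 drops the inner mask because 0xfff covers all of 0xff, giving
	 (and (plus A B) (const_int 0xff)).  */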
3106 
3107       /* (and X (ior (not X) Y)) -> (and X Y) */
3108       if (GET_CODE (op1) == IOR
3109 	  && GET_CODE (XEXP (op1, 0)) == NOT
3110 	  && op0 == XEXP (XEXP (op1, 0), 0))
3111        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3112 
3113       /* (and (ior (not X) Y) X) -> (and X Y) */
3114       if (GET_CODE (op0) == IOR
3115 	  && GET_CODE (XEXP (op0, 0)) == NOT
3116 	  && op1 == XEXP (XEXP (op0, 0), 0))
3117 	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3118 
3119       tem = simplify_associative_operation (code, mode, op0, op1);
3120       if (tem)
3121 	return tem;
3122       break;
3123 
3124     case UDIV:
3125       /* 0/x is 0 (or x&0 if x has side-effects).  */
3126       if (trueop0 == CONST0_RTX (mode))
3127 	{
3128 	  if (side_effects_p (op1))
3129 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3130 	  return trueop0;
3131 	}
3132       /* x/1 is x.  */
3133       if (trueop1 == CONST1_RTX (mode))
3134 	{
3135 	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3136 	  if (tem)
3137 	    return tem;
3138 	}
3139       /* Convert divide by power of two into shift.  */
3140       if (CONST_INT_P (trueop1)
3141 	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3142 	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3143       break;
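
      /* Illustrative example (not part of the original source):
	 (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3));
	 this is only valid for unsigned division, since signed division
	 by a power of two must round towards zero.  */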
3144 
3145     case DIV:
3146       /* Handle floating point and integers separately.  */
3147       if (SCALAR_FLOAT_MODE_P (mode))
3148 	{
3149 	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
3150 	     safe for modes with NaNs, since 0.0 / 0.0 will then be
3151 	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0.  */
3153 	  if (trueop0 == CONST0_RTX (mode)
3154 	      && !HONOR_NANS (mode)
3155 	      && !HONOR_SIGNED_ZEROS (mode)
3156 	      && ! side_effects_p (op1))
3157 	    return op0;
3158 	  /* x/1.0 is x.  */
3159 	  if (trueop1 == CONST1_RTX (mode)
3160 	      && !HONOR_SNANS (mode))
3161 	    return op0;
3162 
3163 	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3164 	      && trueop1 != CONST0_RTX (mode))
3165 	    {
3166 	      REAL_VALUE_TYPE d;
3167 	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
3168 
3169 	      /* x/-1.0 is -x.  */
3170 	      if (REAL_VALUES_EQUAL (d, dconstm1)
3171 		  && !HONOR_SNANS (mode))
3172 		return simplify_gen_unary (NEG, mode, op0, mode);
3173 
3174 	      /* Change FP division by a constant into multiplication.
3175 		 Only do this with -freciprocal-math.  */
3176 	      if (flag_reciprocal_math
3177 		  && !REAL_VALUES_EQUAL (d, dconst0))
3178 		{
3179 		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
3180 		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3181 		  return simplify_gen_binary (MULT, mode, op0, tem);
3182 		}
3183 	    }
3184 	}
3185       else if (SCALAR_INT_MODE_P (mode))
3186 	{
3187 	  /* 0/x is 0 (or x&0 if x has side-effects).  */
3188 	  if (trueop0 == CONST0_RTX (mode)
3189 	      && !cfun->can_throw_non_call_exceptions)
3190 	    {
3191 	      if (side_effects_p (op1))
3192 		return simplify_gen_binary (AND, mode, op1, trueop0);
3193 	      return trueop0;
3194 	    }
3195 	  /* x/1 is x.  */
3196 	  if (trueop1 == CONST1_RTX (mode))
3197 	    {
3198 	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3199 	      if (tem)
3200 		return tem;
3201 	    }
3202 	  /* x/-1 is -x.  */
3203 	  if (trueop1 == constm1_rtx)
3204 	    {
3205 	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3206 	      if (x)
3207 		return simplify_gen_unary (NEG, mode, x, mode);
3208 	    }
3209 	}
3210       break;
3211 
3212     case UMOD:
3213       /* 0%x is 0 (or x&0 if x has side-effects).  */
3214       if (trueop0 == CONST0_RTX (mode))
3215 	{
3216 	  if (side_effects_p (op1))
3217 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3218 	  return trueop0;
3219 	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
3221       if (trueop1 == CONST1_RTX (mode))
3222 	{
3223 	  if (side_effects_p (op0))
3224 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3225 	  return CONST0_RTX (mode);
3226 	}
3227       /* Implement modulus by power of two as AND.  */
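      /* E.g. (umod x 16) becomes (and x 15): the remainder of an
	 unsigned division by 2**4 is just the low four bits.  */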
3228       if (CONST_INT_P (trueop1)
3229 	  && exact_log2 (UINTVAL (trueop1)) > 0)
3230 	return simplify_gen_binary (AND, mode, op0,
3231 				    GEN_INT (INTVAL (op1) - 1));
3232       break;
3233 
3234     case MOD:
3235       /* 0%x is 0 (or x&0 if x has side-effects).  */
3236       if (trueop0 == CONST0_RTX (mode))
3237 	{
3238 	  if (side_effects_p (op1))
3239 	    return simplify_gen_binary (AND, mode, op1, trueop0);
3240 	  return trueop0;
3241 	}
      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
3243       if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3244 	{
3245 	  if (side_effects_p (op0))
3246 	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3247 	  return CONST0_RTX (mode);
3248 	}
3249       break;
3250 
3251     case ROTATERT:
3252     case ROTATE:
3253     case ASHIFTRT:
3254       if (trueop1 == CONST0_RTX (mode))
3255 	return op0;
3256       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3257 	return op0;
3258       /* Rotating ~0 always results in ~0.  */
3259       if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
3260 	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3261 	  && ! side_effects_p (op1))
3262 	return op0;
3263     canonicalize_shift:
3264       if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3265 	{
3266 	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
3267 	  if (val != INTVAL (op1))
3268 	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3269 	}
3270       break;
3271 
3272     case ASHIFT:
3273     case SS_ASHIFT:
3274     case US_ASHIFT:
3275       if (trueop1 == CONST0_RTX (mode))
3276 	return op0;
3277       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3278 	return op0;
3279       goto canonicalize_shift;
3280 
3281     case LSHIFTRT:
3282       if (trueop1 == CONST0_RTX (mode))
3283 	return op0;
3284       if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3285 	return op0;
3286       /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
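      /* On a target where CLZ of zero is defined to be the mode
	 precision, say 32, (clz X) has bit 5 set only for X == 0,
	 so (lshiftrt (clz X) 5) computes exactly (eq X 0).  */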
3287       if (GET_CODE (op0) == CLZ
3288 	  && CONST_INT_P (trueop1)
3289 	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
3291 	{
3292 	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
3293 	  unsigned HOST_WIDE_INT zero_val = 0;
3294 
3295 	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
3296 	      && zero_val == GET_MODE_PRECISION (imode)
3297 	      && INTVAL (trueop1) == exact_log2 (zero_val))
3298 	    return simplify_gen_relational (EQ, mode, imode,
3299 					    XEXP (op0, 0), const0_rtx);
3300 	}
3301       goto canonicalize_shift;
3302 
3303     case SMIN:
3304       if (width <= HOST_BITS_PER_WIDE_INT
3305 	  && mode_signbit_p (mode, trueop1)
3306 	  && ! side_effects_p (op0))
3307 	return op1;
3308       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3309 	return op0;
3310       tem = simplify_associative_operation (code, mode, op0, op1);
3311       if (tem)
3312 	return tem;
3313       break;
3314 
3315     case SMAX:
3316       if (width <= HOST_BITS_PER_WIDE_INT
3317 	  && CONST_INT_P (trueop1)
3318 	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3319 	  && ! side_effects_p (op0))
3320 	return op1;
3321       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3322 	return op0;
3323       tem = simplify_associative_operation (code, mode, op0, op1);
3324       if (tem)
3325 	return tem;
3326       break;
3327 
3328     case UMIN:
3329       if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3330 	return op1;
3331       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3332 	return op0;
3333       tem = simplify_associative_operation (code, mode, op0, op1);
3334       if (tem)
3335 	return tem;
3336       break;
3337 
3338     case UMAX:
3339       if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3340 	return op1;
3341       if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3342 	return op0;
3343       tem = simplify_associative_operation (code, mode, op0, op1);
3344       if (tem)
3345 	return tem;
3346       break;
3347 
3348     case SS_PLUS:
3349     case US_PLUS:
3350     case SS_MINUS:
3351     case US_MINUS:
3352     case SS_MULT:
3353     case US_MULT:
3354     case SS_DIV:
3355     case US_DIV:
3356       /* ??? There are simplifications that can be done.  */
3357       return 0;
3358 
3359     case VEC_SELECT:
3360       if (!VECTOR_MODE_P (mode))
3361 	{
3362 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3363 	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3364 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3365 	  gcc_assert (XVECLEN (trueop1, 0) == 1);
3366 	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3367 
3368 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3369 	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3370 						      (trueop1, 0, 0)));
3371 
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with an optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain of
	     nested VEC_SELECT expressions.  When the input operand is a
	     memory operand, this operation can be simplified to a simple
	     scalar load from an offset memory address.  */
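	  /* For instance, (vec_select:SF (vec_select:V4SF X (parallel
	     [2 3 0 1])) (parallel [0])) picks element 2 of X and so
	     collapses to (vec_select:SF X (parallel [2])).  */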
3378 	  if (GET_CODE (trueop0) == VEC_SELECT)
3379 	    {
3380 	      rtx op0 = XEXP (trueop0, 0);
3381 	      rtx op1 = XEXP (trueop0, 1);
3382 
3383 	      enum machine_mode opmode = GET_MODE (op0);
3384 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
3385 	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;
3386 
3387 	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
3388 	      int elem;
3389 
3390 	      rtvec vec;
3391 	      rtx tmp_op, tmp;
3392 
3393 	      gcc_assert (GET_CODE (op1) == PARALLEL);
3394 	      gcc_assert (i < n_elts);
3395 
	      /* Select the element pointed to by the nested selector.  */
3397 	      elem = INTVAL (XVECEXP (op1, 0, i));
3398 
3399 	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
3400 	      if (GET_CODE (op0) == VEC_CONCAT)
3401 		{
3402 		  rtx op00 = XEXP (op0, 0);
3403 		  rtx op01 = XEXP (op0, 1);
3404 
3405 		  enum machine_mode mode00, mode01;
3406 		  int n_elts00, n_elts01;
3407 
3408 		  mode00 = GET_MODE (op00);
3409 		  mode01 = GET_MODE (op01);
3410 
3411 		  /* Find out number of elements of each operand.  */
3412 		  if (VECTOR_MODE_P (mode00))
3413 		    {
3414 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
3415 		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
3416 		    }
3417 		  else
3418 		    n_elts00 = 1;
3419 
3420 		  if (VECTOR_MODE_P (mode01))
3421 		    {
3422 		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
3423 		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
3424 		    }
3425 		  else
3426 		    n_elts01 = 1;
3427 
3428 		  gcc_assert (n_elts == n_elts00 + n_elts01);
3429 
		  /* Select the correct operand of the VEC_CONCAT
		     and adjust the selector.  */
		  if (elem < n_elts00)
3433 		    tmp_op = op00;
3434 		  else
3435 		    {
3436 		      tmp_op = op01;
3437 		      elem -= n_elts00;
3438 		    }
3439 		}
3440 	      else
3441 		tmp_op = op0;
3442 
3443 	      vec = rtvec_alloc (1);
3444 	      RTVEC_ELT (vec, 0) = GEN_INT (elem);
3445 
3446 	      tmp = gen_rtx_fmt_ee (code, mode,
3447 				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3448 	      return tmp;
3449 	    }
3450 	  if (GET_CODE (trueop0) == VEC_DUPLICATE
3451 	      && GET_MODE (XEXP (trueop0, 0)) == mode)
3452 	    return XEXP (trueop0, 0);
3453 	}
3454       else
3455 	{
3456 	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3457 	  gcc_assert (GET_MODE_INNER (mode)
3458 		      == GET_MODE_INNER (GET_MODE (trueop0)));
3459 	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
3460 
3461 	  if (GET_CODE (trueop0) == CONST_VECTOR)
3462 	    {
3463 	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3464 	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3465 	      rtvec v = rtvec_alloc (n_elts);
3466 	      unsigned int i;
3467 
3468 	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3469 	      for (i = 0; i < n_elts; i++)
3470 		{
3471 		  rtx x = XVECEXP (trueop1, 0, i);
3472 
3473 		  gcc_assert (CONST_INT_P (x));
3474 		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3475 						       INTVAL (x));
3476 		}
3477 
3478 	      return gen_rtx_CONST_VECTOR (mode, v);
3479 	    }
3480 
3481 	  /* Recognize the identity.  */
3482 	  if (GET_MODE (trueop0) == mode)
3483 	    {
3484 	      bool maybe_ident = true;
3485 	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3486 		{
3487 		  rtx j = XVECEXP (trueop1, 0, i);
3488 		  if (!CONST_INT_P (j) || INTVAL (j) != i)
3489 		    {
3490 		      maybe_ident = false;
3491 		      break;
3492 		    }
3493 		}
3494 	      if (maybe_ident)
3495 		return trueop0;
3496 	    }
3497 
3498 	  /* If we build {a,b} then permute it, build the result directly.  */
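	  /* E.g. (vec_select:V2DF (vec_concat:V4DF (vec_concat:V2DF a b)
	     (vec_concat:V2DF c d)) (parallel [1 2])) selects elements 1
	     and 2 of {a,b,c,d} and becomes (vec_concat:V2DF b c).  */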
3499 	  if (XVECLEN (trueop1, 0) == 2
3500 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3501 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3502 	      && GET_CODE (trueop0) == VEC_CONCAT
3503 	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3504 	      && GET_MODE (XEXP (trueop0, 0)) == mode
3505 	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3506 	      && GET_MODE (XEXP (trueop0, 1)) == mode)
3507 	    {
3508 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3509 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3510 	      rtx subop0, subop1;
3511 
3512 	      gcc_assert (i0 < 4 && i1 < 4);
3513 	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3514 	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3515 
3516 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3517 	    }
3518 
3519 	  if (XVECLEN (trueop1, 0) == 2
3520 	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3521 	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3522 	      && GET_CODE (trueop0) == VEC_CONCAT
3523 	      && GET_MODE (trueop0) == mode)
3524 	    {
3525 	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3526 	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3527 	      rtx subop0, subop1;
3528 
3529 	      gcc_assert (i0 < 2 && i1 < 2);
3530 	      subop0 = XEXP (trueop0, i0);
3531 	      subop1 = XEXP (trueop0, i1);
3532 
3533 	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3534 	    }
3535 	}
3536 
3537       if (XVECLEN (trueop1, 0) == 1
3538 	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3539 	  && GET_CODE (trueop0) == VEC_CONCAT)
3540 	{
3541 	  rtx vec = trueop0;
3542 	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3543 
3544 	  /* Try to find the element in the VEC_CONCAT.  */
3545 	  while (GET_MODE (vec) != mode
3546 		 && GET_CODE (vec) == VEC_CONCAT)
3547 	    {
3548 	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3549 	      if (offset < vec_size)
3550 		vec = XEXP (vec, 0);
3551 	      else
3552 		{
3553 		  offset -= vec_size;
3554 		  vec = XEXP (vec, 1);
3555 		}
3556 	      vec = avoid_constant_pool_reference (vec);
3557 	    }
3558 
3559 	  if (GET_MODE (vec) == mode)
3560 	    return vec;
3561 	}
3562 
3563       return 0;
3564     case VEC_CONCAT:
3565       {
3566 	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3567 				      ? GET_MODE (trueop0)
3568 				      : GET_MODE_INNER (mode));
3569 	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3570 				      ? GET_MODE (trueop1)
3571 				      : GET_MODE_INNER (mode));
3572 
3573 	gcc_assert (VECTOR_MODE_P (mode));
3574 	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3575 		    == GET_MODE_SIZE (mode));
3576 
3577 	if (VECTOR_MODE_P (op0_mode))
3578 	  gcc_assert (GET_MODE_INNER (mode)
3579 		      == GET_MODE_INNER (op0_mode));
3580 	else
3581 	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3582 
3583 	if (VECTOR_MODE_P (op1_mode))
3584 	  gcc_assert (GET_MODE_INNER (mode)
3585 		      == GET_MODE_INNER (op1_mode));
3586 	else
3587 	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3588 
3589 	if ((GET_CODE (trueop0) == CONST_VECTOR
3590 	     || CONST_SCALAR_INT_P (trueop0)
3591 	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3592 	    && (GET_CODE (trueop1) == CONST_VECTOR
3593 		|| CONST_SCALAR_INT_P (trueop1)
3594 		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3595 	  {
3596 	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3597 	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3598 	    rtvec v = rtvec_alloc (n_elts);
3599 	    unsigned int i;
3600 	    unsigned in_n_elts = 1;
3601 
3602 	    if (VECTOR_MODE_P (op0_mode))
3603 	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3604 	    for (i = 0; i < n_elts; i++)
3605 	      {
3606 		if (i < in_n_elts)
3607 		  {
3608 		    if (!VECTOR_MODE_P (op0_mode))
3609 		      RTVEC_ELT (v, i) = trueop0;
3610 		    else
3611 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3612 		  }
3613 		else
3614 		  {
3615 		    if (!VECTOR_MODE_P (op1_mode))
3616 		      RTVEC_ELT (v, i) = trueop1;
3617 		    else
3618 		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3619 							   i - in_n_elts);
3620 		  }
3621 	      }
3622 
3623 	    return gen_rtx_CONST_VECTOR (mode, v);
3624 	  }
3625 
3626 	/* Try to merge VEC_SELECTs from the same vector into a single one.  */
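	/* E.g. (vec_concat (vec_select x (parallel [0]))
	   (vec_select x (parallel [3]))) becomes
	   (vec_select x (parallel [0 3])).  */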
3627 	if (GET_CODE (trueop0) == VEC_SELECT
3628 	    && GET_CODE (trueop1) == VEC_SELECT
3629 	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0)))
3630 	  {
3631 	    rtx par0 = XEXP (trueop0, 1);
3632 	    rtx par1 = XEXP (trueop1, 1);
3633 	    int len0 = XVECLEN (par0, 0);
3634 	    int len1 = XVECLEN (par1, 0);
3635 	    rtvec vec = rtvec_alloc (len0 + len1);
3636 	    for (int i = 0; i < len0; i++)
3637 	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3638 	    for (int i = 0; i < len1; i++)
3639 	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3640 	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3641 					gen_rtx_PARALLEL (VOIDmode, vec));
3642 	  }
3643       }
3644       return 0;
3645 
3646     default:
3647       gcc_unreachable ();
3648     }
3649 
3650   return 0;
3651 }
3652 
3653 rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3655 				 rtx op0, rtx op1)
3656 {
3657   HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3658   HOST_WIDE_INT val;
3659   unsigned int width = GET_MODE_PRECISION (mode);
3660 
3661   if (VECTOR_MODE_P (mode)
3662       && code != VEC_CONCAT
3663       && GET_CODE (op0) == CONST_VECTOR
3664       && GET_CODE (op1) == CONST_VECTOR)
3665     {
3666       unsigned n_elts = GET_MODE_NUNITS (mode);
3667       enum machine_mode op0mode = GET_MODE (op0);
3668       unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3669       enum machine_mode op1mode = GET_MODE (op1);
3670       unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3671       rtvec v = rtvec_alloc (n_elts);
3672       unsigned int i;
3673 
3674       gcc_assert (op0_n_elts == n_elts);
3675       gcc_assert (op1_n_elts == n_elts);
3676       for (i = 0; i < n_elts; i++)
3677 	{
3678 	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3679 					     CONST_VECTOR_ELT (op0, i),
3680 					     CONST_VECTOR_ELT (op1, i));
3681 	  if (!x)
3682 	    return 0;
3683 	  RTVEC_ELT (v, i) = x;
3684 	}
3685 
3686       return gen_rtx_CONST_VECTOR (mode, v);
3687     }
3688 
3689   if (VECTOR_MODE_P (mode)
3690       && code == VEC_CONCAT
3691       && (CONST_SCALAR_INT_P (op0)
3692 	  || GET_CODE (op0) == CONST_FIXED
3693 	  || CONST_DOUBLE_AS_FLOAT_P (op0))
3694       && (CONST_SCALAR_INT_P (op1)
3695 	  || CONST_DOUBLE_AS_FLOAT_P (op1)
3696 	  || GET_CODE (op1) == CONST_FIXED))
3697     {
3698       unsigned n_elts = GET_MODE_NUNITS (mode);
3699       rtvec v = rtvec_alloc (n_elts);
3700 
3701       gcc_assert (n_elts >= 2);
3702       if (n_elts == 2)
3703 	{
3704 	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3705 	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3706 
3707 	  RTVEC_ELT (v, 0) = op0;
3708 	  RTVEC_ELT (v, 1) = op1;
3709 	}
3710       else
3711 	{
3712 	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3713 	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3714 	  unsigned i;
3715 
3716 	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3717 	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3718 	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3719 
3720 	  for (i = 0; i < op0_n_elts; ++i)
3721 	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3722 	  for (i = 0; i < op1_n_elts; ++i)
3723 	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3724 	}
3725 
3726       return gen_rtx_CONST_VECTOR (mode, v);
3727     }
3728 
3729   if (SCALAR_FLOAT_MODE_P (mode)
3730       && CONST_DOUBLE_AS_FLOAT_P (op0)
3731       && CONST_DOUBLE_AS_FLOAT_P (op1)
3732       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3733     {
3734       if (code == AND
3735 	  || code == IOR
3736 	  || code == XOR)
3737 	{
3738 	  long tmp0[4];
3739 	  long tmp1[4];
3740 	  REAL_VALUE_TYPE r;
3741 	  int i;
3742 
3743 	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3744 			  GET_MODE (op0));
3745 	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3746 			  GET_MODE (op1));
3747 	  for (i = 0; i < 4; i++)
3748 	    {
3749 	      switch (code)
3750 	      {
3751 	      case AND:
3752 		tmp0[i] &= tmp1[i];
3753 		break;
3754 	      case IOR:
3755 		tmp0[i] |= tmp1[i];
3756 		break;
3757 	      case XOR:
3758 		tmp0[i] ^= tmp1[i];
3759 		break;
3760 	      default:
3761 		gcc_unreachable ();
3762 	      }
3763 	    }
3764 	   real_from_target (&r, tmp0, mode);
3765 	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3766 	}
3767       else
3768 	{
3769 	  REAL_VALUE_TYPE f0, f1, value, result;
3770 	  bool inexact;
3771 
3772 	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3773 	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3774 	  real_convert (&f0, mode, &f0);
3775 	  real_convert (&f1, mode, &f1);
3776 
3777 	  if (HONOR_SNANS (mode)
3778 	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3779 	    return 0;
3780 
3781 	  if (code == DIV
3782 	      && REAL_VALUES_EQUAL (f1, dconst0)
3783 	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3784 	    return 0;
3785 
3786 	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3787 	      && flag_trapping_math
3788 	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3789 	    {
3790 	      int s0 = REAL_VALUE_NEGATIVE (f0);
3791 	      int s1 = REAL_VALUE_NEGATIVE (f1);
3792 
3793 	      switch (code)
3794 		{
3795 		case PLUS:
3796 		  /* Inf + -Inf = NaN plus exception.  */
3797 		  if (s0 != s1)
3798 		    return 0;
3799 		  break;
3800 		case MINUS:
3801 		  /* Inf - Inf = NaN plus exception.  */
3802 		  if (s0 == s1)
3803 		    return 0;
3804 		  break;
3805 		case DIV:
3806 		  /* Inf / Inf = NaN plus exception.  */
3807 		  return 0;
3808 		default:
3809 		  break;
3810 		}
3811 	    }
3812 
3813 	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3814 	      && flag_trapping_math
3815 	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3816 		  || (REAL_VALUE_ISINF (f1)
3817 		      && REAL_VALUES_EQUAL (f0, dconst0))))
3818 	    /* Inf * 0 = NaN plus exception.  */
3819 	    return 0;
3820 
3821 	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3822 				     &f0, &f1);
3823 	  real_convert (&result, mode, &value);
3824 
	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math is set.  */
3827 
3828 	  if (flag_trapping_math
3829 	      && MODE_HAS_INFINITIES (mode)
3830 	      && REAL_VALUE_ISINF (result)
3831 	      && !REAL_VALUE_ISINF (f0)
3832 	      && !REAL_VALUE_ISINF (f1))
3833 	    /* Overflow plus exception.  */
3834 	    return 0;
3835 
3836 	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
3838 	     flag_rounding_math is set, or if GCC's software emulation
3839 	     is unable to accurately represent the result.  */
3840 
3841 	  if ((flag_rounding_math
3842 	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3843 	      && (inexact || !real_identical (&result, &value)))
3844 	    return NULL_RTX;
3845 
3846 	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3847 	}
3848     }
3849 
3850   /* We can fold some multi-word operations.  */
3851   if (GET_MODE_CLASS (mode) == MODE_INT
3852       && width == HOST_BITS_PER_DOUBLE_INT
3853       && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
3854       && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
3855     {
3856       double_int o0, o1, res, tmp;
3857       bool overflow;
3858 
3859       o0 = rtx_to_double_int (op0);
3860       o1 = rtx_to_double_int (op1);
3861 
3862       switch (code)
3863 	{
3864 	case MINUS:
3865 	  /* A - B == A + (-B).  */
3866 	  o1 = -o1;
3867 
3868 	  /* Fall through....  */
3869 
3870 	case PLUS:
3871 	  res = o0 + o1;
3872 	  break;
3873 
3874 	case MULT:
3875 	  res = o0 * o1;
3876 	  break;
3877 
3878 	case DIV:
3879           res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3880 					 &tmp, &overflow);
3881 	  if (overflow)
3882 	    return 0;
3883 	  break;
3884 
3885 	case MOD:
3886           tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
3887 					 &res, &overflow);
3888 	  if (overflow)
3889 	    return 0;
3890 	  break;
3891 
3892 	case UDIV:
3893           res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3894 					 &tmp, &overflow);
3895 	  if (overflow)
3896 	    return 0;
3897 	  break;
3898 
3899 	case UMOD:
3900           tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
3901 					 &res, &overflow);
3902 	  if (overflow)
3903 	    return 0;
3904 	  break;
3905 
3906 	case AND:
3907 	  res = o0 & o1;
3908 	  break;
3909 
3910 	case IOR:
3911 	  res = o0 | o1;
3912 	  break;
3913 
3914 	case XOR:
3915 	  res = o0 ^ o1;
3916 	  break;
3917 
3918 	case SMIN:
3919 	  res = o0.smin (o1);
3920 	  break;
3921 
3922 	case SMAX:
3923 	  res = o0.smax (o1);
3924 	  break;
3925 
3926 	case UMIN:
3927 	  res = o0.umin (o1);
3928 	  break;
3929 
3930 	case UMAX:
3931 	  res = o0.umax (o1);
3932 	  break;
3933 
3934 	case LSHIFTRT:   case ASHIFTRT:
3935 	case ASHIFT:
3936 	case ROTATE:     case ROTATERT:
3937 	  {
3938 	    unsigned HOST_WIDE_INT cnt;
3939 
3940 	    if (SHIFT_COUNT_TRUNCATED)
3941 	      {
3942 		o1.high = 0;
3943 		o1.low &= GET_MODE_PRECISION (mode) - 1;
3944 	      }
3945 
3946 	    if (!o1.fits_uhwi ()
3947 	        || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
3948 	      return 0;
3949 
3950 	    cnt = o1.to_uhwi ();
3951 	    unsigned short prec = GET_MODE_PRECISION (mode);
3952 
3953 	    if (code == LSHIFTRT || code == ASHIFTRT)
3954 	      res = o0.rshift (cnt, prec, code == ASHIFTRT);
3955 	    else if (code == ASHIFT)
3956 	      res = o0.alshift (cnt, prec);
3957 	    else if (code == ROTATE)
3958 	      res = o0.lrotate (cnt, prec);
3959 	    else /* code == ROTATERT */
3960 	      res = o0.rrotate (cnt, prec);
3961 	  }
3962 	  break;
3963 
3964 	default:
3965 	  return 0;
3966 	}
3967 
3968       return immed_double_int_const (res, mode);
3969     }
3970 
3971   if (CONST_INT_P (op0) && CONST_INT_P (op1)
3972       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3973     {
3974       /* Get the integer argument values in two forms:
3975          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
3976 
3977       arg0 = INTVAL (op0);
3978       arg1 = INTVAL (op1);
3979 
3980       if (width < HOST_BITS_PER_WIDE_INT)
3981         {
3982           arg0 &= GET_MODE_MASK (mode);
3983           arg1 &= GET_MODE_MASK (mode);
3984 
3985           arg0s = arg0;
3986 	  if (val_signbit_known_set_p (mode, arg0s))
3987 	    arg0s |= ~GET_MODE_MASK (mode);
3988 
3989           arg1s = arg1;
3990 	  if (val_signbit_known_set_p (mode, arg1s))
3991 	    arg1s |= ~GET_MODE_MASK (mode);
3992 	}
3993       else
3994 	{
3995 	  arg0s = arg0;
3996 	  arg1s = arg1;
3997 	}
3998 
3999       /* Compute the value of the arithmetic.  */
4000 
4001       switch (code)
4002 	{
4003 	case PLUS:
4004 	  val = arg0s + arg1s;
4005 	  break;
4006 
4007 	case MINUS:
4008 	  val = arg0s - arg1s;
4009 	  break;
4010 
4011 	case MULT:
4012 	  val = arg0s * arg1s;
4013 	  break;
4014 
4015 	case DIV:
4016 	  if (arg1s == 0
4017 	      || ((unsigned HOST_WIDE_INT) arg0s
4018 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4019 		  && arg1s == -1))
4020 	    return 0;
4021 	  val = arg0s / arg1s;
4022 	  break;
4023 
4024 	case MOD:
4025 	  if (arg1s == 0
4026 	      || ((unsigned HOST_WIDE_INT) arg0s
4027 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4028 		  && arg1s == -1))
4029 	    return 0;
4030 	  val = arg0s % arg1s;
4031 	  break;
4032 
4033 	case UDIV:
4034 	  if (arg1 == 0
4035 	      || ((unsigned HOST_WIDE_INT) arg0s
4036 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4037 		  && arg1s == -1))
4038 	    return 0;
4039 	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4040 	  break;
4041 
4042 	case UMOD:
4043 	  if (arg1 == 0
4044 	      || ((unsigned HOST_WIDE_INT) arg0s
4045 		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
4046 		  && arg1s == -1))
4047 	    return 0;
4048 	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4049 	  break;
4050 
4051 	case AND:
4052 	  val = arg0 & arg1;
4053 	  break;
4054 
4055 	case IOR:
4056 	  val = arg0 | arg1;
4057 	  break;
4058 
4059 	case XOR:
4060 	  val = arg0 ^ arg1;
4061 	  break;
4062 
4063 	case LSHIFTRT:
4064 	case ASHIFT:
4065 	case ASHIFTRT:
4066 	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4067 	     the value is in range.  We can't return any old value for
4068 	     out-of-range arguments because either the middle-end (via
4069 	     shift_truncation_mask) or the back-end might be relying on
4070 	     target-specific knowledge.  Nor can we rely on
4071 	     shift_truncation_mask, since the shift might not be part of an
4072 	     ashlM3, lshrM3 or ashrM3 instruction.  */
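	  /* E.g. with SHIFT_COUNT_TRUNCATED in SImode, a count of 33
	     is reduced to 33 % 32 == 1; without it, an out-of-range
	     count makes us refuse to fold rather than guess.  */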
4073 	  if (SHIFT_COUNT_TRUNCATED)
4074 	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
4075 	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
4076 	    return 0;
4077 
4078 	  val = (code == ASHIFT
4079 		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
4080 		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
4081 
4082 	  /* Sign-extend the result for arithmetic right shifts.  */
4083 	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
4084 	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
4085 	  break;
4086 
4087 	case ROTATERT:
4088 	  if (arg1 < 0)
4089 	    return 0;
4090 
4091 	  arg1 %= width;
4092 	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4093 		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4094 	  break;
4095 
4096 	case ROTATE:
4097 	  if (arg1 < 0)
4098 	    return 0;
4099 
4100 	  arg1 %= width;
4101 	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4102 		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4103 	  break;
4104 
4105 	case COMPARE:
4106 	  /* Do nothing here.  */
4107 	  return 0;
4108 
4109 	case SMIN:
4110 	  val = arg0s <= arg1s ? arg0s : arg1s;
4111 	  break;
4112 
4113 	case UMIN:
4114 	  val = ((unsigned HOST_WIDE_INT) arg0
4115 		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4116 	  break;
4117 
4118 	case SMAX:
4119 	  val = arg0s > arg1s ? arg0s : arg1s;
4120 	  break;
4121 
4122 	case UMAX:
4123 	  val = ((unsigned HOST_WIDE_INT) arg0
4124 		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4125 	  break;
4126 
4127 	case SS_PLUS:
4128 	case US_PLUS:
4129 	case SS_MINUS:
4130 	case US_MINUS:
4131 	case SS_MULT:
4132 	case US_MULT:
4133 	case SS_DIV:
4134 	case US_DIV:
4135 	case SS_ASHIFT:
4136 	case US_ASHIFT:
4137 	  /* ??? There are simplifications that can be done.  */
4138 	  return 0;
4139 
4140 	default:
4141 	  gcc_unreachable ();
4142 	}
4143 
4144       return gen_int_mode (val, mode);
4145     }
4146 
4147   return NULL_RTX;
4148 }
4149 
4150 
4151 
4152 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4153    PLUS or MINUS.
4154 
   Rather than testing for specific cases, we use a brute-force method:
   apply all possible simplifications until no more changes occur, then
   rebuild the operation.  */
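/* For instance, (plus (minus A B) (minus C A)) expands into the terms
   {+A, -B, +C, -A}; the pairwise pass cancels +A against -A, and the
   result is rebuilt as (minus C B).  */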
4158 
4159 struct simplify_plus_minus_op_data
4160 {
4161   rtx op;
4162   short neg;
4163 };
4164 
4165 static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4167 {
4168   int result;
4169 
4170   result = (commutative_operand_precedence (y)
4171 	    - commutative_operand_precedence (x));
4172   if (result)
4173     return result > 0;
4174 
4175   /* Group together equal REGs to do more simplification.  */
4176   if (REG_P (x) && REG_P (y))
4177     return REGNO (x) > REGNO (y);
4178   else
4179     return false;
4180 }
4181 
4182 static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
4184 		     rtx op1)
4185 {
4186   struct simplify_plus_minus_op_data ops[8];
4187   rtx result, tem;
4188   int n_ops = 2, input_ops = 2;
4189   int changed, n_constants = 0, canonicalized = 0;
4190   int i, j;
4191 
4192   memset (ops, 0, sizeof ops);
4193 
4194   /* Set up the two operands and then expand them until nothing has been
4195      changed.  If we run out of room in our array, give up; this should
4196      almost never happen.  */
4197 
4198   ops[0].op = op0;
4199   ops[0].neg = 0;
4200   ops[1].op = op1;
4201   ops[1].neg = (code == MINUS);
4202 
4203   do
4204     {
4205       changed = 0;
4206 
4207       for (i = 0; i < n_ops; i++)
4208 	{
4209 	  rtx this_op = ops[i].op;
4210 	  int this_neg = ops[i].neg;
4211 	  enum rtx_code this_code = GET_CODE (this_op);
4212 
4213 	  switch (this_code)
4214 	    {
4215 	    case PLUS:
4216 	    case MINUS:
4217 	      if (n_ops == 7)
4218 		return NULL_RTX;
4219 
4220 	      ops[n_ops].op = XEXP (this_op, 1);
4221 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4222 	      n_ops++;
4223 
4224 	      ops[i].op = XEXP (this_op, 0);
4225 	      input_ops++;
4226 	      changed = 1;
4227 	      canonicalized |= this_neg;
4228 	      break;
4229 
4230 	    case NEG:
4231 	      ops[i].op = XEXP (this_op, 0);
4232 	      ops[i].neg = ! this_neg;
4233 	      changed = 1;
4234 	      canonicalized = 1;
4235 	      break;
4236 
4237 	    case CONST:
4238 	      if (n_ops < 7
4239 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
4240 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4241 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4242 		{
4243 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
4244 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4245 		  ops[n_ops].neg = this_neg;
4246 		  n_ops++;
4247 		  changed = 1;
4248 	          canonicalized = 1;
4249 		}
4250 	      break;
4251 
4252 	    case NOT:
4253 	      /* ~a -> (-a - 1) */
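	      /* This is the two's complement identity ~a == -a - 1:
		 the NOT becomes a negated operand plus a -1 term.  */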
4254 	      if (n_ops != 7)
4255 		{
4256 		  ops[n_ops].op = CONSTM1_RTX (mode);
4257 		  ops[n_ops++].neg = this_neg;
4258 		  ops[i].op = XEXP (this_op, 0);
4259 		  ops[i].neg = !this_neg;
4260 		  changed = 1;
4261 	          canonicalized = 1;
4262 		}
4263 	      break;
4264 
4265 	    case CONST_INT:
4266 	      n_constants++;
4267 	      if (this_neg)
4268 		{
4269 		  ops[i].op = neg_const_int (mode, this_op);
4270 		  ops[i].neg = 0;
4271 		  changed = 1;
4272 	          canonicalized = 1;
4273 		}
4274 	      break;
4275 
4276 	    default:
4277 	      break;
4278 	    }
4279 	}
4280     }
4281   while (changed);
4282 
4283   if (n_constants > 1)
4284     canonicalized = 1;
4285 
4286   gcc_assert (n_ops >= 2);
4287 
4288   /* If we only have two operands, we can avoid the loops.  */
4289   if (n_ops == 2)
4290     {
4291       enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4292       rtx lhs, rhs;
4293 
4294       /* Get the two operands.  Be careful with the order, especially for
4295 	 the cases where code == MINUS.  */
4296       if (ops[0].neg && ops[1].neg)
4297 	{
4298 	  lhs = gen_rtx_NEG (mode, ops[0].op);
4299 	  rhs = ops[1].op;
4300 	}
4301       else if (ops[0].neg)
4302 	{
4303 	  lhs = ops[1].op;
4304 	  rhs = ops[0].op;
4305 	}
4306       else
4307 	{
4308 	  lhs = ops[0].op;
4309 	  rhs = ops[1].op;
4310 	}
4311 
4312       return simplify_const_binary_operation (code, mode, lhs, rhs);
4313     }
4314 
4315   /* Now simplify each pair of operands until nothing changes.  */
4316   do
4317     {
4318       /* Insertion sort is good enough for an eight-element array.  */
4319       for (i = 1; i < n_ops; i++)
4320         {
4321           struct simplify_plus_minus_op_data save;
4322           j = i - 1;
4323           if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
4324 	    continue;
4325 
4326           canonicalized = 1;
4327           save = ops[i];
4328           do
4329 	    ops[j + 1] = ops[j];
4330           while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
4331           ops[j + 1] = save;
4332         }
4333 
4334       changed = 0;
4335       for (i = n_ops - 1; i > 0; i--)
4336 	for (j = i - 1; j >= 0; j--)
4337 	  {
4338 	    rtx lhs = ops[j].op, rhs = ops[i].op;
4339 	    int lneg = ops[j].neg, rneg = ops[i].neg;
4340 
4341 	    if (lhs != 0 && rhs != 0)
4342 	      {
4343 		enum rtx_code ncode = PLUS;
4344 
4345 		if (lneg != rneg)
4346 		  {
4347 		    ncode = MINUS;
4348 		    if (lneg)
4349 		      tem = lhs, lhs = rhs, rhs = tem;
4350 		  }
4351 		else if (swap_commutative_operands_p (lhs, rhs))
4352 		  tem = lhs, lhs = rhs, rhs = tem;
4353 
4354 		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4355 		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4356 		  {
4357 		    rtx tem_lhs, tem_rhs;
4358 
4359 		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4360 		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4361 		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
4362 
4363 		    if (tem && !CONSTANT_P (tem))
4364 		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
4365 		  }
4366 		else
4367 		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4368 
4369 		/* Reject "simplifications" that just wrap the two
4370 		   arguments in a CONST.  Failure to do so can result
4371 		   in infinite recursion with simplify_binary_operation
4372 		   when it calls us to simplify CONST operations.  */
4373 		if (tem
4374 		    && ! (GET_CODE (tem) == CONST
4375 			  && GET_CODE (XEXP (tem, 0)) == ncode
4376 			  && XEXP (XEXP (tem, 0), 0) == lhs
4377 			  && XEXP (XEXP (tem, 0), 1) == rhs))
4378 		  {
4379 		    lneg &= rneg;
4380 		    if (GET_CODE (tem) == NEG)
4381 		      tem = XEXP (tem, 0), lneg = !lneg;
4382 		    if (CONST_INT_P (tem) && lneg)
4383 		      tem = neg_const_int (mode, tem), lneg = 0;
4384 
4385 		    ops[i].op = tem;
4386 		    ops[i].neg = lneg;
4387 		    ops[j].op = NULL_RTX;
4388 		    changed = 1;
4389 		    canonicalized = 1;
4390 		  }
4391 	      }
4392 	  }
4393 
4394       /* If nothing changed, fail.  */
4395       if (!canonicalized)
4396         return NULL_RTX;
4397 
4398       /* Pack all the operands to the lower-numbered entries.  */
4399       for (i = 0, j = 0; j < n_ops; j++)
4400         if (ops[j].op)
4401           {
4402 	    ops[i] = ops[j];
4403 	    i++;
4404           }
4405       n_ops = i;
4406     }
4407   while (changed);
4408 
4409   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
4410   if (n_ops == 2
4411       && CONST_INT_P (ops[1].op)
4412       && CONSTANT_P (ops[0].op)
4413       && ops[0].neg)
4414     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4415 
4416   /* We suppressed creation of trivial CONST expressions in the
4417      combination loop to avoid recursion.  Create one manually now.
4418      The combination loop should have ensured that there is exactly
4419      one CONST_INT, and the sort will have ensured that it is last
4420      in the array and that any other constant will be next-to-last.  */
4421 
4422   if (n_ops > 1
4423       && CONST_INT_P (ops[n_ops - 1].op)
4424       && CONSTANT_P (ops[n_ops - 2].op))
4425     {
4426       rtx value = ops[n_ops - 1].op;
4427       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4428 	value = neg_const_int (mode, value);
4429       ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4430 					 INTVAL (value));
4431       n_ops--;
4432     }
4433 
4434   /* Put a non-negated operand first, if possible.  */
4435 
4436   for (i = 0; i < n_ops && ops[i].neg; i++)
4437     continue;
4438   if (i == n_ops)
4439     ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4440   else if (i != 0)
4441     {
4442       tem = ops[0].op;
4443       ops[0] = ops[i];
4444       ops[i].op = tem;
4445       ops[i].neg = 1;
4446     }
4447 
4448   /* Now make the result by performing the requested operations.  */
4449   result = ops[0].op;
4450   for (i = 1; i < n_ops; i++)
4451     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4452 			     mode, result, ops[i].op);
4453 
4454   return result;
4455 }
4456 
4457 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
4458 static bool
plus_minus_operand_p (const_rtx x)
4460 {
4461   return GET_CODE (x) == PLUS
4462          || GET_CODE (x) == MINUS
4463 	 || (GET_CODE (x) == CONST
4464 	     && GET_CODE (XEXP (x, 0)) == PLUS
4465 	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4466 	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4467 }
4468 
4469 /* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
4475    the operands or, if both are VOIDmode, the operands are compared in
4476    "infinite precision".  */
4477 rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
4479 			       enum machine_mode cmp_mode, rtx op0, rtx op1)
4480 {
4481   rtx tem, trueop0, trueop1;
4482 
4483   if (cmp_mode == VOIDmode)
4484     cmp_mode = GET_MODE (op0);
4485   if (cmp_mode == VOIDmode)
4486     cmp_mode = GET_MODE (op1);
4487 
4488   tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4489   if (tem)
4490     {
4491       if (SCALAR_FLOAT_MODE_P (mode))
4492 	{
4493           if (tem == const0_rtx)
4494             return CONST0_RTX (mode);
4495 #ifdef FLOAT_STORE_FLAG_VALUE
4496 	  {
4497 	    REAL_VALUE_TYPE val;
4498 	    val = FLOAT_STORE_FLAG_VALUE (mode);
4499 	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
4500 	  }
4501 #else
4502 	  return NULL_RTX;
4503 #endif
4504 	}
4505       if (VECTOR_MODE_P (mode))
4506 	{
4507 	  if (tem == const0_rtx)
4508 	    return CONST0_RTX (mode);
4509 #ifdef VECTOR_STORE_FLAG_VALUE
4510 	  {
4511 	    int i, units;
4512 	    rtvec v;
4513 
4514 	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4515 	    if (val == NULL_RTX)
4516 	      return NULL_RTX;
4517 	    if (val == const1_rtx)
4518 	      return CONST1_RTX (mode);
4519 
4520 	    units = GET_MODE_NUNITS (mode);
4521 	    v = rtvec_alloc (units);
4522 	    for (i = 0; i < units; i++)
4523 	      RTVEC_ELT (v, i) = val;
4524 	    return gen_rtx_raw_CONST_VECTOR (mode, v);
4525 	  }
4526 #else
4527 	  return NULL_RTX;
4528 #endif
4529 	}
4530 
4531       return tem;
4532     }
4533 
4534   /* For the following tests, ensure const0_rtx is op1.  */
4535   if (swap_commutative_operands_p (op0, op1)
4536       || (op0 == const0_rtx && op1 != const0_rtx))
4537     tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4538 
4539   /* If op0 is a compare, extract the comparison arguments from it.  */
4540   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4541     return simplify_gen_relational (code, mode, VOIDmode,
4542 				    XEXP (op0, 0), XEXP (op0, 1));
4543 
4544   if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4545       || CC0_P (op0))
4546     return NULL_RTX;
4547 
4548   trueop0 = avoid_constant_pool_reference (op0);
4549   trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
4552 }
4553 
4554 /* This part of simplify_relational_operation is only used when CMP_MODE
4555    is not in class MODE_CC (i.e. it is a real comparison).
4556 
   MODE is the mode of the result, while CMP_MODE specifies the mode
   in which the comparison is done, so it is the mode of the operands.  */
4559 
4560 static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4562 				 enum machine_mode cmp_mode, rtx op0, rtx op1)
4563 {
4564   enum rtx_code op0code = GET_CODE (op0);
4565 
4566   if (op1 == const0_rtx && COMPARISON_P (op0))
4567     {
4568       /* If op0 is a comparison, extract the comparison arguments
4569          from it.  */
4570       if (code == NE)
4571 	{
4572 	  if (GET_MODE (op0) == mode)
4573 	    return simplify_rtx (op0);
4574 	  else
4575 	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4576 					    XEXP (op0, 0), XEXP (op0, 1));
4577 	}
4578       else if (code == EQ)
4579 	{
4580 	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4581 	  if (new_code != UNKNOWN)
4582 	    return simplify_gen_relational (new_code, mode, VOIDmode,
4583 					    XEXP (op0, 0), XEXP (op0, 1));
4584 	}
4585     }
4586 
4587   /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4588      (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
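  /* E.g. (ltu (plus a 4) 4) holds exactly when the addition wraps
     around, i.e. when a >= (unsigned) -4, so it becomes (geu a -4).  */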
4589   if ((code == LTU || code == GEU)
4590       && GET_CODE (op0) == PLUS
4591       && CONST_INT_P (XEXP (op0, 1))
4592       && (rtx_equal_p (op1, XEXP (op0, 0))
4593 	  || rtx_equal_p (op1, XEXP (op0, 1)))
4594       /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4595       && XEXP (op0, 1) != const0_rtx)
4596     {
4597       rtx new_cmp
4598 	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4599       return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4600 				      cmp_mode, XEXP (op0, 0), new_cmp);
4601     }
4602 
4603   /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
4604   if ((code == LTU || code == GEU)
4605       && GET_CODE (op0) == PLUS
4606       && rtx_equal_p (op1, XEXP (op0, 1))
4607       /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
4608       && !rtx_equal_p (op1, XEXP (op0, 0)))
4609     return simplify_gen_relational (code, mode, cmp_mode, op0,
4610 				    copy_rtx (XEXP (op0, 0)));
4611 
4612   if (op1 == const0_rtx)
4613     {
4614       /* Canonicalize (GTU x 0) as (NE x 0).  */
4615       if (code == GTU)
4616         return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4617       /* Canonicalize (LEU x 0) as (EQ x 0).  */
4618       if (code == LEU)
4619         return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4620     }
4621   else if (op1 == const1_rtx)
4622     {
4623       switch (code)
4624         {
4625         case GE:
4626 	  /* Canonicalize (GE x 1) as (GT x 0).  */
4627 	  return simplify_gen_relational (GT, mode, cmp_mode,
4628 					  op0, const0_rtx);
4629 	case GEU:
4630 	  /* Canonicalize (GEU x 1) as (NE x 0).  */
4631 	  return simplify_gen_relational (NE, mode, cmp_mode,
4632 					  op0, const0_rtx);
4633 	case LT:
4634 	  /* Canonicalize (LT x 1) as (LE x 0).  */
4635 	  return simplify_gen_relational (LE, mode, cmp_mode,
4636 					  op0, const0_rtx);
4637 	case LTU:
4638 	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
4639 	  return simplify_gen_relational (EQ, mode, cmp_mode,
4640 					  op0, const0_rtx);
4641 	default:
4642 	  break;
4643 	}
4644     }
4645   else if (op1 == constm1_rtx)
4646     {
4647       /* Canonicalize (LE x -1) as (LT x 0).  */
4648       if (code == LE)
4649         return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4650       /* Canonicalize (GT x -1) as (GE x 0).  */
4651       if (code == GT)
4652         return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4653     }
4654 
4655   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
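  /* E.g. (eq (plus x 3) 10) becomes (eq x 7).  */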
4656   if ((code == EQ || code == NE)
4657       && (op0code == PLUS || op0code == MINUS)
4658       && CONSTANT_P (op1)
4659       && CONSTANT_P (XEXP (op0, 1))
4660       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4661     {
4662       rtx x = XEXP (op0, 0);
4663       rtx c = XEXP (op0, 1);
4664       enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4665       rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4666 
4667       /* Detect an infinite recursive condition, where we oscillate at this
4668 	 simplification case between:
4669 	    A + B == C  <--->  C - B == A,
4670 	 where A, B, and C are all constants with non-simplifiable expressions,
4671 	 usually SYMBOL_REFs.  */
4672       if (GET_CODE (tem) == invcode
4673 	  && CONSTANT_P (x)
4674 	  && rtx_equal_p (c, XEXP (tem, 1)))
4675 	return NULL_RTX;
4676 
4677       return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4678     }
4679 
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
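  /* More generally, if nonzero_bits shows that op0 can only be zero or
     one, the NE comparison already has the value of op0 itself, so op0
     extended or truncated to MODE can be returned directly.  */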
4682   if (code == NE
4683       && op1 == const0_rtx
4684       && GET_MODE_CLASS (mode) == MODE_INT
4685       && cmp_mode != VOIDmode
4686       /* ??? Work-around BImode bugs in the ia64 backend.  */
4687       && mode != BImode
4688       && cmp_mode != BImode
4689       && nonzero_bits (op0, cmp_mode) == 1
4690       && STORE_FLAG_VALUE == 1)
4691     return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4692 	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4693 	   : lowpart_subreg (mode, op0, cmp_mode);
4694 
4695   /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
4696   if ((code == EQ || code == NE)
4697       && op1 == const0_rtx
4698       && op0code == XOR)
4699     return simplify_gen_relational (code, mode, cmp_mode,
4700 				    XEXP (op0, 0), XEXP (op0, 1));
4701 
4702   /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
4703   if ((code == EQ || code == NE)
4704       && op0code == XOR
4705       && rtx_equal_p (XEXP (op0, 0), op1)
4706       && !side_effects_p (XEXP (op0, 0)))
4707     return simplify_gen_relational (code, mode, cmp_mode,
4708 				    XEXP (op0, 1), const0_rtx);
4709 
4710   /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
4711   if ((code == EQ || code == NE)
4712       && op0code == XOR
4713       && rtx_equal_p (XEXP (op0, 1), op1)
4714       && !side_effects_p (XEXP (op0, 1)))
4715     return simplify_gen_relational (code, mode, cmp_mode,
4716 				    XEXP (op0, 0), const0_rtx);
4717 
4718   /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
4719   if ((code == EQ || code == NE)
4720       && op0code == XOR
4721       && CONST_SCALAR_INT_P (op1)
4722       && CONST_SCALAR_INT_P (XEXP (op0, 1)))
4723     return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4724 				    simplify_gen_binary (XOR, cmp_mode,
4725 							 XEXP (op0, 1), op1));
4726 
4727   if (op0code == POPCOUNT && op1 == const0_rtx)
4728     switch (code)
4729       {
4730       case EQ:
4731       case LE:
4732       case LEU:
4733 	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
4734 	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4735 					XEXP (op0, 0), const0_rtx);
4736 
4737       case NE:
4738       case GT:
4739       case GTU:
4740 	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
4741 	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4742 					XEXP (op0, 0), const0_rtx);
4743 
4744       default:
4745 	break;
4746       }
4747 
4748   return NULL_RTX;
4749 }
4750 
4751 enum
4752 {
4753   CMP_EQ = 1,
4754   CMP_LT = 2,
4755   CMP_GT = 4,
4756   CMP_LTU = 8,
4757   CMP_GTU = 16
4758 };
4759 
4760 
4761 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4764    logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4765    For floating-point comparisons, assume that the operands were ordered.  */
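/* For example, when comparing the constants -1 and 1 the known result
   is CMP_LT | CMP_GTU: -1 is smaller treated as signed but larger
   treated as unsigned.  */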
4766 
4767 static rtx
comparison_result (enum rtx_code code, int known_results)
4769 {
4770   switch (code)
4771     {
4772     case EQ:
4773     case UNEQ:
4774       return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4775     case NE:
4776     case LTGT:
4777       return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4778 
4779     case LT:
4780     case UNLT:
4781       return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4782     case GE:
4783     case UNGE:
4784       return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4785 
4786     case GT:
4787     case UNGT:
4788       return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4789     case LE:
4790     case UNLE:
4791       return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4792 
4793     case LTU:
4794       return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4795     case GEU:
4796       return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4797 
4798     case GTU:
4799       return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4800     case LEU:
4801       return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4802 
4803     case ORDERED:
4804       return const_true_rtx;
4805     case UNORDERED:
4806       return const0_rtx;
4807     default:
4808       gcc_unreachable ();
4809     }
4810 }
4811 
4812 /* Check if the given comparison (done in the given MODE) is actually a
4813    tautology or a contradiction.
4814    If no simplification is possible, this function returns zero.
4815    Otherwise, it returns either const_true_rtx or const0_rtx.  */
4816 
4817 rtx
simplify_const_relational_operation (enum rtx_code code,
4819 				     enum machine_mode mode,
4820 				     rtx op0, rtx op1)
4821 {
4822   rtx tem;
4823   rtx trueop0;
4824   rtx trueop1;
4825 
4826   gcc_assert (mode != VOIDmode
4827 	      || (GET_MODE (op0) == VOIDmode
4828 		  && GET_MODE (op1) == VOIDmode));
4829 
4830   /* If op0 is a compare, extract the comparison arguments from it.  */
4831   if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4832     {
4833       op1 = XEXP (op0, 1);
4834       op0 = XEXP (op0, 0);
4835 
4836       if (GET_MODE (op0) != VOIDmode)
4837 	mode = GET_MODE (op0);
4838       else if (GET_MODE (op1) != VOIDmode)
4839 	mode = GET_MODE (op1);
4840       else
4841 	return 0;
4842     }
4843 
4844   /* We can't simplify MODE_CC values since we don't know what the
4845      actual comparison is.  */
4846   if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4847     return 0;
4848 
4849   /* Make sure the constant is second.  */
4850   if (swap_commutative_operands_p (op0, op1))
4851     {
4852       tem = op0, op0 = op1, op1 = tem;
4853       code = swap_condition (code);
4854     }
4855 
4856   trueop0 = avoid_constant_pool_reference (op0);
4857   trueop1 = avoid_constant_pool_reference (op1);
4858 
4859   /* For integer comparisons of A and B maybe we can simplify A - B and can
4860      then simplify a comparison of that with zero.  If A and B are both either
4861      a register or a CONST_INT, this can't help; testing for these cases will
4862      prevent infinite recursion here and speed things up.
4863 
4864      We can only do this for EQ and NE comparisons as otherwise we may
4865      lose or introduce overflow which we cannot disregard as undefined as
4866      we do not know the signedness of the operation on either the left or
4867      the right hand side of the comparison.  */
4868 
4869   if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4870       && (code == EQ || code == NE)
4871       && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4872 	    && (REG_P (op1) || CONST_INT_P (trueop1)))
4873       && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4874       /* We cannot do this if tem is a nonzero address.  */
4875       && ! nonzero_address_p (tem))
4876     return simplify_const_relational_operation (signed_condition (code),
4877 						mode, tem, const0_rtx);
4878 
4879   if (! HONOR_NANS (mode) && code == ORDERED)
4880     return const_true_rtx;
4881 
4882   if (! HONOR_NANS (mode) && code == UNORDERED)
4883     return const0_rtx;
4884 
4885   /* For modes without NaNs, if the two operands are equal, we know the
4886      result except if they have side-effects.  Even with NaNs we know
4887      the result of unordered comparisons and, if signaling NaNs are
4888      irrelevant, also the result of LT/GT/LTGT.  */
4889   if ((! HONOR_NANS (GET_MODE (trueop0))
4890        || code == UNEQ || code == UNLE || code == UNGE
4891        || ((code == LT || code == GT || code == LTGT)
4892 	   && ! HONOR_SNANS (GET_MODE (trueop0))))
4893       && rtx_equal_p (trueop0, trueop1)
4894       && ! side_effects_p (trueop0))
4895     return comparison_result (code, CMP_EQ);
4896 
4897   /* If the operands are floating-point constants, see if we can fold
4898      the result.  */
4899   if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
4900       && CONST_DOUBLE_AS_FLOAT_P (trueop1)
4901       && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4902     {
4903       REAL_VALUE_TYPE d0, d1;
4904 
4905       REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4906       REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4907 
4908       /* Comparisons are unordered iff at least one of the values is NaN.  */
4909       if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4910 	switch (code)
4911 	  {
4912 	  case UNEQ:
4913 	  case UNLT:
4914 	  case UNGT:
4915 	  case UNLE:
4916 	  case UNGE:
4917 	  case NE:
4918 	  case UNORDERED:
4919 	    return const_true_rtx;
4920 	  case EQ:
4921 	  case LT:
4922 	  case GT:
4923 	  case LE:
4924 	  case GE:
4925 	  case LTGT:
4926 	  case ORDERED:
4927 	    return const0_rtx;
4928 	  default:
4929 	    return 0;
4930 	  }
4931 
4932       return comparison_result (code,
4933 				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4934 				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4935     }
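
  /* E.g. (lt 1.0 2.0) yields CMP_LT above and folds to const_true_rtx;
     if either constant is a NaN, only NE, UNORDERED and the UN*
     comparisons hold.  */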
4936 
4937   /* Otherwise, see if the operands are both integers.  */
4938   if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4939        && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
4940        && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
4941     {
4942       int width = GET_MODE_PRECISION (mode);
4943       HOST_WIDE_INT l0s, h0s, l1s, h1s;
4944       unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4945 
4946       /* Get the two words comprising each integer constant.  */
4947       if (CONST_DOUBLE_AS_INT_P (trueop0))
4948 	{
4949 	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4950 	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4951 	}
4952       else
4953 	{
4954 	  l0u = l0s = INTVAL (trueop0);
4955 	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
4956 	}
4957 
4958       if (CONST_DOUBLE_AS_INT_P (trueop1))
4959 	{
4960 	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4961 	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4962 	}
4963       else
4964 	{
4965 	  l1u = l1s = INTVAL (trueop1);
4966 	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
4967 	}
4968 
4969       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4970 	 we have to sign or zero-extend the values.  */
4971       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4972 	{
4973 	  l0u &= GET_MODE_MASK (mode);
4974 	  l1u &= GET_MODE_MASK (mode);
4975 
4976 	  if (val_signbit_known_set_p (mode, l0s))
4977 	    l0s |= ~GET_MODE_MASK (mode);
4978 
4979 	  if (val_signbit_known_set_p (mode, l1s))
4980 	    l1s |= ~GET_MODE_MASK (mode);
4981 	}
4982       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4983 	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4984 
4985       if (h0u == h1u && l0u == l1u)
4986 	return comparison_result (code, CMP_EQ);
4987       else
4988 	{
4989 	  int cr;
4990 	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4991 	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4992 	  return comparison_result (code, cr);
4993 	}
4994     }
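
  /* E.g. comparing (const_int -1) with (const_int 1) in SImode gives
     CMP_LT | CMP_GTU: -1 is smaller as a signed value but, being all
     ones, larger as an unsigned one, so (lt -1 1) folds to
     const_true_rtx while (ltu -1 1) folds to const0_rtx.  */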
4995 
4996   /* Optimize comparisons with upper and lower bounds.  */
4997   if (HWI_COMPUTABLE_MODE_P (mode)
4998       && CONST_INT_P (trueop1))
4999     {
5000       int sign;
5001       unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
5002       HOST_WIDE_INT val = INTVAL (trueop1);
5003       HOST_WIDE_INT mmin, mmax;
5004 
5005       if (code == GEU
5006 	  || code == LEU
5007 	  || code == GTU
5008 	  || code == LTU)
5009 	sign = 0;
5010       else
5011 	sign = 1;
5012 
5013       /* Get a reduced range if the sign bit is zero.  */
5014       if (nonzero <= (GET_MODE_MASK (mode) >> 1))
5015 	{
5016 	  mmin = 0;
5017 	  mmax = nonzero;
5018 	}
5019       else
5020 	{
5021 	  rtx mmin_rtx, mmax_rtx;
5022 	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
5023 
5024 	  mmin = INTVAL (mmin_rtx);
5025 	  mmax = INTVAL (mmax_rtx);
5026 	  if (sign)
5027 	    {
5028 	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
5029 
5030 	      mmin >>= (sign_copies - 1);
5031 	      mmax >>= (sign_copies - 1);
5032 	    }
5033 	}
5034 
5035       switch (code)
5036 	{
5037 	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
5038 	case GEU:
5039 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5040 	    return const_true_rtx;
5041 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5042 	    return const0_rtx;
5043 	  break;
5044 	case GE:
5045 	  if (val <= mmin)
5046 	    return const_true_rtx;
5047 	  if (val > mmax)
5048 	    return const0_rtx;
5049 	  break;
5050 
5051 	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
5052 	case LEU:
5053 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5054 	    return const_true_rtx;
5055 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5056 	    return const0_rtx;
5057 	  break;
5058 	case LE:
5059 	  if (val >= mmax)
5060 	    return const_true_rtx;
5061 	  if (val < mmin)
5062 	    return const0_rtx;
5063 	  break;
5064 
5065 	case EQ:
5066 	  /* x == y is always false for y out of range.  */
5067 	  if (val < mmin || val > mmax)
5068 	    return const0_rtx;
5069 	  break;
5070 
5071 	/* x > y is always false for y >= mmax, always true for y < mmin.  */
5072 	case GTU:
5073 	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5074 	    return const0_rtx;
5075 	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5076 	    return const_true_rtx;
5077 	  break;
5078 	case GT:
5079 	  if (val >= mmax)
5080 	    return const0_rtx;
5081 	  if (val < mmin)
5082 	    return const_true_rtx;
5083 	  break;
5084 
5085 	/* x < y is always false for y <= mmin, always true for y > mmax.  */
5086 	case LTU:
5087 	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5088 	    return const0_rtx;
5089 	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5090 	    return const_true_rtx;
5091 	  break;
5092 	case LT:
5093 	  if (val <= mmin)
5094 	    return const0_rtx;
5095 	  if (val > mmax)
5096 	    return const_true_rtx;
5097 	  break;
5098 
5099 	case NE:
5100 	  /* x != y is always true for y out of range.  */
5101 	  if (val < mmin || val > mmax)
5102 	    return const_true_rtx;
5103 	  break;
5104 
5105 	default:
5106 	  break;
5107 	}
5108     }
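
  /* Sketch: if op0 is (zero_extend:SI (reg:QI 1)), nonzero_bits
     reports at most 0xff, giving mmin = 0 and mmax = 255, so e.g.
     (gtu op0 (const_int 255)) folds to const0_rtx above.  */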
5109 
5110   /* Optimize integer comparisons with zero.  */
5111   if (trueop1 == const0_rtx)
5112     {
5113       /* Some addresses are known to be nonzero.  We don't know
5114 	 their sign, but equality comparisons are known.  */
5115       if (nonzero_address_p (trueop0))
5116 	{
5117 	  if (code == EQ || code == LEU)
5118 	    return const0_rtx;
5119 	  if (code == NE || code == GTU)
5120 	    return const_true_rtx;
5121 	}
5122 
5123       /* See if the first operand is an IOR with a constant.  If so, we
5124 	 may be able to determine the result of this comparison.  */
5125       if (GET_CODE (op0) == IOR)
5126 	{
5127 	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5128 	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5129 	    {
5130 	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
5131 	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5132 			      && (UINTVAL (inner_const)
5133 				  & ((unsigned HOST_WIDE_INT) 1
5134 				     << sign_bitnum)));
5135 
5136 	      switch (code)
5137 		{
5138 		case EQ:
5139 		case LEU:
5140 		  return const0_rtx;
5141 		case NE:
5142 		case GTU:
5143 		  return const_true_rtx;
5144 		case LT:
5145 		case LE:
5146 		  if (has_sign)
5147 		    return const_true_rtx;
5148 		  break;
5149 		case GT:
5150 		case GE:
5151 		  if (has_sign)
5152 		    return const0_rtx;
5153 		  break;
5154 		default:
5155 		  break;
5156 		}
5157 	    }
5158 	}
5159     }
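
  /* E.g. (ne (ior (reg:SI 1) (const_int 4)) (const_int 0)) folds to
     const_true_rtx above: IORing in a nonzero constant guarantees a
     nonzero result.  */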
5160 
5161   /* Optimize comparison of ABS with zero.  */
5162   if (trueop1 == CONST0_RTX (mode)
5163       && (GET_CODE (trueop0) == ABS
5164 	  || (GET_CODE (trueop0) == FLOAT_EXTEND
5165 	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5166     {
5167       switch (code)
5168 	{
5169 	case LT:
5170 	  /* Optimize abs(x) < 0.0.  */
5171 	  if (!HONOR_SNANS (mode)
5172 	      && (!INTEGRAL_MODE_P (mode)
5173 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5174 	    {
5175 	      if (INTEGRAL_MODE_P (mode)
5176 		  && (issue_strict_overflow_warning
5177 		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5178 		warning (OPT_Wstrict_overflow,
5179 			 ("assuming signed overflow does not occur when "
5180 			  "assuming abs (x) < 0 is false"));
5181 	       return const0_rtx;
5182 	    }
5183 	  break;
5184 
5185 	case GE:
5186 	  /* Optimize abs(x) >= 0.0.  */
5187 	  if (!HONOR_NANS (mode)
5188 	      && (!INTEGRAL_MODE_P (mode)
5189 		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
5190 	    {
5191 	      if (INTEGRAL_MODE_P (mode)
5192 	          && (issue_strict_overflow_warning
5193 	    	  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
5194 	        warning (OPT_Wstrict_overflow,
5195 			 ("assuming signed overflow does not occur when "
5196 			  "assuming abs (x) >= 0 is true"));
5197 	      return const_true_rtx;
5198 	    }
5199 	  break;
5200 
5201 	case UNGE:
5202 	  /* Optimize ! (abs(x) < 0.0).  */
5203 	  return const_true_rtx;
5204 
5205 	default:
5206 	  break;
5207 	}
5208     }
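
  /* E.g. with -fstrict-overflow in effect, (ge (abs:SI (reg:SI 1))
     (const_int 0)) folds to const_true_rtx, possibly after issuing
     the -Wstrict-overflow warning.  */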
5209 
5210   return 0;
5211 }
5212 
5213 /* Simplify CODE, an operation with result mode MODE and three operands,
5214    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
5215    a constant.  Return 0 if no simplification is possible.  */
5216 
5217 rtx
5218 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
5219 			    enum machine_mode op0_mode, rtx op0, rtx op1,
5220 			    rtx op2)
5221 {
5222   unsigned int width = GET_MODE_PRECISION (mode);
5223   bool any_change = false;
5224   rtx tem;
5225 
5226   /* VOIDmode means "infinite" precision.  */
5227   if (width == 0)
5228     width = HOST_BITS_PER_WIDE_INT;
5229 
5230   switch (code)
5231     {
5232     case FMA:
5233       /* Simplify negations around the multiplication.  */
5234       /* -a * -b + c  =>  a * b + c.  */
5235       if (GET_CODE (op0) == NEG)
5236 	{
5237 	  tem = simplify_unary_operation (NEG, mode, op1, mode);
5238 	  if (tem)
5239 	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5240 	}
5241       else if (GET_CODE (op1) == NEG)
5242 	{
5243 	  tem = simplify_unary_operation (NEG, mode, op0, mode);
5244 	  if (tem)
5245 	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5246 	}
5247 
5248       /* Canonicalize the two multiplication operands.  */
5249       /* a * -b + c  =>  -b * a + c.  */
5250       if (swap_commutative_operands_p (op0, op1))
5251 	tem = op0, op0 = op1, op1 = tem, any_change = true;
5252 
5253       if (any_change)
5254 	return gen_rtx_FMA (mode, op0, op1, op2);
5255       return NULL_RTX;
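
      /* E.g. (fma (neg:SF a) (neg:SF b) c) becomes (fma a b c) above,
         and (fma a (neg:SF b) c) is canonicalized to
         (fma (neg:SF b) a c); a, b and c are illustrative operands.  */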
5256 
5257     case SIGN_EXTRACT:
5258     case ZERO_EXTRACT:
5259       if (CONST_INT_P (op0)
5260 	  && CONST_INT_P (op1)
5261 	  && CONST_INT_P (op2)
5262 	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
5263 	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
5264 	{
5265 	  /* Extracting a bit-field from a constant */
5266 	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
5267 	  HOST_WIDE_INT op1val = INTVAL (op1);
5268 	  HOST_WIDE_INT op2val = INTVAL (op2);
5269 	  if (BITS_BIG_ENDIAN)
5270 	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
5271 	  else
5272 	    val >>= op2val;
5273 
5274 	  if (HOST_BITS_PER_WIDE_INT != op1val)
5275 	    {
5276 	      /* First zero-extend.  */
5277 	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
5278 	      /* If desired, propagate sign bit.  */
5279 	      if (code == SIGN_EXTRACT
5280 		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
5281 		     != 0)
5282 		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
5283 	    }
5284 
5285 	  return gen_int_mode (val, mode);
5286 	}
5287       break;
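
      /* Worked example, assuming !BITS_BIG_ENDIAN:
         (zero_extract (const_int 0x1234) (const_int 8) (const_int 4))
         shifts right by 4 and masks to 8 bits, giving (const_int 0x23).  */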
5288 
5289     case IF_THEN_ELSE:
5290       if (CONST_INT_P (op0))
5291 	return op0 != const0_rtx ? op1 : op2;
5292 
5293       /* Convert c ? a : a into "a".  */
5294       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5295 	return op1;
5296 
5297       /* Convert a != b ? a : b into "a".  */
5298       if (GET_CODE (op0) == NE
5299 	  && ! side_effects_p (op0)
5300 	  && ! HONOR_NANS (mode)
5301 	  && ! HONOR_SIGNED_ZEROS (mode)
5302 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5303 	       && rtx_equal_p (XEXP (op0, 1), op2))
5304 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5305 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5306 	return op1;
5307 
5308       /* Convert a == b ? a : b into "b".  */
5309       if (GET_CODE (op0) == EQ
5310 	  && ! side_effects_p (op0)
5311 	  && ! HONOR_NANS (mode)
5312 	  && ! HONOR_SIGNED_ZEROS (mode)
5313 	  && ((rtx_equal_p (XEXP (op0, 0), op1)
5314 	       && rtx_equal_p (XEXP (op0, 1), op2))
5315 	      || (rtx_equal_p (XEXP (op0, 0), op2)
5316 		  && rtx_equal_p (XEXP (op0, 1), op1))))
5317 	return op2;
5318 
5319       if (COMPARISON_P (op0) && ! side_effects_p (op0))
5320 	{
5321 	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5322 					? GET_MODE (XEXP (op0, 1))
5323 					: GET_MODE (XEXP (op0, 0)));
5324 	  rtx temp;
5325 
5326 	  /* Look for exploitable constants (STORE_FLAG_VALUE, zero) in op1 and op2.  */
5327 	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
5328 	    {
5329 	      HOST_WIDE_INT t = INTVAL (op1);
5330 	      HOST_WIDE_INT f = INTVAL (op2);
5331 
5332 	      if (t == STORE_FLAG_VALUE && f == 0)
5333 	        code = GET_CODE (op0);
5334 	      else if (t == 0 && f == STORE_FLAG_VALUE)
5335 		{
5336 		  enum rtx_code tmp;
5337 		  tmp = reversed_comparison_code (op0, NULL_RTX);
5338 		  if (tmp == UNKNOWN)
5339 		    break;
5340 		  code = tmp;
5341 		}
5342 	      else
5343 		break;
5344 
5345 	      return simplify_gen_relational (code, mode, cmp_mode,
5346 					      XEXP (op0, 0), XEXP (op0, 1));
5347 	    }
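
	  /* E.g. when STORE_FLAG_VALUE is 1,
	     (if_then_else (lt x y) (const_int 1) (const_int 0))
	     simplifies to (lt x y) itself, and swapping the two constant
	     arms yields the reversed comparison (ge x y) instead.  */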
5348 
5349 	  if (cmp_mode == VOIDmode)
5350 	    cmp_mode = op0_mode;
5351 	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5352 			  			cmp_mode, XEXP (op0, 0),
5353 						XEXP (op0, 1));
5354 
5355 	  /* See if any simplifications were possible.  */
5356 	  if (temp)
5357 	    {
5358 	      if (CONST_INT_P (temp))
5359 		return temp == const0_rtx ? op2 : op1;
5360 	      else if (temp)
5361 	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5362 	    }
5363 	}
5364       break;
5365 
5366     case VEC_MERGE:
5367       gcc_assert (GET_MODE (op0) == mode);
5368       gcc_assert (GET_MODE (op1) == mode);
5369       gcc_assert (VECTOR_MODE_P (mode));
5370       op2 = avoid_constant_pool_reference (op2);
5371       if (CONST_INT_P (op2))
5372 	{
5373           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
5374 	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
5375 	  int mask = (1 << n_elts) - 1;
5376 
5377 	  if (!(INTVAL (op2) & mask))
5378 	    return op1;
5379 	  if ((INTVAL (op2) & mask) == mask)
5380 	    return op0;
5381 
5382 	  op0 = avoid_constant_pool_reference (op0);
5383 	  op1 = avoid_constant_pool_reference (op1);
5384 	  if (GET_CODE (op0) == CONST_VECTOR
5385 	      && GET_CODE (op1) == CONST_VECTOR)
5386 	    {
5387 	      rtvec v = rtvec_alloc (n_elts);
5388 	      unsigned int i;
5389 
5390 	      for (i = 0; i < n_elts; i++)
5391 		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
5392 				    ? CONST_VECTOR_ELT (op0, i)
5393 				    : CONST_VECTOR_ELT (op1, i));
5394 	      return gen_rtx_CONST_VECTOR (mode, v);
5395 	    }
5396 	}
5397       break;
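
      /* E.g. in V2SImode, a mask of (const_int 1) takes element 0 from
         op0 and element 1 from op1 when both are CONST_VECTORs;
         all-zero or all-one masks return op1 or op0 outright.  */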
5398 
5399     default:
5400       gcc_unreachable ();
5401     }
5402 
5403   return 0;
5404 }
5405 
5406 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5407    or CONST_VECTOR,
5408    returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5409 
5410    Works by unpacking OP into a collection of 8-bit values
5411    represented as a little-endian array of 'unsigned char', selecting by BYTE,
5412    and then repacking them again for OUTERMODE.  */
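
/* For instance, on a little-endian target, SImode 0x12345678 unpacks
   to the byte array {0x78, 0x56, 0x34, 0x12}; a QImode subreg at
   byte 0 then repacks just 0x78.  */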
5413 
5414 static rtx
5415 simplify_immed_subreg (enum machine_mode outermode, rtx op,
5416 		       enum machine_mode innermode, unsigned int byte)
5417 {
5418   /* We support up to 512-bit values (for V8DFmode).  */
5419   enum {
5420     max_bitsize = 512,
5421     value_bit = 8,
5422     value_mask = (1 << value_bit) - 1
5423   };
5424   unsigned char value[max_bitsize / value_bit];
5425   int value_start;
5426   int i;
5427   int elem;
5428 
5429   int num_elem;
5430   rtx * elems;
5431   int elem_bitsize;
5432   rtx result_s;
5433   rtvec result_v = NULL;
5434   enum mode_class outer_class;
5435   enum machine_mode outer_submode;
5436 
5437   /* Some ports misuse CCmode.  */
5438   if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5439     return op;
5440 
5441   /* We have no way to represent a complex constant at the rtl level.  */
5442   if (COMPLEX_MODE_P (outermode))
5443     return NULL_RTX;
5444 
5445   /* Unpack the value.  */
5446 
5447   if (GET_CODE (op) == CONST_VECTOR)
5448     {
5449       num_elem = CONST_VECTOR_NUNITS (op);
5450       elems = &CONST_VECTOR_ELT (op, 0);
5451       elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
5452     }
5453   else
5454     {
5455       num_elem = 1;
5456       elems = &op;
5457       elem_bitsize = max_bitsize;
5458     }
5459   /* If this assertion fails, OP is too complicated; reducing value_bit may help.  */
5460   gcc_assert (BITS_PER_UNIT % value_bit == 0);
5461   /* I don't know how to handle endianness of sub-units.  */
5462   gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
5463 
5464   for (elem = 0; elem < num_elem; elem++)
5465     {
5466       unsigned char * vp;
5467       rtx el = elems[elem];
5468 
5469       /* Vectors are kept in target memory order.  (This is probably
5470 	 a mistake.)  */
5471       {
5472 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5473 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5474 			  / BITS_PER_UNIT);
5475 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5476 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5477 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5478 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5479 	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
5480       }
5481 
5482       switch (GET_CODE (el))
5483 	{
5484 	case CONST_INT:
5485 	  for (i = 0;
5486 	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5487 	       i += value_bit)
5488 	    *vp++ = INTVAL (el) >> i;
5489 	  /* CONST_INTs are always logically sign-extended.  */
5490 	  for (; i < elem_bitsize; i += value_bit)
5491 	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
5492 	  break;
5493 
5494 	case CONST_DOUBLE:
5495 	  if (GET_MODE (el) == VOIDmode)
5496 	    {
5497 	      unsigned char extend = 0;
5498 	      /* If this triggers, someone should have generated a
5499 		 CONST_INT instead.  */
5500 	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
5501 
5502 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5503 		*vp++ = CONST_DOUBLE_LOW (el) >> i;
5504 	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
5505 		{
5506 		  *vp++
5507 		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
5508 		  i += value_bit;
5509 		}
5510 
5511 	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
5512 		extend = -1;
5513 	      for (; i < elem_bitsize; i += value_bit)
5514 		*vp++ = extend;
5515 	    }
5516 	  else
5517 	    {
5518 	      long tmp[max_bitsize / 32];
5519 	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
5520 
5521 	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
5522 	      gcc_assert (bitsize <= elem_bitsize);
5523 	      gcc_assert (bitsize % value_bit == 0);
5524 
5525 	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
5526 			      GET_MODE (el));
5527 
5528 	      /* real_to_target produces its result in words affected by
5529 		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5530 		 and use WORDS_BIG_ENDIAN instead; see the documentation
5531 	         of SUBREG in rtl.texi.  */
5532 	      for (i = 0; i < bitsize; i += value_bit)
5533 		{
5534 		  int ibase;
5535 		  if (WORDS_BIG_ENDIAN)
5536 		    ibase = bitsize - 1 - i;
5537 		  else
5538 		    ibase = i;
5539 		  *vp++ = tmp[ibase / 32] >> i % 32;
5540 		}
5541 
5542 	      /* It shouldn't matter what's done here, so fill it with
5543 		 zero.  */
5544 	      for (; i < elem_bitsize; i += value_bit)
5545 		*vp++ = 0;
5546 	    }
5547 	  break;
5548 
5549         case CONST_FIXED:
5550 	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5551 	    {
5552 	      for (i = 0; i < elem_bitsize; i += value_bit)
5553 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5554 	    }
5555 	  else
5556 	    {
5557 	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
5558 		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
5559               for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
5560 		   i += value_bit)
5561 		*vp++ = CONST_FIXED_VALUE_HIGH (el)
5562 			>> (i - HOST_BITS_PER_WIDE_INT);
5563 	      for (; i < elem_bitsize; i += value_bit)
5564 		*vp++ = 0;
5565 	    }
5566           break;
5567 
5568 	default:
5569 	  gcc_unreachable ();
5570 	}
5571     }
5572 
5573   /* Now, pick the right byte to start with.  */
5574   /* Renumber BYTE so that the least-significant byte is byte 0.  A special
5575      case is paradoxical SUBREGs, which shouldn't be adjusted since they
5576      will already have offset 0.  */
5577   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
5578     {
5579       unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
5580 			- byte);
5581       unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5582       unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5583       byte = (subword_byte % UNITS_PER_WORD
5584 	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5585     }
5586 
5587   /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
5588      so if it's become negative it will instead be very large.)  */
5589   gcc_assert (byte < GET_MODE_SIZE (innermode));
5590 
5591   /* Convert from bytes to chunks of size value_bit.  */
5592   value_start = byte * (BITS_PER_UNIT / value_bit);
5593 
5594   /* Re-pack the value.  */
5595 
5596   if (VECTOR_MODE_P (outermode))
5597     {
5598       num_elem = GET_MODE_NUNITS (outermode);
5599       result_v = rtvec_alloc (num_elem);
5600       elems = &RTVEC_ELT (result_v, 0);
5601       outer_submode = GET_MODE_INNER (outermode);
5602     }
5603   else
5604     {
5605       num_elem = 1;
5606       elems = &result_s;
5607       outer_submode = outermode;
5608     }
5609 
5610   outer_class = GET_MODE_CLASS (outer_submode);
5611   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5612 
5613   gcc_assert (elem_bitsize % value_bit == 0);
5614   gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
5615 
5616   for (elem = 0; elem < num_elem; elem++)
5617     {
5618       unsigned char *vp;
5619 
5620       /* Vectors are stored in target memory order.  (This is probably
5621 	 a mistake.)  */
5622       {
5623 	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
5624 	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
5625 			  / BITS_PER_UNIT);
5626 	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
5627 	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
5628 	unsigned bytele = (subword_byte % UNITS_PER_WORD
5629 			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
5630 	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
5631       }
5632 
5633       switch (outer_class)
5634 	{
5635 	case MODE_INT:
5636 	case MODE_PARTIAL_INT:
5637 	  {
5638 	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
5639 
5640 	    for (i = 0;
5641 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5642 		 i += value_bit)
5643 	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5644 	    for (; i < elem_bitsize; i += value_bit)
5645 	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5646 		     << (i - HOST_BITS_PER_WIDE_INT);
5647 
5648 	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
5649 	       know why.  */
5650 	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
5651 	      elems[elem] = gen_int_mode (lo, outer_submode);
5652 	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
5653 	      elems[elem] = immed_double_const (lo, hi, outer_submode);
5654 	    else
5655 	      return NULL_RTX;
5656 	  }
5657 	  break;
5658 
5659 	case MODE_FLOAT:
5660 	case MODE_DECIMAL_FLOAT:
5661 	  {
5662 	    REAL_VALUE_TYPE r;
5663 	    long tmp[max_bitsize / 32];
5664 
5665 	    /* real_from_target wants its input in words affected by
5666 	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
5667 	       and use WORDS_BIG_ENDIAN instead; see the documentation
5668 	       of SUBREG in rtl.texi.  */
5669 	    for (i = 0; i < max_bitsize / 32; i++)
5670 	      tmp[i] = 0;
5671 	    for (i = 0; i < elem_bitsize; i += value_bit)
5672 	      {
5673 		int ibase;
5674 		if (WORDS_BIG_ENDIAN)
5675 		  ibase = elem_bitsize - 1 - i;
5676 		else
5677 		  ibase = i;
5678 		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
5679 	      }
5680 
5681 	    real_from_target (&r, tmp, outer_submode);
5682 	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
5683 	  }
5684 	  break;
5685 
5686 	case MODE_FRACT:
5687 	case MODE_UFRACT:
5688 	case MODE_ACCUM:
5689 	case MODE_UACCUM:
5690 	  {
5691 	    FIXED_VALUE_TYPE f;
5692 	    f.data.low = 0;
5693 	    f.data.high = 0;
5694 	    f.mode = outer_submode;
5695 
5696 	    for (i = 0;
5697 		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
5698 		 i += value_bit)
5699 	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
5700 	    for (; i < elem_bitsize; i += value_bit)
5701 	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
5702 			     << (i - HOST_BITS_PER_WIDE_INT));
5703 
5704 	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
5705           }
5706           break;
5707 
5708 	default:
5709 	  gcc_unreachable ();
5710 	}
5711     }
5712   if (VECTOR_MODE_P (outermode))
5713     return gen_rtx_CONST_VECTOR (outermode, result_v);
5714   else
5715     return result_s;
5716 }
5717 
5718 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5719    Return 0 if no simplifications are possible.  */
5720 rtx
5721 simplify_subreg (enum machine_mode outermode, rtx op,
5722 		 enum machine_mode innermode, unsigned int byte)
5723 {
5724   /* Little bit of sanity checking.  */
5725   gcc_assert (innermode != VOIDmode);
5726   gcc_assert (outermode != VOIDmode);
5727   gcc_assert (innermode != BLKmode);
5728   gcc_assert (outermode != BLKmode);
5729 
5730   gcc_assert (GET_MODE (op) == innermode
5731 	      || GET_MODE (op) == VOIDmode);
5732 
5733   if ((byte % GET_MODE_SIZE (outermode)) != 0)
5734     return NULL_RTX;
5735 
5736   if (byte >= GET_MODE_SIZE (innermode))
5737     return NULL_RTX;
5738 
5739   if (outermode == innermode && !byte)
5740     return op;
5741 
5742   if (CONST_SCALAR_INT_P (op)
5743       || CONST_DOUBLE_AS_FLOAT_P (op)
5744       || GET_CODE (op) == CONST_FIXED
5745       || GET_CODE (op) == CONST_VECTOR)
5746     return simplify_immed_subreg (outermode, op, innermode, byte);
5747 
5748   /* Changing mode twice with SUBREG => just change it once,
5749      or not at all if changing back to op's starting mode.  */
5750   if (GET_CODE (op) == SUBREG)
5751     {
5752       enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
5753       int final_offset = byte + SUBREG_BYTE (op);
5754       rtx newx;
5755 
5756       if (outermode == innermostmode
5757 	  && byte == 0 && SUBREG_BYTE (op) == 0)
5758 	return SUBREG_REG (op);
5759 
5760       /* The SUBREG_BYTE represents the offset, as if the value were stored
5761 	 in memory.  An irritating exception is the paradoxical subreg, where
5762 	 we define SUBREG_BYTE to be 0; on big-endian machines, the value
5763 	 would otherwise be negative.  For a moment, undo this exception.  */
5764       if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5765 	{
5766 	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
5767 	  if (WORDS_BIG_ENDIAN)
5768 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5769 	  if (BYTES_BIG_ENDIAN)
5770 	    final_offset += difference % UNITS_PER_WORD;
5771 	}
5772       if (SUBREG_BYTE (op) == 0
5773 	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
5774 	{
5775 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
5776 	  if (WORDS_BIG_ENDIAN)
5777 	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5778 	  if (BYTES_BIG_ENDIAN)
5779 	    final_offset += difference % UNITS_PER_WORD;
5780 	}
5781 
5782       /* See whether the resulting subreg will be paradoxical.  */
5783       if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
5784 	{
5785 	  /* In nonparadoxical subregs we can't handle negative offsets.  */
5786 	  if (final_offset < 0)
5787 	    return NULL_RTX;
5788 	  /* Bail out in case the resulting subreg would be incorrect.  */
5789 	  if (final_offset % GET_MODE_SIZE (outermode)
5790 	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
5791 	    return NULL_RTX;
5792 	}
5793       else
5794 	{
5795 	  int offset = 0;
5796 	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
5797 
5798 	  /* In a paradoxical subreg, see if we are still looking at the
5799 	     lower part.  If so, our SUBREG_BYTE will be 0.  */
5800 	  if (WORDS_BIG_ENDIAN)
5801 	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5802 	  if (BYTES_BIG_ENDIAN)
5803 	    offset += difference % UNITS_PER_WORD;
5804 	  if (offset == final_offset)
5805 	    final_offset = 0;
5806 	  else
5807 	    return NULL_RTX;
5808 	}
5809 
5810       /* Recurse for further possible simplifications.  */
5811       newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
5812 			      final_offset);
5813       if (newx)
5814 	return newx;
5815       if (validate_subreg (outermode, innermostmode,
5816 			   SUBREG_REG (op), final_offset))
5817 	{
5818 	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5819 	  if (SUBREG_PROMOTED_VAR_P (op)
5820 	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5821 	      && GET_MODE_CLASS (outermode) == MODE_INT
5822 	      && IN_RANGE (GET_MODE_SIZE (outermode),
5823 			   GET_MODE_SIZE (innermode),
5824 			   GET_MODE_SIZE (innermostmode))
5825 	      && subreg_lowpart_p (newx))
5826 	    {
5827 	      SUBREG_PROMOTED_VAR_P (newx) = 1;
5828 	      SUBREG_PROMOTED_UNSIGNED_SET
5829 		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
5830 	    }
5831 	  return newx;
5832 	}
5833       return NULL_RTX;
5834     }
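
  /* E.g. on a little-endian target, (subreg:QI (subreg:HI (reg:SI 1) 0) 0)
     collapses above to a single (subreg:QI (reg:SI 1) 0), assuming
     (reg:SI 1) is a pseudo register.  */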
5835 
5836   /* SUBREG of a hard register => just change the register number
5837      and/or mode.  If the hard register is not valid in that mode,
5838      suppress this simplification.  If the hard register is the stack,
5839      frame, or argument pointer, leave this as a SUBREG.  */
5840 
5841   if (REG_P (op) && HARD_REGISTER_P (op))
5842     {
5843       unsigned int regno, final_regno;
5844 
5845       regno = REGNO (op);
5846       final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
5847       if (HARD_REGISTER_NUM_P (final_regno))
5848 	{
5849 	  rtx x;
5850 	  int final_offset = byte;
5851 
5852 	  /* Adjust offset for paradoxical subregs.  */
5853 	  if (byte == 0
5854 	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
5855 	    {
5856 	      int difference = (GET_MODE_SIZE (innermode)
5857 				- GET_MODE_SIZE (outermode));
5858 	      if (WORDS_BIG_ENDIAN)
5859 		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
5860 	      if (BYTES_BIG_ENDIAN)
5861 		final_offset += difference % UNITS_PER_WORD;
5862 	    }
5863 
5864 	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
5865 
5866 	  /* Propagate the original regno.  We don't have any way to specify
5867 	     an offset inside the original regno, so do so only for the
5868 	     lowpart.  The information is used only by alias analysis, which
5869 	     cannot grok a partial register anyway.  */
5870 
5871 	  if (subreg_lowpart_offset (outermode, innermode) == byte)
5872 	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
5873 	  return x;
5874 	}
5875     }
5876 
5877   /* If we have a SUBREG of a register that we are replacing and we are
5878      replacing it with a MEM, make a new MEM and try replacing the
5879      SUBREG with it.  Don't do this if the MEM has a mode-dependent address
5880      or if we would be widening it.  */
5881 
5882   if (MEM_P (op)
5883       && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
5884       /* Allow splitting of volatile memory references in case we don't
5885          have an instruction to move the whole thing.  */
5886       && (! MEM_VOLATILE_P (op)
5887 	  || ! have_insn_for (SET, innermode))
5888       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5889     return adjust_address_nv (op, outermode, byte);
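
  /* E.g. narrowing (subreg:QI (mem:SI (reg:SI 1)) 3) yields a QImode
     MEM at offset 3 from the original address, provided the address
     is not mode-dependent.  */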
5890 
5891   /* Handle complex values represented as CONCAT
5892      of real and imaginary part.  */
5893   if (GET_CODE (op) == CONCAT)
5894     {
5895       unsigned int part_size, final_offset;
5896       rtx part, res;
5897 
5898       part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
5899       if (byte < part_size)
5900 	{
5901 	  part = XEXP (op, 0);
5902 	  final_offset = byte;
5903 	}
5904       else
5905 	{
5906 	  part = XEXP (op, 1);
5907 	  final_offset = byte - part_size;
5908 	}
5909 
5910       if (final_offset + GET_MODE_SIZE (outermode) > part_size)
5911 	return NULL_RTX;
5912 
5913       res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
5914       if (res)
5915 	return res;
5916       if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
5917 	return gen_rtx_SUBREG (outermode, part, final_offset);
5918       return NULL_RTX;
5919     }
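
  /* E.g. (subreg:SF (concat:SC re im) 4) selects the imaginary part IM
     when SFmode is four bytes wide: byte 4 falls past the real part,
     so the recursion above sees IM with a zero offset.  */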
5920 
5921   /* A SUBREG resulting from a zero extension may fold to zero if
5922      it extracts higher bits than the ZERO_EXTEND's source provides.  */
5923   if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
5924     {
5925       unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5926       if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
5927 	return CONST0_RTX (outermode);
5928     }
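
  /* E.g. on a little-endian target, (subreg:SI (zero_extend:DI
     (reg:SI 1)) 4) reads only the zeroed high half and folds to
     (const_int 0).  */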
5929 
5930   if (SCALAR_INT_MODE_P (outermode)
5931       && SCALAR_INT_MODE_P (innermode)
5932       && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
5933       && byte == subreg_lowpart_offset (outermode, innermode))
5934     {
5935       rtx tem = simplify_truncation (outermode, op, innermode);
5936       if (tem)
5937 	return tem;
5938     }
5939 
5940   return NULL_RTX;
5941 }
5942 
5943 /* Make a SUBREG operation or equivalent if it folds.  */
5944 
5945 rtx
5946 simplify_gen_subreg (enum machine_mode outermode, rtx op,
5947 		     enum machine_mode innermode, unsigned int byte)
5948 {
5949   rtx newx;
5950 
5951   newx = simplify_subreg (outermode, op, innermode, byte);
5952   if (newx)
5953     return newx;
5954 
5955   if (GET_CODE (op) == SUBREG
5956       || GET_CODE (op) == CONCAT
5957       || GET_MODE (op) == VOIDmode)
5958     return NULL_RTX;
5959 
5960   if (validate_subreg (outermode, innermode, op, byte))
5961     return gen_rtx_SUBREG (outermode, op, byte);
5962 
5963   return NULL_RTX;
5964 }
5965 
5966 /* Simplify X, an rtx expression.
5967 
5968    Return the simplified expression or NULL if no simplifications
5969    were possible.
5970 
5971    This is the preferred entry point into the simplification routines;
5972    however, we still allow passes to call the more specific routines.
5973 
5974    Right now GCC has three (yes, three) major bodies of RTL simplification
5975    code that need to be unified.
5976 
5977 	1. fold_rtx in cse.c.  This code uses various CSE specific
5978 	   information to aid in RTL simplification.
5979 
5980 	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
5981 	   it uses combine specific information to aid in RTL
5982 	   simplification.
5983 
5984 	3. The routines in this file.
5985 
5986 
5987    Long term we want to only have one body of simplification code; to
5988    get to that state I recommend the following steps:
5989 
5990 	1. Pore over fold_rtx & simplify_rtx and move any simplifications
5991 	   which do not depend on pass-specific state into these routines.
5992 
5993 	2. As code is moved by #1, change fold_rtx & simplify_rtx to
5994 	   use this routine whenever possible.
5995 
5996 	3. Allow for pass dependent state to be provided to these
5997 	   routines and add simplifications based on the pass dependent
5998 	   state.  Remove code from cse.c & combine.c that becomes
5999 	   redundant/dead.
6000 
6001     It will take time, but ultimately the compiler will be easier to
6002     maintain and improve.  It's totally silly that when we add a
6003     simplification it needs to be added to 4 places (3 for RTL
6004     simplification and 1 for tree simplification).  */
6005 
6006 rtx
6007 simplify_rtx (const_rtx x)
6008 {
6009   const enum rtx_code code = GET_CODE (x);
6010   const enum machine_mode mode = GET_MODE (x);
6011 
6012   switch (GET_RTX_CLASS (code))
6013     {
6014     case RTX_UNARY:
6015       return simplify_unary_operation (code, mode,
6016 				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6017     case RTX_COMM_ARITH:
6018       if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6019 	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6020 
6021       /* Fall through....  */
6022 
6023     case RTX_BIN_ARITH:
6024       return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6025 
6026     case RTX_TERNARY:
6027     case RTX_BITFIELD_OPS:
6028       return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6029 					 XEXP (x, 0), XEXP (x, 1),
6030 					 XEXP (x, 2));
6031 
6032     case RTX_COMPARE:
6033     case RTX_COMM_COMPARE:
6034       return simplify_relational_operation (code, mode,
6035                                             ((GET_MODE (XEXP (x, 0))
6036                                              != VOIDmode)
6037                                             ? GET_MODE (XEXP (x, 0))
6038                                             : GET_MODE (XEXP (x, 1))),
6039                                             XEXP (x, 0),
6040                                             XEXP (x, 1));
6041 
6042     case RTX_EXTRA:
6043       if (code == SUBREG)
6044 	return simplify_subreg (mode, SUBREG_REG (x),
6045 				GET_MODE (SUBREG_REG (x)),
6046 				SUBREG_BYTE (x));
6047       break;
6048 
6049     case RTX_OBJ:
6050       if (code == LO_SUM)
6051 	{
6052 	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
6053 	  if (GET_CODE (XEXP (x, 0)) == HIGH
6054 	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6055 	  return XEXP (x, 1);
6056 	}
6057       break;
6058 
6059     default:
6060       break;
6061     }
6062   return NULL;
6063 }
6064