1 /* Convert tree expression to rtl instructions, for GNU compiler.
2    Copyright (C) 1988-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "ssa.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "attribs.h"
43 #include "varasm.h"
44 #include "except.h"
45 #include "insn-attr.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "stmt.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
51 #include "expr.h"
52 #include "optabs-tree.h"
53 #include "libfuncs.h"
54 #include "reload.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-ssa-live.h"
58 #include "tree-outof-ssa.h"
59 #include "tree-ssa-address.h"
60 #include "builtins.h"
61 #include "tree-chkp.h"
62 #include "rtl-chkp.h"
63 #include "ccmp.h"
64 #include "rtx-vector-builder.h"
65 
66 
67 /* If this is nonzero, we do not bother generating VOLATILE
68    around volatile memory references, and we are willing to
69    output indirect addresses.  If cse is to follow, we reject
70    indirect addresses so a useful potential cse is generated;
71    if it is used only once, instruction combination will produce
72    the same indirect address eventually.  */
73 int cse_not_expected;
74 
75 static bool block_move_libcall_safe_for_call_parm (void);
76 static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT,
77 					unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
78 					unsigned HOST_WIDE_INT);
79 static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
80 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
81 static rtx_insn *compress_float_constant (rtx, rtx);
82 static rtx get_subtarget (rtx);
83 static void store_constructor (tree, rtx, int, poly_int64, bool);
84 static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
85 			machine_mode, tree, alias_set_type, bool, bool);
86 
87 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
88 
89 static int is_aligning_offset (const_tree, const_tree);
90 static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
91 static rtx do_store_flag (sepops, rtx, machine_mode);
92 #ifdef PUSH_ROUNDING
93 static void emit_single_push_insn (machine_mode, rtx, tree);
94 #endif
95 static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
96 			  profile_probability);
97 static rtx const_vector_from_tree (tree);
98 static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
99 static tree tree_expr_size (const_tree);
100 static HOST_WIDE_INT int_expr_size (tree);
101 static void convert_mode_scalar (rtx, rtx, int);
102 
103 
104 /* This is run to set up which modes can be used
105    directly in memory and to initialize the block move optab.  It is run
106    at the beginning of compilation and when the target is reinitialized.  */
107 
108 void
109 init_expr_target (void)
110 {
111   rtx pat;
112   int num_clobbers;
113   rtx mem, mem1;
114   rtx reg;
115 
116   /* Try indexing by frame ptr and try by stack ptr.
117      It is known that on the Convex the stack ptr isn't a valid index.
118      With luck, one or the other is valid on any machine.  */
119   mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
120   mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);
121 
122   /* A scratch register we can modify in-place below to avoid
123      useless RTL allocations.  */
124   reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
125 
126   rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
127   pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
128   PATTERN (insn) = pat;
129 
130   for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
131        mode = (machine_mode) ((int) mode + 1))
132     {
133       int regno;
134 
135       direct_load[(int) mode] = direct_store[(int) mode] = 0;
136       PUT_MODE (mem, mode);
137       PUT_MODE (mem1, mode);
138 
139       /* See if there is some register that can be used in this mode and
140 	 directly loaded or stored from memory.  */
141 
142       if (mode != VOIDmode && mode != BLKmode)
143 	for (regno = 0; regno < FIRST_PSEUDO_REGISTER
144 	     && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
145 	     regno++)
146 	  {
147 	    if (!targetm.hard_regno_mode_ok (regno, mode))
148 	      continue;
149 
150 	    set_mode_and_regno (reg, mode, regno);
151 
152 	    SET_SRC (pat) = mem;
153 	    SET_DEST (pat) = reg;
154 	    if (recog (pat, insn, &num_clobbers) >= 0)
155 	      direct_load[(int) mode] = 1;
156 
157 	    SET_SRC (pat) = mem1;
158 	    SET_DEST (pat) = reg;
159 	    if (recog (pat, insn, &num_clobbers) >= 0)
160 	      direct_load[(int) mode] = 1;
161 
162 	    SET_SRC (pat) = reg;
163 	    SET_DEST (pat) = mem;
164 	    if (recog (pat, insn, &num_clobbers) >= 0)
165 	      direct_store[(int) mode] = 1;
166 
167 	    SET_SRC (pat) = reg;
168 	    SET_DEST (pat) = mem1;
169 	    if (recog (pat, insn, &num_clobbers) >= 0)
170 	      direct_store[(int) mode] = 1;
171 	  }
172     }
173 
174   mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
175 
176   opt_scalar_float_mode mode_iter;
177   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
178     {
179       scalar_float_mode mode = mode_iter.require ();
180       scalar_float_mode srcmode;
181       FOR_EACH_MODE_UNTIL (srcmode, mode)
182 	{
183 	  enum insn_code ic;
184 
185 	  ic = can_extend_p (mode, srcmode, 0);
186 	  if (ic == CODE_FOR_nothing)
187 	    continue;
188 
189 	  PUT_MODE (mem, srcmode);
190 
191 	  if (insn_operand_matches (ic, 1, mem))
192 	    float_extend_from_mem[mode][srcmode] = true;
193 	}
194     }
195 }
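
/* Illustrative note (not part of the original source): the direct_load
   and direct_store tables filled in above are consulted later in this
   file.  For example, convert_mode_scalar only refers to a MEM directly
   in a narrower mode when a direct load in that mode is known to work:

       if (MEM_P (from)
           && ! MEM_VOLATILE_P (from)
           && direct_load[(int) to_mode]
           && ! mode_dependent_address_p (XEXP (from, 0),
                                          MEM_ADDR_SPACE (from)))
         ... refer to FROM via gen_lowpart (to_mode, from) ...  */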
196 
197 /* This is run at the start of compiling a function.  */
198 
199 void
200 init_expr (void)
201 {
202   memset (&crtl->expr, 0, sizeof (crtl->expr));
203 }
204 
205 /* Copy data from FROM to TO, where the machine modes are not the same.
206    Both modes may be integer, or both may be floating, or both may be
207    fixed-point.
208    UNSIGNEDP should be nonzero if FROM is an unsigned type.
209    This causes zero-extension instead of sign-extension.  */
210 
211 void
212 convert_move (rtx to, rtx from, int unsignedp)
213 {
214   machine_mode to_mode = GET_MODE (to);
215   machine_mode from_mode = GET_MODE (from);
216 
217   gcc_assert (to_mode != BLKmode);
218   gcc_assert (from_mode != BLKmode);
219 
220   /* If the source and destination are already the same, then there's
221      nothing to do.  */
222   if (to == from)
223     return;
224 
225   /* If FROM is a SUBREG that indicates that we have already done at least
226      the required extension, strip it.  We don't handle such SUBREGs as
227      TO here.  */
228 
229   scalar_int_mode to_int_mode;
230   if (GET_CODE (from) == SUBREG
231       && SUBREG_PROMOTED_VAR_P (from)
232       && is_a <scalar_int_mode> (to_mode, &to_int_mode)
233       && (GET_MODE_PRECISION (subreg_promoted_mode (from))
234 	  >= GET_MODE_PRECISION (to_int_mode))
235       && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
236     from = gen_lowpart (to_int_mode, from), from_mode = to_int_mode;
237 
238   gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
239 
240   if (to_mode == from_mode
241       || (from_mode == VOIDmode && CONSTANT_P (from)))
242     {
243       emit_move_insn (to, from);
244       return;
245     }
246 
247   if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
248     {
249       gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
250 			    GET_MODE_BITSIZE (to_mode)));
251 
252       if (VECTOR_MODE_P (to_mode))
253 	from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
254       else
255 	to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
256 
257       emit_move_insn (to, from);
258       return;
259     }
260 
261   if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
262     {
263       convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
264       convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
265       return;
266     }
267 
268   convert_mode_scalar (to, from, unsignedp);
269 }
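
/* Illustrative sketch (not part of the original source): a typical use
   of convert_move during expansion is widening a value into a fresh
   register of a wider mode.  The mode names and the choice of
   zero-extension are assumptions made for the example:

       rtx src = ...;                          an SImode value
       rtx dst = gen_reg_rtx (DImode);
       convert_move (dst, src, 1);             UNSIGNEDP == 1: zero-extend

   Passing UNSIGNEDP == 0 would request sign-extension instead.  */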
270 
271 /* Like convert_move, but deals only with scalar modes.  */
272 
273 static void
274 convert_mode_scalar (rtx to, rtx from, int unsignedp)
275 {
276   /* Both modes should be scalar types.  */
277   scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
278   scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
279   bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
280   bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
281   enum insn_code code;
282   rtx libcall;
283 
284   gcc_assert (to_real == from_real);
285 
286   /* rtx code for making an equivalent value.  */
287   enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
288 			      : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
289 
290   if (to_real)
291     {
292       rtx value;
293       rtx_insn *insns;
294       convert_optab tab;
295 
296       gcc_assert ((GET_MODE_PRECISION (from_mode)
297 		   != GET_MODE_PRECISION (to_mode))
298 		  || (DECIMAL_FLOAT_MODE_P (from_mode)
299 		      != DECIMAL_FLOAT_MODE_P (to_mode)));
300 
301       if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
302 	/* Conversion between decimal float and binary float, same size.  */
303 	tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
304       else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
305 	tab = sext_optab;
306       else
307 	tab = trunc_optab;
308 
309       /* Try converting directly if the insn is supported.  */
310 
311       code = convert_optab_handler (tab, to_mode, from_mode);
312       if (code != CODE_FOR_nothing)
313 	{
314 	  emit_unop_insn (code, to, from,
315 			  tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
316 	  return;
317 	}
318 
319       /* Otherwise use a libcall.  */
320       libcall = convert_optab_libfunc (tab, to_mode, from_mode);
321 
322       /* Is this conversion implemented yet?  */
323       gcc_assert (libcall);
324 
325       start_sequence ();
326       value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
327 				       from, from_mode);
328       insns = get_insns ();
329       end_sequence ();
330       emit_libcall_block (insns, to, value,
331 			  tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
332 								       from)
333 			  : gen_rtx_FLOAT_EXTEND (to_mode, from));
334       return;
335     }
336 
337   /* Handle pointer conversion.  */			/* SPEE 900220.  */
338   /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
339   {
340     convert_optab ctab;
341 
342     if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
343       ctab = trunc_optab;
344     else if (unsignedp)
345       ctab = zext_optab;
346     else
347       ctab = sext_optab;
348 
349     if (convert_optab_handler (ctab, to_mode, from_mode)
350 	!= CODE_FOR_nothing)
351       {
352 	emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
353 			to, from, UNKNOWN);
354 	return;
355       }
356   }
357 
358   /* Targets are expected to provide conversion insns between PxImode and
359      xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
360   if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
361     {
362       scalar_int_mode full_mode
363 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
364 
365       gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
366 		  != CODE_FOR_nothing);
367 
368       if (full_mode != from_mode)
369 	from = convert_to_mode (full_mode, from, unsignedp);
370       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
371 		      to, from, UNKNOWN);
372       return;
373     }
374   if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
375     {
376       rtx new_from;
377       scalar_int_mode full_mode
378 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
379       convert_optab ctab = unsignedp ? zext_optab : sext_optab;
380       enum insn_code icode;
381 
382       icode = convert_optab_handler (ctab, full_mode, from_mode);
383       gcc_assert (icode != CODE_FOR_nothing);
384 
385       if (to_mode == full_mode)
386 	{
387 	  emit_unop_insn (icode, to, from, UNKNOWN);
388 	  return;
389 	}
390 
391       new_from = gen_reg_rtx (full_mode);
392       emit_unop_insn (icode, new_from, from, UNKNOWN);
393 
394       /* else proceed to integer conversions below.  */
395       from_mode = full_mode;
396       from = new_from;
397     }
398 
399    /* Make sure both are fixed-point modes or both are not.  */
400    gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
401 	       ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
402    if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
403     {
404       /* If we widen from_mode to to_mode and they are in the same class,
405 	 we won't saturate the result.
406 	 Otherwise, always saturate the result to play safe.  */
407       if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
408 	  && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
409 	expand_fixed_convert (to, from, 0, 0);
410       else
411 	expand_fixed_convert (to, from, 0, 1);
412       return;
413     }
414 
415   /* Now both modes are integers.  */
416 
417   /* Handle expanding beyond a word.  */
418   if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
419       && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
420     {
421       rtx_insn *insns;
422       rtx lowpart;
423       rtx fill_value;
424       rtx lowfrom;
425       int i;
426       scalar_mode lowpart_mode;
427       int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
428 
429       /* Try converting directly if the insn is supported.  */
430       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
431 	  != CODE_FOR_nothing)
432 	{
433 	  /* If FROM is a SUBREG, put it into a register.  Do this
434 	     so that we always generate the same set of insns for
435 	     better cse'ing; if an intermediate assignment occurred,
436 	     we won't be doing the operation directly on the SUBREG.  */
437 	  if (optimize > 0 && GET_CODE (from) == SUBREG)
438 	    from = force_reg (from_mode, from);
439 	  emit_unop_insn (code, to, from, equiv_code);
440 	  return;
441 	}
442       /* Next, try converting via full word.  */
443       else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
444 	       && ((code = can_extend_p (to_mode, word_mode, unsignedp))
445 		   != CODE_FOR_nothing))
446 	{
447 	  rtx word_to = gen_reg_rtx (word_mode);
448 	  if (REG_P (to))
449 	    {
450 	      if (reg_overlap_mentioned_p (to, from))
451 		from = force_reg (from_mode, from);
452 	      emit_clobber (to);
453 	    }
454 	  convert_move (word_to, from, unsignedp);
455 	  emit_unop_insn (code, to, word_to, equiv_code);
456 	  return;
457 	}
458 
459       /* No special multiword conversion insn; do it by hand.  */
460       start_sequence ();
461 
462       /* Since we will turn this into a no conflict block, we must ensure
463          the source does not overlap the target, so force it into an
464          isolated register when it might.  Likewise for any MEM input,
465          since the conversion sequence might require several references to
466          it and we must ensure we're getting the same value every time.  */
467 
468       if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
469 	from = force_reg (from_mode, from);
470 
471       /* Get a copy of FROM widened to a word, if necessary.  */
472       if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
473 	lowpart_mode = word_mode;
474       else
475 	lowpart_mode = from_mode;
476 
477       lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
478 
479       lowpart = gen_lowpart (lowpart_mode, to);
480       emit_move_insn (lowpart, lowfrom);
481 
482       /* Compute the value to put in each remaining word.  */
483       if (unsignedp)
484 	fill_value = const0_rtx;
485       else
486 	fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
487 					    LT, lowfrom, const0_rtx,
488 					    lowpart_mode, 0, -1);
489 
490       /* Fill the remaining words.  */
491       for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
492 	{
493 	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
494 	  rtx subword = operand_subword (to, index, 1, to_mode);
495 
496 	  gcc_assert (subword);
497 
498 	  if (fill_value != subword)
499 	    emit_move_insn (subword, fill_value);
500 	}
501 
502       insns = get_insns ();
503       end_sequence ();
504 
505       emit_insn (insns);
506       return;
507     }
508 
509   /* Truncating multi-word to a word or less.  */
510   if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
511       && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
512     {
513       if (!((MEM_P (from)
514 	     && ! MEM_VOLATILE_P (from)
515 	     && direct_load[(int) to_mode]
516 	     && ! mode_dependent_address_p (XEXP (from, 0),
517 					    MEM_ADDR_SPACE (from)))
518 	    || REG_P (from)
519 	    || GET_CODE (from) == SUBREG))
520 	from = force_reg (from_mode, from);
521       convert_move (to, gen_lowpart (word_mode, from), 0);
522       return;
523     }
524 
525   /* Now follow all the conversions between integers
526      no more than a word long.  */
527 
528   /* For truncation, usually we can just refer to FROM in a narrower mode.  */
529   if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
530       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
531     {
532       if (!((MEM_P (from)
533 	     && ! MEM_VOLATILE_P (from)
534 	     && direct_load[(int) to_mode]
535 	     && ! mode_dependent_address_p (XEXP (from, 0),
536 					    MEM_ADDR_SPACE (from)))
537 	    || REG_P (from)
538 	    || GET_CODE (from) == SUBREG))
539 	from = force_reg (from_mode, from);
540       if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
541 	  && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
542 	from = copy_to_reg (from);
543       emit_move_insn (to, gen_lowpart (to_mode, from));
544       return;
545     }
546 
547   /* Handle extension.  */
548   if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
549     {
550       /* Convert directly if that works.  */
551       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
552 	  != CODE_FOR_nothing)
553 	{
554 	  emit_unop_insn (code, to, from, equiv_code);
555 	  return;
556 	}
557       else
558 	{
559 	  scalar_mode intermediate;
560 	  rtx tmp;
561 	  int shift_amount;
562 
563 	  /* Search for a mode to convert via.  */
564 	  opt_scalar_mode intermediate_iter;
565 	  FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
566 	    {
567 	      scalar_mode intermediate = intermediate_iter.require ();
568 	      if (((can_extend_p (to_mode, intermediate, unsignedp)
569 		    != CODE_FOR_nothing)
570 		   || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
571 		       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
572 							 intermediate)))
573 		  && (can_extend_p (intermediate, from_mode, unsignedp)
574 		      != CODE_FOR_nothing))
575 		{
576 		  convert_move (to, convert_to_mode (intermediate, from,
577 						     unsignedp), unsignedp);
578 		  return;
579 		}
580 	    }
581 
582 	  /* No suitable intermediate mode.
583 	     Generate what we need with	shifts.  */
584 	  shift_amount = (GET_MODE_PRECISION (to_mode)
585 			  - GET_MODE_PRECISION (from_mode));
586 	  from = gen_lowpart (to_mode, force_reg (from_mode, from));
587 	  tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
588 			      to, unsignedp);
589 	  tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
590 			      to, unsignedp);
591 	  if (tmp != to)
592 	    emit_move_insn (to, tmp);
593 	  return;
594 	}
595     }
596 
597   /* Support special truncate insns for certain modes.  */
598   if (convert_optab_handler (trunc_optab, to_mode,
599 			     from_mode) != CODE_FOR_nothing)
600     {
601       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
602 		      to, from, UNKNOWN);
603       return;
604     }
605 
606   /* Handle truncation of volatile memrefs, and so on;
607      the things that couldn't be truncated directly,
608      and for which there was no special instruction.
609 
610      ??? Code above formerly short-circuited this, for most integer
611      mode pairs, with a force_reg in from_mode followed by a recursive
612      call to this routine.  Appears always to have been wrong.  */
613   if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
614     {
615       rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
616       emit_move_insn (to, temp);
617       return;
618     }
619 
620   /* Mode combination is not recognized.  */
621   gcc_unreachable ();
622 }
623 
624 /* Return an rtx for a value that would result
625    from converting X to mode MODE.
626    Both X and MODE may be floating, or both integer.
627    UNSIGNEDP is nonzero if X is an unsigned value.
628    This can be done by referring to a part of X in place
629    or by copying to a new temporary with conversion.  */
630 
631 rtx
632 convert_to_mode (machine_mode mode, rtx x, int unsignedp)
633 {
634   return convert_modes (mode, VOIDmode, x, unsignedp);
635 }
636 
637 /* Return an rtx for a value that would result
638    from converting X from mode OLDMODE to mode MODE.
639    Both modes may be floating, or both integer.
640    UNSIGNEDP is nonzero if X is an unsigned value.
641 
642    This can be done by referring to a part of X in place
643    or by copying to a new temporary with conversion.
644 
645    You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.  */
646 
647 rtx
648 convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
649 {
650   rtx temp;
651   scalar_int_mode int_mode;
652 
653   /* If FROM is a SUBREG that indicates that we have already done at least
654      the required extension, strip it.  */
655 
656   if (GET_CODE (x) == SUBREG
657       && SUBREG_PROMOTED_VAR_P (x)
658       && is_a <scalar_int_mode> (mode, &int_mode)
659       && (GET_MODE_PRECISION (subreg_promoted_mode (x))
660 	  >= GET_MODE_PRECISION (int_mode))
661       && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
662     x = gen_lowpart (int_mode, SUBREG_REG (x));
663 
664   if (GET_MODE (x) != VOIDmode)
665     oldmode = GET_MODE (x);
666 
667   if (mode == oldmode)
668     return x;
669 
670   if (CONST_SCALAR_INT_P (x)
671       && is_int_mode (mode, &int_mode))
672     {
673       /* If the caller did not tell us the old mode, then there is not
674 	 much to do with respect to canonicalization.  We have to
675 	 assume that all the bits are significant.  */
676       if (GET_MODE_CLASS (oldmode) != MODE_INT)
677 	oldmode = MAX_MODE_INT;
678       wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
679 				   GET_MODE_PRECISION (int_mode),
680 				   unsignedp ? UNSIGNED : SIGNED);
681       return immed_wide_int_const (w, int_mode);
682     }
683 
684   /* We can do this with a gen_lowpart if both desired and current modes
685      are integer, and this is either a constant integer, a register, or a
686      non-volatile MEM. */
687   scalar_int_mode int_oldmode;
688   if (is_int_mode (mode, &int_mode)
689       && is_int_mode (oldmode, &int_oldmode)
690       && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
691       && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
692 	  || CONST_POLY_INT_P (x)
693           || (REG_P (x)
694               && (!HARD_REGISTER_P (x)
695 		  || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
696               && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
697    return gen_lowpart (int_mode, x);
698 
699   /* Converting an integer constant into a vector mode is always
700      equivalent to a subreg operation.  */
701   if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
702     {
703       gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
704 			    GET_MODE_BITSIZE (oldmode)));
705       return simplify_gen_subreg (mode, x, oldmode, 0);
706     }
707 
708   temp = gen_reg_rtx (mode);
709   convert_move (temp, x, unsignedp);
710   return temp;
711 }
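
/* Illustrative sketch (not part of the original source): convert_modes
   is convenient when X may be a VOIDmode constant, since OLDMODE tells
   it how to canonicalize the value.  Narrowing an SImode constant to
   QImode (the mode names are assumptions made for the example):

       rtx x = gen_int_mode (0x1ff, SImode);
       rtx y = convert_modes (QImode, SImode, x, 1);

   Y is the equivalent QImode constant produced by the
   CONST_SCALAR_INT_P path above, with the excess high bits dropped;
   no instructions are emitted for this case.  */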
712 
713 /* Return the largest alignment we can use for doing a move (or store)
714    of MAX_PIECES.  ALIGN is the largest alignment we could use.  */
715 
716 static unsigned int
717 alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
718 {
719   scalar_int_mode tmode
720     = int_mode_for_size (max_pieces * BITS_PER_UNIT, 1).require ();
721 
722   if (align >= GET_MODE_ALIGNMENT (tmode))
723     align = GET_MODE_ALIGNMENT (tmode);
724   else
725     {
726       scalar_int_mode xmode = NARROWEST_INT_MODE;
727       opt_scalar_int_mode mode_iter;
728       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
729 	{
730 	  tmode = mode_iter.require ();
731 	  if (GET_MODE_SIZE (tmode) > max_pieces
732 	      || targetm.slow_unaligned_access (tmode, align))
733 	    break;
734 	  xmode = tmode;
735 	}
736 
737       align = MAX (align, GET_MODE_ALIGNMENT (xmode));
738     }
739 
740   return align;
741 }
742 
743 /* Return the widest integer mode that is narrower than SIZE bytes.  */
744 
745 static scalar_int_mode
746 widest_int_mode_for_size (unsigned int size)
747 {
748   scalar_int_mode result = NARROWEST_INT_MODE;
749 
750   gcc_checking_assert (size > 1);
751 
752   opt_scalar_int_mode tmode;
753   FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
754     if (GET_MODE_SIZE (tmode.require ()) < size)
755       result = tmode.require ();
756 
757   return result;
758 }
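
/* Illustrative note (not part of the original source): assuming the
   usual QImode/HImode/SImode/DImode integer modes of 1/2/4/8 bytes,
   widest_int_mode_for_size (5) and widest_int_mode_for_size (8) both
   return SImode, since the result must be strictly narrower than SIZE
   bytes.  */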
759 
760 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
761    and should be performed piecewise.  */
762 
763 static bool
764 can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
765 		  enum by_pieces_operation op)
766 {
767   return targetm.use_by_pieces_infrastructure_p (len, align, op,
768 						 optimize_insn_for_speed_p ());
769 }
770 
771 /* Determine whether the LEN bytes can be moved by using several move
772    instructions.  Return nonzero if a call to move_by_pieces should
773    succeed.  */
774 
775 bool
776 can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
777 {
778   return can_do_by_pieces (len, align, MOVE_BY_PIECES);
779 }
780 
781 /* Return number of insns required to perform operation OP by pieces
782    for L bytes.  ALIGN (in bits) is maximum alignment we can assume.  */
783 
784 unsigned HOST_WIDE_INT
785 by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
786 		  unsigned int max_size, by_pieces_operation op)
787 {
788   unsigned HOST_WIDE_INT n_insns = 0;
789 
790   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
791 
792   while (max_size > 1 && l > 0)
793     {
794       scalar_int_mode mode = widest_int_mode_for_size (max_size);
795       enum insn_code icode;
796 
797       unsigned int modesize = GET_MODE_SIZE (mode);
798 
799       icode = optab_handler (mov_optab, mode);
800       if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
801 	{
802 	  unsigned HOST_WIDE_INT n_pieces = l / modesize;
803 	  l %= modesize;
804 	  switch (op)
805 	    {
806 	    default:
807 	      n_insns += n_pieces;
808 	      break;
809 
810 	    case COMPARE_BY_PIECES:
811 	      int batch = targetm.compare_by_pieces_branch_ratio (mode);
812 	      int batch_ops = 4 * batch - 1;
813 	      unsigned HOST_WIDE_INT full = n_pieces / batch;
814 	      n_insns += full * batch_ops;
815 	      if (n_pieces % batch != 0)
816 		n_insns++;
817 	      break;
818 
819 	    }
820 	}
821       max_size = modesize;
822     }
823 
824   gcc_assert (!l);
825   return n_insns;
826 }
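
/* Illustrative worked example (not part of the original source):
   assuming MOVE_MAX_PIECES == 8, sufficient alignment and the usual
   QI/HI/SI/DI modes, by_pieces_ninsns (11, align, 9, MOVE_BY_PIECES)
   counts one DImode piece (3 bytes left), no SImode piece, one HImode
   piece and one QImode piece, for a total of 3 move insns.  For
   COMPARE_BY_PIECES the count also reflects the accumulation and
   branch insns emitted after every batch of pieces.  */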
827 
828 /* Used when performing piecewise block operations, holds information
829    about one of the memory objects involved.  The member functions
830    can be used to generate code for loading from the object and
831    updating the address when iterating.  */
832 
833 class pieces_addr
834 {
835   /* The object being referenced, a MEM.  Can be NULL_RTX to indicate
836      stack pushes.  */
837   rtx m_obj;
838   /* The address of the object.  Can differ from that seen in the
839      MEM rtx if we copied the address to a register.  */
840   rtx m_addr;
841   /* Nonzero if the address of the object already has an autoincrement;
842      the sign indicates whether it was an increment or a decrement.  */
843   signed char m_addr_inc;
844   /* Nonzero if we intend to use autoinc without the address already
845      having autoinc form.  We will insert add insns around each memory
846      reference, expecting later passes to form autoinc addressing modes.
847      The only supported options are predecrement and postincrement.  */
848   signed char m_explicit_inc;
849   /* True if we have either of the two possible cases of using
850      autoincrement.  */
851   bool m_auto;
852   /* True if this is an address to be used for load operations rather
853      than stores.  */
854   bool m_is_load;
855 
856   /* Optionally, a function to obtain constants for any given offset into
857      the objects, and data associated with it.  */
858   by_pieces_constfn m_constfn;
859   void *m_cfndata;
860 public:
861   pieces_addr (rtx, bool, by_pieces_constfn, void *);
862   rtx adjust (scalar_int_mode, HOST_WIDE_INT);
863   void increment_address (HOST_WIDE_INT);
864   void maybe_predec (HOST_WIDE_INT);
865   void maybe_postinc (HOST_WIDE_INT);
866   void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
867   int get_addr_inc ()
868   {
869     return m_addr_inc;
870   }
871 };
872 
873 /* Initialize a pieces_addr structure from an object OBJ.  IS_LOAD is
874    true if the operation to be performed on this object is a load
875    rather than a store.  For stores, OBJ can be NULL, in which case we
876    assume the operation is a stack push.  For loads, the optional
877    CONSTFN and its associated CFNDATA can be used in place of the
878    memory load.  */
879 
880 pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
881 			  void *cfndata)
882   : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
883 {
884   m_addr_inc = 0;
885   m_auto = false;
886   if (obj)
887     {
888       rtx addr = XEXP (obj, 0);
889       rtx_code code = GET_CODE (addr);
890       m_addr = addr;
891       bool dec = code == PRE_DEC || code == POST_DEC;
892       bool inc = code == PRE_INC || code == POST_INC;
893       m_auto = inc || dec;
894       if (m_auto)
895 	m_addr_inc = dec ? -1 : 1;
896 
897       /* While we have always looked for these codes here, the code
898 	 implementing the memory operation has never handled them.
899 	 Support could be added later if necessary or beneficial.  */
900       gcc_assert (code != PRE_INC && code != POST_DEC);
901     }
902   else
903     {
904       m_addr = NULL_RTX;
905       if (!is_load)
906 	{
907 	  m_auto = true;
908 	  if (STACK_GROWS_DOWNWARD)
909 	    m_addr_inc = -1;
910 	  else
911 	    m_addr_inc = 1;
912 	}
913       else
914 	gcc_assert (constfn != NULL);
915     }
916   m_explicit_inc = 0;
917   if (constfn)
918     gcc_assert (is_load);
919 }
920 
921 /* Decide whether to use autoinc for an address involved in a memory op.
922    MODE is the mode of the accesses, REVERSE is true if we've decided to
923    perform the operation starting from the end, and LEN is the length of
924    the operation.  Don't override an earlier decision to set m_auto.  */
925 
926 void
927 pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
928 			     HOST_WIDE_INT len)
929 {
930   if (m_auto || m_obj == NULL_RTX)
931     return;
932 
933   bool use_predec = (m_is_load
934 		     ? USE_LOAD_PRE_DECREMENT (mode)
935 		     : USE_STORE_PRE_DECREMENT (mode));
936   bool use_postinc = (m_is_load
937 		      ? USE_LOAD_POST_INCREMENT (mode)
938 		      : USE_STORE_POST_INCREMENT (mode));
939   machine_mode addr_mode = get_address_mode (m_obj);
940 
941   if (use_predec && reverse)
942     {
943       m_addr = copy_to_mode_reg (addr_mode,
944 				 plus_constant (addr_mode,
945 						m_addr, len));
946       m_auto = true;
947       m_explicit_inc = -1;
948     }
949   else if (use_postinc && !reverse)
950     {
951       m_addr = copy_to_mode_reg (addr_mode, m_addr);
952       m_auto = true;
953       m_explicit_inc = 1;
954     }
955   else if (CONSTANT_P (m_addr))
956     m_addr = copy_to_mode_reg (addr_mode, m_addr);
957 }
958 
959 /* Adjust the address to refer to the data at OFFSET in MODE.  If we
960    are using autoincrement for this address, we don't add the offset,
961    but we still modify the MEM's properties.  */
962 
963 rtx
964 pieces_addr::adjust (scalar_int_mode mode, HOST_WIDE_INT offset)
965 {
966   if (m_constfn)
967     return m_constfn (m_cfndata, offset, mode);
968   if (m_obj == NULL_RTX)
969     return NULL_RTX;
970   if (m_auto)
971     return adjust_automodify_address (m_obj, mode, m_addr, offset);
972   else
973     return adjust_address (m_obj, mode, offset);
974 }
975 
976 /* Emit an add instruction to increment the address by SIZE.  */
977 
978 void
979 pieces_addr::increment_address (HOST_WIDE_INT size)
980 {
981   rtx amount = gen_int_mode (size, GET_MODE (m_addr));
982   emit_insn (gen_add2_insn (m_addr, amount));
983 }
984 
985 /* If we are supposed to decrement the address before each access, emit code
986    to do so now.  Increment by SIZE (which should already have the correct
987    sign).  */
988 
989 void
990 pieces_addr::maybe_predec (HOST_WIDE_INT size)
991 {
992   if (m_explicit_inc >= 0)
993     return;
994   gcc_assert (HAVE_PRE_DECREMENT);
995   increment_address (size);
996 }
997 
998 /* If we are supposed to decrement the address after each access, emit code
999    to do so now.  Increment by SIZE.  */
1000 
1001 void
1002 pieces_addr::maybe_postinc (HOST_WIDE_INT size)
1003 {
1004   if (m_explicit_inc <= 0)
1005     return;
1006   gcc_assert (HAVE_POST_INCREMENT);
1007   increment_address (size);
1008 }
1009 
1010 /* This class describes a block operation done by pieces; the derived
1011    classes below specialize it for moves, stores and compares.  */
1012 
1013 class op_by_pieces_d
1014 {
1015  protected:
1016   pieces_addr m_to, m_from;
1017   unsigned HOST_WIDE_INT m_len;
1018   HOST_WIDE_INT m_offset;
1019   unsigned int m_align;
1020   unsigned int m_max_size;
1021   bool m_reverse;
1022 
1023   /* Virtual functions, overridden by derived classes for the specific
1024      operation.  */
1025   virtual void generate (rtx, rtx, machine_mode) = 0;
1026   virtual bool prepare_mode (machine_mode, unsigned int) = 0;
1027   virtual void finish_mode (machine_mode)
1028   {
1029   }
1030 
1031  public:
1032   op_by_pieces_d (rtx, bool, rtx, bool, by_pieces_constfn, void *,
1033 		  unsigned HOST_WIDE_INT, unsigned int);
1034   void run ();
1035 };
1036 
1037 /* The constructor for an op_by_pieces_d structure.  We require two
1038    objects named TO and FROM, which are identified as loads or stores
1039    by TO_LOAD and FROM_LOAD.  If FROM is a load, the optional FROM_CFN
1040    and its associated FROM_CFN_DATA can be used to replace loads with
1041    constant values.  LEN describes the length of the operation.  */
1042 
1043 op_by_pieces_d::op_by_pieces_d (rtx to, bool to_load,
1044 				rtx from, bool from_load,
1045 				by_pieces_constfn from_cfn,
1046 				void *from_cfn_data,
1047 				unsigned HOST_WIDE_INT len,
1048 				unsigned int align)
1049   : m_to (to, to_load, NULL, NULL),
1050     m_from (from, from_load, from_cfn, from_cfn_data),
1051     m_len (len), m_max_size (MOVE_MAX_PIECES + 1)
1052 {
1053   int toi = m_to.get_addr_inc ();
1054   int fromi = m_from.get_addr_inc ();
1055   if (toi >= 0 && fromi >= 0)
1056     m_reverse = false;
1057   else if (toi <= 0 && fromi <= 0)
1058     m_reverse = true;
1059   else
1060     gcc_unreachable ();
1061 
1062   m_offset = m_reverse ? len : 0;
1063   align = MIN (to ? MEM_ALIGN (to) : align,
1064 	       from ? MEM_ALIGN (from) : align);
1065 
1066   /* If copying requires more than two move insns,
1067      copy addresses to registers (to make displacements shorter)
1068      and use post-increment if available.  */
1069   if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
1070     {
1071       /* Find the widest mode that might be used for the pieces.  */
1072       scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1073 
1074       m_from.decide_autoinc (mode, m_reverse, len);
1075       m_to.decide_autoinc (mode, m_reverse, len);
1076     }
1077 
1078   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
1079   m_align = align;
1080 }
1081 
1082 /* This function contains the main loop used for expanding a block
1083    operation.  First move what we can in the largest integer mode,
1084    go to successively smaller modes.  For every access, call the virtual
1085    function generate with the two operands in the chosen mode.  */
1086 
1087 void
1088 op_by_pieces_d::run ()
1089 {
1090   while (m_max_size > 1 && m_len > 0)
1091     {
1092       scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1093 
1094       if (prepare_mode (mode, m_align))
1095 	{
1096 	  unsigned int size = GET_MODE_SIZE (mode);
1097 	  rtx to1 = NULL_RTX, from1;
1098 
1099 	  while (m_len >= size)
1100 	    {
1101 	      if (m_reverse)
1102 		m_offset -= size;
1103 
1104 	      to1 = m_to.adjust (mode, m_offset);
1105 	      from1 = m_from.adjust (mode, m_offset);
1106 
1107 	      m_to.maybe_predec (-(HOST_WIDE_INT)size);
1108 	      m_from.maybe_predec (-(HOST_WIDE_INT)size);
1109 
1110 	      generate (to1, from1, mode);
1111 
1112 	      m_to.maybe_postinc (size);
1113 	      m_from.maybe_postinc (size);
1114 
1115 	      if (!m_reverse)
1116 		m_offset += size;
1117 
1118 	      m_len -= size;
1119 	    }
1120 
1121 	  finish_mode (mode);
1122 	}
1123 
1124       m_max_size = GET_MODE_SIZE (mode);
1125     }
1126 
1127   /* The code above should have handled everything.  */
1128   gcc_assert (!m_len);
1129 }
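
/* Illustrative note (not part of the original source): for a 7-byte
   operation with MOVE_MAX_PIECES == 8 and sufficient alignment, the
   loop above skips DImode (wider than the remaining length), then
   emits one SImode access, one HImode access and one QImode access,
   adjusting m_offset before each piece when m_reverse and after it
   otherwise.  */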
1130 
1131 /* Derived class from op_by_pieces_d, providing support for block move
1132    operations.  */
1133 
1134 class move_by_pieces_d : public op_by_pieces_d
1135 {
1136   insn_gen_fn m_gen_fun;
1137   void generate (rtx, rtx, machine_mode);
1138   bool prepare_mode (machine_mode, unsigned int);
1139 
1140  public:
1141   move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1142 		    unsigned int align)
1143     : op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
1144   {
1145   }
1146   rtx finish_endp (int);
1147 };
1148 
1149 /* Return true if MODE can be used for a set of copies, given an
1150    alignment ALIGN.  Prepare whatever data is necessary for later
1151    calls to generate.  */
1152 
1153 bool
1154 move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1155 {
1156   insn_code icode = optab_handler (mov_optab, mode);
1157   m_gen_fun = GEN_FCN (icode);
1158   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1159 }
1160 
1161 /* A callback used when iterating for a move_by_pieces operation.
1162    OP0 is the destination piece and OP1 the source piece, both in
1163    MODE.  If OP0 is NULL, generate a push of OP1 instead; otherwise
1164    emit a move using the insn gen function recorded by the last
1165    call to prepare_mode.  */
1166 
1167 void
1168 move_by_pieces_d::generate (rtx op0, rtx op1,
1169 			    machine_mode mode ATTRIBUTE_UNUSED)
1170 {
1171 #ifdef PUSH_ROUNDING
1172   if (op0 == NULL_RTX)
1173     {
1174       emit_single_push_insn (mode, op1, NULL);
1175       return;
1176     }
1177 #endif
1178   emit_insn (m_gen_fun (op0, op1));
1179 }
1180 
1181 /* Perform the final adjustment at the end of a string to obtain the
1182    correct return value for the block operation.  If ENDP is 1 return
1183    memory at the end ala mempcpy, and if ENDP is 2 return memory at the
1184    end minus one byte ala stpcpy.  */
1185 
1186 rtx
1187 move_by_pieces_d::finish_endp (int endp)
1188 {
1189   gcc_assert (!m_reverse);
1190   if (endp == 2)
1191     {
1192       m_to.maybe_postinc (-1);
1193       --m_offset;
1194     }
1195   return m_to.adjust (QImode, m_offset);
1196 }
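
/* Illustrative note (not part of the original source): after copying,
   say, 5 bytes to a destination starting at address X, finish_endp (1)
   yields a QImode reference at X + 5 (the mempcpy convention), while
   finish_endp (2) yields one at X + 4, the last byte written (the
   stpcpy convention).  */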
1197 
1198 /* Generate several move instructions to copy LEN bytes from block FROM to
1199    block TO.  (These are MEM rtx's with BLKmode).
1200 
1201    If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1202    used to push FROM to the stack.
1203 
1204    ALIGN is maximum stack alignment we can assume.
1205 
1206    If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
1207    mempcpy, and if ENDP is 2 return memory the end minus one byte ala
1208    mempcpy, and if ENDP is 2 return memory at the end minus one byte ala
1209 
1210 rtx
1211 move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1212 		unsigned int align, int endp)
1213 {
1214 #ifndef PUSH_ROUNDING
1215   if (to == NULL)
1216     gcc_unreachable ();
1217 #endif
1218 
1219   move_by_pieces_d data (to, from, len, align);
1220 
1221   data.run ();
1222 
1223   if (endp)
1224     return data.finish_endp (endp);
1225   else
1226     return to;
1227 }
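
/* Illustrative sketch (not part of the original source): a caller with
   two BLKmode MEMs and a known constant length would typically guard
   the call the way emit_block_move_hints below does.  DST, SRC, the
   16-byte length and the 64-bit alignment are assumptions made for the
   example:

       if (can_move_by_pieces (16, 64))
         move_by_pieces (dst, src, 16, 64, 0);

   ENDP == 0 simply returns TO; 1 or 2 request the mempcpy/stpcpy style
   return value computed by finish_endp.  */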
1228 
1229 /* Derived class from op_by_pieces_d, providing support for block store
1230    operations.  */
1231 
1232 class store_by_pieces_d : public op_by_pieces_d
1233 {
1234   insn_gen_fn m_gen_fun;
1235   void generate (rtx, rtx, machine_mode);
1236   bool prepare_mode (machine_mode, unsigned int);
1237 
1238  public:
1239   store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
1240 		     unsigned HOST_WIDE_INT len, unsigned int align)
1241     : op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
1242   {
1243   }
1244   rtx finish_endp (int);
1245 };
1246 
1247 /* Return true if MODE can be used for a set of stores, given an
1248    alignment ALIGN.  Prepare whatever data is necessary for later
1249    calls to generate.  */
1250 
1251 bool
1252 store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1253 {
1254   insn_code icode = optab_handler (mov_optab, mode);
1255   m_gen_fun = GEN_FCN (icode);
1256   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1257 }
1258 
1259 /* A callback used when iterating for a store_by_pieces operation.
1260    OP0 is the destination piece and OP1 the constant value to store
1261    in MODE, as produced by the constfn supplied at construction time;
1262    emit the store using the insn gen function recorded by the last
1263    call to prepare_mode.  */
1264 
1265 void
1266 store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
1267 {
1268   emit_insn (m_gen_fun (op0, op1));
1269 }
1270 
1271 /* Perform the final adjustment at the end of a string to obtain the
1272    correct return value for the block operation.  If ENDP is 1 return
1273    memory at the end ala mempcpy, and if ENDP is 2 return memory at the
1274    end minus one byte ala stpcpy.  */
1275 
1276 rtx
1277 store_by_pieces_d::finish_endp (int endp)
1278 {
1279   gcc_assert (!m_reverse);
1280   if (endp == 2)
1281     {
1282       m_to.maybe_postinc (-1);
1283       --m_offset;
1284     }
1285   return m_to.adjust (QImode, m_offset);
1286 }
1287 
1288 /* Determine whether the LEN bytes generated by CONSTFUN can be
1289    stored to memory using several move instructions.  CONSTFUNDATA is
1290    a pointer which will be passed as argument in every CONSTFUN call.
1291    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1292    a memset operation and false if it's a copy of a constant string.
1293    Return nonzero if a call to store_by_pieces should succeed.  */
1294 
1295 int
1296 can_store_by_pieces (unsigned HOST_WIDE_INT len,
1297 		     rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1298 		     void *constfundata, unsigned int align, bool memsetp)
1299 {
1300   unsigned HOST_WIDE_INT l;
1301   unsigned int max_size;
1302   HOST_WIDE_INT offset = 0;
1303   enum insn_code icode;
1304   int reverse;
1305   /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it.  */
1306   rtx cst ATTRIBUTE_UNUSED;
1307 
1308   if (len == 0)
1309     return 1;
1310 
1311   if (!targetm.use_by_pieces_infrastructure_p (len, align,
1312 					       memsetp
1313 						 ? SET_BY_PIECES
1314 						 : STORE_BY_PIECES,
1315 					       optimize_insn_for_speed_p ()))
1316     return 0;
1317 
1318   align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
1319 
1320   /* We would first store what we can in the largest integer mode, then go to
1321      successively smaller modes.  */
1322 
1323   for (reverse = 0;
1324        reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
1325        reverse++)
1326     {
1327       l = len;
1328       max_size = STORE_MAX_PIECES + 1;
1329       while (max_size > 1 && l > 0)
1330 	{
1331 	  scalar_int_mode mode = widest_int_mode_for_size (max_size);
1332 
1333 	  icode = optab_handler (mov_optab, mode);
1334 	  if (icode != CODE_FOR_nothing
1335 	      && align >= GET_MODE_ALIGNMENT (mode))
1336 	    {
1337 	      unsigned int size = GET_MODE_SIZE (mode);
1338 
1339 	      while (l >= size)
1340 		{
1341 		  if (reverse)
1342 		    offset -= size;
1343 
1344 		  cst = (*constfun) (constfundata, offset, mode);
1345 		  if (!targetm.legitimate_constant_p (mode, cst))
1346 		    return 0;
1347 
1348 		  if (!reverse)
1349 		    offset += size;
1350 
1351 		  l -= size;
1352 		}
1353 	    }
1354 
1355 	  max_size = GET_MODE_SIZE (mode);
1356 	}
1357 
1358       /* The code above should have handled everything.  */
1359       gcc_assert (!l);
1360     }
1361 
1362   return 1;
1363 }
1364 
1365 /* Generate several move instructions to store LEN bytes generated by
1366    CONSTFUN to block TO.  (A MEM rtx with BLKmode).  CONSTFUNDATA is a
1367    pointer which will be passed as argument in every CONSTFUN call.
1368    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1369    a memset operation and false if it's a copy of a constant string.
1370    If ENDP is 0 return to, if ENDP is 1 return memory at the end ala
1371    mempcpy, and if ENDP is 2 return memory at the end minus one byte ala
1372    stpcpy.  */
1373 
1374 rtx
1375 store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
1376 		 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1377 		 void *constfundata, unsigned int align, bool memsetp, int endp)
1378 {
1379   if (len == 0)
1380     {
1381       gcc_assert (endp != 2);
1382       return to;
1383     }
1384 
1385   gcc_assert (targetm.use_by_pieces_infrastructure_p
1386 		(len, align,
1387 		 memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
1388 		 optimize_insn_for_speed_p ()));
1389 
1390   store_by_pieces_d data (to, constfun, constfundata, len, align);
1391   data.run ();
1392 
1393   if (endp)
1394     return data.finish_endp (endp);
1395   else
1396     return to;
1397 }
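
/* Illustrative sketch (not part of the original source): clearing LEN
   bytes of a BLKmode MEM through this entry point, using a callback
   that yields zero for every piece (clear_by_pieces_1 below does the
   same thing internally).  TARGET, LEN and ALIGN are assumptions made
   for the example:

       static rtx
       zero_piece (void *, HOST_WIDE_INT, scalar_int_mode)
       {
         return const0_rtx;
       }

       if (can_store_by_pieces (len, zero_piece, NULL, align, true))
         store_by_pieces (target, len, zero_piece, NULL, align, true, 0);
   */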
1398 
1399 /* Callback routine for clear_by_pieces.
1400    Return const0_rtx unconditionally.  */
1401 
1402 static rtx
1403 clear_by_pieces_1 (void *, HOST_WIDE_INT, scalar_int_mode)
1404 {
1405   return const0_rtx;
1406 }
1407 
1408 /* Generate several move instructions to clear LEN bytes of block TO.  (A MEM
1409    rtx with BLKmode).  ALIGN is maximum alignment we can assume.  */
1410 
1411 static void
1412 clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
1413 {
1414   if (len == 0)
1415     return;
1416 
1417   store_by_pieces_d data (to, clear_by_pieces_1, NULL, len, align);
1418   data.run ();
1419 }
1420 
1421 /* Context used by compare_by_pieces.  It stores the fail label
1422    to jump to in case of miscomparison, and for branch ratios greater than 1,
1423    it stores an accumulator and the current and maximum counts before
1424    emitting another branch.  */
1425 
1426 class compare_by_pieces_d : public op_by_pieces_d
1427 {
1428   rtx_code_label *m_fail_label;
1429   rtx m_accumulator;
1430   int m_count, m_batch;
1431 
1432   void generate (rtx, rtx, machine_mode);
1433   bool prepare_mode (machine_mode, unsigned int);
1434   void finish_mode (machine_mode);
1435  public:
1436   compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
1437 		       void *op1_cfn_data, HOST_WIDE_INT len, int align,
1438 		       rtx_code_label *fail_label)
1439     : op_by_pieces_d (op0, true, op1, true, op1_cfn, op1_cfn_data, len, align)
1440   {
1441     m_fail_label = fail_label;
1442   }
1443 };
1444 
1445 /* A callback used when iterating for a compare_by_pieces operation.
1446    OP0 and OP1 are the values that have been loaded and should be
1447    compared in MODE; the accumulator and batch counts are kept in the
1448    compare_by_pieces_d object itself.  */
1449 
1450 void
1451 compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
1452 {
1453   if (m_batch > 1)
1454     {
1455       rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
1456 			       true, OPTAB_LIB_WIDEN);
1457       if (m_count != 0)
1458 	temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
1459 			     true, OPTAB_LIB_WIDEN);
1460       m_accumulator = temp;
1461 
1462       if (++m_count < m_batch)
1463 	return;
1464 
1465       m_count = 0;
1466       op0 = m_accumulator;
1467       op1 = const0_rtx;
1468       m_accumulator = NULL_RTX;
1469     }
1470   do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
1471 			   m_fail_label, profile_probability::uninitialized ());
1472 }
1473 
1474 /* Return true if MODE can be used for a set of moves and comparisons,
1475    given an alignment ALIGN.  Prepare whatever data is necessary for
1476    later calls to generate.  */
1477 
1478 bool
1479 compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1480 {
1481   insn_code icode = optab_handler (mov_optab, mode);
1482   if (icode == CODE_FOR_nothing
1483       || align < GET_MODE_ALIGNMENT (mode)
1484       || !can_compare_p (EQ, mode, ccp_jump))
1485     return false;
1486   m_batch = targetm.compare_by_pieces_branch_ratio (mode);
1487   if (m_batch < 0)
1488     return false;
1489   m_accumulator = NULL_RTX;
1490   m_count = 0;
1491   return true;
1492 }
1493 
1494 /* Called after expanding a series of comparisons in MODE.  If we have
1495    accumulated results for which we haven't emitted a branch yet, do
1496    so now.  */
1497 
1498 void
1499 compare_by_pieces_d::finish_mode (machine_mode mode)
1500 {
1501   if (m_accumulator != NULL_RTX)
1502     do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
1503 			     NULL_RTX, NULL, m_fail_label,
1504 			     profile_probability::uninitialized ());
1505 }
1506 
1507 /* Generate several move instructions to compare LEN bytes from blocks
1508    ARG0 and ARG1.  (These are MEM rtx's with BLKmode).
1509 
1510    TARGET, if suitable, receives the result: zero if the blocks are
1511    equal, nonzero otherwise; else a fresh pseudo is used and returned.
1512 
1513    ALIGN is the maximum alignment we can assume they have.
1514 
1515    Optionally, the caller can pass a constfn and associated data in A1_CFN
1516    and A1_CFN_DATA, describing that the second operand being compared is a
1517    known constant and how to obtain its data.  */
1518 
1519 static rtx
1520 compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
1521 		   rtx target, unsigned int align,
1522 		   by_pieces_constfn a1_cfn, void *a1_cfn_data)
1523 {
1524   rtx_code_label *fail_label = gen_label_rtx ();
1525   rtx_code_label *end_label = gen_label_rtx ();
1526 
1527   if (target == NULL_RTX
1528       || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
1529     target = gen_reg_rtx (TYPE_MODE (integer_type_node));
1530 
1531   compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
1532 			    fail_label);
1533 
1534   data.run ();
1535 
1536   emit_move_insn (target, const0_rtx);
1537   emit_jump (end_label);
1538   emit_barrier ();
1539   emit_label (fail_label);
1540   emit_move_insn (target, const1_rtx);
1541   emit_label (end_label);
1542 
1543   return target;
1544 }
1545 
1546 /* Emit code to move a block Y to a block X.  This may be done with
1547    string-move instructions, with multiple scalar move instructions,
1548    or with a library call.
1549 
1550    Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1551    SIZE is an rtx that says how long they are.
1552    ALIGN is the maximum alignment we can assume they have.
1553    METHOD describes what kind of copy this is, and what mechanisms may be used.
1554    MIN_SIZE is the minimal size of the block to move.
1555    MAX_SIZE is the maximal size of the block to move; if it cannot be
1556    represented in unsigned HOST_WIDE_INT, it is a mask of all ones.
1557 
1558    Return the address of the new block, if memcpy is called and returns it,
1559    0 otherwise.  */
1560 
1561 rtx
1562 emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
1563 		       unsigned int expected_align, HOST_WIDE_INT expected_size,
1564 		       unsigned HOST_WIDE_INT min_size,
1565 		       unsigned HOST_WIDE_INT max_size,
1566 		       unsigned HOST_WIDE_INT probable_max_size)
1567 {
1568   int may_use_call;
1569   rtx retval = 0;
1570   unsigned int align;
1571 
1572   gcc_assert (size);
1573   if (CONST_INT_P (size) && INTVAL (size) == 0)
1574     return 0;
1575 
1576   switch (method)
1577     {
1578     case BLOCK_OP_NORMAL:
1579     case BLOCK_OP_TAILCALL:
1580       may_use_call = 1;
1581       break;
1582 
1583     case BLOCK_OP_CALL_PARM:
1584       may_use_call = block_move_libcall_safe_for_call_parm ();
1585 
1586       /* Make inhibit_defer_pop nonzero around the library call
1587 	 to force it to pop the arguments right away.  */
1588       NO_DEFER_POP;
1589       break;
1590 
1591     case BLOCK_OP_NO_LIBCALL:
1592       may_use_call = 0;
1593       break;
1594 
1595     case BLOCK_OP_NO_LIBCALL_RET:
1596       may_use_call = -1;
1597       break;
1598 
1599     default:
1600       gcc_unreachable ();
1601     }
1602 
1603   gcc_assert (MEM_P (x) && MEM_P (y));
1604   align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1605   gcc_assert (align >= BITS_PER_UNIT);
1606 
1607   /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1608      block copy is more efficient for other large modes, e.g. DCmode.  */
1609   x = adjust_address (x, BLKmode, 0);
1610   y = adjust_address (y, BLKmode, 0);
1611 
1612   /* Set MEM_SIZE as appropriate for this block copy.  The main place this
1613      can be incorrect is coming from __builtin_memcpy.  */
1614   if (CONST_INT_P (size))
1615     {
1616       x = shallow_copy_rtx (x);
1617       y = shallow_copy_rtx (y);
1618       set_mem_size (x, INTVAL (size));
1619       set_mem_size (y, INTVAL (size));
1620     }
1621 
1622   if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
1623     move_by_pieces (x, y, INTVAL (size), align, 0);
1624   else if (emit_block_move_via_movmem (x, y, size, align,
1625 				       expected_align, expected_size,
1626 				       min_size, max_size, probable_max_size))
1627     ;
1628   else if (may_use_call
1629 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
1630 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
1631     {
1632       if (may_use_call < 0)
1633 	return pc_rtx;
1634 
1635       retval = emit_block_copy_via_libcall (x, y, size,
1636 					    method == BLOCK_OP_TAILCALL);
1637     }
1638 
1639   else
1640     emit_block_move_via_loop (x, y, size, align);
1641 
1642   if (method == BLOCK_OP_CALL_PARM)
1643     OK_DEFER_POP;
1644 
1645   return retval;
1646 }
1647 
1648 rtx
1649 emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
1650 {
1651   unsigned HOST_WIDE_INT max, min = 0;
1652   if (GET_CODE (size) == CONST_INT)
1653     min = max = UINTVAL (size);
1654   else
1655     max = GET_MODE_MASK (GET_MODE (size));
1656   return emit_block_move_hints (x, y, size, method, 0, -1,
1657 				min, max, max);
1658 }
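/* Editor's usage sketch, not taken from this file; DST_ADDR and SRC_ADDR are
   hypothetical Pmode address rtxes.  A caller typically wraps the addresses
   in BLKmode MEMs, records the alignment it knows, and lets the routines
   above choose between move_by_pieces, a movmem pattern, a memcpy libcall,
   or the fallback byte loop:

     rtx dst = gen_rtx_MEM (BLKmode, dst_addr);
     rtx src = gen_rtx_MEM (BLKmode, src_addr);
     set_mem_align (dst, 4 * BITS_PER_UNIT);
     set_mem_align (src, 4 * BITS_PER_UNIT);
     emit_block_move (dst, src, GEN_INT (24), BLOCK_OP_NORMAL);
*/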
1659 
1660 /* A subroutine of emit_block_move.  Returns true if calling the
1661    block move libcall will not clobber any parameters which may have
1662    already been placed on the stack.  */
1663 
1664 static bool
1665 block_move_libcall_safe_for_call_parm (void)
1666 {
1667 #if defined (REG_PARM_STACK_SPACE)
1668   tree fn;
1669 #endif
1670 
1671   /* If arguments are pushed on the stack, then they're safe.  */
1672   if (PUSH_ARGS)
1673     return true;
1674 
1675   /* If registers go on the stack anyway, any argument is sure to clobber
1676      an outgoing argument.  */
1677 #if defined (REG_PARM_STACK_SPACE)
1678   fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1679   /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1680      depend on its argument.  */
1681   (void) fn;
1682   if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
1683       && REG_PARM_STACK_SPACE (fn) != 0)
1684     return false;
1685 #endif
1686 
1687   /* If any argument goes in memory, then it might clobber an outgoing
1688      argument.  */
1689   {
1690     CUMULATIVE_ARGS args_so_far_v;
1691     cumulative_args_t args_so_far;
1692     tree fn, arg;
1693 
1694     fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1695     INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
1696     args_so_far = pack_cumulative_args (&args_so_far_v);
1697 
1698     arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
1699     for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
1700       {
1701 	machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
1702 	rtx tmp = targetm.calls.function_arg (args_so_far, mode,
1703 					      NULL_TREE, true);
1704 	if (!tmp || !REG_P (tmp))
1705 	  return false;
1706 	if (targetm.calls.arg_partial_bytes (args_so_far, mode, NULL, 1))
1707 	  return false;
1708 	targetm.calls.function_arg_advance (args_so_far, mode,
1709 					    NULL_TREE, true);
1710       }
1711   }
1712   return true;
1713 }
1714 
1715 /* A subroutine of emit_block_move.  Expand a movmem pattern;
1716    return true if successful.  */
1717 
1718 static bool
1719 emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
1720 			    unsigned int expected_align, HOST_WIDE_INT expected_size,
1721 			    unsigned HOST_WIDE_INT min_size,
1722 			    unsigned HOST_WIDE_INT max_size,
1723 			    unsigned HOST_WIDE_INT probable_max_size)
1724 {
1725   int save_volatile_ok = volatile_ok;
1726 
1727   if (expected_align < align)
1728     expected_align = align;
1729   if (expected_size != -1)
1730     {
1731       if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
1732 	expected_size = probable_max_size;
1733       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
1734 	expected_size = min_size;
1735     }
1736 
1737   /* Since this is a move insn, we don't care about volatility.  */
1738   volatile_ok = 1;
1739 
1740   /* Try the most limited insn first, because there's no point
1741      including more than one in the machine description unless
1742      the more limited one has some advantage.  */
1743 
1744   opt_scalar_int_mode mode_iter;
1745   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
1746     {
1747       scalar_int_mode mode = mode_iter.require ();
1748       enum insn_code code = direct_optab_handler (movmem_optab, mode);
1749 
1750       if (code != CODE_FOR_nothing
1751 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1752 	     here because if SIZE is less than the mode mask, as it is
1753 	     returned by the macro, it will definitely be less than the
1754 	     actual mode mask.  Since SIZE is within the Pmode address
1755 	     space, we limit MODE to Pmode.  */
1756 	  && ((CONST_INT_P (size)
1757 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
1758 		   <= (GET_MODE_MASK (mode) >> 1)))
1759 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
1760 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
1761 	{
1762 	  struct expand_operand ops[9];
1763 	  unsigned int nops;
1764 
1765 	  /* ??? When called via emit_block_move_for_call, it'd be
1766 	     nice if there were some way to inform the backend, so
1767 	     that it doesn't fail the expansion because it thinks
1768 	     emitting the libcall would be more efficient.  */
1769 	  nops = insn_data[(int) code].n_generator_args;
1770 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
1771 
1772 	  create_fixed_operand (&ops[0], x);
1773 	  create_fixed_operand (&ops[1], y);
1774 	  /* The check above guarantees that this size conversion is valid.  */
1775 	  create_convert_operand_to (&ops[2], size, mode, true);
1776 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
1777 	  if (nops >= 6)
1778 	    {
1779 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
1780 	      create_integer_operand (&ops[5], expected_size);
1781 	    }
1782 	  if (nops >= 8)
1783 	    {
1784 	      create_integer_operand (&ops[6], min_size);
1785 	      /* If we can not represent the maximal size,
1786 		 make parameter NULL.  */
1787 	      if ((HOST_WIDE_INT) max_size != -1)
1788 	        create_integer_operand (&ops[7], max_size);
1789 	      else
1790 		create_fixed_operand (&ops[7], NULL);
1791 	    }
1792 	  if (nops == 9)
1793 	    {
1794 	      /* If we can not represent the maximal size,
1795 		 make parameter NULL.  */
1796 	      if ((HOST_WIDE_INT) probable_max_size != -1)
1797 	        create_integer_operand (&ops[8], probable_max_size);
1798 	      else
1799 		create_fixed_operand (&ops[8], NULL);
1800 	    }
1801 	  if (maybe_expand_insn (code, nops, ops))
1802 	    {
1803 	      volatile_ok = save_volatile_ok;
1804 	      return true;
1805 	    }
1806 	}
1807     }
1808 
1809   volatile_ok = save_volatile_ok;
1810   return false;
1811 }
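/* Editor's note, summarizing the operand layout built above for the widest
   (9-operand) movmem form; the shorter 4-, 6- and 8-operand forms simply
   drop the trailing operands:

     ops[0] dst MEM                ops[1] src MEM               ops[2] length
     ops[3] align (bytes)          ops[4] expected_align (bytes)
     ops[5] expected_size          ops[6] min_size
     ops[7] max_size or NULL       ops[8] probable_max_size or NULL  */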
1812 
1813 /* A subroutine of emit_block_move.  Copy the data via an explicit
1814    loop.  This is used only when libcalls are forbidden.  */
1815 /* ??? It'd be nice to copy in hunks larger than QImode.  */
1816 
1817 static void
1818 emit_block_move_via_loop (rtx x, rtx y, rtx size,
1819 			  unsigned int align ATTRIBUTE_UNUSED)
1820 {
1821   rtx_code_label *cmp_label, *top_label;
1822   rtx iter, x_addr, y_addr, tmp;
1823   machine_mode x_addr_mode = get_address_mode (x);
1824   machine_mode y_addr_mode = get_address_mode (y);
1825   machine_mode iter_mode;
1826 
1827   iter_mode = GET_MODE (size);
1828   if (iter_mode == VOIDmode)
1829     iter_mode = word_mode;
1830 
1831   top_label = gen_label_rtx ();
1832   cmp_label = gen_label_rtx ();
1833   iter = gen_reg_rtx (iter_mode);
1834 
1835   emit_move_insn (iter, const0_rtx);
1836 
1837   x_addr = force_operand (XEXP (x, 0), NULL_RTX);
1838   y_addr = force_operand (XEXP (y, 0), NULL_RTX);
1839   do_pending_stack_adjust ();
1840 
1841   emit_jump (cmp_label);
1842   emit_label (top_label);
1843 
1844   tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
1845   x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
1846 
1847   if (x_addr_mode != y_addr_mode)
1848     tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
1849   y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
1850 
1851   x = change_address (x, QImode, x_addr);
1852   y = change_address (y, QImode, y_addr);
1853 
1854   emit_move_insn (x, y);
1855 
1856   tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
1857 			     true, OPTAB_LIB_WIDEN);
1858   if (tmp != iter)
1859     emit_move_insn (iter, tmp);
1860 
1861   emit_label (cmp_label);
1862 
1863   emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
1864 			   true, top_label,
1865 			   profile_probability::guessed_always ()
1866 				.apply_scale (9, 10));
1867 }
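/* Editor's sketch: the RTL loop emitted above is equivalent to this
   byte-wise C copy (hence the ??? wish above for hunks wider than QImode),
   with x and y standing for the two blocks and size for the byte count:

     for (iter = 0; iter < size; iter++)
       x[iter] = y[iter];
*/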
1868 
1869 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1870    TAILCALL is true if this is a tail call.  */
1871 
1872 rtx
1873 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
1874 			   rtx size, bool tailcall)
1875 {
1876   rtx dst_addr, src_addr;
1877   tree call_expr, dst_tree, src_tree, size_tree;
1878   machine_mode size_mode;
1879 
1880   /* Since dst and src are passed to a libcall, mark the corresponding
1881      tree EXPR as addressable.  */
1882   tree dst_expr = MEM_EXPR (dst);
1883   tree src_expr = MEM_EXPR (src);
1884   if (dst_expr)
1885     mark_addressable (dst_expr);
1886   if (src_expr)
1887     mark_addressable (src_expr);
1888 
1889   dst_addr = copy_addr_to_reg (XEXP (dst, 0));
1890   dst_addr = convert_memory_address (ptr_mode, dst_addr);
1891   dst_tree = make_tree (ptr_type_node, dst_addr);
1892 
1893   src_addr = copy_addr_to_reg (XEXP (src, 0));
1894   src_addr = convert_memory_address (ptr_mode, src_addr);
1895   src_tree = make_tree (ptr_type_node, src_addr);
1896 
1897   size_mode = TYPE_MODE (sizetype);
1898   size = convert_to_mode (size_mode, size, 1);
1899   size = copy_to_mode_reg (size_mode, size);
1900   size_tree = make_tree (sizetype, size);
1901 
1902   /* It is incorrect to use the libcall calling conventions for calls to
1903      memcpy/memmove/memcmp because they can be provided by the user.  */
1904   tree fn = builtin_decl_implicit (fncode);
1905   call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
1906   CALL_EXPR_TAILCALL (call_expr) = tailcall;
1907 
1908   return expand_call (call_expr, NULL_RTX, false);
1909 }
1910 
1911 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1912    ARG3_TYPE is the type of ARG3_RTX.  Return the result rtx on success,
1913    otherwise return null.  */
1914 
1915 rtx
1916 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
1917 			  rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
1918 			  HOST_WIDE_INT align)
1919 {
1920   machine_mode insn_mode = insn_data[icode].operand[0].mode;
1921 
1922   if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
1923     target = NULL_RTX;
1924 
1925   struct expand_operand ops[5];
1926   create_output_operand (&ops[0], target, insn_mode);
1927   create_fixed_operand (&ops[1], arg1_rtx);
1928   create_fixed_operand (&ops[2], arg2_rtx);
1929   create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
1930 			       TYPE_UNSIGNED (arg3_type));
1931   create_integer_operand (&ops[4], align);
1932   if (maybe_expand_insn (icode, 5, ops))
1933     return ops[0].value;
1934   return NULL_RTX;
1935 }
1936 
1937 /* Expand a block compare between X and Y with length LEN using the
1938    cmpmem optab, placing the result in TARGET.  LEN_TYPE is the type
1939    of the expression that was used to calculate the length.  ALIGN
1940    gives the known minimum common alignment.  */
1941 
1942 static rtx
1943 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
1944 			   unsigned align)
1945 {
1946   /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1947      implementing memcmp because it will stop if it encounters two
1948      zero bytes.  */
1949   insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
1950 
1951   if (icode == CODE_FOR_nothing)
1952     return NULL_RTX;
1953 
1954   return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
1955 }
1956 
1957 /* Emit code to compare a block Y to a block X.  This may be done with
1958    string-compare instructions, with multiple scalar instructions,
1959    or with a library call.
1960 
1961    Both X and Y must be MEM rtx's.  LEN is an rtx that says how long
1962    they are.  LEN_TYPE is the type of the expression that was used to
1963    calculate it.
1964 
1965    If EQUALITY_ONLY is true, it means we don't have to return the tri-state
1966    value of a normal memcmp call; instead we can just compare for equality,
1967    which allows the cheaper compare_by_pieces expansion.  No libcall is
1968    emitted here; if both strategies fail, the caller must call memcmp itself.
1969 
1970    Optionally, the caller can pass a constfn and associated data in Y_CFN
1971    and Y_CFN_DATA, describing that the second operand being compared is a
1972    known constant and how to obtain its data.
1973    Return the result of the comparison, or NULL_RTX if we failed to
1974    perform the operation.  */
1975 
1976 rtx
1977 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
1978 		      bool equality_only, by_pieces_constfn y_cfn,
1979 		      void *y_cfndata)
1980 {
1981   rtx result = 0;
1982 
1983   if (CONST_INT_P (len) && INTVAL (len) == 0)
1984     return const0_rtx;
1985 
1986   gcc_assert (MEM_P (x) && MEM_P (y));
1987   unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1988   gcc_assert (align >= BITS_PER_UNIT);
1989 
1990   x = adjust_address (x, BLKmode, 0);
1991   y = adjust_address (y, BLKmode, 0);
1992 
1993   if (equality_only
1994       && CONST_INT_P (len)
1995       && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
1996     result = compare_by_pieces (x, y, INTVAL (len), target, align,
1997 				y_cfn, y_cfndata);
1998   else
1999     result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
2000 
2001   return result;
2002 }
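/* Editor's illustration (hypothetical source code, not from this file): for
   an equality-only use such as

     if (memcmp (buf, key, 16) == 0) ...

   with a constant length and sufficient alignment, the path above expands
   compare_by_pieces inline; otherwise it tries the cmpmem pattern, and on
   failure returns NULL_RTX so the caller can fall back to a memcmp call.  */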
2003 
2004 /* Copy all or part of a value X into registers starting at REGNO.
2005    The number of registers to be filled is NREGS.  */
2006 
2007 void
2008 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2009 {
2010   if (nregs == 0)
2011     return;
2012 
2013   if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2014     x = validize_mem (force_const_mem (mode, x));
2015 
2016   /* See if the machine can do this with a load multiple insn.  */
2017   if (targetm.have_load_multiple ())
2018     {
2019       rtx_insn *last = get_last_insn ();
2020       rtx first = gen_rtx_REG (word_mode, regno);
2021       if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2022 						     GEN_INT (nregs)))
2023 	{
2024 	  emit_insn (pat);
2025 	  return;
2026 	}
2027       else
2028 	delete_insns_since (last);
2029     }
2030 
2031   for (int i = 0; i < nregs; i++)
2032     emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2033 		    operand_subword_force (x, i, mode));
2034 }
2035 
2036 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2037    The number of registers to be filled is NREGS.  */
2038 
2039 void
2040 move_block_from_reg (int regno, rtx x, int nregs)
2041 {
2042   if (nregs == 0)
2043     return;
2044 
2045   /* See if the machine can do this with a store multiple insn.  */
2046   if (targetm.have_store_multiple ())
2047     {
2048       rtx_insn *last = get_last_insn ();
2049       rtx first = gen_rtx_REG (word_mode, regno);
2050       if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2051 						      GEN_INT (nregs)))
2052 	{
2053 	  emit_insn (pat);
2054 	  return;
2055 	}
2056       else
2057 	delete_insns_since (last);
2058     }
2059 
2060   for (int i = 0; i < nregs; i++)
2061     {
2062       rtx tem = operand_subword (x, i, 1, BLKmode);
2063 
2064       gcc_assert (tem);
2065 
2066       emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2067     }
2068 }
2069 
2070 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2071    ORIG, where ORIG is a non-consecutive group of registers represented by
2072    a PARALLEL.  The clone is identical to the original except in that the
2073    original set of registers is replaced by a new set of pseudo registers.
2074    The new set has the same modes as the original set.  */
2075 
2076 rtx
2077 gen_group_rtx (rtx orig)
2078 {
2079   int i, length;
2080   rtx *tmps;
2081 
2082   gcc_assert (GET_CODE (orig) == PARALLEL);
2083 
2084   length = XVECLEN (orig, 0);
2085   tmps = XALLOCAVEC (rtx, length);
2086 
2087   /* Skip a NULL entry in first slot.  */
2088   i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2089 
2090   if (i)
2091     tmps[0] = 0;
2092 
2093   for (; i < length; i++)
2094     {
2095       machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2096       rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2097 
2098       tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2099     }
2100 
2101   return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2102 }
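/* Editor's note on the data structure: a register group PARALLEL such as

     (parallel [(expr_list (reg:DI 3) (const_int 0))
                (expr_list (reg:DI 4) (const_int 8))])

   says bytes 0-7 of the value live in r3 and bytes 8-15 in r4 (illustrative
   hard register numbers only).  gen_group_rtx returns the same shape with
   each hard register replaced by a fresh pseudo of the same mode.  */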
2103 
2104 /* A subroutine of emit_group_load.  Arguments as for emit_group_load,
2105    except that values are placed in TMPS[i], and must later be moved
2106    into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
2107 
2108 static void
2109 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2110 		   poly_int64 ssize)
2111 {
2112   rtx src;
2113   int start, i;
2114   machine_mode m = GET_MODE (orig_src);
2115 
2116   gcc_assert (GET_CODE (dst) == PARALLEL);
2117 
2118   if (m != VOIDmode
2119       && !SCALAR_INT_MODE_P (m)
2120       && !MEM_P (orig_src)
2121       && GET_CODE (orig_src) != CONCAT)
2122     {
2123       scalar_int_mode imode;
2124       if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2125 	{
2126 	  src = gen_reg_rtx (imode);
2127 	  emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2128 	}
2129       else
2130 	{
2131 	  src = assign_stack_temp (GET_MODE (orig_src), ssize);
2132 	  emit_move_insn (src, orig_src);
2133 	}
2134       emit_group_load_1 (tmps, dst, src, type, ssize);
2135       return;
2136     }
2137 
2138   /* Check for a NULL entry, used to indicate that the parameter goes
2139      both on the stack and in registers.  */
2140   if (XEXP (XVECEXP (dst, 0, 0), 0))
2141     start = 0;
2142   else
2143     start = 1;
2144 
2145   /* Process the pieces.  */
2146   for (i = start; i < XVECLEN (dst, 0); i++)
2147     {
2148       machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2149       poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
2150       poly_int64 bytelen = GET_MODE_SIZE (mode);
2151       poly_int64 shift = 0;
2152 
2153       /* Handle trailing fragments that run over the size of the struct.
2154 	 It's the target's responsibility to make sure that the fragment
2155 	 cannot be strictly smaller in some cases and strictly larger
2156 	 in others.  */
2157       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2158       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2159 	{
2160 	  /* Arrange to shift the fragment to where it belongs.
2161 	     extract_bit_field loads to the lsb of the reg.  */
2162 	  if (
2163 #ifdef BLOCK_REG_PADDING
2164 	      BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2165 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2166 #else
2167 	      BYTES_BIG_ENDIAN
2168 #endif
2169 	      )
2170 	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2171 	  bytelen = ssize - bytepos;
2172 	  gcc_assert (maybe_gt (bytelen, 0));
2173 	}
2174 
2175       /* If we won't be loading directly from memory, protect the real source
2176 	 from strange tricks we might play; but make sure that the source can
2177 	 be loaded directly into the destination.  */
2178       src = orig_src;
2179       if (!MEM_P (orig_src)
2180 	  && (!CONSTANT_P (orig_src)
2181 	      || (GET_MODE (orig_src) != mode
2182 		  && GET_MODE (orig_src) != VOIDmode)))
2183 	{
2184 	  if (GET_MODE (orig_src) == VOIDmode)
2185 	    src = gen_reg_rtx (mode);
2186 	  else
2187 	    src = gen_reg_rtx (GET_MODE (orig_src));
2188 
2189 	  emit_move_insn (src, orig_src);
2190 	}
2191 
2192       /* Optimize the access just a bit.  */
2193       if (MEM_P (src)
2194 	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2195 	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2196 	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2197 	  && known_eq (bytelen, GET_MODE_SIZE (mode)))
2198 	{
2199 	  tmps[i] = gen_reg_rtx (mode);
2200 	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2201 	}
2202       else if (COMPLEX_MODE_P (mode)
2203 	       && GET_MODE (src) == mode
2204 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2205 	/* Let emit_move_complex do the bulk of the work.  */
2206 	tmps[i] = src;
2207       else if (GET_CODE (src) == CONCAT)
2208 	{
2209 	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2210 	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2211 	  unsigned int elt;
2212 	  poly_int64 subpos;
2213 
2214 	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2215 	      && known_le (subpos + bytelen, slen0))
2216 	    {
2217 	      /* The following assumes that the concatenated objects all
2218 		 have the same size.  In this case, a simple calculation
2219 		 can be used to determine the object and the bit field
2220 		 to be extracted.  */
2221 	      tmps[i] = XEXP (src, elt);
2222 	      if (maybe_ne (subpos, 0)
2223 		  || maybe_ne (subpos + bytelen, slen0)
2224 		  || (!CONSTANT_P (tmps[i])
2225 		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2226 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2227 					     subpos * BITS_PER_UNIT,
2228 					     1, NULL_RTX, mode, mode, false,
2229 					     NULL);
2230 	    }
2231 	  else
2232 	    {
2233 	      rtx mem;
2234 
2235 	      gcc_assert (known_eq (bytepos, 0));
2236 	      mem = assign_stack_temp (GET_MODE (src), slen);
2237 	      emit_move_insn (mem, src);
2238 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2239 					   0, 1, NULL_RTX, mode, mode, false,
2240 					   NULL);
2241 	    }
2242 	}
2243       /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2244 	 SIMD register, which is currently broken.  Until we get GCC
2245 	 to emit proper RTL for these cases, let's dump to memory.  */
2246       else if (VECTOR_MODE_P (GET_MODE (dst))
2247 	       && REG_P (src))
2248 	{
2249 	  poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
2250 	  rtx mem;
2251 
2252 	  mem = assign_stack_temp (GET_MODE (src), slen);
2253 	  emit_move_insn (mem, src);
2254 	  tmps[i] = adjust_address (mem, mode, bytepos);
2255 	}
2256       else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2257                && XVECLEN (dst, 0) > 1)
2258         tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2259       else if (CONSTANT_P (src))
2260 	{
2261 	  if (known_eq (bytelen, ssize))
2262 	    tmps[i] = src;
2263 	  else
2264 	    {
2265 	      rtx first, second;
2266 
2267 	      /* TODO: const_wide_int can have sizes other than this...  */
2268 	      gcc_assert (known_eq (2 * bytelen, ssize));
2269 	      split_double (src, &first, &second);
2270 	      if (i)
2271 		tmps[i] = second;
2272 	      else
2273 		tmps[i] = first;
2274 	    }
2275 	}
2276       else if (REG_P (src) && GET_MODE (src) == mode)
2277 	tmps[i] = src;
2278       else
2279 	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2280 				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2281 				     mode, mode, false, NULL);
2282 
2283       if (maybe_ne (shift, 0))
2284 	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2285 				shift, tmps[i], 0);
2286     }
2287 }
2288 
2289 /* Emit code to move a block SRC of type TYPE to a block DST,
2290    where DST is non-consecutive registers represented by a PARALLEL.
2291    SSIZE represents the total size of block ORIG_SRC in bytes, or -1
2292    if not known.  */
2293 
2294 void
2295 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2296 {
2297   rtx *tmps;
2298   int i;
2299 
2300   tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2301   emit_group_load_1 (tmps, dst, src, type, ssize);
2302 
2303   /* Copy the extracted pieces into the proper (probable) hard regs.  */
2304   for (i = 0; i < XVECLEN (dst, 0); i++)
2305     {
2306       rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2307       if (d == NULL)
2308 	continue;
2309       emit_move_insn (d, tmps[i]);
2310     }
2311 }
2312 
2313 /* Similar, but load SRC into new pseudos in a format that looks like
2314    PARALLEL.  This can later be fed to emit_group_move to get things
2315    in the right place.  */
2316 
2317 rtx
2318 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2319 {
2320   rtvec vec;
2321   int i;
2322 
2323   vec = rtvec_alloc (XVECLEN (parallel, 0));
2324   emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2325 
2326   /* Convert the vector to look just like the original PARALLEL, except
2327      with the computed values.  */
2328   for (i = 0; i < XVECLEN (parallel, 0); i++)
2329     {
2330       rtx e = XVECEXP (parallel, 0, i);
2331       rtx d = XEXP (e, 0);
2332 
2333       if (d)
2334 	{
2335 	  d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2336 	  e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2337 	}
2338       RTVEC_ELT (vec, i) = e;
2339     }
2340 
2341   return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2342 }
2343 
2344 /* Emit code to move a block SRC to block DST, where SRC and DST are
2345    non-consecutive groups of registers, each represented by a PARALLEL.  */
2346 
2347 void
2348 emit_group_move (rtx dst, rtx src)
2349 {
2350   int i;
2351 
2352   gcc_assert (GET_CODE (src) == PARALLEL
2353 	      && GET_CODE (dst) == PARALLEL
2354 	      && XVECLEN (src, 0) == XVECLEN (dst, 0));
2355 
2356   /* Skip first entry if NULL.  */
2357   for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2358     emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2359 		    XEXP (XVECEXP (src, 0, i), 0));
2360 }
2361 
2362 /* Move a group of registers represented by a PARALLEL into pseudos.  */
2363 
2364 rtx
2365 emit_group_move_into_temps (rtx src)
2366 {
2367   rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2368   int i;
2369 
2370   for (i = 0; i < XVECLEN (src, 0); i++)
2371     {
2372       rtx e = XVECEXP (src, 0, i);
2373       rtx d = XEXP (e, 0);
2374 
2375       if (d)
2376 	e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2377       RTVEC_ELT (vec, i) = e;
2378     }
2379 
2380   return gen_rtx_PARALLEL (GET_MODE (src), vec);
2381 }
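/* Editor's usage sketch (hypothetical variable names): a caller that must
   delay clobbering the real registers can park the group in pseudos first
   and move it back later, since the two PARALLELs have the same layout:

     rtx temps = emit_group_move_into_temps (arg_group);
     ... emit other setup code that may clobber argument registers ...
     emit_group_move (arg_group, temps);
*/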
2382 
2383 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2384    where SRC is non-consecutive registers represented by a PARALLEL.
2385    SSIZE represents the total size of block ORIG_DST, or -1 if not
2386    known.  */
2387 
2388 void
2389 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2390 		  poly_int64 ssize)
2391 {
2392   rtx *tmps, dst;
2393   int start, finish, i;
2394   machine_mode m = GET_MODE (orig_dst);
2395 
2396   gcc_assert (GET_CODE (src) == PARALLEL);
2397 
2398   if (!SCALAR_INT_MODE_P (m)
2399       && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2400     {
2401       scalar_int_mode imode;
2402       if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2403 	{
2404 	  dst = gen_reg_rtx (imode);
2405 	  emit_group_store (dst, src, type, ssize);
2406 	  dst = gen_lowpart (GET_MODE (orig_dst), dst);
2407 	}
2408       else
2409 	{
2410 	  dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2411 	  emit_group_store (dst, src, type, ssize);
2412 	}
2413       emit_move_insn (orig_dst, dst);
2414       return;
2415     }
2416 
2417   /* Check for a NULL entry, used to indicate that the parameter goes
2418      both on the stack and in registers.  */
2419   if (XEXP (XVECEXP (src, 0, 0), 0))
2420     start = 0;
2421   else
2422     start = 1;
2423   finish = XVECLEN (src, 0);
2424 
2425   tmps = XALLOCAVEC (rtx, finish);
2426 
2427   /* Copy the (probable) hard regs into pseudos.  */
2428   for (i = start; i < finish; i++)
2429     {
2430       rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2431       if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2432 	{
2433 	  tmps[i] = gen_reg_rtx (GET_MODE (reg));
2434 	  emit_move_insn (tmps[i], reg);
2435 	}
2436       else
2437 	tmps[i] = reg;
2438     }
2439 
2440   /* If we won't be storing directly into memory, protect the real destination
2441      from strange tricks we might play.  */
2442   dst = orig_dst;
2443   if (GET_CODE (dst) == PARALLEL)
2444     {
2445       rtx temp;
2446 
2447       /* We can get a PARALLEL dst if there is a conditional expression in
2448 	 a return statement.  In that case, the dst and src are the same,
2449 	 so no action is necessary.  */
2450       if (rtx_equal_p (dst, src))
2451 	return;
2452 
2453       /* It is unclear if we can ever reach here, but we may as well handle
2454 	 it.  Allocate a temporary, and split this into a store/load to/from
2455 	 the temporary.  */
2456       temp = assign_stack_temp (GET_MODE (dst), ssize);
2457       emit_group_store (temp, src, type, ssize);
2458       emit_group_load (dst, temp, type, ssize);
2459       return;
2460     }
2461   else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2462     {
2463       machine_mode outer = GET_MODE (dst);
2464       machine_mode inner;
2465       poly_int64 bytepos;
2466       bool done = false;
2467       rtx temp;
2468 
2469       if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2470 	dst = gen_reg_rtx (outer);
2471 
2472       /* Make life a bit easier for combine.  */
2473       /* If the first element of the vector is the low part
2474 	 of the destination mode, use a paradoxical subreg to
2475 	 initialize the destination.  */
2476       if (start < finish)
2477 	{
2478 	  inner = GET_MODE (tmps[start]);
2479 	  bytepos = subreg_lowpart_offset (inner, outer);
2480 	  if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, start), 1)), bytepos))
2481 	    {
2482 	      temp = simplify_gen_subreg (outer, tmps[start],
2483 					  inner, 0);
2484 	      if (temp)
2485 		{
2486 		  emit_move_insn (dst, temp);
2487 		  done = true;
2488 		  start++;
2489 		}
2490 	    }
2491 	}
2492 
2493       /* If the first element wasn't the low part, try the last.  */
2494       if (!done
2495 	  && start < finish - 1)
2496 	{
2497 	  inner = GET_MODE (tmps[finish - 1]);
2498 	  bytepos = subreg_lowpart_offset (inner, outer);
2499 	  if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)),
2500 			bytepos))
2501 	    {
2502 	      temp = simplify_gen_subreg (outer, tmps[finish - 1],
2503 					  inner, 0);
2504 	      if (temp)
2505 		{
2506 		  emit_move_insn (dst, temp);
2507 		  done = true;
2508 		  finish--;
2509 		}
2510 	    }
2511 	}
2512 
2513       /* Otherwise, simply initialize the result to zero.  */
2514       if (!done)
2515         emit_move_insn (dst, CONST0_RTX (outer));
2516     }
2517 
2518   /* Process the pieces.  */
2519   for (i = start; i < finish; i++)
2520     {
2521       poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
2522       machine_mode mode = GET_MODE (tmps[i]);
2523       poly_int64 bytelen = GET_MODE_SIZE (mode);
2524       poly_uint64 adj_bytelen;
2525       rtx dest = dst;
2526 
2527       /* Handle trailing fragments that run over the size of the struct.
2528 	 It's the target's responsibility to make sure that the fragment
2529 	 cannot be strictly smaller in some cases and strictly larger
2530 	 in others.  */
2531       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2532       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2533 	adj_bytelen = ssize - bytepos;
2534       else
2535 	adj_bytelen = bytelen;
2536 
2537       if (GET_CODE (dst) == CONCAT)
2538 	{
2539 	  if (known_le (bytepos + adj_bytelen,
2540 			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2541 	    dest = XEXP (dst, 0);
2542 	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2543 	    {
2544 	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2545 	      dest = XEXP (dst, 1);
2546 	    }
2547 	  else
2548 	    {
2549 	      machine_mode dest_mode = GET_MODE (dest);
2550 	      machine_mode tmp_mode = GET_MODE (tmps[i]);
2551 
2552 	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2553 
2554 	      if (GET_MODE_ALIGNMENT (dest_mode)
2555 		  >= GET_MODE_ALIGNMENT (tmp_mode))
2556 		{
2557 		  dest = assign_stack_temp (dest_mode,
2558 					    GET_MODE_SIZE (dest_mode));
2559 		  emit_move_insn (adjust_address (dest,
2560 						  tmp_mode,
2561 						  bytepos),
2562 				  tmps[i]);
2563 		  dst = dest;
2564 		}
2565 	      else
2566 		{
2567 		  dest = assign_stack_temp (tmp_mode,
2568 					    GET_MODE_SIZE (tmp_mode));
2569 		  emit_move_insn (dest, tmps[i]);
2570 		  dst = adjust_address (dest, dest_mode, bytepos);
2571 		}
2572 	      break;
2573 	    }
2574 	}
2575 
2576       /* Handle trailing fragments that run over the size of the struct.  */
2577       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2578 	{
2579 	  /* store_bit_field always takes its value from the lsb.
2580 	     Move the fragment to the lsb if it's not already there.  */
2581 	  if (
2582 #ifdef BLOCK_REG_PADDING
2583 	      BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2584 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2585 #else
2586 	      BYTES_BIG_ENDIAN
2587 #endif
2588 	      )
2589 	    {
2590 	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2591 	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2592 				      shift, tmps[i], 0);
2593 	    }
2594 
2595 	  /* Make sure not to write past the end of the struct.  */
2596 	  store_bit_field (dest,
2597 			   adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2598 			   bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2599 			   VOIDmode, tmps[i], false);
2600 	}
2601 
2602       /* Optimize the access just a bit.  */
2603       else if (MEM_P (dest)
2604 	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2605 		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2606 	       && multiple_p (bytepos * BITS_PER_UNIT,
2607 			      GET_MODE_ALIGNMENT (mode))
2608 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2609 	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2610 
2611       else
2612 	store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2613 			 0, 0, mode, tmps[i], false);
2614     }
2615 
2616   /* Copy from the pseudo into the (probable) hard reg.  */
2617   if (orig_dst != dst)
2618     emit_move_insn (orig_dst, dst);
2619 }
2620 
2621 /* Return a form of X that does not use a PARALLEL.  TYPE is the type
2622    of the value stored in X.  */
2623 
2624 rtx
2625 maybe_emit_group_store (rtx x, tree type)
2626 {
2627   machine_mode mode = TYPE_MODE (type);
2628   gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2629   if (GET_CODE (x) == PARALLEL)
2630     {
2631       rtx result = gen_reg_rtx (mode);
2632       emit_group_store (result, x, type, int_size_in_bytes (type));
2633       return result;
2634     }
2635   return x;
2636 }
2637 
2638 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2639 
2640    This is used on targets that return BLKmode values in registers.  */
2641 
2642 static void
2643 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2644 {
2645   unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2646   rtx src = NULL, dst = NULL;
2647   unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2648   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2649   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
2650   fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2651   fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2652   fixed_size_mode copy_mode;
2653 
2654   /* BLKmode registers created in the back-end shouldn't have survived.  */
2655   gcc_assert (mode != BLKmode);
2656 
2657   /* If the structure doesn't take up a whole number of words, see whether
2658      SRCREG is padded on the left or on the right.  If it's on the left,
2659      set PADDING_CORRECTION to the number of bits to skip.
2660 
2661      In most ABIs, the structure will be returned at the least significant end of
2662      the register, which translates to right padding on little-endian
2663      targets and left padding on big-endian targets.  The opposite
2664      holds if the structure is returned at the most significant
2665      end of the register.  */
2666   if (bytes % UNITS_PER_WORD != 0
2667       && (targetm.calls.return_in_msb (type)
2668 	  ? !BYTES_BIG_ENDIAN
2669 	  : BYTES_BIG_ENDIAN))
2670     padding_correction
2671       = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2672 
2673   /* We can use a single move if we have an exact mode for the size.  */
2674   else if (MEM_P (target)
2675 	   && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2676 	       || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2677 	   && bytes == GET_MODE_SIZE (mode))
2678   {
2679     emit_move_insn (adjust_address (target, mode, 0), srcreg);
2680     return;
2681   }
2682 
2683   /* And if we additionally have the same mode for a register.  */
2684   else if (REG_P (target)
2685 	   && GET_MODE (target) == mode
2686 	   && bytes == GET_MODE_SIZE (mode))
2687   {
2688     emit_move_insn (target, srcreg);
2689     return;
2690   }
2691 
2692   /* This code assumes srcreg is at least a full word.  If it isn't, copy it
2693      into a new pseudo which is a full word.  */
2694   if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2695     {
2696       srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2697       mode = word_mode;
2698     }
2699 
2700   /* Copy the structure BITSIZE bits at a time.  If the target lives in
2701      memory, take care of not reading/writing past its end by selecting
2702      a copy mode suited to BITSIZE.  This should always be possible given
2703      how it is computed.
2704 
2705      If the target lives in a register, make sure not to select a copy mode
2706      larger than the mode of the register.
2707 
2708      We could probably emit more efficient code for machines which do not use
2709      strict alignment, but it doesn't seem worth the effort at the current
2710      time.  */
2711 
2712   copy_mode = word_mode;
2713   if (MEM_P (target))
2714     {
2715       opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2716       if (mem_mode.exists ())
2717 	copy_mode = mem_mode.require ();
2718     }
2719   else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2720     copy_mode = tmode;
2721 
2722   for (bitpos = 0, xbitpos = padding_correction;
2723        bitpos < bytes * BITS_PER_UNIT;
2724        bitpos += bitsize, xbitpos += bitsize)
2725     {
2726       /* We need a new source operand each time xbitpos is on a
2727 	 word boundary and when xbitpos == padding_correction
2728 	 (the first time through).  */
2729       if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2730 	src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2731 
2732       /* We need a new destination operand each time bitpos is on
2733 	 a word boundary.  */
2734       if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2735 	dst = target;
2736       else if (bitpos % BITS_PER_WORD == 0)
2737 	dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2738 
2739       /* Use xbitpos for the source extraction (right justified) and
2740 	 bitpos for the destination store (left justified).  */
2741       store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2742 		       extract_bit_field (src, bitsize,
2743 					  xbitpos % BITS_PER_WORD, 1,
2744 					  NULL_RTX, copy_mode, copy_mode,
2745 					  false, NULL),
2746 		       false);
2747     }
2748 }
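/* Editor's worked example (hypothetical 32-bit big-endian target that
   returns aggregates at the least significant end of the register): for a
   6-byte struct, bytes % UNITS_PER_WORD == 2, so

     padding_correction = 32 - 2 * 8 = 16

   and the bit-field extraction above starts 16 bits into the first register
   word while the store begins at bit 0 of the destination.  */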
2749 
2750 /* Copy BLKmode value SRC into a register of mode MODE_IN.  Return the
2751    register if it contains any data, otherwise return null.
2752 
2753    This is used on targets that return BLKmode values in registers.  */
2754 
2755 rtx
2756 copy_blkmode_to_reg (machine_mode mode_in, tree src)
2757 {
2758   int i, n_regs;
2759   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
2760   unsigned int bitsize;
2761   rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
2762   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
2763   fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
2764   fixed_size_mode dst_mode;
2765 
2766   gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
2767 
2768   x = expand_normal (src);
2769 
2770   bytes = arg_int_size_in_bytes (TREE_TYPE (src));
2771   if (bytes == 0)
2772     return NULL_RTX;
2773 
2774   /* If the structure doesn't take up a whole number of words, see
2775      whether the register value should be padded on the left or on
2776      the right.  Set PADDING_CORRECTION to the number of padding
2777      bits needed on the left side.
2778 
2779      In most ABIs, the structure will be returned at the least significant end of
2780      the register, which translates to right padding on little-endian
2781      targets and left padding on big-endian targets.  The opposite
2782      holds if the structure is returned at the most significant
2783      end of the register.  */
2784   if (bytes % UNITS_PER_WORD != 0
2785       && (targetm.calls.return_in_msb (TREE_TYPE (src))
2786 	  ? !BYTES_BIG_ENDIAN
2787 	  : BYTES_BIG_ENDIAN))
2788     padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
2789 					   * BITS_PER_UNIT));
2790 
2791   n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2792   dst_words = XALLOCAVEC (rtx, n_regs);
2793   bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
2794 
2795   /* Copy the structure BITSIZE bits at a time.  */
2796   for (bitpos = 0, xbitpos = padding_correction;
2797        bitpos < bytes * BITS_PER_UNIT;
2798        bitpos += bitsize, xbitpos += bitsize)
2799     {
2800       /* We need a new destination pseudo each time xbitpos is
2801 	 on a word boundary and when xbitpos == padding_correction
2802 	 (the first time through).  */
2803       if (xbitpos % BITS_PER_WORD == 0
2804 	  || xbitpos == padding_correction)
2805 	{
2806 	  /* Generate an appropriate register.  */
2807 	  dst_word = gen_reg_rtx (word_mode);
2808 	  dst_words[xbitpos / BITS_PER_WORD] = dst_word;
2809 
2810 	  /* Clear the destination before we move anything into it.  */
2811 	  emit_move_insn (dst_word, CONST0_RTX (word_mode));
2812 	}
2813 
2814       /* We need a new source operand each time bitpos is on a word
2815 	 boundary.  */
2816       if (bitpos % BITS_PER_WORD == 0)
2817 	src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
2818 
2819       /* Use bitpos for the source extraction (left justified) and
2820 	 xbitpos for the destination store (right justified).  */
2821       store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
2822 		       0, 0, word_mode,
2823 		       extract_bit_field (src_word, bitsize,
2824 					  bitpos % BITS_PER_WORD, 1,
2825 					  NULL_RTX, word_mode, word_mode,
2826 					  false, NULL),
2827 		       false);
2828     }
2829 
2830   if (mode == BLKmode)
2831     {
2832       /* Find the smallest integer mode large enough to hold the
2833 	 entire structure.  */
2834       opt_scalar_int_mode mode_iter;
2835       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2836 	if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
2837 	  break;
2838 
2839       /* A suitable mode should have been found.  */
2840       mode = mode_iter.require ();
2841     }
2842 
2843   if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
2844     dst_mode = word_mode;
2845   else
2846     dst_mode = mode;
2847   dst = gen_reg_rtx (dst_mode);
2848 
2849   for (i = 0; i < n_regs; i++)
2850     emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
2851 
2852   if (mode != dst_mode)
2853     dst = gen_lowpart (mode, dst);
2854 
2855   return dst;
2856 }
2857 
2858 /* Add a USE expression for REG to the (possibly empty) list pointed
2859    to by CALL_FUSAGE.  REG must denote a hard register.  */
2860 
2861 void
2862 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2863 {
2864   gcc_assert (REG_P (reg));
2865 
2866   if (!HARD_REGISTER_P (reg))
2867     return;
2868 
2869   *call_fusage
2870     = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
2871 }
2872 
2873 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
2874    to by CALL_FUSAGE.  REG must denote a hard register.  */
2875 
2876 void
2877 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2878 {
2879   gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
2880 
2881   *call_fusage
2882     = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
2883 }
2884 
2885 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
2886    starting at REGNO.  All of these registers must be hard registers.  */
2887 
2888 void
2889 use_regs (rtx *call_fusage, int regno, int nregs)
2890 {
2891   int i;
2892 
2893   gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
2894 
2895   for (i = 0; i < nregs; i++)
2896     use_reg (call_fusage, regno_reg_rtx[regno + i]);
2897 }
2898 
2899 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
2900    PARALLEL REGS.  This is for calls that pass values in multiple
2901    non-contiguous locations.  The Irix 6 ABI has examples of this.  */
2902 
2903 void
2904 use_group_regs (rtx *call_fusage, rtx regs)
2905 {
2906   int i;
2907 
2908   for (i = 0; i < XVECLEN (regs, 0); i++)
2909     {
2910       rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
2911 
2912       /* A NULL entry means the parameter goes both on the stack and in
2913 	 registers.  This can also be a MEM for targets that pass values
2914 	 partially on the stack and partially in registers.  */
2915       if (reg != 0 && REG_P (reg))
2916 	use_reg (call_fusage, reg);
2917     }
2918 }
2919 
2920 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2921    assignment and the code of the expression on the RHS is CODE.  Return
2922    NULL otherwise.  */
2923 
2924 static gimple *
2925 get_def_for_expr (tree name, enum tree_code code)
2926 {
2927   gimple *def_stmt;
2928 
2929   if (TREE_CODE (name) != SSA_NAME)
2930     return NULL;
2931 
2932   def_stmt = get_gimple_for_ssa_name (name);
2933   if (!def_stmt
2934       || gimple_assign_rhs_code (def_stmt) != code)
2935     return NULL;
2936 
2937   return def_stmt;
2938 }
2939 
2940 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2941    assignment and the class of the expression on the RHS is TCLASS.  Return
2942    NULL otherwise.  */
2943 
2944 static gimple *
2945 get_def_for_expr_class (tree name, enum tree_code_class tclass)
2946 {
2947   gimple *def_stmt;
2948 
2949   if (TREE_CODE (name) != SSA_NAME)
2950     return NULL;
2951 
2952   def_stmt = get_gimple_for_ssa_name (name);
2953   if (!def_stmt
2954       || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
2955     return NULL;
2956 
2957   return def_stmt;
2958 }
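/* Editor's illustration with hypothetical SSA names, assuming the defining
   statement is eligible for forwarding into expansion: given the gimple
   assignment

     _5 = x_2 << y_3;

   get_def_for_expr (_5, LSHIFT_EXPR) returns that statement, while
   get_def_for_expr (_5, MULT_EXPR) returns NULL; likewise
   get_def_for_expr_class (_5, tcc_binary) matches but tcc_comparison
   does not.  */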
2959 
2960 /* Write zeros through the storage of OBJECT.  If OBJECT has BLKmode, SIZE is
2961    its length in bytes.  */
2962 
2963 rtx
2964 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
2965 		     unsigned int expected_align, HOST_WIDE_INT expected_size,
2966 		     unsigned HOST_WIDE_INT min_size,
2967 		     unsigned HOST_WIDE_INT max_size,
2968 		     unsigned HOST_WIDE_INT probable_max_size)
2969 {
2970   machine_mode mode = GET_MODE (object);
2971   unsigned int align;
2972 
2973   gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
2974 
2975   /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
2976      just move a zero.  Otherwise, do this a piece at a time.  */
2977   if (mode != BLKmode
2978       && CONST_INT_P (size)
2979       && known_eq (INTVAL (size), GET_MODE_SIZE (mode)))
2980     {
2981       rtx zero = CONST0_RTX (mode);
2982       if (zero != NULL)
2983 	{
2984 	  emit_move_insn (object, zero);
2985 	  return NULL;
2986 	}
2987 
2988       if (COMPLEX_MODE_P (mode))
2989 	{
2990 	  zero = CONST0_RTX (GET_MODE_INNER (mode));
2991 	  if (zero != NULL)
2992 	    {
2993 	      write_complex_part (object, zero, 0);
2994 	      write_complex_part (object, zero, 1);
2995 	      return NULL;
2996 	    }
2997 	}
2998     }
2999 
3000   if (size == const0_rtx)
3001     return NULL;
3002 
3003   align = MEM_ALIGN (object);
3004 
3005   if (CONST_INT_P (size)
3006       && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3007 						 CLEAR_BY_PIECES,
3008 						 optimize_insn_for_speed_p ()))
3009     clear_by_pieces (object, INTVAL (size), align);
3010   else if (set_storage_via_setmem (object, size, const0_rtx, align,
3011 				   expected_align, expected_size,
3012 				   min_size, max_size, probable_max_size))
3013     ;
3014   else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3015     return set_storage_via_libcall (object, size, const0_rtx,
3016 				    method == BLOCK_OP_TAILCALL);
3017   else
3018     gcc_unreachable ();
3019 
3020   return NULL;
3021 }
3022 
3023 rtx
3024 clear_storage (rtx object, rtx size, enum block_op_methods method)
3025 {
3026   unsigned HOST_WIDE_INT max, min = 0;
3027   if (GET_CODE (size) == CONST_INT)
3028     min = max = UINTVAL (size);
3029   else
3030     max = GET_MODE_MASK (GET_MODE (size));
3031   return clear_storage_hints (object, size, method, 0, -1, min, max, max);
3032 }
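/* Editor's usage sketch (OBJ is a hypothetical BLKmode MEM rtx): clearing a
   64-byte object is simply

     clear_storage (obj, GEN_INT (64), BLOCK_OP_NORMAL);

   which tries clear_by_pieces, then a setmem pattern, then a memset
   libcall, in that order, as implemented in clear_storage_hints above.  */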
3033 
3034 
3035 /* A subroutine of clear_storage.  Expand a call to memset.
3036    Return the return value of memset, 0 otherwise.  */
3037 
3038 rtx
3039 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3040 {
3041   tree call_expr, fn, object_tree, size_tree, val_tree;
3042   machine_mode size_mode;
3043 
3044   object = copy_addr_to_reg (XEXP (object, 0));
3045   object_tree = make_tree (ptr_type_node, object);
3046 
3047   if (!CONST_INT_P (val))
3048     val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3049   val_tree = make_tree (integer_type_node, val);
3050 
3051   size_mode = TYPE_MODE (sizetype);
3052   size = convert_to_mode (size_mode, size, 1);
3053   size = copy_to_mode_reg (size_mode, size);
3054   size_tree = make_tree (sizetype, size);
3055 
3056   /* It is incorrect to use the libcall calling conventions for calls to
3057      memset because it can be provided by the user.  */
3058   fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3059   call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3060   CALL_EXPR_TAILCALL (call_expr) = tailcall;
3061 
3062   return expand_call (call_expr, NULL_RTX, false);
3063 }
3064 
3065 /* Expand a setmem pattern; return true if successful.  */
3066 
3067 bool
3068 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3069 			unsigned int expected_align, HOST_WIDE_INT expected_size,
3070 			unsigned HOST_WIDE_INT min_size,
3071 			unsigned HOST_WIDE_INT max_size,
3072 			unsigned HOST_WIDE_INT probable_max_size)
3073 {
3074   /* Try the most limited insn first, because there's no point
3075      including more than one in the machine description unless
3076      the more limited one has some advantage.  */
3077 
3078   if (expected_align < align)
3079     expected_align = align;
3080   if (expected_size != -1)
3081     {
3082       if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3083 	expected_size = max_size;
3084       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3085 	expected_size = min_size;
3086     }
3087 
3088   opt_scalar_int_mode mode_iter;
3089   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3090     {
3091       scalar_int_mode mode = mode_iter.require ();
3092       enum insn_code code = direct_optab_handler (setmem_optab, mode);
3093 
3094       if (code != CODE_FOR_nothing
3095 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3096 	     here because if SIZE is less than the mode mask, as it is
3097 	     returned by the macro, it will definitely be less than the
3098 	     actual mode mask.  Since SIZE is within the Pmode address
3099 	     space, we limit MODE to Pmode.  */
3100 	  && ((CONST_INT_P (size)
3101 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
3102 		   <= (GET_MODE_MASK (mode) >> 1)))
3103 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
3104 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3105 	{
3106 	  struct expand_operand ops[9];
3107 	  unsigned int nops;
3108 
3109 	  nops = insn_data[(int) code].n_generator_args;
3110 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
3111 
3112 	  create_fixed_operand (&ops[0], object);
3113 	  /* The check above guarantees that this size conversion is valid.  */
3114 	  create_convert_operand_to (&ops[1], size, mode, true);
3115 	  create_convert_operand_from (&ops[2], val, byte_mode, true);
3116 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3117 	  if (nops >= 6)
3118 	    {
3119 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3120 	      create_integer_operand (&ops[5], expected_size);
3121 	    }
3122 	  if (nops >= 8)
3123 	    {
3124 	      create_integer_operand (&ops[6], min_size);
3125 	      /* If we cannot represent the maximal size,
3126 		 make the parameter NULL.  */
3127 	      if ((HOST_WIDE_INT) max_size != -1)
3128 	        create_integer_operand (&ops[7], max_size);
3129 	      else
3130 		create_fixed_operand (&ops[7], NULL);
3131 	    }
3132 	  if (nops == 9)
3133 	    {
3134 	      /* If we cannot represent the maximal size,
3135 		 make the parameter NULL.  */
3136 	      if ((HOST_WIDE_INT) probable_max_size != -1)
3137 	        create_integer_operand (&ops[8], probable_max_size);
3138 	      else
3139 		create_fixed_operand (&ops[8], NULL);
3140 	    }
3141 	  if (maybe_expand_insn (code, nops, ops))
3142 	    return true;
3143 	}
3144     }
3145 
3146   return false;
3147 }
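
/* For reference, a summary sketch of the operand layout passed to a
   setmem pattern by the loop above (descriptive only, not an
   additional interface):

     ops[0]  destination MEM
     ops[1]  length, converted to MODE
     ops[2]  fill value, converted to byte_mode
     ops[3]  alignment in bytes
     ops[4]  expected alignment in bytes		(nops >= 6)
     ops[5]  expected size				(nops >= 6)
     ops[6]  minimal size				(nops >= 8)
     ops[7]  maximal size, or NULL			(nops >= 8)
     ops[8]  probable maximal size, or NULL		(nops == 9)  */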
3148 
3149 
3150 /* Write to one of the components of the complex value CPLX.  Write VAL to
3151    the real part if IMAG_P is false, and the imaginary part if it's true.  */
3152 
3153 void
3154 write_complex_part (rtx cplx, rtx val, bool imag_p)
3155 {
3156   machine_mode cmode;
3157   scalar_mode imode;
3158   unsigned ibitsize;
3159 
3160   if (GET_CODE (cplx) == CONCAT)
3161     {
3162       emit_move_insn (XEXP (cplx, imag_p), val);
3163       return;
3164     }
3165 
3166   cmode = GET_MODE (cplx);
3167   imode = GET_MODE_INNER (cmode);
3168   ibitsize = GET_MODE_BITSIZE (imode);
3169 
3170   /* For MEMs simplify_gen_subreg may generate an invalid new address
3171      because, e.g., the original address is considered mode-dependent
3172      by the target, which restricts simplify_subreg from invoking
3173      adjust_address_nv.  Instead of preparing fallback support for an
3174      invalid address, we call adjust_address_nv directly.  */
3175   if (MEM_P (cplx))
3176     {
3177       emit_move_insn (adjust_address_nv (cplx, imode,
3178 					 imag_p ? GET_MODE_SIZE (imode) : 0),
3179 		      val);
3180       return;
3181     }
3182 
3183   /* If the sub-object is at least word sized, then we know that subregging
3184      will work.  This special case is important, since store_bit_field
3185      wants to operate on integer modes, and there's rarely an OImode to
3186      correspond to TCmode.  */
3187   if (ibitsize >= BITS_PER_WORD
3188       /* For hard regs we have exact predicates.  Assume we can split
3189 	 the original object if it spans an even number of hard regs.
3190 	 This special case is important for SCmode on 64-bit platforms
3191 	 where the natural size of floating-point regs is 32-bit.  */
3192       || (REG_P (cplx)
3193 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3194 	  && REG_NREGS (cplx) % 2 == 0))
3195     {
3196       rtx part = simplify_gen_subreg (imode, cplx, cmode,
3197 				      imag_p ? GET_MODE_SIZE (imode) : 0);
3198       if (part)
3199         {
3200 	  emit_move_insn (part, val);
3201 	  return;
3202 	}
3203       else
3204 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3205 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3206     }
3207 
3208   store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3209 		   false);
3210 }
3211 
3212 /* Extract one of the components of the complex value CPLX.  Extract the
3213    real part if IMAG_P is false, and the imaginary part if it's true.  */
3214 
3215 rtx
3216 read_complex_part (rtx cplx, bool imag_p)
3217 {
3218   machine_mode cmode;
3219   scalar_mode imode;
3220   unsigned ibitsize;
3221 
3222   if (GET_CODE (cplx) == CONCAT)
3223     return XEXP (cplx, imag_p);
3224 
3225   cmode = GET_MODE (cplx);
3226   imode = GET_MODE_INNER (cmode);
3227   ibitsize = GET_MODE_BITSIZE (imode);
3228 
3229   /* Special case reads from complex constants that got spilled to memory.  */
3230   if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3231     {
3232       tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3233       if (decl && TREE_CODE (decl) == COMPLEX_CST)
3234 	{
3235 	  tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3236 	  if (CONSTANT_CLASS_P (part))
3237 	    return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3238 	}
3239     }
3240 
3241   /* For MEMs simplify_gen_subreg may generate an invalid new address
3242      because, e.g., the original address is considered mode-dependent
3243      by the target, which restricts simplify_subreg from invoking
3244      adjust_address_nv.  Instead of preparing fallback support for an
3245      invalid address, we call adjust_address_nv directly.  */
3246   if (MEM_P (cplx))
3247     return adjust_address_nv (cplx, imode,
3248 			      imag_p ? GET_MODE_SIZE (imode) : 0);
3249 
3250   /* If the sub-object is at least word sized, then we know that subregging
3251      will work.  This special case is important, since extract_bit_field
3252      wants to operate on integer modes, and there's rarely an OImode to
3253      correspond to TCmode.  */
3254   if (ibitsize >= BITS_PER_WORD
3255       /* For hard regs we have exact predicates.  Assume we can split
3256 	 the original object if it spans an even number of hard regs.
3257 	 This special case is important for SCmode on 64-bit platforms
3258 	 where the natural size of floating-point regs is 32-bit.  */
3259       || (REG_P (cplx)
3260 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3261 	  && REG_NREGS (cplx) % 2 == 0))
3262     {
3263       rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3264 				     imag_p ? GET_MODE_SIZE (imode) : 0);
3265       if (ret)
3266         return ret;
3267       else
3268 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3269 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3270     }
3271 
3272   return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3273 			    true, NULL_RTX, imode, imode, false, NULL);
3274 }
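
/* Illustrative sketch, not part of GCC itself: build an SCmode value
   in a fresh pseudo from two SFmode parts and read the imaginary part
   back.  RE and IM are hypothetical SFmode rtxes.  */
#if 0
static rtx
example_complex_parts (rtx re, rtx im)
{
  rtx c = gen_reg_rtx (SCmode);
  write_complex_part (c, re, false);	/* real part */
  write_complex_part (c, im, true);	/* imaginary part */
  return read_complex_part (c, true);	/* read the imaginary part back */
}
#endif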
3275 
3276 /* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
3277    NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
3278    represented in NEW_MODE.  If FORCE is true, this will never happen, as
3279    we'll force-create a SUBREG if needed.  */
3280 
3281 static rtx
3282 emit_move_change_mode (machine_mode new_mode,
3283 		       machine_mode old_mode, rtx x, bool force)
3284 {
3285   rtx ret;
3286 
3287   if (push_operand (x, GET_MODE (x)))
3288     {
3289       ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3290       MEM_COPY_ATTRIBUTES (ret, x);
3291     }
3292   else if (MEM_P (x))
3293     {
3294       /* We don't have to worry about changing the address since the
3295 	 size in bytes is supposed to be the same.  */
3296       if (reload_in_progress)
3297 	{
3298 	  /* Copy the MEM to change the mode and move any
3299 	     substitutions from the old MEM to the new one.  */
3300 	  ret = adjust_address_nv (x, new_mode, 0);
3301 	  copy_replacements (x, ret);
3302 	}
3303       else
3304 	ret = adjust_address (x, new_mode, 0);
3305     }
3306   else
3307     {
3308       /* Note that we do want simplify_subreg's behavior of validating
3309 	 that the new mode is ok for a hard register.  If we were to use
3310 	 simplify_gen_subreg, we would create the subreg, but would
3311 	 probably run into the target not being able to implement it.  */
3312       /* Except, of course, when FORCE is true, when this is exactly what
3313 	 we want.  Which is needed for CCmodes on some targets.  */
3314       if (force)
3315 	ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3316       else
3317 	ret = simplify_subreg (new_mode, x, old_mode, 0);
3318     }
3319 
3320   return ret;
3321 }
3322 
3323 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
3324    an integer mode of the same size as MODE.  Returns the instruction
3325    emitted, or NULL if such a move could not be generated.  */
3326 
3327 static rtx_insn *
3328 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3329 {
3330   scalar_int_mode imode;
3331   enum insn_code code;
3332 
3333   /* There must exist a mode of the exact size we require.  */
3334   if (!int_mode_for_mode (mode).exists (&imode))
3335     return NULL;
3336 
3337   /* The target must support moves in this mode.  */
3338   code = optab_handler (mov_optab, imode);
3339   if (code == CODE_FOR_nothing)
3340     return NULL;
3341 
3342   x = emit_move_change_mode (imode, mode, x, force);
3343   if (x == NULL_RTX)
3344     return NULL;
3345   y = emit_move_change_mode (imode, mode, y, force);
3346   if (y == NULL_RTX)
3347     return NULL;
3348   return emit_insn (GEN_FCN (code) (x, y));
3349 }
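
/* Illustrative sketch, not part of GCC itself: if a target had no
   DFmode move pattern but did have a DImode one, the helper above
   could perform the copy as an integer move of the same bits.  X and
   Y are hypothetical DFmode operands; NULL is returned if either
   operand cannot be expressed in DImode.  */
#if 0
static rtx_insn *
example_move_df_via_di (rtx x, rtx y)
{
  return emit_move_via_integer (DFmode, x, y, false);
}
#endif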
3350 
3351 /* A subroutine of emit_move_insn_1.  X is a push_operand in MODE.
3352    Return an equivalent MEM that does not use an auto-increment.  */
3353 
3354 rtx
3355 emit_move_resolve_push (machine_mode mode, rtx x)
3356 {
3357   enum rtx_code code = GET_CODE (XEXP (x, 0));
3358   rtx temp;
3359 
3360   poly_int64 adjust = GET_MODE_SIZE (mode);
3361 #ifdef PUSH_ROUNDING
3362   adjust = PUSH_ROUNDING (adjust);
3363 #endif
3364   if (code == PRE_DEC || code == POST_DEC)
3365     adjust = -adjust;
3366   else if (code == PRE_MODIFY || code == POST_MODIFY)
3367     {
3368       rtx expr = XEXP (XEXP (x, 0), 1);
3369 
3370       gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3371       poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3372       if (GET_CODE (expr) == MINUS)
3373 	val = -val;
3374       gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3375       adjust = val;
3376     }
3377 
3378   /* Do not use anti_adjust_stack, since we don't want to update
3379      stack_pointer_delta.  */
3380   temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3381 			      gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3382 			      0, OPTAB_LIB_WIDEN);
3383   if (temp != stack_pointer_rtx)
3384     emit_move_insn (stack_pointer_rtx, temp);
3385 
3386   switch (code)
3387     {
3388     case PRE_INC:
3389     case PRE_DEC:
3390     case PRE_MODIFY:
3391       temp = stack_pointer_rtx;
3392       break;
3393     case POST_INC:
3394     case POST_DEC:
3395     case POST_MODIFY:
3396       temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3397       break;
3398     default:
3399       gcc_unreachable ();
3400     }
3401 
3402   return replace_equiv_address (x, temp);
3403 }
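
/* For illustration, a sketch of the transformation above (descriptive
   only): on a STACK_GROWS_DOWNWARD target a push destination such as

     (mem:SI (pre_dec:P (reg sp)))

   is resolved by emitting an explicit stack pointer adjustment and
   returning the equivalent plain reference

     (mem:SI (reg sp))

   while a post-increment/decrement form instead returns a MEM offset
   from the new stack pointer, so the data still lands where the
   auto-inc form would have put it.  */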
3404 
3405 /* A subroutine of emit_move_complex.  Generate a move from Y into X.
3406    X is known to satisfy push_operand, and MODE is known to be complex.
3407    Returns the last instruction emitted.  */
3408 
3409 rtx_insn *
3410 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3411 {
3412   scalar_mode submode = GET_MODE_INNER (mode);
3413   bool imag_first;
3414 
3415 #ifdef PUSH_ROUNDING
3416   poly_int64 submodesize = GET_MODE_SIZE (submode);
3417 
3418   /* In case we output to the stack, but the size is smaller than what the
3419      machine can push exactly, we need to use move instructions.  */
3420   if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3421     {
3422       x = emit_move_resolve_push (mode, x);
3423       return emit_move_insn (x, y);
3424     }
3425 #endif
3426 
3427   /* Note that the real part always precedes the imag part in memory
3428      regardless of the machine's endianness.  */
3429   switch (GET_CODE (XEXP (x, 0)))
3430     {
3431     case PRE_DEC:
3432     case POST_DEC:
3433       imag_first = true;
3434       break;
3435     case PRE_INC:
3436     case POST_INC:
3437       imag_first = false;
3438       break;
3439     default:
3440       gcc_unreachable ();
3441     }
3442 
3443   emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3444 		  read_complex_part (y, imag_first));
3445   return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3446 			 read_complex_part (y, !imag_first));
3447 }
3448 
3449 /* A subroutine of emit_move_complex.  Perform the move from Y to X
3450    via two moves of the parts.  Returns the last instruction emitted.  */
3451 
3452 rtx_insn *
3453 emit_move_complex_parts (rtx x, rtx y)
3454 {
3455   /* Show the output dies here.  This is necessary for SUBREGs
3456      of pseudos since we cannot track their lifetimes correctly;
3457      hard regs shouldn't appear here except as return values.  */
3458   if (!reload_completed && !reload_in_progress
3459       && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3460     emit_clobber (x);
3461 
3462   write_complex_part (x, read_complex_part (y, false), false);
3463   write_complex_part (x, read_complex_part (y, true), true);
3464 
3465   return get_last_insn ();
3466 }
3467 
3468 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3469    MODE is known to be complex.  Returns the last instruction emitted.  */
3470 
3471 static rtx_insn *
3472 emit_move_complex (machine_mode mode, rtx x, rtx y)
3473 {
3474   bool try_int;
3475 
3476   /* Need to take special care for pushes, to maintain proper ordering
3477      of the data, and possibly extra padding.  */
3478   if (push_operand (x, mode))
3479     return emit_move_complex_push (mode, x, y);
3480 
3481   /* See if we can coerce the target into moving both values at once, except
3482      for floating point where we favor moving as parts if this is easy.  */
3483   if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3484       && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3485       && !(REG_P (x)
3486 	   && HARD_REGISTER_P (x)
3487 	   && REG_NREGS (x) == 1)
3488       && !(REG_P (y)
3489 	   && HARD_REGISTER_P (y)
3490 	   && REG_NREGS (y) == 1))
3491     try_int = false;
3492   /* Not possible if the values are inherently not adjacent.  */
3493   else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3494     try_int = false;
3495   /* Is possible if both are registers (or subregs of registers).  */
3496   else if (register_operand (x, mode) && register_operand (y, mode))
3497     try_int = true;
3498   /* If one of the operands is a memory, and alignment constraints
3499      are friendly enough, we may be able to do combined memory operations.
3500      We do not attempt this if Y is a constant because that combination is
3501      usually better with the by-parts thing below.  */
3502   else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3503 	   && (!STRICT_ALIGNMENT
3504 	       || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3505     try_int = true;
3506   else
3507     try_int = false;
3508 
3509   if (try_int)
3510     {
3511       rtx_insn *ret;
3512 
3513       /* For memory to memory moves, optimal behavior can be had with the
3514 	 existing block move logic.  */
3515       if (MEM_P (x) && MEM_P (y))
3516 	{
3517 	  emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3518 			   BLOCK_OP_NO_LIBCALL);
3519 	  return get_last_insn ();
3520 	}
3521 
3522       ret = emit_move_via_integer (mode, x, y, true);
3523       if (ret)
3524 	return ret;
3525     }
3526 
3527   return emit_move_complex_parts (x, y);
3528 }
3529 
3530 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3531    MODE is known to be MODE_CC.  Returns the last instruction emitted.  */
3532 
3533 static rtx_insn *
3534 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3535 {
3536   rtx_insn *ret;
3537 
3538   /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
3539   if (mode != CCmode)
3540     {
3541       enum insn_code code = optab_handler (mov_optab, CCmode);
3542       if (code != CODE_FOR_nothing)
3543 	{
3544 	  x = emit_move_change_mode (CCmode, mode, x, true);
3545 	  y = emit_move_change_mode (CCmode, mode, y, true);
3546 	  return emit_insn (GEN_FCN (code) (x, y));
3547 	}
3548     }
3549 
3550   /* Otherwise, find the MODE_INT mode of the same width.  */
3551   ret = emit_move_via_integer (mode, x, y, false);
3552   gcc_assert (ret != NULL);
3553   return ret;
3554 }
3555 
3556 /* Return true if word I of OP lies entirely in the
3557    undefined bits of a paradoxical subreg.  */
3558 
3559 static bool
3560 undefined_operand_subword_p (const_rtx op, int i)
3561 {
3562   if (GET_CODE (op) != SUBREG)
3563     return false;
3564   machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3565   poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3566   return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3567 	  || known_le (offset, -UNITS_PER_WORD));
3568 }
3569 
3570 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3571    MODE is any multi-word or full-word mode that lacks a move_insn
3572    pattern.  Note that you will get better code if you define such
3573    patterns, even if they must turn into multiple assembler instructions.  */
3574 
3575 static rtx_insn *
3576 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3577 {
3578   rtx_insn *last_insn = 0;
3579   rtx_insn *seq;
3580   rtx inner;
3581   bool need_clobber;
3582   int i, mode_size;
3583 
3584   /* This function can only handle cases where the number of words is
3585      known at compile time.  */
3586   mode_size = GET_MODE_SIZE (mode).to_constant ();
3587   gcc_assert (mode_size >= UNITS_PER_WORD);
3588 
3589   /* If X is a push on the stack, do the push now and replace
3590      X with a reference to the stack pointer.  */
3591   if (push_operand (x, mode))
3592     x = emit_move_resolve_push (mode, x);
3593 
3594   /* If we are in reload, see if either operand is a MEM whose address
3595      is scheduled for replacement.  */
3596   if (reload_in_progress && MEM_P (x)
3597       && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3598     x = replace_equiv_address_nv (x, inner);
3599   if (reload_in_progress && MEM_P (y)
3600       && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3601     y = replace_equiv_address_nv (y, inner);
3602 
3603   start_sequence ();
3604 
3605   need_clobber = false;
3606   for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3607     {
3608       rtx xpart = operand_subword (x, i, 1, mode);
3609       rtx ypart;
3610 
3611       /* Do not generate code for a move if it would come entirely
3612 	 from the undefined bits of a paradoxical subreg.  */
3613       if (undefined_operand_subword_p (y, i))
3614 	continue;
3615 
3616       ypart = operand_subword (y, i, 1, mode);
3617 
3618       /* If we can't get a part of Y, put Y into memory if it is a
3619 	 constant.  Otherwise, force it into a register.  Then we must
3620 	 be able to get a part of Y.  */
3621       if (ypart == 0 && CONSTANT_P (y))
3622 	{
3623 	  y = use_anchored_address (force_const_mem (mode, y));
3624 	  ypart = operand_subword (y, i, 1, mode);
3625 	}
3626       else if (ypart == 0)
3627 	ypart = operand_subword_force (y, i, mode);
3628 
3629       gcc_assert (xpart && ypart);
3630 
3631       need_clobber |= (GET_CODE (xpart) == SUBREG);
3632 
3633       last_insn = emit_move_insn (xpart, ypart);
3634     }
3635 
3636   seq = get_insns ();
3637   end_sequence ();
3638 
3639   /* Show the output dies here.  This is necessary for SUBREGs
3640      of pseudos since we cannot track their lifetimes correctly;
3641      hard regs shouldn't appear here except as return values.
3642      We never want to emit such a clobber after reload.  */
3643   if (x != y
3644       && ! (reload_in_progress || reload_completed)
3645       && need_clobber != 0)
3646     emit_clobber (x);
3647 
3648   emit_insn (seq);
3649 
3650   return last_insn;
3651 }
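
/* Illustrative sketch, not part of GCC itself: on a 64-bit target
   without a TImode move pattern, the loop above copies a TImode
   pseudo Y into X as two word_mode moves, roughly

     (set (subreg:DI (reg:TI x) 0) (subreg:DI (reg:TI y) 0))
     (set (subreg:DI (reg:TI x) 8) (subreg:DI (reg:TI y) 8))

   preceded by a clobber of X so that the subreg stores are not
   mistaken for partial updates of a live value.  */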
3652 
3653 /* Low level part of emit_move_insn.
3654    Called just like emit_move_insn, but assumes X and Y
3655    are basically valid.  */
3656 
3657 rtx_insn *
3658 emit_move_insn_1 (rtx x, rtx y)
3659 {
3660   machine_mode mode = GET_MODE (x);
3661   enum insn_code code;
3662 
3663   gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3664 
3665   code = optab_handler (mov_optab, mode);
3666   if (code != CODE_FOR_nothing)
3667     return emit_insn (GEN_FCN (code) (x, y));
3668 
3669   /* Expand complex moves by moving real part and imag part.  */
3670   if (COMPLEX_MODE_P (mode))
3671     return emit_move_complex (mode, x, y);
3672 
3673   if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3674       || ALL_FIXED_POINT_MODE_P (mode))
3675     {
3676       rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3677 
3678       /* If we can't find an integer mode, use multi words.  */
3679       if (result)
3680 	return result;
3681       else
3682 	return emit_move_multi_word (mode, x, y);
3683     }
3684 
3685   if (GET_MODE_CLASS (mode) == MODE_CC)
3686     return emit_move_ccmode (mode, x, y);
3687 
3688   /* Try using a move pattern for the corresponding integer mode.  This is
3689      only safe when simplify_subreg can convert MODE constants into integer
3690      constants.  At present, it can only do this reliably if the value
3691      fits within a HOST_WIDE_INT.  */
3692   if (!CONSTANT_P (y)
3693       || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3694     {
3695       rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3696 
3697       if (ret)
3698 	{
3699 	  if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3700 	    return ret;
3701 	}
3702     }
3703 
3704   return emit_move_multi_word (mode, x, y);
3705 }
3706 
3707 /* Generate code to copy Y into X.
3708    Both Y and X must have the same mode, except that
3709    Y can be a constant with VOIDmode.
3710    This mode cannot be BLKmode; use emit_block_move for that.
3711 
3712    Return the last instruction emitted.  */
3713 
3714 rtx_insn *
3715 emit_move_insn (rtx x, rtx y)
3716 {
3717   machine_mode mode = GET_MODE (x);
3718   rtx y_cst = NULL_RTX;
3719   rtx_insn *last_insn;
3720   rtx set;
3721 
3722   gcc_assert (mode != BLKmode
3723 	      && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
3724 
3725   if (CONSTANT_P (y))
3726     {
3727       if (optimize
3728 	  && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3729 	  && (last_insn = compress_float_constant (x, y)))
3730 	return last_insn;
3731 
3732       y_cst = y;
3733 
3734       if (!targetm.legitimate_constant_p (mode, y))
3735 	{
3736 	  y = force_const_mem (mode, y);
3737 
3738 	  /* If the target's cannot_force_const_mem prevented the spill,
3739 	     assume that the target's move expanders will also take care
3740 	     of the non-legitimate constant.  */
3741 	  if (!y)
3742 	    y = y_cst;
3743 	  else
3744 	    y = use_anchored_address (y);
3745 	}
3746     }
3747 
3748   /* If X or Y are memory references, verify that their addresses are valid
3749      for the machine.  */
3750   if (MEM_P (x)
3751       && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
3752 					 MEM_ADDR_SPACE (x))
3753 	  && ! push_operand (x, GET_MODE (x))))
3754     x = validize_mem (x);
3755 
3756   if (MEM_P (y)
3757       && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
3758 					MEM_ADDR_SPACE (y)))
3759     y = validize_mem (y);
3760 
3761   gcc_assert (mode != BLKmode);
3762 
3763   last_insn = emit_move_insn_1 (x, y);
3764 
3765   if (y_cst && REG_P (x)
3766       && (set = single_set (last_insn)) != NULL_RTX
3767       && SET_DEST (set) == x
3768       && ! rtx_equal_p (y_cst, SET_SRC (set)))
3769     set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
3770 
3771   return last_insn;
3772 }
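
/* Illustrative usage sketch, not part of GCC itself: load an integer
   constant into a fresh SImode pseudo.  If the constant were not
   legitimate for the target, emit_move_insn would force it into the
   constant pool (or leave it to the move expander) first.  */
#if 0
static rtx
example_load_constant (void)
{
  rtx reg = gen_reg_rtx (SImode);
  emit_move_insn (reg, GEN_INT (42));
  return reg;
}
#endif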
3773 
3774 /* Generate the body of an instruction to copy Y into X.
3775    It may be a list of insns, if one insn isn't enough.  */
3776 
3777 rtx_insn *
3778 gen_move_insn (rtx x, rtx y)
3779 {
3780   rtx_insn *seq;
3781 
3782   start_sequence ();
3783   emit_move_insn_1 (x, y);
3784   seq = get_insns ();
3785   end_sequence ();
3786   return seq;
3787 }
3788 
3789 /* If Y is representable exactly in a narrower mode, and the target can
3790    perform the extension directly from constant or memory, then emit the
3791    move as an extension.  */
3792 
3793 static rtx_insn *
3794 compress_float_constant (rtx x, rtx y)
3795 {
3796   machine_mode dstmode = GET_MODE (x);
3797   machine_mode orig_srcmode = GET_MODE (y);
3798   machine_mode srcmode;
3799   const REAL_VALUE_TYPE *r;
3800   int oldcost, newcost;
3801   bool speed = optimize_insn_for_speed_p ();
3802 
3803   r = CONST_DOUBLE_REAL_VALUE (y);
3804 
3805   if (targetm.legitimate_constant_p (dstmode, y))
3806     oldcost = set_src_cost (y, orig_srcmode, speed);
3807   else
3808     oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
3809 
3810   FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
3811     {
3812       enum insn_code ic;
3813       rtx trunc_y;
3814       rtx_insn *last_insn;
3815 
3816       /* Skip if the target can't extend this way.  */
3817       ic = can_extend_p (dstmode, srcmode, 0);
3818       if (ic == CODE_FOR_nothing)
3819 	continue;
3820 
3821       /* Skip if the narrowed value isn't exact.  */
3822       if (! exact_real_truncate (srcmode, r))
3823 	continue;
3824 
3825       trunc_y = const_double_from_real_value (*r, srcmode);
3826 
3827       if (targetm.legitimate_constant_p (srcmode, trunc_y))
3828 	{
3829 	  /* Skip if the target needs extra instructions to perform
3830 	     the extension.  */
3831 	  if (!insn_operand_matches (ic, 1, trunc_y))
3832 	    continue;
3833 	  /* This is valid, but may not be cheaper than the original. */
3834 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3835 				  dstmode, speed);
3836 	  if (oldcost < newcost)
3837 	    continue;
3838 	}
3839       else if (float_extend_from_mem[dstmode][srcmode])
3840 	{
3841 	  trunc_y = force_const_mem (srcmode, trunc_y);
3842 	  /* This is valid, but may not be cheaper than the original. */
3843 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3844 				  dstmode, speed);
3845 	  if (oldcost < newcost)
3846 	    continue;
3847 	  trunc_y = validize_mem (trunc_y);
3848 	}
3849       else
3850 	continue;
3851 
3852       /* For CSE's benefit, force the compressed constant pool entry
3853 	 into a new pseudo.  This constant may be used in different modes,
3854 	 and if not, combine will put things back together for us.  */
3855       trunc_y = force_reg (srcmode, trunc_y);
3856 
3857       /* If x is a hard register, perform the extension into a pseudo,
3858 	 so that e.g. stack realignment code is aware of it.  */
3859       rtx target = x;
3860       if (REG_P (x) && HARD_REGISTER_P (x))
3861 	target = gen_reg_rtx (dstmode);
3862 
3863       emit_unop_insn (ic, target, trunc_y, UNKNOWN);
3864       last_insn = get_last_insn ();
3865 
3866       if (REG_P (target))
3867 	set_unique_reg_note (last_insn, REG_EQUAL, y);
3868 
3869       if (target != x)
3870 	return emit_move_insn (x, target);
3871       return last_insn;
3872     }
3873 
3874   return NULL;
3875 }
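
/* For illustration, a sketch of the effect above (whether it applies
   depends on the target): a DFmode constant such as 1.5 is exactly
   representable in SFmode, so if the target can extend SFmode to
   DFmode directly from a constant or from memory, the move may be
   emitted as

     (set (reg:DF x) (float_extend:DF (const_double:SF 1.5)))

   provided the rtx costs say this is no more expensive than
   materializing the DFmode constant itself.  */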
3876 
3877 /* Pushing data onto the stack.  */
3878 
3879 /* Push a block of length SIZE (perhaps variable)
3880    and return an rtx to address the beginning of the block.
3881    The value may be virtual_outgoing_args_rtx.
3882 
3883    EXTRA is the number of bytes of padding to push in addition to SIZE.
3884    BELOW nonzero means this padding comes at low addresses;
3885    otherwise, the padding comes at high addresses.  */
3886 
3887 rtx
3888 push_block (rtx size, poly_int64 extra, int below)
3889 {
3890   rtx temp;
3891 
3892   size = convert_modes (Pmode, ptr_mode, size, 1);
3893   if (CONSTANT_P (size))
3894     anti_adjust_stack (plus_constant (Pmode, size, extra));
3895   else if (REG_P (size) && known_eq (extra, 0))
3896     anti_adjust_stack (size);
3897   else
3898     {
3899       temp = copy_to_mode_reg (Pmode, size);
3900       if (maybe_ne (extra, 0))
3901 	temp = expand_binop (Pmode, add_optab, temp,
3902 			     gen_int_mode (extra, Pmode),
3903 			     temp, 0, OPTAB_LIB_WIDEN);
3904       anti_adjust_stack (temp);
3905     }
3906 
3907   if (STACK_GROWS_DOWNWARD)
3908     {
3909       temp = virtual_outgoing_args_rtx;
3910       if (maybe_ne (extra, 0) && below)
3911 	temp = plus_constant (Pmode, temp, extra);
3912     }
3913   else
3914     {
3915       if (CONST_INT_P (size))
3916 	temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
3917 			      -INTVAL (size) - (below ? 0 : extra));
3918       else if (maybe_ne (extra, 0) && !below)
3919 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3920 			     negate_rtx (Pmode, plus_constant (Pmode, size,
3921 							       extra)));
3922       else
3923 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3924 			     negate_rtx (Pmode, size));
3925     }
3926 
3927   return memory_address (NARROWEST_INT_MODE, temp);
3928 }
3929 
3930 /* A utility routine that returns the base of an auto-inc memory, or NULL.  */
3931 
3932 static rtx
3933 mem_autoinc_base (rtx mem)
3934 {
3935   if (MEM_P (mem))
3936     {
3937       rtx addr = XEXP (mem, 0);
3938       if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3939 	return XEXP (addr, 0);
3940     }
3941   return NULL;
3942 }
3943 
3944 /* A utility routine used here, in reload, and in try_split.  The insns
3945    after PREV up to and including LAST are known to adjust the stack,
3946    with a final value of END_ARGS_SIZE.  Iterate backward from LAST
3947    placing notes as appropriate.  PREV may be NULL, indicating the
3948    entire insn sequence prior to LAST should be scanned.
3949 
3950    The set of allowed stack pointer modifications is small:
3951      (1) One or more auto-inc style memory references (aka pushes),
3952      (2) One or more addition/subtraction with the SP as destination,
3953      (3) A single move insn with the SP as destination,
3954      (4) A call_pop insn,
3955      (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
3956 
3957    Insns in the sequence that do not modify the SP are ignored,
3958    except for noreturn calls.
3959 
3960    The return value is the amount of adjustment that can be trivially
3961    verified, via immediate operand or auto-inc.  If the adjustment
3962    cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN.  */
3963 
3964 poly_int64
3965 find_args_size_adjust (rtx_insn *insn)
3966 {
3967   rtx dest, set, pat;
3968   int i;
3969 
3970   pat = PATTERN (insn);
3971   set = NULL;
3972 
3973   /* Look for a call_pop pattern.  */
3974   if (CALL_P (insn))
3975     {
3976       /* We have to allow non-call_pop patterns for the case
3977 	 of emit_single_push_insn of a TLS address.  */
3978       if (GET_CODE (pat) != PARALLEL)
3979 	return 0;
3980 
3981       /* All call_pop have a stack pointer adjust in the parallel.
3982 	 The call itself is always first, and the stack adjust is
3983 	 usually last, so search from the end.  */
3984       for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
3985 	{
3986 	  set = XVECEXP (pat, 0, i);
3987 	  if (GET_CODE (set) != SET)
3988 	    continue;
3989 	  dest = SET_DEST (set);
3990 	  if (dest == stack_pointer_rtx)
3991 	    break;
3992 	}
3993       /* We'd better have found the stack pointer adjust.  */
3994       if (i == 0)
3995 	return 0;
3996       /* Fall through to process the extracted SET and DEST
3997 	 as if it was a standalone insn.  */
3998     }
3999   else if (GET_CODE (pat) == SET)
4000     set = pat;
4001   else if ((set = single_set (insn)) != NULL)
4002     ;
4003   else if (GET_CODE (pat) == PARALLEL)
4004     {
4005       /* ??? Some older ports use a parallel with a stack adjust
4006 	 and a store for a PUSH_ROUNDING pattern, rather than a
4007 	 PRE/POST_MODIFY rtx.  Don't force them to update yet...  */
4008       /* ??? See h8300 and m68k, pushqi1.  */
4009       for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4010 	{
4011 	  set = XVECEXP (pat, 0, i);
4012 	  if (GET_CODE (set) != SET)
4013 	    continue;
4014 	  dest = SET_DEST (set);
4015 	  if (dest == stack_pointer_rtx)
4016 	    break;
4017 
4018 	  /* We do not expect an auto-inc of the sp in the parallel.  */
4019 	  gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4020 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4021 			       != stack_pointer_rtx);
4022 	}
4023       if (i < 0)
4024 	return 0;
4025     }
4026   else
4027     return 0;
4028 
4029   dest = SET_DEST (set);
4030 
4031   /* Look for direct modifications of the stack pointer.  */
4032   if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4033     {
4034       /* Look for a trivial adjustment, otherwise assume nothing.  */
4035       /* Note that the SPU restore_stack_block pattern refers to
4036 	 the stack pointer in V4SImode.  Consider that non-trivial.  */
4037       if (SCALAR_INT_MODE_P (GET_MODE (dest))
4038 	  && GET_CODE (SET_SRC (set)) == PLUS
4039 	  && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
4040 	  && CONST_INT_P (XEXP (SET_SRC (set), 1)))
4041 	return INTVAL (XEXP (SET_SRC (set), 1));
4042       /* ??? Reload can generate no-op moves, which will be cleaned
4043 	 up later.  Recognize it and continue searching.  */
4044       else if (rtx_equal_p (dest, SET_SRC (set)))
4045 	return 0;
4046       else
4047 	return HOST_WIDE_INT_MIN;
4048     }
4049   else
4050     {
4051       rtx mem, addr;
4052 
4053       /* Otherwise only think about autoinc patterns.  */
4054       if (mem_autoinc_base (dest) == stack_pointer_rtx)
4055 	{
4056 	  mem = dest;
4057 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4058 			       != stack_pointer_rtx);
4059 	}
4060       else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4061 	mem = SET_SRC (set);
4062       else
4063 	return 0;
4064 
4065       addr = XEXP (mem, 0);
4066       switch (GET_CODE (addr))
4067 	{
4068 	case PRE_INC:
4069 	case POST_INC:
4070 	  return GET_MODE_SIZE (GET_MODE (mem));
4071 	case PRE_DEC:
4072 	case POST_DEC:
4073 	  return -GET_MODE_SIZE (GET_MODE (mem));
4074 	case PRE_MODIFY:
4075 	case POST_MODIFY:
4076 	  addr = XEXP (addr, 1);
4077 	  gcc_assert (GET_CODE (addr) == PLUS);
4078 	  gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4079 	  gcc_assert (CONST_INT_P (XEXP (addr, 1)));
4080 	  return INTVAL (XEXP (addr, 1));
4081 	default:
4082 	  gcc_unreachable ();
4083 	}
4084     }
4085 }
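
/* For illustration (a summary of the cases handled above, descriptive
   only), typical insns and the adjustment returned:

     (set (reg sp) (plus (reg sp) (const_int -16)))   ->  -16
     (set (mem:SI (pre_dec (reg sp))) (reg:SI r))     ->  -4
     (set (reg sp) (reg fp))                          ->  HOST_WIDE_INT_MIN

   i.e. only immediate additions to the stack pointer and auto-inc
   pushes are trivially verifiable; anything else is reported as
   unknown.  */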
4086 
4087 poly_int64
4088 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4089 		       poly_int64 end_args_size)
4090 {
4091   poly_int64 args_size = end_args_size;
4092   bool saw_unknown = false;
4093   rtx_insn *insn;
4094 
4095   for (insn = last; insn != prev; insn = PREV_INSN (insn))
4096     {
4097       if (!NONDEBUG_INSN_P (insn))
4098 	continue;
4099 
4100       /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4101 	 a call argument containing a TLS address that itself requires
4102 	 a call to __tls_get_addr.  The handling of stack_pointer_delta
4103 	 in emit_single_push_insn is supposed to ensure that any such
4104 	 notes are already correct.  */
4105       rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4106       gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4107 
4108       poly_int64 this_delta = find_args_size_adjust (insn);
4109       if (known_eq (this_delta, 0))
4110 	{
4111 	  if (!CALL_P (insn)
4112 	      || ACCUMULATE_OUTGOING_ARGS
4113 	      || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4114 	    continue;
4115 	}
4116 
4117       gcc_assert (!saw_unknown);
4118       if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4119 	saw_unknown = true;
4120 
4121       if (!note)
4122 	add_args_size_note (insn, args_size);
4123       if (STACK_GROWS_DOWNWARD)
4124 	this_delta = -poly_uint64 (this_delta);
4125 
4126       if (saw_unknown)
4127 	args_size = HOST_WIDE_INT_MIN;
4128       else
4129 	args_size -= this_delta;
4130     }
4131 
4132   return args_size;
4133 }
4134 
4135 #ifdef PUSH_ROUNDING
4136 /* Emit single push insn.  */
4137 
4138 static void
4139 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4140 {
4141   rtx dest_addr;
4142   poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4143   rtx dest;
4144   enum insn_code icode;
4145 
4146   /* If there is a push pattern, use it.  Otherwise fall back to the old way
4147      of handing a MEM that represents the push operation to the move expander.  */
4148   icode = optab_handler (push_optab, mode);
4149   if (icode != CODE_FOR_nothing)
4150     {
4151       struct expand_operand ops[1];
4152 
4153       create_input_operand (&ops[0], x, mode);
4154       if (maybe_expand_insn (icode, 1, ops))
4155 	return;
4156     }
4157   if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4158     dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4159   /* If we are to pad downward, adjust the stack pointer first and
4160      then store X into the stack location using an offset.  This is
4161      because emit_move_insn does not know how to pad; it does not have
4162      access to type.  */
4163   else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4164     {
4165       emit_move_insn (stack_pointer_rtx,
4166 		      expand_binop (Pmode,
4167 				    STACK_GROWS_DOWNWARD ? sub_optab
4168 				    : add_optab,
4169 				    stack_pointer_rtx,
4170 				    gen_int_mode (rounded_size, Pmode),
4171 				    NULL_RTX, 0, OPTAB_LIB_WIDEN));
4172 
4173       poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4174       if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4175 	/* We have already decremented the stack pointer, so get the
4176 	   previous value.  */
4177 	offset += rounded_size;
4178 
4179       if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4180 	/* We have already incremented the stack pointer, so get the
4181 	   previous value.  */
4182 	offset -= rounded_size;
4183 
4184       dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4185     }
4186   else
4187     {
4188       if (STACK_GROWS_DOWNWARD)
4189 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
4190 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4191       else
4192 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
4193 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4194 
4195       dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4196     }
4197 
4198   dest = gen_rtx_MEM (mode, dest_addr);
4199 
4200   if (type != 0)
4201     {
4202       set_mem_attributes (dest, type, 1);
4203 
4204       if (cfun->tail_call_marked)
4205 	/* Function incoming arguments may overlap with sibling call
4206 	   outgoing arguments and we cannot allow reordering of reads
4207 	   from function arguments with stores to outgoing arguments
4208 	   of sibling calls.  */
4209 	set_mem_alias_set (dest, 0);
4210     }
4211   emit_move_insn (dest, x);
4212 }
4213 
4214 /* Emit and annotate a single push insn.  */
4215 
4216 static void
4217 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4218 {
4219   poly_int64 delta, old_delta = stack_pointer_delta;
4220   rtx_insn *prev = get_last_insn ();
4221   rtx_insn *last;
4222 
4223   emit_single_push_insn_1 (mode, x, type);
4224 
4225   /* Adjust stack_pointer_delta to describe the situation after the push
4226      we just performed.  Note that we must do this after the push rather
4227      than before the push in case calculating X needs pushes and pops of
4228      its own (e.g. if calling __tls_get_addr).  The REG_ARGS_SIZE notes
4229      for such pushes and pops must not include the effect of the future
4230      push of X.  */
4231   stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4232 
4233   last = get_last_insn ();
4234 
4235   /* Notice the common case where we emitted exactly one insn.  */
4236   if (PREV_INSN (last) == prev)
4237     {
4238       add_args_size_note (last, stack_pointer_delta);
4239       return;
4240     }
4241 
4242   delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4243   gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4244 	      || known_eq (delta, old_delta));
4245 }
4246 #endif
4247 
4248 /* If reading SIZE bytes from X will end up reading from
4249    Y, return the number of bytes that overlap.  Return -1
4250    if there is no overlap, or -2 if we cannot determine the overlap
4251    (for example when X and Y have different base registers).  */
4252 
4253 static int
4254 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4255 {
4256   rtx tmp = plus_constant (Pmode, x, size);
4257   rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4258 
4259   if (!CONST_INT_P (sub))
4260     return -2;
4261 
4262   HOST_WIDE_INT val = INTVAL (sub);
4263 
4264   return IN_RANGE (val, 1, size) ? val : -1;
4265 }
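
/* Worked example, illustrative only: if X is (plus sp 0), Y is
   (plus sp 8) and SIZE is 16, then X + SIZE - Y simplifies to the
   constant 8, which lies in [1, 16], so 8 overlapping bytes are
   reported.  If the difference does not fold to a CONST_INT, for
   example because X and Y use different base registers, -2 is
   returned instead.  */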
4266 
4267 /* Generate code to push X onto the stack, assuming it has mode MODE and
4268    type TYPE.
4269    MODE is redundant except when X is a CONST_INT (since they don't
4270    carry mode info).
4271    SIZE is an rtx for the size of data to be copied (in bytes),
4272    needed only if X is BLKmode.
4273    Return true if successful.  May return false if asked to push a
4274    partial argument during a sibcall optimization (as specified by
4275    SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4276    to not overlap.
4277 
4278    ALIGN (in bits) is maximum alignment we can assume.
4279 
4280    If PARTIAL and REG are both nonzero, then copy that many of the first
4281    bytes of X into registers starting with REG, and push the rest of X.
4282    The amount of space pushed is decreased by PARTIAL bytes.
4283    REG must be a hard register in this case.
4284    If REG is zero but PARTIAL is not, take all other actions for an
4285    argument partially in registers, but do not actually load any
4286    registers.
4287 
4288    EXTRA is the amount in bytes of extra space to leave next to this arg.
4289    This is ignored if an argument block has already been allocated.
4290 
4291    On a machine that lacks real push insns, ARGS_ADDR is the address of
4292    the bottom of the argument block for this call.  We use indexing off there
4293    to store the arg.  On machines with push insns, ARGS_ADDR is 0 when an
4294    argument block has not been preallocated.
4295 
4296    ARGS_SO_FAR is the size of args previously pushed for this call.
4297 
4298    REG_PARM_STACK_SPACE is nonzero if functions require stack space
4299    for arguments passed in registers.  If nonzero, it will be the number
4300    of bytes required.  */
4301 
4302 bool
4303 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4304 		unsigned int align, int partial, rtx reg, poly_int64 extra,
4305 		rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4306 		rtx alignment_pad, bool sibcall_p)
4307 {
4308   rtx xinner;
4309   pad_direction stack_direction
4310     = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4311 
4312   /* Decide where to pad the argument: PAD_DOWNWARD for below,
4313      PAD_UPWARD for above, or PAD_NONE for don't pad it.
4314      Default is below for small data on big-endian machines; else above.  */
4315   pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4316 
4317   /* Invert direction if stack is post-decrement.
4318      FIXME: why?  */
4319   if (STACK_PUSH_CODE == POST_DEC)
4320     if (where_pad != PAD_NONE)
4321       where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4322 
4323   xinner = x;
4324 
4325   int nregs = partial / UNITS_PER_WORD;
4326   rtx *tmp_regs = NULL;
4327   int overlapping = 0;
4328 
4329   if (mode == BLKmode
4330       || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
4331     {
4332       /* Copy a block into the stack, entirely or partially.  */
4333 
4334       rtx temp;
4335       int used;
4336       int offset;
4337       int skip;
4338 
4339       offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4340       used = partial - offset;
4341 
4342       if (mode != BLKmode)
4343 	{
4344 	  /* A value is to be stored in an insufficiently aligned
4345 	     stack slot; copy via a suitably aligned slot if
4346 	     necessary.  */
4347 	  size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4348 	  if (!MEM_P (xinner))
4349 	    {
4350 	      temp = assign_temp (type, 1, 1);
4351 	      emit_move_insn (temp, xinner);
4352 	      xinner = temp;
4353 	    }
4354 	}
4355 
4356       gcc_assert (size);
4357 
4358       /* USED is now the # of bytes we need not copy to the stack
4359 	 because registers will take care of them.  */
4360 
4361       if (partial != 0)
4362 	xinner = adjust_address (xinner, BLKmode, used);
4363 
4364       /* If the partial register-part of the arg counts in its stack size,
4365 	 skip the part of stack space corresponding to the registers.
4366 	 Otherwise, start copying to the beginning of the stack space,
4367 	 by setting SKIP to 0.  */
4368       skip = (reg_parm_stack_space == 0) ? 0 : used;
4369 
4370 #ifdef PUSH_ROUNDING
4371       /* Do it with several push insns if that doesn't take lots of insns
4372 	 and if there is no difficulty with push insns that skip bytes
4373 	 on the stack for alignment purposes.  */
4374       if (args_addr == 0
4375 	  && PUSH_ARGS
4376 	  && CONST_INT_P (size)
4377 	  && skip == 0
4378 	  && MEM_ALIGN (xinner) >= align
4379 	  && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4380 	  /* Here we avoid the case of a structure whose weak alignment
4381 	     forces many pushes of a small amount of data,
4382 	     and such small pushes do rounding that causes trouble.  */
4383 	  && ((!targetm.slow_unaligned_access (word_mode, align))
4384 	      || align >= BIGGEST_ALIGNMENT
4385 	      || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4386 			   align / BITS_PER_UNIT))
4387 	  && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4388 	{
4389 	  /* Push padding now if padding above and stack grows down,
4390 	     or if padding below and stack grows up.
4391 	     But if space already allocated, this has already been done.  */
4392 	  if (maybe_ne (extra, 0)
4393 	      && args_addr == 0
4394 	      && where_pad != PAD_NONE
4395 	      && where_pad != stack_direction)
4396 	    anti_adjust_stack (gen_int_mode (extra, Pmode));
4397 
4398 	  move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0);
4399 	}
4400       else
4401 #endif /* PUSH_ROUNDING  */
4402 	{
4403 	  rtx target;
4404 
4405 	  /* Otherwise make space on the stack and copy the data
4406 	     to the address of that space.  */
4407 
4408 	  /* Deduct words put into registers from the size we must copy.  */
4409 	  if (partial != 0)
4410 	    {
4411 	      if (CONST_INT_P (size))
4412 		size = GEN_INT (INTVAL (size) - used);
4413 	      else
4414 		size = expand_binop (GET_MODE (size), sub_optab, size,
4415 				     gen_int_mode (used, GET_MODE (size)),
4416 				     NULL_RTX, 0, OPTAB_LIB_WIDEN);
4417 	    }
4418 
4419 	  /* Get the address of the stack space.
4420 	     In this case, we do not deal with EXTRA separately.
4421 	     A single stack adjust will do.  */
4422 	  if (! args_addr)
4423 	    {
4424 	      temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4425 	      extra = 0;
4426 	    }
4427 	  else if (CONST_INT_P (args_so_far))
4428 	    temp = memory_address (BLKmode,
4429 				   plus_constant (Pmode, args_addr,
4430 						  skip + INTVAL (args_so_far)));
4431 	  else
4432 	    temp = memory_address (BLKmode,
4433 				   plus_constant (Pmode,
4434 						  gen_rtx_PLUS (Pmode,
4435 								args_addr,
4436 								args_so_far),
4437 						  skip));
4438 
4439 	  if (!ACCUMULATE_OUTGOING_ARGS)
4440 	    {
4441 	      /* If the source is referenced relative to the stack pointer,
4442 		 copy it to another register to stabilize it.  We do not need
4443 		 to do this if we know that we won't be changing sp.  */
4444 
4445 	      if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4446 		  || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4447 		temp = copy_to_reg (temp);
4448 	    }
4449 
4450 	  target = gen_rtx_MEM (BLKmode, temp);
4451 
4452 	  /* We do *not* set_mem_attributes here, because incoming arguments
4453 	     may overlap with sibling call outgoing arguments and we cannot
4454 	     allow reordering of reads from function arguments with stores
4455 	     to outgoing arguments of sibling calls.  We do, however, want
4456 	     to record the alignment of the stack slot.  */
4457 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4458 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4459 	  set_mem_align (target, align);
4460 
4461 	  /* If part should go in registers and pushing to that part would
4462 	     overwrite some of the values that need to go into regs, load the
4463 	     overlapping values into temporary pseudos to be moved into the hard
4464 	     regs at the end after the stack pushing has completed.
4465 	     We cannot load them directly into the hard regs here because
4466 	     they can be clobbered by the block move expansions.
4467 	     See PR 65358.  */
4468 
4469 	  if (partial > 0 && reg != 0 && mode == BLKmode
4470 	      && GET_CODE (reg) != PARALLEL)
4471 	    {
4472 	      overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4473 	      if (overlapping > 0)
4474 	        {
4475 		  gcc_assert (overlapping % UNITS_PER_WORD == 0);
4476 		  overlapping /= UNITS_PER_WORD;
4477 
4478 		  tmp_regs = XALLOCAVEC (rtx, overlapping);
4479 
4480 		  for (int i = 0; i < overlapping; i++)
4481 		    tmp_regs[i] = gen_reg_rtx (word_mode);
4482 
4483 		  for (int i = 0; i < overlapping; i++)
4484 		    emit_move_insn (tmp_regs[i],
4485 				    operand_subword_force (target, i, mode));
4486 	        }
4487 	      else if (overlapping == -1)
4488 		overlapping = 0;
4489 	      /* Could not determine whether there is overlap.
4490 	         Fail the sibcall.  */
4491 	      else
4492 		{
4493 		  overlapping = 0;
4494 		  if (sibcall_p)
4495 		    return false;
4496 		}
4497 	    }
4498 	  emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4499 	}
4500     }
4501   else if (partial > 0)
4502     {
4503       /* Scalar partly in registers.  This case is only supported
4504 	 for fixed-width modes.  */
4505       int size = GET_MODE_SIZE (mode).to_constant ();
4506       size /= UNITS_PER_WORD;
4507       int i;
4508       int not_stack;
4509       /* # bytes of start of argument
4510 	 that we must make space for but need not store.  */
4511       int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4512       int args_offset = INTVAL (args_so_far);
4513       int skip;
4514 
4515       /* Push padding now if padding above and stack grows down,
4516 	 or if padding below and stack grows up.
4517 	 But if space already allocated, this has already been done.  */
4518       if (maybe_ne (extra, 0)
4519 	  && args_addr == 0
4520 	  && where_pad != PAD_NONE
4521 	  && where_pad != stack_direction)
4522 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4523 
4524       /* If we make space by pushing it, we might as well push
4525 	 the real data.  Otherwise, we can leave OFFSET nonzero
4526 	 and leave the space uninitialized.  */
4527       if (args_addr == 0)
4528 	offset = 0;
4529 
4530       /* Now NOT_STACK gets the number of words that we don't need to
4531 	 allocate on the stack.  Convert OFFSET to words too.  */
4532       not_stack = (partial - offset) / UNITS_PER_WORD;
4533       offset /= UNITS_PER_WORD;
4534 
4535       /* If the partial register-part of the arg counts in its stack size,
4536 	 skip the part of stack space corresponding to the registers.
4537 	 Otherwise, start copying to the beginning of the stack space,
4538 	 by setting SKIP to 0.  */
4539       skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4540 
4541       if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4542 	x = validize_mem (force_const_mem (mode, x));
4543 
4544       /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4545 	 SUBREGs of such registers are not allowed.  */
4546       if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4547 	   && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4548 	x = copy_to_reg (x);
4549 
4550       /* Loop over all the words allocated on the stack for this arg.  */
4551       /* We can do it by words, because any scalar bigger than a word
4552 	 has a size that is a multiple of a word.  */
4553       for (i = size - 1; i >= not_stack; i--)
4554 	if (i >= not_stack + offset)
4555 	  if (!emit_push_insn (operand_subword_force (x, i, mode),
4556 			  word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4557 			  0, args_addr,
4558 			  GEN_INT (args_offset + ((i - not_stack + skip)
4559 						  * UNITS_PER_WORD)),
4560 			  reg_parm_stack_space, alignment_pad, sibcall_p))
4561 	    return false;
4562     }
4563   else
4564     {
4565       rtx addr;
4566       rtx dest;
4567 
4568       /* Push padding now if padding above and stack grows down,
4569 	 or if padding below and stack grows up.
4570 	 But if space already allocated, this has already been done.  */
4571       if (maybe_ne (extra, 0)
4572 	  && args_addr == 0
4573 	  && where_pad != PAD_NONE
4574 	  && where_pad != stack_direction)
4575 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4576 
4577 #ifdef PUSH_ROUNDING
4578       if (args_addr == 0 && PUSH_ARGS)
4579 	emit_single_push_insn (mode, x, type);
4580       else
4581 #endif
4582 	{
4583 	  addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4584 	  dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4585 
4586 	  /* We do *not* set_mem_attributes here, because incoming arguments
4587 	     may overlap with sibling call outgoing arguments and we cannot
4588 	     allow reordering of reads from function arguments with stores
4589 	     to outgoing arguments of sibling calls.  We do, however, want
4590 	     to record the alignment of the stack slot.  */
4591 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4592 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4593 	  set_mem_align (dest, align);
4594 
4595 	  emit_move_insn (dest, x);
4596 	}
4597     }
4598 
4599   /* Move the partial arguments into the registers and any overlapping
4600      values that we moved into the pseudos in tmp_regs.  */
4601   if (partial > 0 && reg != 0)
4602     {
4603       /* Handle calls that pass values in multiple non-contiguous locations.
4604 	 The Irix 6 ABI has examples of this.  */
4605       if (GET_CODE (reg) == PARALLEL)
4606 	emit_group_load (reg, x, type, -1);
4607       else
4608         {
4609 	  gcc_assert (partial % UNITS_PER_WORD == 0);
4610 	  move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4611 
4612 	  for (int i = 0; i < overlapping; i++)
4613 	    emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4614 						    + nregs - overlapping + i),
4615 			    tmp_regs[i]);
4616 
4617 	}
4618     }
4619 
4620   if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4621     anti_adjust_stack (gen_int_mode (extra, Pmode));
4622 
4623   if (alignment_pad && args_addr == 0)
4624     anti_adjust_stack (alignment_pad);
4625 
4626   return true;
4627 }
4628 
4629 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4630    operations.  */
4631 
4632 static rtx
4633 get_subtarget (rtx x)
4634 {
4635   return (optimize
4636           || x == 0
4637 	   /* Only registers can be subtargets.  */
4638 	   || !REG_P (x)
4639 	   /* Don't use hard regs to avoid extending their life.  */
4640 	   || REGNO (x) < FIRST_PSEUDO_REGISTER
4641 	  ? 0 : x);
4642 }
4643 
4644 /* A subroutine of expand_assignment.  Optimize FIELD op= VAL, where
4645    FIELD is a bitfield.  Returns true if the optimization was successful,
4646    and there's nothing else to do.  */
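
/* As an illustration (the struct and function names below are made up and
   the generated code is target dependent): for

     struct s { unsigned int f : 1; unsigned int g : 7; };
     void inc_f (struct s *p) { p->f += 1; }

   the 1-bit PLUS_EXPR case below implements the update as a single XOR of
   a shifted constant into the word or byte holding the field, instead of
   an extract/add/insert sequence.  */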
4647 
4648 static bool
4649 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
4650 				 poly_uint64 pbitpos,
4651 				 poly_uint64 pbitregion_start,
4652 				 poly_uint64 pbitregion_end,
4653 				 machine_mode mode1, rtx str_rtx,
4654 				 tree to, tree src, bool reverse)
4655 {
4656   /* str_mode is not guaranteed to be a scalar type.  */
4657   machine_mode str_mode = GET_MODE (str_rtx);
4658   unsigned int str_bitsize;
4659   tree op0, op1;
4660   rtx value, result;
4661   optab binop;
4662   gimple *srcstmt;
4663   enum tree_code code;
4664 
4665   unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4666   if (mode1 != VOIDmode
4667       || !pbitsize.is_constant (&bitsize)
4668       || !pbitpos.is_constant (&bitpos)
4669       || !pbitregion_start.is_constant (&bitregion_start)
4670       || !pbitregion_end.is_constant (&bitregion_end)
4671       || bitsize >= BITS_PER_WORD
4672       || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
4673       || str_bitsize > BITS_PER_WORD
4674       || TREE_SIDE_EFFECTS (to)
4675       || TREE_THIS_VOLATILE (to))
4676     return false;
4677 
4678   STRIP_NOPS (src);
4679   if (TREE_CODE (src) != SSA_NAME)
4680     return false;
4681   if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
4682     return false;
4683 
4684   srcstmt = get_gimple_for_ssa_name (src);
4685   if (!srcstmt
4686       || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
4687     return false;
4688 
4689   code = gimple_assign_rhs_code (srcstmt);
4690 
4691   op0 = gimple_assign_rhs1 (srcstmt);
4692 
4693   /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4694      to find its initialization.  Hopefully the initialization will
4695      be from a bitfield load.  */
4696   if (TREE_CODE (op0) == SSA_NAME)
4697     {
4698       gimple *op0stmt = get_gimple_for_ssa_name (op0);
4699 
4700       /* We want to eventually have OP0 be the same as TO, which
4701 	 should be a bitfield.  */
4702       if (!op0stmt
4703 	  || !is_gimple_assign (op0stmt)
4704 	  || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
4705 	return false;
4706       op0 = gimple_assign_rhs1 (op0stmt);
4707     }
4708 
4709   op1 = gimple_assign_rhs2 (srcstmt);
4710 
4711   if (!operand_equal_p (to, op0, 0))
4712     return false;
4713 
4714   if (MEM_P (str_rtx))
4715     {
4716       unsigned HOST_WIDE_INT offset1;
4717 
4718       if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
4719 	str_bitsize = BITS_PER_WORD;
4720 
4721       scalar_int_mode best_mode;
4722       if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
4723 			  MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
4724 	return false;
4725       str_mode = best_mode;
4726       str_bitsize = GET_MODE_BITSIZE (best_mode);
4727 
4728       offset1 = bitpos;
4729       bitpos %= str_bitsize;
4730       offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
4731       str_rtx = adjust_address (str_rtx, str_mode, offset1);
4732     }
4733   else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
4734     return false;
4735 
4736   /* If the bit field covers the whole REG/MEM, store_field
4737      will likely generate better code.  */
4738   if (bitsize >= str_bitsize)
4739     return false;
4740 
4741   /* We can't handle fields split across multiple entities.  */
4742   if (bitpos + bitsize > str_bitsize)
4743     return false;
4744 
4745   if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
4746     bitpos = str_bitsize - bitpos - bitsize;
4747 
4748   switch (code)
4749     {
4750     case PLUS_EXPR:
4751     case MINUS_EXPR:
4752       /* For now, just optimize the case of the topmost bitfield
4753 	 where we don't need to do any masking and also
4754 	 1 bit bitfields where xor can be used.
4755 	 We might win by one instruction for the other bitfields
4756 	 too if insv/extv instructions aren't used, so that
4757 	 can be added later.  */
4758       if ((reverse || bitpos + bitsize != str_bitsize)
4759 	  && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
4760 	break;
4761 
4762       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4763       value = convert_modes (str_mode,
4764 			     TYPE_MODE (TREE_TYPE (op1)), value,
4765 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
4766 
4767       /* We may be accessing data outside the field, which means
4768 	 we can alias adjacent data.  */
4769       if (MEM_P (str_rtx))
4770 	{
4771 	  str_rtx = shallow_copy_rtx (str_rtx);
4772 	  set_mem_alias_set (str_rtx, 0);
4773 	  set_mem_expr (str_rtx, 0);
4774 	}
4775 
4776       if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
4777 	{
4778 	  value = expand_and (str_mode, value, const1_rtx, NULL);
4779 	  binop = xor_optab;
4780 	}
4781       else
4782 	binop = code == PLUS_EXPR ? add_optab : sub_optab;
4783 
4784       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4785       if (reverse)
4786 	value = flip_storage_order (str_mode, value);
4787       result = expand_binop (str_mode, binop, str_rtx,
4788 			     value, str_rtx, 1, OPTAB_WIDEN);
4789       if (result != str_rtx)
4790 	emit_move_insn (str_rtx, result);
4791       return true;
4792 
4793     case BIT_IOR_EXPR:
4794     case BIT_XOR_EXPR:
4795       if (TREE_CODE (op1) != INTEGER_CST)
4796 	break;
4797       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4798       value = convert_modes (str_mode,
4799 			     TYPE_MODE (TREE_TYPE (op1)), value,
4800 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
4801 
4802       /* We may be accessing data outside the field, which means
4803 	 we can alias adjacent data.  */
4804       if (MEM_P (str_rtx))
4805 	{
4806 	  str_rtx = shallow_copy_rtx (str_rtx);
4807 	  set_mem_alias_set (str_rtx, 0);
4808 	  set_mem_expr (str_rtx, 0);
4809 	}
4810 
4811       binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
4812       if (bitpos + bitsize != str_bitsize)
4813 	{
4814 	  rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
4815 				   str_mode);
4816 	  value = expand_and (str_mode, value, mask, NULL_RTX);
4817 	}
4818       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4819       if (reverse)
4820 	value = flip_storage_order (str_mode, value);
4821       result = expand_binop (str_mode, binop, str_rtx,
4822 			     value, str_rtx, 1, OPTAB_WIDEN);
4823       if (result != str_rtx)
4824 	emit_move_insn (str_rtx, result);
4825       return true;
4826 
4827     default:
4828       break;
4829     }
4830 
4831   return false;
4832 }
4833 
4834 /* In the C++ memory model, consecutive bit fields in a structure are
4835    considered one memory location.
4836 
4837    Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
4838    returns the bit range of consecutive bits in which this COMPONENT_REF
4839    belongs.  The values are returned in *BITSTART and *BITEND.  *BITPOS
4840    and *OFFSET may be adjusted in the process.
4841 
4842    If the access does not need to be restricted, 0 is returned in both
4843    *BITSTART and *BITEND.  */
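
/* Illustrative example (not from any particular test case):

     struct S { char a; int b : 7; int c : 9; char d; };

   Under the C++ memory model the adjacent bit fields B and C form a single
   memory location, while A and D are separate locations.  For a store to B,
   the range returned in *BITSTART/*BITEND covers the bits of the
   representative spanning B and C, so the store may read and rewrite bits
   of C but must not touch A or D.  */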
4844 
4845 void
4846 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
4847 	       poly_int64_pod *bitpos, tree *offset)
4848 {
4849   poly_int64 bitoffset;
4850   tree field, repr;
4851 
4852   gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
4853 
4854   field = TREE_OPERAND (exp, 1);
4855   repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
4856   /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
4857      need to limit the range we can access.  */
4858   if (!repr)
4859     {
4860       *bitstart = *bitend = 0;
4861       return;
4862     }
4863 
4864   /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
4865      part of a larger bit field, then the representative does not serve any
4866      useful purpose.  This can occur in Ada.  */
4867   if (handled_component_p (TREE_OPERAND (exp, 0)))
4868     {
4869       machine_mode rmode;
4870       poly_int64 rbitsize, rbitpos;
4871       tree roffset;
4872       int unsignedp, reversep, volatilep = 0;
4873       get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
4874 			   &roffset, &rmode, &unsignedp, &reversep,
4875 			   &volatilep);
4876       if (!multiple_p (rbitpos, BITS_PER_UNIT))
4877 	{
4878 	  *bitstart = *bitend = 0;
4879 	  return;
4880 	}
4881     }
4882 
4883   /* Compute the adjustment to bitpos from the offset of the field
4884      relative to the representative.  DECL_FIELD_OFFSET of field and
4885      repr are the same by construction if they are not constants,
4886      see finish_bitfield_layout.  */
4887   poly_uint64 field_offset, repr_offset;
4888   if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
4889       && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
4890     bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
4891   else
4892     bitoffset = 0;
4893   bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
4894 		- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
4895 
4896   /* If the adjustment is larger than bitpos, we would have a negative bit
4897      position for the lower bound and this may wreak havoc later.  Adjust
4898      offset and bitpos to make the lower bound non-negative in that case.  */
4899   if (maybe_gt (bitoffset, *bitpos))
4900     {
4901       poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
4902       poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
4903 
4904       *bitpos += adjust_bits;
4905       if (*offset == NULL_TREE)
4906 	*offset = size_int (-adjust_bytes);
4907       else
4908 	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
4909       *bitstart = 0;
4910     }
4911   else
4912     *bitstart = *bitpos - bitoffset;
4913 
4914   *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1;
4915 }
4916 
4917 /* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
4918    in memory and has non-BLKmode.  DECL_RTL must not be a MEM; if
4919    DECL_RTL was not set yet, return NORTL.  */
4920 
4921 static inline bool
4922 addr_expr_of_non_mem_decl_p_1 (tree addr, bool nortl)
4923 {
4924   if (TREE_CODE (addr) != ADDR_EXPR)
4925     return false;
4926 
4927   tree base = TREE_OPERAND (addr, 0);
4928 
4929   if (!DECL_P (base)
4930       || TREE_ADDRESSABLE (base)
4931       || DECL_MODE (base) == BLKmode)
4932     return false;
4933 
4934   if (!DECL_RTL_SET_P (base))
4935     return nortl;
4936 
4937   return (!MEM_P (DECL_RTL (base)));
4938 }
4939 
4940 /* Returns true if the MEM_REF REF refers to an object that does not
4941    reside in memory and has non-BLKmode.  */
4942 
4943 static inline bool
4944 mem_ref_refers_to_non_mem_p (tree ref)
4945 {
4946   tree base = TREE_OPERAND (ref, 0);
4947   return addr_expr_of_non_mem_decl_p_1 (base, false);
4948 }
4949 
4950 /* Expand an assignment that stores the value of FROM into TO.  If NONTEMPORAL
4951    is true, try generating a nontemporal store.  */
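
/* A small illustrative case for the misaligned-store path just below
   (names are made up; layout and code generation are target dependent):

     struct __attribute__ ((packed)) p { char c; int i; };
     void set_i (struct p *q, int x) { q->i = x; }

   The store to q->i may be under-aligned for the mode of I.  If the target
   provides a movmisalign<mode> pattern it is used; otherwise, if unaligned
   accesses are slow, the assignment falls back to store_bit_field.  */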
4952 
4953 void
4954 expand_assignment (tree to, tree from, bool nontemporal)
4955 {
4956   rtx to_rtx = 0;
4957   rtx result;
4958   machine_mode mode;
4959   unsigned int align;
4960   enum insn_code icode;
4961 
4962   /* Don't crash if the lhs of the assignment was erroneous.  */
4963   if (TREE_CODE (to) == ERROR_MARK)
4964     {
4965       expand_normal (from);
4966       return;
4967     }
4968 
4969   /* Optimize away no-op moves without side-effects.  */
4970   if (operand_equal_p (to, from, 0))
4971     return;
4972 
4973   /* Handle misaligned stores.  */
4974   mode = TYPE_MODE (TREE_TYPE (to));
4975   if ((TREE_CODE (to) == MEM_REF
4976        || TREE_CODE (to) == TARGET_MEM_REF)
4977       && mode != BLKmode
4978       && !mem_ref_refers_to_non_mem_p (to)
4979       && ((align = get_object_alignment (to))
4980 	  < GET_MODE_ALIGNMENT (mode))
4981       && (((icode = optab_handler (movmisalign_optab, mode))
4982 	   != CODE_FOR_nothing)
4983 	  || targetm.slow_unaligned_access (mode, align)))
4984     {
4985       rtx reg, mem;
4986 
4987       reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
4988       reg = force_not_mem (reg);
4989       mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
4990       if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
4991 	reg = flip_storage_order (mode, reg);
4992 
4993       if (icode != CODE_FOR_nothing)
4994 	{
4995 	  struct expand_operand ops[2];
4996 
4997 	  create_fixed_operand (&ops[0], mem);
4998 	  create_input_operand (&ops[1], reg, mode);
4999 	  /* The movmisalign<mode> pattern cannot fail, else the assignment
5000 	     would silently be omitted.  */
5001 	  expand_insn (icode, 2, ops);
5002 	}
5003       else
5004 	store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5005 			 false);
5006       return;
5007     }
5008 
5009   /* Assignment of a structure component needs special treatment
5010      if the structure component's rtx is not simply a MEM.
5011      Assignment of an array element at a constant index, and assignment of
5012      an array element in an unaligned packed structure field, has the same
5013      problem.  Same for (partially) storing into a non-memory object.  */
5014   if (handled_component_p (to)
5015       || (TREE_CODE (to) == MEM_REF
5016 	  && (REF_REVERSE_STORAGE_ORDER (to)
5017 	      || mem_ref_refers_to_non_mem_p (to)))
5018       || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5019     {
5020       machine_mode mode1;
5021       poly_int64 bitsize, bitpos;
5022       poly_uint64 bitregion_start = 0;
5023       poly_uint64 bitregion_end = 0;
5024       tree offset;
5025       int unsignedp, reversep, volatilep = 0;
5026       tree tem;
5027 
5028       push_temp_slots ();
5029       tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5030 				 &unsignedp, &reversep, &volatilep);
5031 
5032       /* Make sure bitpos is not negative, it can wreak havoc later.  */
5033       if (maybe_lt (bitpos, 0))
5034 	{
5035 	  gcc_assert (offset == NULL_TREE);
5036 	  offset = size_int (bits_to_bytes_round_down (bitpos));
5037 	  bitpos = num_trailing_bits (bitpos);
5038 	}
5039 
5040       if (TREE_CODE (to) == COMPONENT_REF
5041 	  && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5042 	get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5043       /* The C++ memory model naturally applies to byte-aligned fields.
5044 	 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5045 	 BITSIZE are not byte-aligned, there is no need to limit the range
5046 	 we can access.  This can occur with packed structures in Ada.  */
5047       else if (maybe_gt (bitsize, 0)
5048 	       && multiple_p (bitsize, BITS_PER_UNIT)
5049 	       && multiple_p (bitpos, BITS_PER_UNIT))
5050 	{
5051 	  bitregion_start = bitpos;
5052 	  bitregion_end = bitpos + bitsize - 1;
5053 	}
5054 
5055       to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5056 
5057       /* If the field has a mode, we want to access it in the
5058 	 field's mode, not the computed mode.
5059 	 If a MEM has VOIDmode (external with incomplete type),
5060 	 use BLKmode for it instead.  */
5061       if (MEM_P (to_rtx))
5062 	{
5063 	  if (mode1 != VOIDmode)
5064 	    to_rtx = adjust_address (to_rtx, mode1, 0);
5065 	  else if (GET_MODE (to_rtx) == VOIDmode)
5066 	    to_rtx = adjust_address (to_rtx, BLKmode, 0);
5067 	}
5068 
5069       if (offset != 0)
5070 	{
5071 	  machine_mode address_mode;
5072 	  rtx offset_rtx;
5073 
5074 	  if (!MEM_P (to_rtx))
5075 	    {
5076 	      /* We can get constant negative offsets into arrays with broken
5077 		 user code.  Translate this to a trap instead of ICEing.  */
5078 	      gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5079 	      expand_builtin_trap ();
5080 	      to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5081 	    }
5082 
5083 	  offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5084 	  address_mode = get_address_mode (to_rtx);
5085 	  if (GET_MODE (offset_rtx) != address_mode)
5086 	    {
5087 		/* We cannot be sure that the RTL in offset_rtx is valid outside
5088 		   of a memory address context, so force it into a register
5089 		   before attempting to convert it to the desired mode.  */
5090 	      offset_rtx = force_operand (offset_rtx, NULL_RTX);
5091 	      offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5092 	    }
5093 
5094 	  /* If we have an expression in OFFSET_RTX and a non-zero
5095 	     byte offset in BITPOS, adding the byte offset before the
5096 	     OFFSET_RTX results in better intermediate code, which makes
5097 	     later rtl optimization passes perform better.
5098 
5099 	     We prefer intermediate code like this:
5100 
5101 	     r124:DI=r123:DI+0x18
5102 	     [r124:DI]=r121:DI
5103 
5104 	     ... instead of ...
5105 
5106 	     r124:DI=r123:DI+0x10
5107 	     [r124:DI+0x8]=r121:DI
5108 
5109 	     This is only done for aligned data values, as these can
5110 	     be expected to result in single move instructions.  */
5111 	  poly_int64 bytepos;
5112 	  if (mode1 != VOIDmode
5113 	      && maybe_ne (bitpos, 0)
5114 	      && maybe_gt (bitsize, 0)
5115 	      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5116 	      && multiple_p (bitpos, bitsize)
5117 	      && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5118 	      && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5119 	    {
5120 	      to_rtx = adjust_address (to_rtx, mode1, bytepos);
5121 	      bitregion_start = 0;
5122 	      if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5123 		bitregion_end -= bitpos;
5124 	      bitpos = 0;
5125 	    }
5126 
5127 	  to_rtx = offset_address (to_rtx, offset_rtx,
5128 				   highest_pow2_factor_for_target (to,
5129 				   				   offset));
5130 	}
5131 
5132       /* No action is needed if the target is not a memory and the field
5133 	 lies completely outside that target.  This can occur if the source
5134 	 code contains an out-of-bounds access to a small array.  */
5135       if (!MEM_P (to_rtx)
5136 	  && GET_MODE (to_rtx) != BLKmode
5137 	  && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5138 	{
5139 	  expand_normal (from);
5140 	  result = NULL;
5141 	}
5142       /* Handle expand_expr of a complex value returning a CONCAT.  */
5143       else if (GET_CODE (to_rtx) == CONCAT)
5144 	{
5145 	  machine_mode to_mode = GET_MODE (to_rtx);
5146 	  gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5147 	  poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5148 	  unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5149 	  if (TYPE_MODE (TREE_TYPE (from)) == to_mode
5150 	      && known_eq (bitpos, 0)
5151 	      && known_eq (bitsize, mode_bitsize))
5152 	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
5153 	  else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
5154 		   && known_eq (bitsize, inner_bitsize)
5155 		   && (known_eq (bitpos, 0)
5156 		       || known_eq (bitpos, inner_bitsize)))
5157 	    result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5158 				 false, nontemporal, reversep);
5159 	  else if (known_le (bitpos + bitsize, inner_bitsize))
5160 	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5161 				  bitregion_start, bitregion_end,
5162 				  mode1, from, get_alias_set (to),
5163 				  nontemporal, reversep);
5164 	  else if (known_ge (bitpos, inner_bitsize))
5165 	    result = store_field (XEXP (to_rtx, 1), bitsize,
5166 				  bitpos - inner_bitsize,
5167 				  bitregion_start, bitregion_end,
5168 				  mode1, from, get_alias_set (to),
5169 				  nontemporal, reversep);
5170 	  else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5171 	    {
5172 	      result = expand_normal (from);
5173 	      if (GET_CODE (result) == CONCAT)
5174 		{
5175 		  to_mode = GET_MODE_INNER (to_mode);
5176 		  machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5177 		  rtx from_real
5178 		    = simplify_gen_subreg (to_mode, XEXP (result, 0),
5179 					   from_mode, 0);
5180 		  rtx from_imag
5181 		    = simplify_gen_subreg (to_mode, XEXP (result, 1),
5182 					   from_mode, 0);
5183 		  if (!from_real || !from_imag)
5184 		    goto concat_store_slow;
5185 		  emit_move_insn (XEXP (to_rtx, 0), from_real);
5186 		  emit_move_insn (XEXP (to_rtx, 1), from_imag);
5187 		}
5188 	      else
5189 		{
5190 		  rtx from_rtx
5191 		    = simplify_gen_subreg (to_mode, result,
5192 					   TYPE_MODE (TREE_TYPE (from)), 0);
5193 		  if (from_rtx)
5194 		    {
5195 		      emit_move_insn (XEXP (to_rtx, 0),
5196 				      read_complex_part (from_rtx, false));
5197 		      emit_move_insn (XEXP (to_rtx, 1),
5198 				      read_complex_part (from_rtx, true));
5199 		    }
5200 		  else
5201 		    {
5202 		      machine_mode to_mode
5203 			= GET_MODE_INNER (GET_MODE (to_rtx));
5204 		      rtx from_real
5205 			= simplify_gen_subreg (to_mode, result,
5206 					       TYPE_MODE (TREE_TYPE (from)),
5207 					       0);
5208 		      rtx from_imag
5209 			= simplify_gen_subreg (to_mode, result,
5210 					       TYPE_MODE (TREE_TYPE (from)),
5211 					       GET_MODE_SIZE (to_mode));
5212 		      if (!from_real || !from_imag)
5213 			goto concat_store_slow;
5214 		      emit_move_insn (XEXP (to_rtx, 0), from_real);
5215 		      emit_move_insn (XEXP (to_rtx, 1), from_imag);
5216 		    }
5217 		}
5218 	    }
5219 	  else
5220 	    {
5221 	    concat_store_slow:;
5222 	      rtx temp = assign_stack_temp (to_mode,
5223 					    GET_MODE_SIZE (GET_MODE (to_rtx)));
5224 	      write_complex_part (temp, XEXP (to_rtx, 0), false);
5225 	      write_complex_part (temp, XEXP (to_rtx, 1), true);
5226 	      result = store_field (temp, bitsize, bitpos,
5227 				    bitregion_start, bitregion_end,
5228 				    mode1, from, get_alias_set (to),
5229 				    nontemporal, reversep);
5230 	      emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5231 	      emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5232 	    }
5233 	}
5234       /* For calls to functions returning variable length structures, if TO_RTX
5235 	 is not a MEM, go through a MEM because we must not create temporaries
5236 	 of the VLA type.  */
5237       else if (!MEM_P (to_rtx)
5238 	       && TREE_CODE (from) == CALL_EXPR
5239 	       && COMPLETE_TYPE_P (TREE_TYPE (from))
5240 	       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
5241 	{
5242 	  rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5243 					GET_MODE_SIZE (GET_MODE (to_rtx)));
5244 	  result = store_field (temp, bitsize, bitpos, bitregion_start,
5245 				bitregion_end, mode1, from, get_alias_set (to),
5246 				nontemporal, reversep);
5247 	  emit_move_insn (to_rtx, temp);
5248 	}
5249       else
5250 	{
5251 	  if (MEM_P (to_rtx))
5252 	    {
5253 	      /* If the field is at offset zero, we could have been given the
5254 		 DECL_RTL of the parent struct.  Don't munge it.  */
5255 	      to_rtx = shallow_copy_rtx (to_rtx);
5256 	      set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5257 	      if (volatilep)
5258 		MEM_VOLATILE_P (to_rtx) = 1;
5259 	    }
5260 
5261 	  if (optimize_bitfield_assignment_op (bitsize, bitpos,
5262 					       bitregion_start, bitregion_end,
5263 					       mode1, to_rtx, to, from,
5264 					       reversep))
5265 	    result = NULL;
5266 	  else
5267 	    result = store_field (to_rtx, bitsize, bitpos,
5268 				  bitregion_start, bitregion_end,
5269 				  mode1, from, get_alias_set (to),
5270 				  nontemporal, reversep);
5271 	}
5272 
5273       if (result)
5274 	preserve_temp_slots (result);
5275       pop_temp_slots ();
5276       return;
5277     }
5278 
5279   /* If the rhs is a function call and its value is not an aggregate,
5280      call the function before we start to compute the lhs.
5281      This is needed for correct code for cases such as
5282      val = setjmp (buf) on machines where reference to val
5283      requires loading up part of an address in a separate insn.
5284 
5285      Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5286      since it might be a promoted variable where the zero- or sign- extension
5287      needs to be done.  Handling this in the normal way is safe because no
5288      computation is done before the call.  The same is true for SSA names.  */
5289   if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5290       && COMPLETE_TYPE_P (TREE_TYPE (from))
5291       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5292       && ! (((VAR_P (to)
5293 	      || TREE_CODE (to) == PARM_DECL
5294 	      || TREE_CODE (to) == RESULT_DECL)
5295 	     && REG_P (DECL_RTL (to)))
5296 	    || TREE_CODE (to) == SSA_NAME))
5297     {
5298       rtx value;
5299       rtx bounds;
5300 
5301       push_temp_slots ();
5302       value = expand_normal (from);
5303 
5304       /* Split value and bounds to store them separately.  */
5305       chkp_split_slot (value, &value, &bounds);
5306 
5307       if (to_rtx == 0)
5308 	to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5309 
5310       /* Handle calls that return values in multiple non-contiguous locations.
5311 	 The Irix 6 ABI has examples of this.  */
5312       if (GET_CODE (to_rtx) == PARALLEL)
5313 	{
5314 	  if (GET_CODE (value) == PARALLEL)
5315 	    emit_group_move (to_rtx, value);
5316 	  else
5317 	    emit_group_load (to_rtx, value, TREE_TYPE (from),
5318 			     int_size_in_bytes (TREE_TYPE (from)));
5319 	}
5320       else if (GET_CODE (value) == PARALLEL)
5321 	emit_group_store (to_rtx, value, TREE_TYPE (from),
5322 			  int_size_in_bytes (TREE_TYPE (from)));
5323       else if (GET_MODE (to_rtx) == BLKmode)
5324 	{
5325 	  /* Handle calls that return BLKmode values in registers.  */
5326 	  if (REG_P (value))
5327 	    copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5328 	  else
5329 	    emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5330 	}
5331       else
5332 	{
5333 	  if (POINTER_TYPE_P (TREE_TYPE (to)))
5334 	    value = convert_memory_address_addr_space
5335 	      (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5336 	       TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5337 
5338 	  emit_move_insn (to_rtx, value);
5339 	}
5340 
5341       /* Store bounds if required.  */
5342       if (bounds
5343 	  && (BOUNDED_P (to) || chkp_type_has_pointer (TREE_TYPE (to))))
5344 	{
5345 	  gcc_assert (MEM_P (to_rtx));
5346 	  chkp_emit_bounds_store (bounds, value, to_rtx);
5347 	}
5348 
5349       preserve_temp_slots (to_rtx);
5350       pop_temp_slots ();
5351       return;
5352     }
5353 
5354   /* Ordinary treatment.  Expand TO to get a REG or MEM rtx.  */
5355   to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5356 
5357   /* Don't move directly into a return register.  */
5358   if (TREE_CODE (to) == RESULT_DECL
5359       && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5360     {
5361       rtx temp;
5362 
5363       push_temp_slots ();
5364 
5365       /* If the source is itself a return value, it still is in a pseudo at
5366 	 this point so we can move it back to the return register directly.  */
5367       if (REG_P (to_rtx)
5368 	  && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5369 	  && TREE_CODE (from) != CALL_EXPR)
5370 	temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5371       else
5372 	temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5373 
5374       /* Handle calls that return values in multiple non-contiguous locations.
5375 	 The Irix 6 ABI has examples of this.  */
5376       if (GET_CODE (to_rtx) == PARALLEL)
5377 	{
5378 	  if (GET_CODE (temp) == PARALLEL)
5379 	    emit_group_move (to_rtx, temp);
5380 	  else
5381 	    emit_group_load (to_rtx, temp, TREE_TYPE (from),
5382 			     int_size_in_bytes (TREE_TYPE (from)));
5383 	}
5384       else if (temp)
5385 	emit_move_insn (to_rtx, temp);
5386 
5387       preserve_temp_slots (to_rtx);
5388       pop_temp_slots ();
5389       return;
5390     }
5391 
5392   /* In case we are returning the contents of an object which overlaps
5393      the place the value is being stored, use a safe function when copying
5394      a value through a pointer into a structure value return block.  */
5395   if (TREE_CODE (to) == RESULT_DECL
5396       && TREE_CODE (from) == INDIRECT_REF
5397       && ADDR_SPACE_GENERIC_P
5398 	   (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5399       && refs_may_alias_p (to, from)
5400       && cfun->returns_struct
5401       && !cfun->returns_pcc_struct)
5402     {
5403       rtx from_rtx, size;
5404 
5405       push_temp_slots ();
5406       size = expr_size (from);
5407       from_rtx = expand_normal (from);
5408 
5409       emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5410 
5411       preserve_temp_slots (to_rtx);
5412       pop_temp_slots ();
5413       return;
5414     }
5415 
5416   /* Compute FROM and store the value in the rtx we got.  */
5417 
5418   push_temp_slots ();
5419   result = store_expr_with_bounds (from, to_rtx, 0, nontemporal, false, to);
5420   preserve_temp_slots (result);
5421   pop_temp_slots ();
5422   return;
5423 }
5424 
5425 /* Emits nontemporal store insn that moves FROM to TO.  Returns true if this
5426    succeeded, false otherwise.  */
5427 
5428 bool
5429 emit_storent_insn (rtx to, rtx from)
5430 {
5431   struct expand_operand ops[2];
5432   machine_mode mode = GET_MODE (to);
5433   enum insn_code code = optab_handler (storent_optab, mode);
5434 
5435   if (code == CODE_FOR_nothing)
5436     return false;
5437 
5438   create_fixed_operand (&ops[0], to);
5439   create_input_operand (&ops[1], from, mode);
5440   return maybe_expand_insn (code, 2, ops);
5441 }
5442 
5443 /* Generate code for computing expression EXP,
5444    and storing the value into TARGET.
5445 
5446    If the mode is BLKmode then we may return TARGET itself.
5447    It turns out that in BLKmode it doesn't cause a problem,
5448    because C has no operators that could combine two different
5449    assignments into the same BLKmode object with different values
5450    with no sequence point.  Will other languages need this to
5451    be more thorough?
5452 
5453    If CALL_PARAM_P is nonzero, this is a store into a call param on the
5454    stack, and block moves may need to be treated specially.
5455 
5456    If NONTEMPORAL is true, try using a nontemporal store instruction.
5457 
5458    If REVERSE is true, the store is to be done in reverse order.
5459 
5460    If BTARGET is not NULL then computed bounds of EXP are
5461    associated with BTARGET.  */
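
/* Rough illustration of the STRING_CST fast path handled below (exact
   behavior depends on STORE_MAX_PIECES and on can_store_by_pieces):

     char buf[100] = "hi";

   may be expanded by storing the string bytes with store_by_pieces and then
   clearing the remaining bytes of BUF with clear_storage, rather than
   emitting a block copy from a 100-byte constant.  */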
5462 
5463 rtx
5464 store_expr_with_bounds (tree exp, rtx target, int call_param_p,
5465 			bool nontemporal, bool reverse, tree btarget)
5466 {
5467   rtx temp;
5468   rtx alt_rtl = NULL_RTX;
5469   location_t loc = curr_insn_location ();
5470 
5471   if (VOID_TYPE_P (TREE_TYPE (exp)))
5472     {
5473       /* C++ can generate ?: expressions with a throw expression in one
5474 	 branch and an rvalue in the other. Here, we resolve attempts to
5475 	 store the throw expression's nonexistent result.  */
5476       gcc_assert (!call_param_p);
5477       expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5478       return NULL_RTX;
5479     }
5480   if (TREE_CODE (exp) == COMPOUND_EXPR)
5481     {
5482       /* Perform first part of compound expression, then assign from second
5483 	 part.  */
5484       expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5485 		   call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5486       return store_expr_with_bounds (TREE_OPERAND (exp, 1), target,
5487 				     call_param_p, nontemporal, reverse,
5488 				     btarget);
5489     }
5490   else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5491     {
5492       /* For conditional expression, get safe form of the target.  Then
5493 	 test the condition, doing the appropriate assignment on either
5494 	 side.  This avoids the creation of unnecessary temporaries.
5495 	 For non-BLKmode, it is more efficient not to do this.  */
5496 
5497       rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5498 
5499       do_pending_stack_adjust ();
5500       NO_DEFER_POP;
5501       jumpifnot (TREE_OPERAND (exp, 0), lab1,
5502 		 profile_probability::uninitialized ());
5503       store_expr_with_bounds (TREE_OPERAND (exp, 1), target, call_param_p,
5504 			      nontemporal, reverse, btarget);
5505       emit_jump_insn (targetm.gen_jump (lab2));
5506       emit_barrier ();
5507       emit_label (lab1);
5508       store_expr_with_bounds (TREE_OPERAND (exp, 2), target, call_param_p,
5509 			      nontemporal, reverse, btarget);
5510       emit_label (lab2);
5511       OK_DEFER_POP;
5512 
5513       return NULL_RTX;
5514     }
5515   else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5516     /* If this is a scalar in a register that is stored in a wider mode
5517        than the declared mode, compute the result into its declared mode
5518        and then convert to the wider mode.  Our value is the computed
5519        expression.  */
5520     {
5521       rtx inner_target = 0;
5522       scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5523       scalar_int_mode inner_mode = subreg_promoted_mode (target);
5524 
5525       /* We can do the conversion inside EXP, which will often result
5526 	 in some optimizations.  Do the conversion in two steps: first
5527 	 change the signedness, if needed, then the extend.  But don't
5528 	 do this if the type of EXP is a subtype of something else
5529 	 since then the conversion might involve more than just
5530 	 converting modes.  */
5531       if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5532 	  && TREE_TYPE (TREE_TYPE (exp)) == 0
5533 	  && GET_MODE_PRECISION (outer_mode)
5534 	     == TYPE_PRECISION (TREE_TYPE (exp)))
5535 	{
5536 	  if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5537 					  TYPE_UNSIGNED (TREE_TYPE (exp))))
5538 	    {
5539 	      /* Some types, e.g. Fortran's logical*4, won't have a signed
5540 		 version, so use the mode instead.  */
5541 	      tree ntype
5542 		= (signed_or_unsigned_type_for
5543 		   (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5544 	      if (ntype == NULL)
5545 		ntype = lang_hooks.types.type_for_mode
5546 		  (TYPE_MODE (TREE_TYPE (exp)),
5547 		   SUBREG_PROMOTED_SIGN (target));
5548 
5549 	      exp = fold_convert_loc (loc, ntype, exp);
5550 	    }
5551 
5552 	  exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5553 				  (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5554 				  exp);
5555 
5556 	  inner_target = SUBREG_REG (target);
5557 	}
5558 
5559       temp = expand_expr (exp, inner_target, VOIDmode,
5560 			  call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5561 
5562       /* Handle bounds returned by call.  */
5563       if (TREE_CODE (exp) == CALL_EXPR)
5564 	{
5565 	  rtx bounds;
5566 	  chkp_split_slot (temp, &temp, &bounds);
5567 	  if (bounds && btarget)
5568 	    {
5569 	      gcc_assert (TREE_CODE (btarget) == SSA_NAME);
5570 	      rtx tmp = targetm.calls.load_returned_bounds (bounds);
5571 	      chkp_set_rtl_bounds (btarget, tmp);
5572 	    }
5573 	}
5574 
5575       /* If TEMP is a VOIDmode constant, use convert_modes to make
5576 	 sure that we properly convert it.  */
5577       if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5578 	{
5579 	  temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5580 				temp, SUBREG_PROMOTED_SIGN (target));
5581 	  temp = convert_modes (inner_mode, outer_mode, temp,
5582 				SUBREG_PROMOTED_SIGN (target));
5583 	}
5584 
5585       convert_move (SUBREG_REG (target), temp,
5586 		    SUBREG_PROMOTED_SIGN (target));
5587 
5588       return NULL_RTX;
5589     }
5590   else if ((TREE_CODE (exp) == STRING_CST
5591 	    || (TREE_CODE (exp) == MEM_REF
5592 		&& TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
5593 		&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5594 		   == STRING_CST
5595 		&& integer_zerop (TREE_OPERAND (exp, 1))))
5596 	   && !nontemporal && !call_param_p
5597 	   && MEM_P (target))
5598     {
5599       /* Optimize initialization of an array with a STRING_CST.  */
5600       HOST_WIDE_INT exp_len, str_copy_len;
5601       rtx dest_mem;
5602       tree str = TREE_CODE (exp) == STRING_CST
5603 		 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5604 
5605       exp_len = int_expr_size (exp);
5606       if (exp_len <= 0)
5607 	goto normal_expr;
5608 
5609       if (TREE_STRING_LENGTH (str) <= 0)
5610 	goto normal_expr;
5611 
5612       str_copy_len = strlen (TREE_STRING_POINTER (str));
5613       if (str_copy_len < TREE_STRING_LENGTH (str) - 1)
5614 	goto normal_expr;
5615 
5616       str_copy_len = TREE_STRING_LENGTH (str);
5617       if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
5618 	  && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
5619 	{
5620 	  str_copy_len += STORE_MAX_PIECES - 1;
5621 	  str_copy_len &= ~(STORE_MAX_PIECES - 1);
5622 	}
5623       str_copy_len = MIN (str_copy_len, exp_len);
5624       if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
5625 				CONST_CAST (char *, TREE_STRING_POINTER (str)),
5626 				MEM_ALIGN (target), false))
5627 	goto normal_expr;
5628 
5629       dest_mem = target;
5630 
5631       dest_mem = store_by_pieces (dest_mem,
5632 				  str_copy_len, builtin_strncpy_read_str,
5633 				  CONST_CAST (char *,
5634 					      TREE_STRING_POINTER (str)),
5635 				  MEM_ALIGN (target), false,
5636 				  exp_len > str_copy_len ? 1 : 0);
5637       if (exp_len > str_copy_len)
5638 	clear_storage (adjust_address (dest_mem, BLKmode, 0),
5639 		       GEN_INT (exp_len - str_copy_len),
5640 		       BLOCK_OP_NORMAL);
5641       return NULL_RTX;
5642     }
5643   else
5644     {
5645       rtx tmp_target;
5646 
5647   normal_expr:
5648       /* If we want to use a nontemporal or a reverse order store, force the
5649 	 value into a register first.  */
5650       tmp_target = nontemporal || reverse ? NULL_RTX : target;
5651       temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
5652 			       (call_param_p
5653 				? EXPAND_STACK_PARM : EXPAND_NORMAL),
5654 			       &alt_rtl, false);
5655 
5656       /* Handle bounds returned by call.  */
5657       if (TREE_CODE (exp) == CALL_EXPR)
5658 	{
5659 	  rtx bounds;
5660 	  chkp_split_slot (temp, &temp, &bounds);
5661 	  if (bounds && btarget)
5662 	    {
5663 	      gcc_assert (TREE_CODE (btarget) == SSA_NAME);
5664 	      rtx tmp = targetm.calls.load_returned_bounds (bounds);
5665 	      chkp_set_rtl_bounds (btarget, tmp);
5666 	    }
5667 	}
5668     }
5669 
5670   /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5671      the same as that of TARGET, adjust the constant.  This is needed, for
5672      example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5673      only a word-sized value.  */
5674   if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
5675       && TREE_CODE (exp) != ERROR_MARK
5676       && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
5677     {
5678       if (GET_MODE_CLASS (GET_MODE (target))
5679 	  != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
5680 	  && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
5681 		       GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
5682 	{
5683 	  rtx t = simplify_gen_subreg (GET_MODE (target), temp,
5684 				       TYPE_MODE (TREE_TYPE (exp)), 0);
5685 	  if (t)
5686 	    temp = t;
5687 	}
5688       if (GET_MODE (temp) == VOIDmode)
5689 	temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
5690 			      temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5691     }
5692 
5693   /* If value was not generated in the target, store it there.
5694      Convert the value to TARGET's type first if necessary and emit the
5695      pending incrementations that have been queued when expanding EXP.
5696      pending increments that have been queued when expanding EXP.
5697      effectively disable the POST_INC optimization later.
5698 
5699      If TEMP and TARGET compare equal according to rtx_equal_p, but
5700      one or both of them are volatile memory refs, we have to distinguish
5701      two cases:
5702      - expand_expr has used TARGET.  In this case, we must not generate
5703      - expand_expr has used TARGET.  In this case, we must not generate
5704        another copy.  This can be detected by TEMP being equal to TARGET
5705        according to ==.
5706      - expand_expr has not used TARGET; that means that the source just
5707        happens to have the same RTX form.  Since temp will have been created
5708        by expand_expr, it will compare unequal according to ==.
5709        of volatile memory references.  */
5710 
5711   if ((! rtx_equal_p (temp, target)
5712        || (temp != target && (side_effects_p (temp)
5713 			      || side_effects_p (target))))
5714       && TREE_CODE (exp) != ERROR_MARK
5715       /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5716 	 but TARGET is not valid memory reference, TEMP will differ
5717 	 from TARGET although it is really the same location.  */
5718       && !(alt_rtl
5719 	   && rtx_equal_p (alt_rtl, target)
5720 	   && !side_effects_p (alt_rtl)
5721 	   && !side_effects_p (target))
5722       /* If there's nothing to copy, don't bother.  Don't call
5723 	 expr_size unless necessary, because some front-ends' (C++)
5724 	 expr_size hook must not be given objects that are not
5725 	 supposed to be bit-copied or bit-initialized.  */
5726       && expr_size (exp) != const0_rtx)
5727     {
5728       if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
5729 	{
5730 	  if (GET_MODE (target) == BLKmode)
5731 	    {
5732 	      /* Handle calls that return BLKmode values in registers.  */
5733 	      if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
5734 		copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
5735 	      else
5736 		store_bit_field (target,
5737 				 INTVAL (expr_size (exp)) * BITS_PER_UNIT,
5738 				 0, 0, 0, GET_MODE (temp), temp, reverse);
5739 	    }
5740 	  else
5741 	    convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5742 	}
5743 
5744       else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
5745 	{
5746 	  /* Handle copying a string constant into an array.  The string
5747 	     constant may be shorter than the array.  So copy just the string's
5748 	     actual length, and clear the rest.  First get the size of the data
5749 	     type of the string, which is actually the size of the target.  */
5750 	  rtx size = expr_size (exp);
5751 
5752 	  if (CONST_INT_P (size)
5753 	      && INTVAL (size) < TREE_STRING_LENGTH (exp))
5754 	    emit_block_move (target, temp, size,
5755 			     (call_param_p
5756 			      ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5757 	  else
5758 	    {
5759 	      machine_mode pointer_mode
5760 		= targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
5761 	      machine_mode address_mode = get_address_mode (target);
5762 
5763 	      /* Compute the size of the data to copy from the string.  */
5764 	      tree copy_size
5765 		= size_binop_loc (loc, MIN_EXPR,
5766 				  make_tree (sizetype, size),
5767 				  size_int (TREE_STRING_LENGTH (exp)));
5768 	      rtx copy_size_rtx
5769 		= expand_expr (copy_size, NULL_RTX, VOIDmode,
5770 			       (call_param_p
5771 				? EXPAND_STACK_PARM : EXPAND_NORMAL));
5772 	      rtx_code_label *label = 0;
5773 
5774 	      /* Copy that much.  */
5775 	      copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
5776 					       TYPE_UNSIGNED (sizetype));
5777 	      emit_block_move (target, temp, copy_size_rtx,
5778 			       (call_param_p
5779 				? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5780 
5781 	      /* Figure out how much is left in TARGET that we have to clear.
5782 		 Do all calculations in pointer_mode.  */
5783 	      if (CONST_INT_P (copy_size_rtx))
5784 		{
5785 		  size = plus_constant (address_mode, size,
5786 					-INTVAL (copy_size_rtx));
5787 		  target = adjust_address (target, BLKmode,
5788 					   INTVAL (copy_size_rtx));
5789 		}
5790 	      else
5791 		{
5792 		  size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
5793 				       copy_size_rtx, NULL_RTX, 0,
5794 				       OPTAB_LIB_WIDEN);
5795 
5796 		  if (GET_MODE (copy_size_rtx) != address_mode)
5797 		    copy_size_rtx = convert_to_mode (address_mode,
5798 						     copy_size_rtx,
5799 						     TYPE_UNSIGNED (sizetype));
5800 
5801 		  target = offset_address (target, copy_size_rtx,
5802 					   highest_pow2_factor (copy_size));
5803 		  label = gen_label_rtx ();
5804 		  emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
5805 					   GET_MODE (size), 0, label);
5806 		}
5807 
5808 	      if (size != const0_rtx)
5809 		clear_storage (target, size, BLOCK_OP_NORMAL);
5810 
5811 	      if (label)
5812 		emit_label (label);
5813 	    }
5814 	}
5815       /* Handle calls that return values in multiple non-contiguous locations.
5816 	 The Irix 6 ABI has examples of this.  */
5817       else if (GET_CODE (target) == PARALLEL)
5818 	{
5819 	  if (GET_CODE (temp) == PARALLEL)
5820 	    emit_group_move (target, temp);
5821 	  else
5822 	    emit_group_load (target, temp, TREE_TYPE (exp),
5823 			     int_size_in_bytes (TREE_TYPE (exp)));
5824 	}
5825       else if (GET_CODE (temp) == PARALLEL)
5826 	emit_group_store (target, temp, TREE_TYPE (exp),
5827 			  int_size_in_bytes (TREE_TYPE (exp)));
5828       else if (GET_MODE (temp) == BLKmode)
5829 	emit_block_move (target, temp, expr_size (exp),
5830 			 (call_param_p
5831 			  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5832       /* If we emit a nontemporal store, there is nothing else to do.  */
5833       else if (nontemporal && emit_storent_insn (target, temp))
5834 	;
5835       else
5836 	{
5837 	  if (reverse)
5838 	    temp = flip_storage_order (GET_MODE (target), temp);
5839 	  temp = force_operand (temp, target);
5840 	  if (temp != target)
5841 	    emit_move_insn (target, temp);
5842 	}
5843     }
5844 
5845   return NULL_RTX;
5846 }
5847 
5848 /* Same as store_expr_with_bounds but ignoring bounds of EXP.  */
5849 rtx
5850 store_expr (tree exp, rtx target, int call_param_p, bool nontemporal,
5851 	    bool reverse)
5852 {
5853   return store_expr_with_bounds (exp, target, call_param_p, nontemporal,
5854 				 reverse, NULL);
5855 }
5856 
5857 /* Return true if field F of structure TYPE is a flexible array.  */
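
/* For example, in

     struct msg { int len; char data[]; };

   DATA is a flexible array member: it is the last field, its array type has
   a zero lower bound and no upper bound, and the enclosing struct still has
   a known constant size.  */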
5858 
5859 static bool
5860 flexible_array_member_p (const_tree f, const_tree type)
5861 {
5862   const_tree tf;
5863 
5864   tf = TREE_TYPE (f);
5865   return (DECL_CHAIN (f) == NULL
5866 	  && TREE_CODE (tf) == ARRAY_TYPE
5867 	  && TYPE_DOMAIN (tf)
5868 	  && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
5869 	  && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
5870 	  && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
5871 	  && int_size_in_bytes (type) >= 0);
5872 }
5873 
5874 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
5875    must have in order for it to completely initialize a value of type TYPE.
5876    Return -1 if the number isn't known.
5877 
5878    If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE.  */
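
/* For instance, for

     struct t { int a; int b[3]; };

   a complete constructor needs 2 top-level elements (FOR_CTOR_P), while the
   estimated number of scalars in the type (!FOR_CTOR_P) is 4: A plus the
   three elements of B.  */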
5879 
5880 static HOST_WIDE_INT
5881 count_type_elements (const_tree type, bool for_ctor_p)
5882 {
5883   switch (TREE_CODE (type))
5884     {
5885     case ARRAY_TYPE:
5886       {
5887 	tree nelts;
5888 
5889 	nelts = array_type_nelts (type);
5890 	if (nelts && tree_fits_uhwi_p (nelts))
5891 	  {
5892 	    unsigned HOST_WIDE_INT n;
5893 
5894 	    n = tree_to_uhwi (nelts) + 1;
5895 	    if (n == 0 || for_ctor_p)
5896 	      return n;
5897 	    else
5898 	      return n * count_type_elements (TREE_TYPE (type), false);
5899 	  }
5900 	return for_ctor_p ? -1 : 1;
5901       }
5902 
5903     case RECORD_TYPE:
5904       {
5905 	unsigned HOST_WIDE_INT n;
5906 	tree f;
5907 
5908 	n = 0;
5909 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5910 	  if (TREE_CODE (f) == FIELD_DECL)
5911 	    {
5912 	      if (!for_ctor_p)
5913 		n += count_type_elements (TREE_TYPE (f), false);
5914 	      else if (!flexible_array_member_p (f, type))
5915 		/* Don't count flexible arrays, which are not supposed
5916 		   to be initialized.  */
5917 		n += 1;
5918 	    }
5919 
5920 	return n;
5921       }
5922 
5923     case UNION_TYPE:
5924     case QUAL_UNION_TYPE:
5925       {
5926 	tree f;
5927 	HOST_WIDE_INT n, m;
5928 
5929 	gcc_assert (!for_ctor_p);
5930 	/* Estimate the number of scalars in each field and pick the
5931 	   maximum.  Other estimates would do instead; the idea is simply
5932 	   to make sure that the estimate is not sensitive to the ordering
5933 	   of the fields.  */
5934 	n = 1;
5935 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5936 	  if (TREE_CODE (f) == FIELD_DECL)
5937 	    {
5938 	      m = count_type_elements (TREE_TYPE (f), false);
5939 	      /* If the field doesn't span the whole union, add an extra
5940 		 scalar for the rest.  */
5941 	      if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
5942 				    TYPE_SIZE (type)) != 1)
5943 		m++;
5944 	      if (n < m)
5945 		n = m;
5946 	    }
5947 	return n;
5948       }
5949 
5950     case COMPLEX_TYPE:
5951       return 2;
5952 
5953     case VECTOR_TYPE:
5954       {
5955 	unsigned HOST_WIDE_INT nelts;
5956 	if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
5957 	  return nelts;
5958 	else
5959 	  return -1;
5960       }
5961 
5962     case INTEGER_TYPE:
5963     case REAL_TYPE:
5964     case FIXED_POINT_TYPE:
5965     case ENUMERAL_TYPE:
5966     case BOOLEAN_TYPE:
5967     case POINTER_TYPE:
5968     case OFFSET_TYPE:
5969     case REFERENCE_TYPE:
5970     case NULLPTR_TYPE:
5971       return 1;
5972 
5973     case ERROR_MARK:
5974       return 0;
5975 
5976     case VOID_TYPE:
5977     case METHOD_TYPE:
5978     case FUNCTION_TYPE:
5979     case LANG_TYPE:
5980     default:
5981       gcc_unreachable ();
5982     }
5983 }
5984 
5985 /* Helper for categorize_ctor_elements.  Identical interface.  */
5986 
5987 static bool
5988 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
5989 			    HOST_WIDE_INT *p_unique_nz_elts,
5990 			    HOST_WIDE_INT *p_init_elts, bool *p_complete)
5991 {
5992   unsigned HOST_WIDE_INT idx;
5993   HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
5994   tree value, purpose, elt_type;
5995 
5996   /* Whether CTOR is a valid constant initializer, in accordance with what
5997      initializer_constant_valid_p does.  If inferred from the constructor
5998      elements, true until proven otherwise.  */
5999   bool const_from_elts_p = constructor_static_from_elts_p (ctor);
6000   bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
6001 
6002   nz_elts = 0;
6003   unique_nz_elts = 0;
6004   init_elts = 0;
6005   num_fields = 0;
6006   elt_type = NULL_TREE;
6007 
6008   FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
6009     {
6010       HOST_WIDE_INT mult = 1;
6011 
6012       if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
6013 	{
6014 	  tree lo_index = TREE_OPERAND (purpose, 0);
6015 	  tree hi_index = TREE_OPERAND (purpose, 1);
6016 
6017 	  if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
6018 	    mult = (tree_to_uhwi (hi_index)
6019 		    - tree_to_uhwi (lo_index) + 1);
6020 	}
6021       num_fields += mult;
6022       elt_type = TREE_TYPE (value);
6023 
6024       switch (TREE_CODE (value))
6025 	{
6026 	case CONSTRUCTOR:
6027 	  {
6028 	    HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
6029 
6030 	    bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
6031 							   &ic, p_complete);
6032 
6033 	    nz_elts += mult * nz;
6034 	    unique_nz_elts += unz;
6035  	    init_elts += mult * ic;
6036 
6037 	    if (const_from_elts_p && const_p)
6038 	      const_p = const_elt_p;
6039 	  }
6040 	  break;
6041 
6042 	case INTEGER_CST:
6043 	case REAL_CST:
6044 	case FIXED_CST:
6045 	  if (!initializer_zerop (value))
6046 	    {
6047 	      nz_elts += mult;
6048 	      unique_nz_elts++;
6049 	    }
6050 	  init_elts += mult;
6051 	  break;
6052 
6053 	case STRING_CST:
6054 	  nz_elts += mult * TREE_STRING_LENGTH (value);
6055 	  unique_nz_elts += TREE_STRING_LENGTH (value);
6056 	  init_elts += mult * TREE_STRING_LENGTH (value);
6057 	  break;
6058 
6059 	case COMPLEX_CST:
6060 	  if (!initializer_zerop (TREE_REALPART (value)))
6061 	    {
6062 	      nz_elts += mult;
6063 	      unique_nz_elts++;
6064 	    }
6065 	  if (!initializer_zerop (TREE_IMAGPART (value)))
6066 	    {
6067 	      nz_elts += mult;
6068 	      unique_nz_elts++;
6069 	    }
6070 	  init_elts += 2 * mult;
6071 	  break;
6072 
6073 	case VECTOR_CST:
6074 	  {
6075 	    /* We can only construct constant-length vectors using
6076 	       CONSTRUCTOR.  */
6077 	    unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6078 	    for (unsigned int i = 0; i < nunits; ++i)
6079 	      {
6080 		tree v = VECTOR_CST_ELT (value, i);
6081 		if (!initializer_zerop (v))
6082 		  {
6083 		    nz_elts += mult;
6084 		    unique_nz_elts++;
6085 		  }
6086 		init_elts += mult;
6087 	      }
6088 	  }
6089 	  break;
6090 
6091 	default:
6092 	  {
6093 	    HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6094 	    nz_elts += mult * tc;
6095 	    unique_nz_elts += tc;
6096 	    init_elts += mult * tc;
6097 
6098 	    if (const_from_elts_p && const_p)
6099 	      const_p
6100 		= initializer_constant_valid_p (value,
6101 						elt_type,
6102 						TYPE_REVERSE_STORAGE_ORDER
6103 						(TREE_TYPE (ctor)))
6104 		  != NULL_TREE;
6105 	  }
6106 	  break;
6107 	}
6108     }
6109 
6110   if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6111 						num_fields, elt_type))
6112     *p_complete = false;
6113 
6114   *p_nz_elts += nz_elts;
6115   *p_unique_nz_elts += unique_nz_elts;
6116   *p_init_elts += init_elts;
6117 
6118   return const_p;
6119 }
6120 
6121 /* Examine CTOR to discover:
6122    * how many scalar fields are set to nonzero values,
6123      and place the count in *P_NZ_ELTS;
6124    * the same, but counting RANGE_EXPRs as a multiplier of 1 instead of
6125      high - low + 1 (this can be useful for callers to identify ctors
6126      that could be cheaply initialized with (perhaps nested) loops
6127      rather than copied from huge read-only data),
6128      and place the count in *P_UNIQUE_NZ_ELTS;
6129    * how many scalar fields in total are in CTOR,
6130      and place the count in *P_INIT_ELTS.
6131    * whether the constructor is complete -- in the sense that every
6132      meaningful byte is explicitly given a value --
6133      and place it in *P_COMPLETE.
6134 
6135    Return whether or not CTOR is a valid static constant initializer, the same
6136    as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0".  */
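/* Illustrative example (editorial note, not part of the original source;
   it assumes the front end keeps all three designated entries):

     int a[8] = { [0 ... 3] = 7, [4] = 0, [5] = 9 };

   The [0 ... 3] entry carries a RANGE_EXPR index, so the counts come out
   roughly as *P_NZ_ELTS == 5 (four 7s plus the 9), *P_UNIQUE_NZ_ELTS == 2
   (the 7 and the 9 each counted once), *P_INIT_ELTS == 6, and
   *P_COMPLETE == false because elements 6 and 7 are never given a
   value.  */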
6137 
6138 bool
6139 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6140 			  HOST_WIDE_INT *p_unique_nz_elts,
6141 			  HOST_WIDE_INT *p_init_elts, bool *p_complete)
6142 {
6143   *p_nz_elts = 0;
6144   *p_unique_nz_elts = 0;
6145   *p_init_elts = 0;
6146   *p_complete = true;
6147 
6148   return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
6149 				     p_init_elts, p_complete);
6150 }
6151 
6152 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6153    of which had type LAST_TYPE.  Each element was itself a complete
6154    initializer, in the sense that every meaningful byte was explicitly
6155    given a value.  Return true if the same is true for the constructor
6156    as a whole.  */
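/* Illustrative example (editorial note, not part of the original source):
   for

     union u { char c; int i; };

   an initializer that only sets C is not complete at this level, since
   TYPE_SIZE of the CHAR member differs from TYPE_SIZE of the union,
   whereas one that sets I is, assuming INT spans the whole union.  */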
6157 
6158 bool
6159 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6160 			  const_tree last_type)
6161 {
6162   if (TREE_CODE (type) == UNION_TYPE
6163       || TREE_CODE (type) == QUAL_UNION_TYPE)
6164     {
6165       if (num_elts == 0)
6166 	return false;
6167 
6168       gcc_assert (num_elts == 1 && last_type);
6169 
6170       /* ??? We could look at each element of the union, and find the
6171 	 largest element.  Which would avoid comparing the size of the
6172 	 initialized element against any tail padding in the union.
6173 	 Doesn't seem worth the effort...  */
6174       return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6175     }
6176 
6177   return count_type_elements (type, true) == num_elts;
6178 }
6179 
6180 /* Return 1 if EXP contains mostly (3/4) zeros.  */
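/* Illustrative note (editorial, not part of the original source): with
   the counts gathered by categorize_ctor_elements, a complete ctor with
   init_elts == 8 is "mostly zeros" only when fewer than 8 / 4 == 2 of its
   elements are nonzero; an incomplete ctor is always treated as mostly
   zeros, which prompts callers such as store_constructor to clear the
   whole object first.  */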
6181 
6182 static int
6183 mostly_zeros_p (const_tree exp)
6184 {
6185   if (TREE_CODE (exp) == CONSTRUCTOR)
6186     {
6187       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6188       bool complete_p;
6189 
6190       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6191 				&complete_p);
6192       return !complete_p || nz_elts < init_elts / 4;
6193     }
6194 
6195   return initializer_zerop (exp);
6196 }
6197 
6198 /* Return 1 if EXP contains all zeros.  */
6199 
6200 static int
6201 all_zeros_p (const_tree exp)
6202 {
6203   if (TREE_CODE (exp) == CONSTRUCTOR)
6204     {
6205       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6206       bool complete_p;
6207 
6208       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6209 				&complete_p);
6210       return nz_elts == 0;
6211     }
6212 
6213   return initializer_zerop (exp);
6214 }
6215 
6216 /* Helper function for store_constructor.
6217    TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6218    CLEARED is as for store_constructor.
6219    ALIAS_SET is the alias set to use for any stores.
6220    If REVERSE is true, the store is to be done in reverse order.
6221 
6222    This provides a recursive shortcut back to store_constructor when it isn't
6223    necessary to go through store_field.  This is so that we can pass through
6224    the cleared field to let store_constructor know that we may not have to
6225    clear a substructure if the outer structure has already been cleared.  */
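/* Illustrative example (editorial note, not part of the original source):
   for a nested initializer such as

     struct inner { int x, y; };
     struct outer { struct inner i; int z; } o = { { 3, 0 }, 0 };

   the outer store_constructor may clear the whole of O first; passing
   CLEARED down through this helper lets the recursive store_constructor
   for O.I skip the already-zero Y member instead of clearing it again.  */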
6226 
6227 static void
6228 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6229 			 poly_uint64 bitregion_start,
6230 			 poly_uint64 bitregion_end,
6231 			 machine_mode mode,
6232 			 tree exp, int cleared,
6233 			 alias_set_type alias_set, bool reverse)
6234 {
6235   poly_int64 bytepos;
6236   poly_uint64 bytesize;
6237   if (TREE_CODE (exp) == CONSTRUCTOR
6238       /* We can only call store_constructor recursively if the size and
6239 	 bit position are on a byte boundary.  */
6240       && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6241       && maybe_ne (bitsize, 0U)
6242       && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6243       /* If we have a nonzero bitpos for a register target, then we just
6244 	 let store_field do the bitfield handling.  This is unlikely to
6245 	 generate unnecessary clear instructions anyways.  */
6246       && (known_eq (bitpos, 0) || MEM_P (target)))
6247     {
6248       if (MEM_P (target))
6249 	{
6250 	  machine_mode target_mode = GET_MODE (target);
6251 	  if (target_mode != BLKmode
6252 	      && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6253 	    target_mode = BLKmode;
6254 	  target = adjust_address (target, target_mode, bytepos);
6255 	}
6256 
6257 
6258       /* Update the alias set, if required.  */
6259       if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6260 	  && MEM_ALIAS_SET (target) != 0)
6261 	{
6262 	  target = copy_rtx (target);
6263 	  set_mem_alias_set (target, alias_set);
6264 	}
6265 
6266       store_constructor (exp, target, cleared, bytesize, reverse);
6267     }
6268   else
6269     store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6270 		 exp, alias_set, false, reverse);
6271 }
6272 
6273 
6274 /* Returns the number of FIELD_DECLs in TYPE.  */
6275 
6276 static int
6277 fields_length (const_tree type)
6278 {
6279   tree t = TYPE_FIELDS (type);
6280   int count = 0;
6281 
6282   for (; t; t = DECL_CHAIN (t))
6283     if (TREE_CODE (t) == FIELD_DECL)
6284       ++count;
6285 
6286   return count;
6287 }
6288 
6289 
6290 /* Store the value of constructor EXP into the rtx TARGET.
6291    TARGET is either a REG or a MEM; we know it cannot conflict, since
6292    safe_from_p has been called.
6293    CLEARED is true if TARGET is known to have been zero'd.
6294    SIZE is the number of bytes of TARGET we are allowed to modify: this
6295    may not be the same as the size of EXP if we are assigning to a field
6296    which has been packed to exclude padding bits.
6297    If REVERSE is true, the store is to be done in reverse order.  */
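/* Illustrative example (editorial note, not part of the original source;
   the exact code depends on the target and on whether V is in memory or
   in registers):

     struct s { int a, b, c, d, e, f, g, h; } v = { 0, 0, 0, 0, 0, 0, 0, 9 };

   Such a mostly-zero constructor is typically handled by clearing V as a
   block and then emitting a single store of 9 into V.H, with the zero
   elements skipped, rather than by eight separate member stores.  */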
6298 
6299 static void
6300 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6301 		   bool reverse)
6302 {
6303   tree type = TREE_TYPE (exp);
6304   HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6305   poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6306 
6307   switch (TREE_CODE (type))
6308     {
6309     case RECORD_TYPE:
6310     case UNION_TYPE:
6311     case QUAL_UNION_TYPE:
6312       {
6313 	unsigned HOST_WIDE_INT idx;
6314 	tree field, value;
6315 
6316 	/* The storage order is specified for every aggregate type.  */
6317 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6318 
6319 	/* If size is zero or the target is already cleared, do nothing.  */
6320 	if (known_eq (size, 0) || cleared)
6321 	  cleared = 1;
6322 	/* We either clear the aggregate or indicate the value is dead.  */
6323 	else if ((TREE_CODE (type) == UNION_TYPE
6324 		  || TREE_CODE (type) == QUAL_UNION_TYPE)
6325 		 && ! CONSTRUCTOR_ELTS (exp))
6326 	  /* If the constructor is empty, clear the union.  */
6327 	  {
6328 	    clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6329 	    cleared = 1;
6330 	  }
6331 
6332 	/* If we are building a static constructor into a register,
6333 	   set the initial value as zero so we can fold the value into
6334 	   a constant.  But if more than one register is involved,
6335 	   this probably loses.  */
6336 	else if (REG_P (target) && TREE_STATIC (exp)
6337 		 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6338 			      REGMODE_NATURAL_SIZE (GET_MODE (target))))
6339 	  {
6340 	    emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6341 	    cleared = 1;
6342 	  }
6343 
6344         /* If the constructor has fewer fields than the structure or
6345 	   if we are initializing the structure to mostly zeros, clear
6346 	   the whole structure first.  Don't do this if TARGET is a
6347 	   register whose mode size isn't equal to SIZE since
6348 	   clear_storage can't handle this case.  */
6349 	else if (known_size_p (size)
6350 		 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6351 		     || mostly_zeros_p (exp))
6352 		 && (!REG_P (target)
6353 		     || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6354 	  {
6355 	    clear_storage (target, gen_int_mode (size, Pmode),
6356 			   BLOCK_OP_NORMAL);
6357 	    cleared = 1;
6358 	  }
6359 
6360 	if (REG_P (target) && !cleared)
6361 	  emit_clobber (target);
6362 
6363 	/* Store each element of the constructor into the
6364 	   corresponding field of TARGET.  */
6365 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6366 	  {
6367 	    machine_mode mode;
6368 	    HOST_WIDE_INT bitsize;
6369 	    HOST_WIDE_INT bitpos = 0;
6370 	    tree offset;
6371 	    rtx to_rtx = target;
6372 
6373 	    /* Just ignore missing fields.  We cleared the whole
6374 	       structure, above, if any fields are missing.  */
6375 	    if (field == 0)
6376 	      continue;
6377 
6378 	    if (cleared && initializer_zerop (value))
6379 	      continue;
6380 
6381 	    if (tree_fits_uhwi_p (DECL_SIZE (field)))
6382 	      bitsize = tree_to_uhwi (DECL_SIZE (field));
6383 	    else
6384 	      gcc_unreachable ();
6385 
6386 	    mode = DECL_MODE (field);
6387 	    if (DECL_BIT_FIELD (field))
6388 	      mode = VOIDmode;
6389 
6390 	    offset = DECL_FIELD_OFFSET (field);
6391 	    if (tree_fits_shwi_p (offset)
6392 		&& tree_fits_shwi_p (bit_position (field)))
6393 	      {
6394 		bitpos = int_bit_position (field);
6395 		offset = NULL_TREE;
6396 	      }
6397 	    else
6398 	      gcc_unreachable ();
6399 
6400 	    /* If this initializes a field that is smaller than a
6401 	       word, at the start of a word, try to widen it to a full
6402 	       word.  This special case allows us to output C++ member
6403 	       function initializations in a form that the optimizers
6404 	       can understand.  */
6405 	    if (WORD_REGISTER_OPERATIONS
6406 		&& REG_P (target)
6407 		&& bitsize < BITS_PER_WORD
6408 		&& bitpos % BITS_PER_WORD == 0
6409 		&& GET_MODE_CLASS (mode) == MODE_INT
6410 		&& TREE_CODE (value) == INTEGER_CST
6411 		&& exp_size >= 0
6412 		&& bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6413 	      {
6414 		tree type = TREE_TYPE (value);
6415 
6416 		if (TYPE_PRECISION (type) < BITS_PER_WORD)
6417 		  {
6418 		    type = lang_hooks.types.type_for_mode
6419 		      (word_mode, TYPE_UNSIGNED (type));
6420 		    value = fold_convert (type, value);
6421 		    /* Make sure the bits beyond the original bitsize are zero
6422 		       so that we can correctly avoid extra zeroing stores in
6423 		       later constructor elements.  */
6424 		    tree bitsize_mask
6425 		      = wide_int_to_tree (type, wi::mask (bitsize, false,
6426 							   BITS_PER_WORD));
6427 		    value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6428 		  }
6429 
6430 		if (BYTES_BIG_ENDIAN)
6431 		  value
6432 		   = fold_build2 (LSHIFT_EXPR, type, value,
6433 				   build_int_cst (type,
6434 						  BITS_PER_WORD - bitsize));
6435 		bitsize = BITS_PER_WORD;
6436 		mode = word_mode;
6437 	      }
6438 
6439 	    if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6440 		&& DECL_NONADDRESSABLE_P (field))
6441 	      {
6442 		to_rtx = copy_rtx (to_rtx);
6443 		MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6444 	      }
6445 
6446 	    store_constructor_field (to_rtx, bitsize, bitpos,
6447 				     0, bitregion_end, mode,
6448 				     value, cleared,
6449 				     get_alias_set (TREE_TYPE (field)),
6450 				     reverse);
6451 	  }
6452 	break;
6453       }
6454     case ARRAY_TYPE:
6455       {
6456 	tree value, index;
6457 	unsigned HOST_WIDE_INT i;
6458 	int need_to_clear;
6459 	tree domain;
6460 	tree elttype = TREE_TYPE (type);
6461 	int const_bounds_p;
6462 	HOST_WIDE_INT minelt = 0;
6463 	HOST_WIDE_INT maxelt = 0;
6464 
6465 	/* The storage order is specified for every aggregate type.  */
6466 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6467 
6468 	domain = TYPE_DOMAIN (type);
6469 	const_bounds_p = (TYPE_MIN_VALUE (domain)
6470 			  && TYPE_MAX_VALUE (domain)
6471 			  && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6472 			  && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6473 
6474 	/* If we have constant bounds for the range of the type, get them.  */
6475 	if (const_bounds_p)
6476 	  {
6477 	    minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6478 	    maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6479 	  }
6480 
6481 	/* If the constructor has fewer elements than the array, clear
6482            the whole array first.  Similarly if this is a static
6483            constructor of a non-BLKmode object.  */
6484 	if (cleared)
6485 	  need_to_clear = 0;
6486 	else if (REG_P (target) && TREE_STATIC (exp))
6487 	  need_to_clear = 1;
6488 	else
6489 	  {
6490 	    unsigned HOST_WIDE_INT idx;
6491 	    tree index, value;
6492 	    HOST_WIDE_INT count = 0, zero_count = 0;
6493 	    need_to_clear = ! const_bounds_p;
6494 
6495 	    /* This loop is a more accurate version of the loop in
6496 	       mostly_zeros_p (it handles RANGE_EXPR in an index).  It
6497 	       is also needed to check for missing elements.  */
6498 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6499 	      {
6500 		HOST_WIDE_INT this_node_count;
6501 
6502 		if (need_to_clear)
6503 		  break;
6504 
6505 		if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6506 		  {
6507 		    tree lo_index = TREE_OPERAND (index, 0);
6508 		    tree hi_index = TREE_OPERAND (index, 1);
6509 
6510 		    if (! tree_fits_uhwi_p (lo_index)
6511 			|| ! tree_fits_uhwi_p (hi_index))
6512 		      {
6513 			need_to_clear = 1;
6514 			break;
6515 		      }
6516 
6517 		    this_node_count = (tree_to_uhwi (hi_index)
6518 				       - tree_to_uhwi (lo_index) + 1);
6519 		  }
6520 		else
6521 		  this_node_count = 1;
6522 
6523 		count += this_node_count;
6524 		if (mostly_zeros_p (value))
6525 		  zero_count += this_node_count;
6526 	      }
6527 
6528 	    /* Clear the entire array first if there are any missing
6529 	       elements, or if the incidence of zero elements is >=
6530 	       75%.  */
6531 	    if (! need_to_clear
6532 		&& (count < maxelt - minelt + 1
6533 		    || 4 * zero_count >= 3 * count))
6534 	      need_to_clear = 1;
6535 	  }
6536 
6537 	if (need_to_clear && maybe_gt (size, 0))
6538 	  {
6539 	    if (REG_P (target))
6540 	      emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6541 	    else
6542 	      clear_storage (target, gen_int_mode (size, Pmode),
6543 			     BLOCK_OP_NORMAL);
6544 	    cleared = 1;
6545 	  }
6546 
6547 	if (!cleared && REG_P (target))
6548 	  /* Inform later passes that the old value is dead.  */
6549 	  emit_clobber (target);
6550 
6551 	/* Store each element of the constructor into the
6552 	   corresponding element of TARGET, determined by counting the
6553 	   elements.  */
6554 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6555 	  {
6556 	    machine_mode mode;
6557 	    poly_int64 bitsize;
6558 	    HOST_WIDE_INT bitpos;
6559 	    rtx xtarget = target;
6560 
6561 	    if (cleared && initializer_zerop (value))
6562 	      continue;
6563 
6564 	    mode = TYPE_MODE (elttype);
6565 	    if (mode == BLKmode)
6566 	      bitsize = (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6567 			 ? tree_to_uhwi (TYPE_SIZE (elttype))
6568 			 : -1);
6569 	    else
6570 	      bitsize = GET_MODE_BITSIZE (mode);
6571 
6572 	    if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6573 	      {
6574 		tree lo_index = TREE_OPERAND (index, 0);
6575 		tree hi_index = TREE_OPERAND (index, 1);
6576 		rtx index_r, pos_rtx;
6577 		HOST_WIDE_INT lo, hi, count;
6578 		tree position;
6579 
6580 		/* If the range is constant and "small", unroll the loop.  */
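		/* Editorial note (not part of the original source): with
		   4-byte elements the 40 * 8 bit budget below allows ranges
		   of up to ten elements, so e.g.

		     int a[16] = { [2 ... 6] = 5 };

		   would normally be unrolled into five element stores,
		   while a much wider range is expanded as a run-time loop
		   in the else arm instead.  */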
6581 		if (const_bounds_p
6582 		    && tree_fits_shwi_p (lo_index)
6583 		    && tree_fits_shwi_p (hi_index)
6584 		    && (lo = tree_to_shwi (lo_index),
6585 			hi = tree_to_shwi (hi_index),
6586 			count = hi - lo + 1,
6587 			(!MEM_P (target)
6588 			 || count <= 2
6589 			 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6590 			     && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
6591 				 <= 40 * 8)))))
6592 		  {
6593 		    lo -= minelt;  hi -= minelt;
6594 		    for (; lo <= hi; lo++)
6595 		      {
6596 			bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
6597 
6598 			if (MEM_P (target)
6599 			    && !MEM_KEEP_ALIAS_SET_P (target)
6600 			    && TREE_CODE (type) == ARRAY_TYPE
6601 			    && TYPE_NONALIASED_COMPONENT (type))
6602 			  {
6603 			    target = copy_rtx (target);
6604 			    MEM_KEEP_ALIAS_SET_P (target) = 1;
6605 			  }
6606 
6607 			store_constructor_field
6608 			  (target, bitsize, bitpos, 0, bitregion_end,
6609 			   mode, value, cleared,
6610 			   get_alias_set (elttype), reverse);
6611 		      }
6612 		  }
6613 		else
6614 		  {
6615 		    rtx_code_label *loop_start = gen_label_rtx ();
6616 		    rtx_code_label *loop_end = gen_label_rtx ();
6617 		    tree exit_cond;
6618 
6619 		    expand_normal (hi_index);
6620 
6621 		    index = build_decl (EXPR_LOCATION (exp),
6622 					VAR_DECL, NULL_TREE, domain);
6623 		    index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
6624 		    SET_DECL_RTL (index, index_r);
6625 		    store_expr (lo_index, index_r, 0, false, reverse);
6626 
6627 		    /* Build the head of the loop.  */
6628 		    do_pending_stack_adjust ();
6629 		    emit_label (loop_start);
6630 
6631 		    /* Assign value to element index.  */
6632 		    position =
6633 		      fold_convert (ssizetype,
6634 				    fold_build2 (MINUS_EXPR,
6635 						 TREE_TYPE (index),
6636 						 index,
6637 						 TYPE_MIN_VALUE (domain)));
6638 
6639 		    position =
6640 			size_binop (MULT_EXPR, position,
6641 				    fold_convert (ssizetype,
6642 						  TYPE_SIZE_UNIT (elttype)));
6643 
6644 		    pos_rtx = expand_normal (position);
6645 		    xtarget = offset_address (target, pos_rtx,
6646 					      highest_pow2_factor (position));
6647 		    xtarget = adjust_address (xtarget, mode, 0);
6648 		    if (TREE_CODE (value) == CONSTRUCTOR)
6649 		      store_constructor (value, xtarget, cleared,
6650 					 exact_div (bitsize, BITS_PER_UNIT),
6651 					 reverse);
6652 		    else
6653 		      store_expr (value, xtarget, 0, false, reverse);
6654 
6655 		    /* Generate a conditional jump to exit the loop.  */
6656 		    exit_cond = build2 (LT_EXPR, integer_type_node,
6657 					index, hi_index);
6658 		    jumpif (exit_cond, loop_end,
6659 			    profile_probability::uninitialized ());
6660 
6661 		    /* Update the loop counter, and jump to the head of
6662 		       the loop.  */
6663 		    expand_assignment (index,
6664 				       build2 (PLUS_EXPR, TREE_TYPE (index),
6665 					       index, integer_one_node),
6666 				       false);
6667 
6668 		    emit_jump (loop_start);
6669 
6670 		    /* Build the end of the loop.  */
6671 		    emit_label (loop_end);
6672 		  }
6673 	      }
6674 	    else if ((index != 0 && ! tree_fits_shwi_p (index))
6675 		     || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
6676 	      {
6677 		tree position;
6678 
6679 		if (index == 0)
6680 		  index = ssize_int (1);
6681 
6682 		if (minelt)
6683 		  index = fold_convert (ssizetype,
6684 					fold_build2 (MINUS_EXPR,
6685 						     TREE_TYPE (index),
6686 						     index,
6687 						     TYPE_MIN_VALUE (domain)));
6688 
6689 		position =
6690 		  size_binop (MULT_EXPR, index,
6691 			      fold_convert (ssizetype,
6692 					    TYPE_SIZE_UNIT (elttype)));
6693 		xtarget = offset_address (target,
6694 					  expand_normal (position),
6695 					  highest_pow2_factor (position));
6696 		xtarget = adjust_address (xtarget, mode, 0);
6697 		store_expr (value, xtarget, 0, false, reverse);
6698 	      }
6699 	    else
6700 	      {
6701 		if (index != 0)
6702 		  bitpos = ((tree_to_shwi (index) - minelt)
6703 			    * tree_to_uhwi (TYPE_SIZE (elttype)));
6704 		else
6705 		  bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
6706 
6707 		if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
6708 		    && TREE_CODE (type) == ARRAY_TYPE
6709 		    && TYPE_NONALIASED_COMPONENT (type))
6710 		  {
6711 		    target = copy_rtx (target);
6712 		    MEM_KEEP_ALIAS_SET_P (target) = 1;
6713 		  }
6714 		store_constructor_field (target, bitsize, bitpos, 0,
6715 					 bitregion_end, mode, value,
6716 					 cleared, get_alias_set (elttype),
6717 					 reverse);
6718 	      }
6719 	  }
6720 	break;
6721       }
6722 
6723     case VECTOR_TYPE:
6724       {
6725 	unsigned HOST_WIDE_INT idx;
6726 	constructor_elt *ce;
6727 	int i;
6728 	int need_to_clear;
6729 	insn_code icode = CODE_FOR_nothing;
6730 	tree elt;
6731 	tree elttype = TREE_TYPE (type);
6732 	int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
6733 	machine_mode eltmode = TYPE_MODE (elttype);
6734 	HOST_WIDE_INT bitsize;
6735 	HOST_WIDE_INT bitpos;
6736 	rtvec vector = NULL;
6737 	poly_uint64 n_elts;
6738 	unsigned HOST_WIDE_INT const_n_elts;
6739 	alias_set_type alias;
6740 	bool vec_vec_init_p = false;
6741 	machine_mode mode = GET_MODE (target);
6742 
6743 	gcc_assert (eltmode != BLKmode);
6744 
6745 	/* Try using vec_duplicate_optab for uniform vectors.  */
6746 	if (!TREE_SIDE_EFFECTS (exp)
6747 	    && VECTOR_MODE_P (mode)
6748 	    && eltmode == GET_MODE_INNER (mode)
6749 	    && ((icode = optab_handler (vec_duplicate_optab, mode))
6750 		!= CODE_FOR_nothing)
6751 	    && (elt = uniform_vector_p (exp)))
6752 	  {
6753 	    struct expand_operand ops[2];
6754 	    create_output_operand (&ops[0], target, mode);
6755 	    create_input_operand (&ops[1], expand_normal (elt), eltmode);
6756 	    expand_insn (icode, 2, ops);
6757 	    if (!rtx_equal_p (target, ops[0].value))
6758 	      emit_move_insn (target, ops[0].value);
6759 	    break;
6760 	  }
6761 
6762 	n_elts = TYPE_VECTOR_SUBPARTS (type);
6763 	if (REG_P (target)
6764 	    && VECTOR_MODE_P (mode)
6765 	    && n_elts.is_constant (&const_n_elts))
6766 	  {
6767 	    machine_mode emode = eltmode;
6768 
6769 	    if (CONSTRUCTOR_NELTS (exp)
6770 		&& (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
6771 		    == VECTOR_TYPE))
6772 	      {
6773 		tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
6774 		gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
6775 				      * TYPE_VECTOR_SUBPARTS (etype),
6776 				      n_elts));
6777 		emode = TYPE_MODE (etype);
6778 	      }
6779 	    icode = convert_optab_handler (vec_init_optab, mode, emode);
6780 	    if (icode != CODE_FOR_nothing)
6781 	      {
6782 		unsigned int i, n = const_n_elts;
6783 
6784 		if (emode != eltmode)
6785 		  {
6786 		    n = CONSTRUCTOR_NELTS (exp);
6787 		    vec_vec_init_p = true;
6788 		  }
6789 		vector = rtvec_alloc (n);
6790 		for (i = 0; i < n; i++)
6791 		  RTVEC_ELT (vector, i) = CONST0_RTX (emode);
6792 	      }
6793 	  }
6794 
6795 	/* If the constructor has fewer elements than the vector,
6796 	   clear the whole vector first.  Similarly if this is a static
6797 	   constructor of a non-BLKmode object.  */
6798 	if (cleared)
6799 	  need_to_clear = 0;
6800 	else if (REG_P (target) && TREE_STATIC (exp))
6801 	  need_to_clear = 1;
6802 	else
6803 	  {
6804 	    unsigned HOST_WIDE_INT count = 0, zero_count = 0;
6805 	    tree value;
6806 
6807 	    FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
6808 	      {
6809 		tree sz = TYPE_SIZE (TREE_TYPE (value));
6810 		int n_elts_here
6811 		  = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, sz,
6812 						   TYPE_SIZE (elttype)));
6813 
6814 		count += n_elts_here;
6815 		if (mostly_zeros_p (value))
6816 		  zero_count += n_elts_here;
6817 	      }
6818 
6819 	    /* Clear the entire vector first if there are any missing elements,
6820 	       or if the incidence of zero elements is >= 75%.  */
6821 	    need_to_clear = (maybe_lt (count, n_elts)
6822 			     || 4 * zero_count >= 3 * count);
6823 	  }
6824 
6825 	if (need_to_clear && maybe_gt (size, 0) && !vector)
6826 	  {
6827 	    if (REG_P (target))
6828 	      emit_move_insn (target, CONST0_RTX (mode));
6829 	    else
6830 	      clear_storage (target, gen_int_mode (size, Pmode),
6831 			     BLOCK_OP_NORMAL);
6832 	    cleared = 1;
6833 	  }
6834 
6835 	/* Inform later passes that the old value is dead.  */
6836 	if (!cleared && !vector && REG_P (target))
6837 	  emit_move_insn (target, CONST0_RTX (mode));
6838 
6839         if (MEM_P (target))
6840 	  alias = MEM_ALIAS_SET (target);
6841 	else
6842 	  alias = get_alias_set (elttype);
6843 
6844         /* Store each element of the constructor into the corresponding
6845 	   element of TARGET, determined by counting the elements.  */
6846 	for (idx = 0, i = 0;
6847 	     vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
6848 	     idx++, i += bitsize / elt_size)
6849 	  {
6850 	    HOST_WIDE_INT eltpos;
6851 	    tree value = ce->value;
6852 
6853 	    bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)));
6854 	    if (cleared && initializer_zerop (value))
6855 	      continue;
6856 
6857 	    if (ce->index)
6858 	      eltpos = tree_to_uhwi (ce->index);
6859 	    else
6860 	      eltpos = i;
6861 
6862 	    if (vector)
6863 	      {
6864 		if (vec_vec_init_p)
6865 		  {
6866 		    gcc_assert (ce->index == NULL_TREE);
6867 		    gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
6868 		    eltpos = idx;
6869 		  }
6870 		else
6871 		  gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
6872 		RTVEC_ELT (vector, eltpos) = expand_normal (value);
6873 	      }
6874 	    else
6875 	      {
6876 		machine_mode value_mode
6877 		  = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
6878 		     ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
6879 		bitpos = eltpos * elt_size;
6880 		store_constructor_field (target, bitsize, bitpos, 0,
6881 					 bitregion_end, value_mode,
6882 					 value, cleared, alias, reverse);
6883 	      }
6884 	  }
6885 
6886 	if (vector)
6887 	  emit_insn (GEN_FCN (icode) (target,
6888 				      gen_rtx_PARALLEL (mode, vector)));
6889 	break;
6890       }
6891 
6892     default:
6893       gcc_unreachable ();
6894     }
6895 }
6896 
6897 /* Store the value of EXP (an expression tree)
6898    into a subfield of TARGET which has mode MODE and occupies
6899    BITSIZE bits, starting BITPOS bits from the start of TARGET.
6900    If MODE is VOIDmode, it means that we are storing into a bit-field.
6901 
6902    BITREGION_START is bitpos of the first bitfield in this region.
6903    BITREGION_END is the bitpos of the ending bitfield in this region.
6904    These two fields are 0, if the C++ memory model does not apply,
6905    or we are not interested in keeping track of bitfield regions.
6906 
6907    Always return const0_rtx unless we have something particular to
6908    return.
6909 
6910    ALIAS_SET is the alias set for the destination.  This value will
6911    (in general) be different from that for TARGET, since TARGET is a
6912    reference to the containing structure.
6913 
6914    If NONTEMPORAL is true, try generating a nontemporal store.
6915 
6916    If REVERSE is true, the store is to be done in reverse order.  */
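/* Illustrative example (editorial note, not part of the original source;
   the exact bit numbers depend on the target's bit-field layout): for

     struct s { unsigned int f : 5; unsigned int g : 11; } *p;
     p->g = 100;

   the caller would typically arrive here with MODE == VOIDmode,
   BITSIZE == 11 and, on a common little-endian layout, BITPOS == 5, and
   the assignment is performed by store_bit_field below rather than by an
   ordinary memory reference, with BITREGION_START/END delimiting the bits
   the store is allowed to touch.  */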
6917 
6918 static rtx
6919 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
6920 	     poly_uint64 bitregion_start, poly_uint64 bitregion_end,
6921 	     machine_mode mode, tree exp,
6922 	     alias_set_type alias_set, bool nontemporal,  bool reverse)
6923 {
6924   if (TREE_CODE (exp) == ERROR_MARK)
6925     return const0_rtx;
6926 
6927   /* If we have nothing to store, do nothing unless the expression has
6928      side-effects.  Don't do that for zero sized addressable lhs of
6929      calls.  */
6930   if (known_eq (bitsize, 0)
6931       && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6932 	  || TREE_CODE (exp) != CALL_EXPR))
6933     return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6934 
6935   if (GET_CODE (target) == CONCAT)
6936     {
6937       /* We're storing into a struct containing a single __complex.  */
6938 
6939       gcc_assert (known_eq (bitpos, 0));
6940       return store_expr (exp, target, 0, nontemporal, reverse);
6941     }
6942 
6943   /* If the structure is in a register or if the component
6944      is a bit field, we cannot use addressing to access it.
6945      Use bit-field techniques or SUBREG to store in it.  */
6946 
6947   poly_int64 decl_bitsize;
6948   if (mode == VOIDmode
6949       || (mode != BLKmode && ! direct_store[(int) mode]
6950 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
6951 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
6952       || REG_P (target)
6953       || GET_CODE (target) == SUBREG
6954       /* If the field isn't aligned enough to store as an ordinary memref,
6955 	 store it as a bit field.  */
6956       || (mode != BLKmode
6957 	  && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
6958 		|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
6959 	       && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
6960 	      || !multiple_p (bitpos, BITS_PER_UNIT)))
6961       || (known_size_p (bitsize)
6962 	  && mode != BLKmode
6963 	  && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
6964       /* If the RHS and field are a constant size and the size of the
6965 	 RHS isn't the same size as the bitfield, we must use bitfield
6966 	 operations.  */
6967       || (known_size_p (bitsize)
6968 	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
6969 	  && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
6970 		       bitsize)
6971 	  /* Except for initialization of full bytes from a CONSTRUCTOR, which
6972 	     we will handle specially below.  */
6973 	  && !(TREE_CODE (exp) == CONSTRUCTOR
6974 	       && multiple_p (bitsize, BITS_PER_UNIT))
6975 	  /* And except for bitwise copying of TREE_ADDRESSABLE types,
6976 	     where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
6977 	     includes some extra padding.  store_expr / expand_expr will in
6978 	     that case call get_inner_reference that will have the bitsize
6979 	     we check here and thus the block move will not clobber the
6980 	     padding that shouldn't be clobbered.  In the future we could
6981 	     replace the TREE_ADDRESSABLE check with a check that
6982 	     get_base_address needs to live in memory.  */
6983 	  && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6984 	      || TREE_CODE (exp) != COMPONENT_REF
6985 	      || !multiple_p (bitsize, BITS_PER_UNIT)
6986 	      || !multiple_p (bitpos, BITS_PER_UNIT)
6987 	      || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
6988 				   &decl_bitsize)
6989 	      || maybe_ne (decl_bitsize, bitsize)))
6990       /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
6991          decl we must use bitfield operations.  */
6992       || (known_size_p (bitsize)
6993 	  && TREE_CODE (exp) == MEM_REF
6994 	  && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
6995 	  && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6996 	  && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6997 	  && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
6998     {
6999       rtx temp;
7000       gimple *nop_def;
7001 
7002       /* If EXP is a NOP_EXPR of precision less than its mode, then that
7003 	 implies a mask operation.  If the precision is the same size as
7004 	 the field we're storing into, that mask is redundant.  This is
7005 	 particularly common with bit field assignments generated by the
7006 	 C front end.  */
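      /* Editorial note (not part of the original source), as an example:

	   struct s { unsigned int f : 5; } x;
	   unsigned int v;
	   x.f = v;

	 Here the front end narrows V with a NOP_EXPR to the 5-bit
	 precision type of the field; since the destination is itself
	 5 bits wide, the masking implied by that conversion is redundant
	 and the code below stores from the wider operand directly.  */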
7007       nop_def = get_def_for_expr (exp, NOP_EXPR);
7008       if (nop_def)
7009 	{
7010 	  tree type = TREE_TYPE (exp);
7011 	  if (INTEGRAL_TYPE_P (type)
7012 	      && maybe_ne (TYPE_PRECISION (type),
7013 			   GET_MODE_BITSIZE (TYPE_MODE (type)))
7014 	      && known_eq (bitsize, TYPE_PRECISION (type)))
7015 	    {
7016 	      tree op = gimple_assign_rhs1 (nop_def);
7017 	      type = TREE_TYPE (op);
7018 	      if (INTEGRAL_TYPE_P (type)
7019 		  && known_ge (TYPE_PRECISION (type), bitsize))
7020 		exp = op;
7021 	    }
7022 	}
7023 
7024       temp = expand_normal (exp);
7025 
7026       /* We don't support variable-sized BLKmode bitfields, since our
7027 	 handling of BLKmode is bound up with the ability to break
7028 	 things into words.  */
7029       gcc_assert (mode != BLKmode || bitsize.is_constant ());
7030 
7031       /* Handle calls that return values in multiple non-contiguous locations.
7032 	 The Irix 6 ABI has examples of this.  */
7033       if (GET_CODE (temp) == PARALLEL)
7034 	{
7035 	  HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7036 	  machine_mode temp_mode = GET_MODE (temp);
7037 	  if (temp_mode == BLKmode || temp_mode == VOIDmode)
7038 	    temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
7039 	  rtx temp_target = gen_reg_rtx (temp_mode);
7040 	  emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
7041 	  temp = temp_target;
7042 	}
7043 
7044       /* Handle calls that return BLKmode values in registers.  */
7045       else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
7046 	{
7047 	  rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7048 	  copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7049 	  temp = temp_target;
7050 	}
7051 
7052       /* If the value has aggregate type and an integral mode then, if BITSIZE
7053 	 is narrower than this mode and this is for big-endian data, we first
7054 	 need to put the value into the low-order bits for store_bit_field,
7055 	 except when MODE is BLKmode and BITSIZE larger than the word size
7056 	 (see the handling of fields larger than a word in store_bit_field).
7057 	 Moreover, the field may be not aligned on a byte boundary; in this
7058 	 case, if it has reverse storage order, it needs to be accessed as a
7059 	 scalar field with reverse storage order and we must first put the
7060 	 value into target order.  */
7061       scalar_int_mode temp_mode;
7062       if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7063 	  && is_int_mode (GET_MODE (temp), &temp_mode))
7064 	{
7065 	  HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7066 
7067 	  reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7068 
7069 	  if (reverse)
7070 	    temp = flip_storage_order (temp_mode, temp);
7071 
7072 	  gcc_checking_assert (known_le (bitsize, size));
7073 	  if (maybe_lt (bitsize, size)
7074 	      && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7075 	      /* Use of to_constant for BLKmode was checked above.  */
7076 	      && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7077 	    temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7078 				 size - bitsize, NULL_RTX, 1);
7079 	}
7080 
7081       /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE.  */
7082       if (mode != VOIDmode && mode != BLKmode
7083 	  && mode != TYPE_MODE (TREE_TYPE (exp)))
7084 	temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7085 
7086       /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7087 	 and BITPOS must be aligned on a byte boundary.  If so, we simply do
7088 	 a block copy.  Likewise for a BLKmode-like TARGET.  */
7089       if (GET_MODE (temp) == BLKmode
7090 	  && (GET_MODE (target) == BLKmode
7091 	      || (MEM_P (target)
7092 		  && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7093 		  && multiple_p (bitpos, BITS_PER_UNIT)
7094 		  && multiple_p (bitsize, BITS_PER_UNIT))))
7095 	{
7096 	  gcc_assert (MEM_P (target) && MEM_P (temp));
7097 	  poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7098 	  poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7099 
7100 	  target = adjust_address (target, VOIDmode, bytepos);
7101 	  emit_block_move (target, temp,
7102 			   gen_int_mode (bytesize, Pmode),
7103 			   BLOCK_OP_NORMAL);
7104 
7105 	  return const0_rtx;
7106 	}
7107 
7108       /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
7109 	 word size, we need to load the value (see again store_bit_field).  */
7110       if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7111 	{
7112 	  scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
7113 	  temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7114 				    temp_mode, false, NULL);
7115 	}
7116 
7117       /* Store the value in the bitfield.  */
7118       store_bit_field (target, bitsize, bitpos,
7119 		       bitregion_start, bitregion_end,
7120 		       mode, temp, reverse);
7121 
7122       return const0_rtx;
7123     }
7124   else
7125     {
7126       /* Now build a reference to just the desired component.  */
7127       rtx to_rtx = adjust_address (target, mode,
7128 				   exact_div (bitpos, BITS_PER_UNIT));
7129 
7130       if (to_rtx == target)
7131 	to_rtx = copy_rtx (to_rtx);
7132 
7133       if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7134 	set_mem_alias_set (to_rtx, alias_set);
7135 
7136       /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7137 	 into a target smaller than its type; handle that case now.  */
7138       if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7139 	{
7140 	  poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7141 	  store_constructor (exp, to_rtx, 0, bytesize, reverse);
7142 	  return to_rtx;
7143 	}
7144 
7145       return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7146     }
7147 }
7148 
7149 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7150    an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7151    codes and find the ultimate containing object, which we return.
7152 
7153    We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7154    bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7155    storage order of the field.
7156    If the position of the field is variable, we store a tree
7157    giving the variable offset (in units) in *POFFSET.
7158    This offset is in addition to the bit position.
7159    If the position is not variable, we store 0 in *POFFSET.
7160 
7161    If any of the extraction expressions is volatile,
7162    we store 1 in *PVOLATILEP.  Otherwise we don't change that.
7163 
7164    If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7165    Otherwise, it is a mode that can be used to access the field.
7166 
7167    If the field describes a variable-sized object, *PMODE is set to
7168    BLKmode and *PBITSIZE is set to -1.  An access cannot be made in
7169    this case, but the address of the object can be found.  */
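/* Illustrative example (editorial note, not part of the original source;
   sizes assume 4-byte int): for a constant reference like
   ((struct s *) p)->a[3] with

     struct s { int pad; int a[10]; };

   everything folds to a constant position, so *POFFSET comes back as
   NULL_TREE and *PBITPOS holds the whole bit offset, here 4 * 8 for PAD
   plus 3 * 32 for the index.  With a variable index, the variable part is
   instead returned as a byte-sized tree in *POFFSET and only the leftover
   bits stay in *PBITPOS.  */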
7170 
7171 tree
7172 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7173 		     poly_int64_pod *pbitpos, tree *poffset,
7174 		     machine_mode *pmode, int *punsignedp,
7175 		     int *preversep, int *pvolatilep)
7176 {
7177   tree size_tree = 0;
7178   machine_mode mode = VOIDmode;
7179   bool blkmode_bitfield = false;
7180   tree offset = size_zero_node;
7181   poly_offset_int bit_offset = 0;
7182 
7183   /* First get the mode, signedness, storage order and size.  We do this from
7184      just the outermost expression.  */
7185   *pbitsize = -1;
7186   if (TREE_CODE (exp) == COMPONENT_REF)
7187     {
7188       tree field = TREE_OPERAND (exp, 1);
7189       size_tree = DECL_SIZE (field);
7190       if (flag_strict_volatile_bitfields > 0
7191 	  && TREE_THIS_VOLATILE (exp)
7192 	  && DECL_BIT_FIELD_TYPE (field)
7193 	  && DECL_MODE (field) != BLKmode)
7194 	/* Volatile bitfields should be accessed in the mode of the
7195 	     field's type, not the mode computed based on the bit
7196 	     size.  */
7197 	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7198       else if (!DECL_BIT_FIELD (field))
7199 	{
7200 	  mode = DECL_MODE (field);
7201 	  /* For vector fields re-check the target flags, as DECL_MODE
7202 	     could have been set with different target flags than
7203 	     the current function has.  */
7204 	  if (mode == BLKmode
7205 	      && VECTOR_TYPE_P (TREE_TYPE (field))
7206 	      && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7207 	    mode = TYPE_MODE (TREE_TYPE (field));
7208 	}
7209       else if (DECL_MODE (field) == BLKmode)
7210 	blkmode_bitfield = true;
7211 
7212       *punsignedp = DECL_UNSIGNED (field);
7213     }
7214   else if (TREE_CODE (exp) == BIT_FIELD_REF)
7215     {
7216       size_tree = TREE_OPERAND (exp, 1);
7217       *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7218 		     || TYPE_UNSIGNED (TREE_TYPE (exp)));
7219 
7220       /* For vector types, with the correct size of access, use the mode of
7221 	 inner type.  */
7222       if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7223 	  && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7224 	  && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7225         mode = TYPE_MODE (TREE_TYPE (exp));
7226     }
7227   else
7228     {
7229       mode = TYPE_MODE (TREE_TYPE (exp));
7230       *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7231 
7232       if (mode == BLKmode)
7233 	size_tree = TYPE_SIZE (TREE_TYPE (exp));
7234       else
7235 	*pbitsize = GET_MODE_BITSIZE (mode);
7236     }
7237 
7238   if (size_tree != 0)
7239     {
7240       if (! tree_fits_uhwi_p (size_tree))
7241 	mode = BLKmode, *pbitsize = -1;
7242       else
7243 	*pbitsize = tree_to_uhwi (size_tree);
7244     }
7245 
7246   *preversep = reverse_storage_order_for_component_p (exp);
7247 
7248   /* Compute cumulative bit-offset for nested component-refs and array-refs,
7249      and find the ultimate containing object.  */
7250   while (1)
7251     {
7252       switch (TREE_CODE (exp))
7253 	{
7254 	case BIT_FIELD_REF:
7255 	  bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7256 	  break;
7257 
7258 	case COMPONENT_REF:
7259 	  {
7260 	    tree field = TREE_OPERAND (exp, 1);
7261 	    tree this_offset = component_ref_field_offset (exp);
7262 
7263 	    /* If this field hasn't been filled in yet, don't go past it.
7264 	       This should only happen when folding expressions made during
7265 	       type construction.  */
7266 	    if (this_offset == 0)
7267 	      break;
7268 
7269 	    offset = size_binop (PLUS_EXPR, offset, this_offset);
7270 	    bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7271 
7272 	    /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN.  */
7273 	  }
7274 	  break;
7275 
7276 	case ARRAY_REF:
7277 	case ARRAY_RANGE_REF:
7278 	  {
7279 	    tree index = TREE_OPERAND (exp, 1);
7280 	    tree low_bound = array_ref_low_bound (exp);
7281 	    tree unit_size = array_ref_element_size (exp);
7282 
7283 	    /* We assume all arrays have sizes that are a multiple of a byte.
7284 	       First subtract the lower bound, if any, in the type of the
7285 	       index, then convert to sizetype and multiply by the size of
7286 	       the array element.  */
7287 	    if (! integer_zerop (low_bound))
7288 	      index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7289 				   index, low_bound);
7290 
7291 	    offset = size_binop (PLUS_EXPR, offset,
7292 			         size_binop (MULT_EXPR,
7293 					     fold_convert (sizetype, index),
7294 					     unit_size));
7295 	  }
7296 	  break;
7297 
7298 	case REALPART_EXPR:
7299 	  break;
7300 
7301 	case IMAGPART_EXPR:
7302 	  bit_offset += *pbitsize;
7303 	  break;
7304 
7305 	case VIEW_CONVERT_EXPR:
7306 	  break;
7307 
7308 	case MEM_REF:
7309 	  /* Hand back the decl for MEM[&decl, off].  */
7310 	  if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7311 	    {
7312 	      tree off = TREE_OPERAND (exp, 1);
7313 	      if (!integer_zerop (off))
7314 		{
7315 		  poly_offset_int boff = mem_ref_offset (exp);
7316 		  boff <<= LOG2_BITS_PER_UNIT;
7317 		  bit_offset += boff;
7318 		}
7319 	      exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7320 	    }
7321 	  goto done;
7322 
7323 	default:
7324 	  goto done;
7325 	}
7326 
7327       /* If any reference in the chain is volatile, the effect is volatile.  */
7328       if (TREE_THIS_VOLATILE (exp))
7329 	*pvolatilep = 1;
7330 
7331       exp = TREE_OPERAND (exp, 0);
7332     }
7333  done:
7334 
7335   /* If OFFSET is constant, see if we can return the whole thing as a
7336      constant bit position.  Make sure to handle overflow during
7337      this conversion.  */
7338   if (poly_int_tree_p (offset))
7339     {
7340       poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7341 				      TYPE_PRECISION (sizetype));
7342       tem <<= LOG2_BITS_PER_UNIT;
7343       tem += bit_offset;
7344       if (tem.to_shwi (pbitpos))
7345 	*poffset = offset = NULL_TREE;
7346     }
7347 
7348   /* Otherwise, split it up.  */
7349   if (offset)
7350     {
7351       /* Avoid returning a negative bitpos as this may wreak havoc later.  */
7352       if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7353         {
7354 	  *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7355 	  poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7356 	  offset = size_binop (PLUS_EXPR, offset,
7357 			       build_int_cst (sizetype, bytes.force_shwi ()));
7358 	}
7359 
7360       *poffset = offset;
7361     }
7362 
7363   /* We can use BLKmode for a byte-aligned BLKmode bitfield.  */
7364   if (mode == VOIDmode
7365       && blkmode_bitfield
7366       && multiple_p (*pbitpos, BITS_PER_UNIT)
7367       && multiple_p (*pbitsize, BITS_PER_UNIT))
7368     *pmode = BLKmode;
7369   else
7370     *pmode = mode;
7371 
7372   return exp;
7373 }
7374 
7375 /* Alignment in bits the TARGET of an assignment may be assumed to have.  */
7376 
7377 static unsigned HOST_WIDE_INT
7378 target_align (const_tree target)
7379 {
7380   /* We might have a chain of nested references with intermediate misaligning
7381      bitfields components, so need to recurse to find out.  */
7382 
7383   unsigned HOST_WIDE_INT this_align, outer_align;
7384 
7385   switch (TREE_CODE (target))
7386     {
7387     case BIT_FIELD_REF:
7388       return 1;
7389 
7390     case COMPONENT_REF:
7391       this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7392       outer_align = target_align (TREE_OPERAND (target, 0));
7393       return MIN (this_align, outer_align);
7394 
7395     case ARRAY_REF:
7396     case ARRAY_RANGE_REF:
7397       this_align = TYPE_ALIGN (TREE_TYPE (target));
7398       outer_align = target_align (TREE_OPERAND (target, 0));
7399       return MIN (this_align, outer_align);
7400 
7401     CASE_CONVERT:
7402     case NON_LVALUE_EXPR:
7403     case VIEW_CONVERT_EXPR:
7404       this_align = TYPE_ALIGN (TREE_TYPE (target));
7405       outer_align = target_align (TREE_OPERAND (target, 0));
7406       return MAX (this_align, outer_align);
7407 
7408     default:
7409       return TYPE_ALIGN (TREE_TYPE (target));
7410     }
7411 }
7412 
7413 
7414 /* Given an rtx VALUE that may contain additions and multiplications, return
7415    an equivalent value that just refers to a register, memory, or constant.
7416    This is done by generating instructions to perform the arithmetic and
7417    returning a pseudo-register containing the value.
7418 
7419    The returned value may be a REG, SUBREG, MEM or constant.  */
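/* Illustrative example (editorial note, not part of the original source):
   given something like (plus:SI (reg:SI 100) (const_int 8)), this emits
   the addition into a fresh pseudo (or into TARGET when that is suitable)
   and returns that register, so the caller ends up with an operand that
   is just a REG, MEM or constant rather than an arbitrary arithmetic
   expression.  */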
7420 
7421 rtx
7422 force_operand (rtx value, rtx target)
7423 {
7424   rtx op1, op2;
7425   /* Use subtarget as the target for operand 0 of a binary operation.  */
7426   rtx subtarget = get_subtarget (target);
7427   enum rtx_code code = GET_CODE (value);
7428 
7429   /* Check for subreg applied to an expression produced by loop optimizer.  */
7430   if (code == SUBREG
7431       && !REG_P (SUBREG_REG (value))
7432       && !MEM_P (SUBREG_REG (value)))
7433     {
7434       value
7435 	= simplify_gen_subreg (GET_MODE (value),
7436 			       force_reg (GET_MODE (SUBREG_REG (value)),
7437 					  force_operand (SUBREG_REG (value),
7438 							 NULL_RTX)),
7439 			       GET_MODE (SUBREG_REG (value)),
7440 			       SUBREG_BYTE (value));
7441       code = GET_CODE (value);
7442     }
7443 
7444   /* Check for a PIC address load.  */
7445   if ((code == PLUS || code == MINUS)
7446       && XEXP (value, 0) == pic_offset_table_rtx
7447       && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7448 	  || GET_CODE (XEXP (value, 1)) == LABEL_REF
7449 	  || GET_CODE (XEXP (value, 1)) == CONST))
7450     {
7451       if (!subtarget)
7452 	subtarget = gen_reg_rtx (GET_MODE (value));
7453       emit_move_insn (subtarget, value);
7454       return subtarget;
7455     }
7456 
7457   if (ARITHMETIC_P (value))
7458     {
7459       op2 = XEXP (value, 1);
7460       if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7461 	subtarget = 0;
7462       if (code == MINUS && CONST_INT_P (op2))
7463 	{
7464 	  code = PLUS;
7465 	  op2 = negate_rtx (GET_MODE (value), op2);
7466 	}
7467 
7468       /* Check for an addition with OP2 a constant integer and our first
7469          operand a PLUS of a virtual register and something else.  In that
7470          case, we want to emit the sum of the virtual register and the
7471          constant first and then add the other value.  This allows virtual
7472          register instantiation to simply modify the constant rather than
7473          creating another one around this addition.  */
7474       if (code == PLUS && CONST_INT_P (op2)
7475 	  && GET_CODE (XEXP (value, 0)) == PLUS
7476 	  && REG_P (XEXP (XEXP (value, 0), 0))
7477 	  && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7478 	  && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7479 	{
7480 	  rtx temp = expand_simple_binop (GET_MODE (value), code,
7481 					  XEXP (XEXP (value, 0), 0), op2,
7482 					  subtarget, 0, OPTAB_LIB_WIDEN);
7483 	  return expand_simple_binop (GET_MODE (value), code, temp,
7484 				      force_operand (XEXP (XEXP (value,
7485 								 0), 1), 0),
7486 				      target, 0, OPTAB_LIB_WIDEN);
7487 	}
7488 
7489       op1 = force_operand (XEXP (value, 0), subtarget);
7490       op2 = force_operand (op2, NULL_RTX);
7491       switch (code)
7492 	{
7493 	case MULT:
7494 	  return expand_mult (GET_MODE (value), op1, op2, target, 1);
7495 	case DIV:
7496 	  if (!INTEGRAL_MODE_P (GET_MODE (value)))
7497 	    return expand_simple_binop (GET_MODE (value), code, op1, op2,
7498 					target, 1, OPTAB_LIB_WIDEN);
7499 	  else
7500 	    return expand_divmod (0,
7501 				  FLOAT_MODE_P (GET_MODE (value))
7502 				  ? RDIV_EXPR : TRUNC_DIV_EXPR,
7503 				  GET_MODE (value), op1, op2, target, 0);
7504 	case MOD:
7505 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7506 				target, 0);
7507 	case UDIV:
7508 	  return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7509 				target, 1);
7510 	case UMOD:
7511 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7512 				target, 1);
7513 	case ASHIFTRT:
7514 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7515 				      target, 0, OPTAB_LIB_WIDEN);
7516 	default:
7517 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7518 				      target, 1, OPTAB_LIB_WIDEN);
7519 	}
7520     }
7521   if (UNARY_P (value))
7522     {
7523       if (!target)
7524 	target = gen_reg_rtx (GET_MODE (value));
7525       op1 = force_operand (XEXP (value, 0), NULL_RTX);
7526       switch (code)
7527 	{
7528 	case ZERO_EXTEND:
7529 	case SIGN_EXTEND:
7530 	case TRUNCATE:
7531 	case FLOAT_EXTEND:
7532 	case FLOAT_TRUNCATE:
7533 	  convert_move (target, op1, code == ZERO_EXTEND);
7534 	  return target;
7535 
7536 	case FIX:
7537 	case UNSIGNED_FIX:
7538 	  expand_fix (target, op1, code == UNSIGNED_FIX);
7539 	  return target;
7540 
7541 	case FLOAT:
7542 	case UNSIGNED_FLOAT:
7543 	  expand_float (target, op1, code == UNSIGNED_FLOAT);
7544 	  return target;
7545 
7546 	default:
7547 	  return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7548 	}
7549     }
7550 
7551 #ifdef INSN_SCHEDULING
7552   /* On machines that have insn scheduling, we want all memory references to be
7553      explicit, so we need to deal with such paradoxical SUBREGs.  */
7554   if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7555     value
7556       = simplify_gen_subreg (GET_MODE (value),
7557 			     force_reg (GET_MODE (SUBREG_REG (value)),
7558 					force_operand (SUBREG_REG (value),
7559 						       NULL_RTX)),
7560 			     GET_MODE (SUBREG_REG (value)),
7561 			     SUBREG_BYTE (value));
7562 #endif
7563 
7564   return value;
7565 }
7566 
7567 /* Subroutine of expand_expr: return nonzero iff there is no way that
7568    EXP can reference X, which is being modified.  TOP_P is nonzero if this
7569    call is going to be used to determine whether we need a temporary
7570    for EXP, as opposed to a recursive call to this function.
7571 
7572    It is always safe for this routine to return zero since it merely
7573    searches for optimization opportunities.  */
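/* Editorial note (not part of the original source): callers such as the
   constructor expansion above use this check to make sure the object
   being stored into does not also appear in the expression that
   initializes it; when it might (or when we simply cannot tell), they
   play it safe and evaluate the expression into a temporary first.  */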
7574 
7575 int
7576 safe_from_p (const_rtx x, tree exp, int top_p)
7577 {
7578   rtx exp_rtl = 0;
7579   int i, nops;
7580 
7581   if (x == 0
7582       /* If EXP has varying size, we MUST use a target since we currently
7583 	 have no way of allocating temporaries of variable size
7584 	 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7585 	 So we assume here that something at a higher level has prevented a
7586 	 clash.  This is somewhat bogus, but the best we can do.  Only
7587 	 do this when X is BLKmode and when we are at the top level.  */
7588       || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
7589 	  && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
7590 	  && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
7591 	      || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
7592 	      || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
7593 	      != INTEGER_CST)
7594 	  && GET_MODE (x) == BLKmode)
7595       /* If X is in the outgoing argument area, it is always safe.  */
7596       || (MEM_P (x)
7597 	  && (XEXP (x, 0) == virtual_outgoing_args_rtx
7598 	      || (GET_CODE (XEXP (x, 0)) == PLUS
7599 		  && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
7600     return 1;
7601 
7602   /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7603      find the underlying pseudo.  */
7604   if (GET_CODE (x) == SUBREG)
7605     {
7606       x = SUBREG_REG (x);
7607       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7608 	return 0;
7609     }
7610 
7611   /* Now look at our tree code and possibly recurse.  */
7612   switch (TREE_CODE_CLASS (TREE_CODE (exp)))
7613     {
7614     case tcc_declaration:
7615       exp_rtl = DECL_RTL_IF_SET (exp);
7616       break;
7617 
7618     case tcc_constant:
7619       return 1;
7620 
7621     case tcc_exceptional:
7622       if (TREE_CODE (exp) == TREE_LIST)
7623 	{
7624 	  while (1)
7625 	    {
7626 	      if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
7627 		return 0;
7628 	      exp = TREE_CHAIN (exp);
7629 	      if (!exp)
7630 		return 1;
7631 	      if (TREE_CODE (exp) != TREE_LIST)
7632 		return safe_from_p (x, exp, 0);
7633 	    }
7634 	}
7635       else if (TREE_CODE (exp) == CONSTRUCTOR)
7636 	{
7637 	  constructor_elt *ce;
7638 	  unsigned HOST_WIDE_INT idx;
7639 
7640 	  FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
7641 	    if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
7642 		|| !safe_from_p (x, ce->value, 0))
7643 	      return 0;
7644 	  return 1;
7645 	}
7646       else if (TREE_CODE (exp) == ERROR_MARK)
7647 	return 1;	/* An already-visited SAVE_EXPR? */
7648       else
7649 	return 0;
7650 
7651     case tcc_statement:
7652       /* The only case we look at here is the DECL_INITIAL inside a
7653 	 DECL_EXPR.  */
7654       return (TREE_CODE (exp) != DECL_EXPR
7655 	      || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
7656 	      || !DECL_INITIAL (DECL_EXPR_DECL (exp))
7657 	      || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
7658 
7659     case tcc_binary:
7660     case tcc_comparison:
7661       if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
7662 	return 0;
7663       /* Fall through.  */
7664 
7665     case tcc_unary:
7666       return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7667 
7668     case tcc_expression:
7669     case tcc_reference:
7670     case tcc_vl_exp:
7671       /* Now do code-specific tests.  EXP_RTL is set to any rtx we find in
7672 	 the expression.  If it is set, we conflict iff we are that rtx or
7673 	 both are in memory.  Otherwise, we check all operands of the
7674 	 expression recursively.  */
7675 
7676       switch (TREE_CODE (exp))
7677 	{
7678 	case ADDR_EXPR:
7679 	  /* If the operand is static or we are static, we can't conflict.
7680 	     Likewise if we don't conflict with the operand at all.  */
7681 	  if (staticp (TREE_OPERAND (exp, 0))
7682 	      || TREE_STATIC (exp)
7683 	      || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
7684 	    return 1;
7685 
7686 	  /* Otherwise, the only way this can conflict is if we are taking
7687 	     the address of a DECL and that address is part of X, which is
7688 	     very rare.  */
7689 	  exp = TREE_OPERAND (exp, 0);
7690 	  if (DECL_P (exp))
7691 	    {
7692 	      if (!DECL_RTL_SET_P (exp)
7693 		  || !MEM_P (DECL_RTL (exp)))
7694 		return 0;
7695 	      else
7696 		exp_rtl = XEXP (DECL_RTL (exp), 0);
7697 	    }
7698 	  break;
7699 
7700 	case MEM_REF:
7701 	  if (MEM_P (x)
7702 	      && alias_sets_conflict_p (MEM_ALIAS_SET (x),
7703 					get_alias_set (exp)))
7704 	    return 0;
7705 	  break;
7706 
7707 	case CALL_EXPR:
7708 	  /* Assume that the call will clobber all hard registers and
7709 	     all of memory.  */
7710 	  if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7711 	      || MEM_P (x))
7712 	    return 0;
7713 	  break;
7714 
7715 	case WITH_CLEANUP_EXPR:
7716 	case CLEANUP_POINT_EXPR:
7717 	  /* Lowered by gimplify.c.  */
7718 	  gcc_unreachable ();
7719 
7720 	case SAVE_EXPR:
7721 	  return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7722 
7723 	default:
7724 	  break;
7725 	}
7726 
7727       /* If we have an rtx, we do not need to scan our operands.  */
7728       if (exp_rtl)
7729 	break;
7730 
7731       nops = TREE_OPERAND_LENGTH (exp);
7732       for (i = 0; i < nops; i++)
7733 	if (TREE_OPERAND (exp, i) != 0
7734 	    && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
7735 	  return 0;
7736 
7737       break;
7738 
7739     case tcc_type:
7740       /* Should never get a type here.  */
7741       gcc_unreachable ();
7742     }
7743 
7744   /* If we have an rtl, find any enclosed object.  Then see if we conflict
7745      with it.  */
7746   if (exp_rtl)
7747     {
7748       if (GET_CODE (exp_rtl) == SUBREG)
7749 	{
7750 	  exp_rtl = SUBREG_REG (exp_rtl);
7751 	  if (REG_P (exp_rtl)
7752 	      && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
7753 	    return 0;
7754 	}
7755 
7756       /* If the rtl is X, then it is not safe.  Otherwise, it is unless both
7757 	 are memory and they conflict.  */
7758       return ! (rtx_equal_p (x, exp_rtl)
7759 		|| (MEM_P (x) && MEM_P (exp_rtl)
7760 		    && true_dependence (exp_rtl, VOIDmode, x)));
7761     }
7762 
7763   /* If we reach here, it is safe.  */
7764   return 1;
7765 }
7766 
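/* Usage sketch (illustrative): a typical caller drops a tentative target
   when safe_from_p cannot prove that the other operand leaves it alone,
   as expand_operands does further below:

       if (! safe_from_p (target, exp1, 1))
	 target = 0;

   Since this routine only hunts for optimization opportunities, a
   conservative zero answer costs at most an extra temporary.  */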
7767 
7768 /* Return the highest power of two that EXP is known to be a multiple of.
7769    This is used in updating alignment of MEMs in array references.  */
7770 
7771 unsigned HOST_WIDE_INT
7772 highest_pow2_factor (const_tree exp)
7773 {
7774   unsigned HOST_WIDE_INT ret;
7775   int trailing_zeros = tree_ctz (exp);
7776   if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
7777     return BIGGEST_ALIGNMENT;
7778   ret = HOST_WIDE_INT_1U << trailing_zeros;
7779   if (ret > BIGGEST_ALIGNMENT)
7780     return BIGGEST_ALIGNMENT;
7781   return ret;
7782 }
7783 
7784 /* Similar, except that the alignment requirements of TARGET are
7785    taken into account.  Assume it is at least as aligned as its
7786    type, unless it is a COMPONENT_REF in which case the layout of
7787    the structure gives the alignment.  */
7788 
7789 static unsigned HOST_WIDE_INT
7790 highest_pow2_factor_for_target (const_tree target, const_tree exp)
7791 {
7792   unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
7793   unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
7794 
7795   return MAX (factor, talign);
7796 }
7797 
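/* Worked example (illustrative): if tree_ctz can prove that EXP has at
   least three trailing zero bits, e.g. for a multiple of 8 such as
   (i * 8), highest_pow2_factor returns HOST_WIDE_INT_1U << 3 == 8,
   clamped to BIGGEST_ALIGNMENT, and highest_pow2_factor_for_target
   takes the maximum of that factor and TARGET's own byte alignment.
   The analogous computation on a concrete nonzero value in plain C
   would be

       unsigned long long
       largest_pow2_divisor (unsigned long long val)
       {
	 return 1ULL << __builtin_ctzll (val);
       }

   which yields the largest power of two dividing VAL.  */
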
7798 /* Convert the tree comparison code TCODE to the rtl one where the
7799    signedness is UNSIGNEDP.  */
7800 
7801 static enum rtx_code
7802 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
7803 {
7804   enum rtx_code code;
7805   switch (tcode)
7806     {
7807     case EQ_EXPR:
7808       code = EQ;
7809       break;
7810     case NE_EXPR:
7811       code = NE;
7812       break;
7813     case LT_EXPR:
7814       code = unsignedp ? LTU : LT;
7815       break;
7816     case LE_EXPR:
7817       code = unsignedp ? LEU : LE;
7818       break;
7819     case GT_EXPR:
7820       code = unsignedp ? GTU : GT;
7821       break;
7822     case GE_EXPR:
7823       code = unsignedp ? GEU : GE;
7824       break;
7825     case UNORDERED_EXPR:
7826       code = UNORDERED;
7827       break;
7828     case ORDERED_EXPR:
7829       code = ORDERED;
7830       break;
7831     case UNLT_EXPR:
7832       code = UNLT;
7833       break;
7834     case UNLE_EXPR:
7835       code = UNLE;
7836       break;
7837     case UNGT_EXPR:
7838       code = UNGT;
7839       break;
7840     case UNGE_EXPR:
7841       code = UNGE;
7842       break;
7843     case UNEQ_EXPR:
7844       code = UNEQ;
7845       break;
7846     case LTGT_EXPR:
7847       code = LTGT;
7848       break;
7849 
7850     default:
7851       gcc_unreachable ();
7852     }
7853   return code;
7854 }
7855 
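/* Example (illustrative): LT_EXPR becomes LTU when UNSIGNEDP is nonzero
   and LT otherwise.  The distinction matters for the same reason it
   does in C source:

       int a = -1;        a < 1  is true   (signed compare, LT)
       unsigned u = -1;   u < 1u is false  (unsigned compare, LTU)

   The ORDERED/UNORDERED, UN* and LTGT codes are used for floating-point
   comparisons and carry no signedness, so UNSIGNEDP does not affect
   them.  */
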
7856 /* Subroutine of expand_expr.  Expand the two operands of a binary
7857    expression EXP0 and EXP1 placing the results in OP0 and OP1.
7858    The value may be stored in TARGET if TARGET is nonzero.  The
7859    MODIFIER argument is as documented by expand_expr.  */
7860 
7861 void
7862 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
7863 		 enum expand_modifier modifier)
7864 {
7865   if (! safe_from_p (target, exp1, 1))
7866     target = 0;
7867   if (operand_equal_p (exp0, exp1, 0))
7868     {
7869       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7870       *op1 = copy_rtx (*op0);
7871     }
7872   else
7873     {
7874       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7875       *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
7876     }
7877 }
7878 
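/* Illustration: for an expression such as x * x the two operand trees
   are operand_equal_p, so the tree is expanded only once and *OP1 is a
   copy_rtx of *OP0, avoiding redundant RTL for the same computation.  */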
7879 
7880 /* Return a MEM that contains constant EXP.  DEFER is as for
7881    output_constant_def and MODIFIER is as for expand_expr.  */
7882 
7883 static rtx
7884 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
7885 {
7886   rtx mem;
7887 
7888   mem = output_constant_def (exp, defer);
7889   if (modifier != EXPAND_INITIALIZER)
7890     mem = use_anchored_address (mem);
7891   return mem;
7892 }
7893 
7894 /* A subroutine of expand_expr_addr_expr.  Evaluate the address of EXP.
7895    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
7896 
7897 static rtx
7898 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
7899 		         enum expand_modifier modifier, addr_space_t as)
7900 {
7901   rtx result, subtarget;
7902   tree inner, offset;
7903   poly_int64 bitsize, bitpos;
7904   int unsignedp, reversep, volatilep = 0;
7905   machine_mode mode1;
7906 
7907   /* If we are taking the address of a constant and are at the top level,
7908      we have to use output_constant_def since we can't call force_const_mem
7909      at top level.  */
7910   /* ??? This should be considered a front-end bug.  We should not be
7911      generating ADDR_EXPR of something that isn't an LVALUE.  The only
7912      exception here is STRING_CST.  */
7913   if (CONSTANT_CLASS_P (exp))
7914     {
7915       result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
7916       if (modifier < EXPAND_SUM)
7917 	result = force_operand (result, target);
7918       return result;
7919     }
7920 
7921   /* Everything must be something allowed by is_gimple_addressable.  */
7922   switch (TREE_CODE (exp))
7923     {
7924     case INDIRECT_REF:
7925       /* This case will happen via recursion for &a->b.  */
7926       return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
7927 
7928     case MEM_REF:
7929       {
7930 	tree tem = TREE_OPERAND (exp, 0);
7931 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
7932 	  tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
7933 	return expand_expr (tem, target, tmode, modifier);
7934       }
7935 
7936     case TARGET_MEM_REF:
7937       return addr_for_mem_ref (exp, as, true);
7938 
7939     case CONST_DECL:
7940       /* Expand the initializer like constants above.  */
7941       result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
7942 					   0, modifier), 0);
7943       if (modifier < EXPAND_SUM)
7944 	result = force_operand (result, target);
7945       return result;
7946 
7947     case REALPART_EXPR:
7948       /* The real part of the complex number is always first, therefore
7949 	 the address is the same as the address of the parent object.  */
7950       offset = 0;
7951       bitpos = 0;
7952       inner = TREE_OPERAND (exp, 0);
7953       break;
7954 
7955     case IMAGPART_EXPR:
7956       /* The imaginary part of the complex number is always second.
7957 	 The expression is therefore always offset by the size of the
7958 	 scalar type.  */
7959       offset = 0;
7960       bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
7961       inner = TREE_OPERAND (exp, 0);
7962       break;
7963 
7964     case COMPOUND_LITERAL_EXPR:
7965       /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
7966 	 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
7967 	 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
7968 	 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
7969 	 the initializers aren't gimplified.  */
7970       if (COMPOUND_LITERAL_EXPR_DECL (exp)
7971 	  && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp)))
7972 	return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
7973 					target, tmode, modifier, as);
7974       /* FALLTHRU */
7975     default:
7976       /* If the object is a DECL, then expand it for its rtl.  Don't bypass
7977 	 expand_expr, as that can have various side effects; LABEL_DECLs for
7978 	 example, may not have their DECL_RTL set yet.  Expand the rtl of
7979 	 CONSTRUCTORs too, which should yield a memory reference for the
7980 	 constructor's contents.  Assume language specific tree nodes can
7981 	 be expanded in some interesting way.  */
7982       gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
7983       if (DECL_P (exp)
7984 	  || TREE_CODE (exp) == CONSTRUCTOR
7985 	  || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
7986 	{
7987 	  result = expand_expr (exp, target, tmode,
7988 				modifier == EXPAND_INITIALIZER
7989 				? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
7990 
7991 	  /* If the DECL isn't in memory, then the DECL wasn't properly
7992 	     marked TREE_ADDRESSABLE, which will be either a front-end
7993 	     or a tree optimizer bug.  */
7994 
7995 	  gcc_assert (MEM_P (result));
7996 	  result = XEXP (result, 0);
7997 
7998 	  /* ??? Is this needed anymore?  */
7999 	  if (DECL_P (exp))
8000 	    TREE_USED (exp) = 1;
8001 
8002 	  if (modifier != EXPAND_INITIALIZER
8003 	      && modifier != EXPAND_CONST_ADDRESS
8004 	      && modifier != EXPAND_SUM)
8005 	    result = force_operand (result, target);
8006 	  return result;
8007 	}
8008 
8009       /* Pass FALSE as the last argument to get_inner_reference although
8010 	 we are expanding to RTL.  The rationale is that we know how to
8011 	 handle "aligning nodes" here: we can just bypass them because
8012 	 they won't change the final object whose address will be returned
8013 	 (they actually exist only for that purpose).  */
8014       inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
8015 				   &unsignedp, &reversep, &volatilep);
8016       break;
8017     }
8018 
8019   /* We must have made progress.  */
8020   gcc_assert (inner != exp);
8021 
8022   subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
8023   /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8024      inner alignment, force the inner to be sufficiently aligned.  */
8025   if (CONSTANT_CLASS_P (inner)
8026       && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
8027     {
8028       inner = copy_node (inner);
8029       TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
8030       SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
8031       TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
8032     }
8033   result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
8034 
8035   if (offset)
8036     {
8037       rtx tmp;
8038 
8039       if (modifier != EXPAND_NORMAL)
8040 	result = force_operand (result, NULL);
8041       tmp = expand_expr (offset, NULL_RTX, tmode,
8042 			 modifier == EXPAND_INITIALIZER
8043 			  ? EXPAND_INITIALIZER : EXPAND_NORMAL);
8044 
8045       /* expand_expr is allowed to return an object in a mode other
8046 	 than TMODE.  If it did, we need to convert.  */
8047       if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
8048 	tmp = convert_modes (tmode, GET_MODE (tmp),
8049 			     tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
8050       result = convert_memory_address_addr_space (tmode, result, as);
8051       tmp = convert_memory_address_addr_space (tmode, tmp, as);
8052 
8053       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8054 	result = simplify_gen_binary (PLUS, tmode, result, tmp);
8055       else
8056 	{
8057 	  subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8058 	  result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8059 					1, OPTAB_LIB_WIDEN);
8060 	}
8061     }
8062 
8063   if (maybe_ne (bitpos, 0))
8064     {
8065       /* Someone beforehand should have rejected taking the address
8066 	 of an object that isn't byte-aligned.  */
8067       poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8068       result = convert_memory_address_addr_space (tmode, result, as);
8069       result = plus_constant (tmode, result, bytepos);
8070       if (modifier < EXPAND_SUM)
8071 	result = force_operand (result, target);
8072     }
8073 
8074   return result;
8075 }
8076 
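/* Illustration (assuming 8-bit units): for &s.f where field F lives at
   a constant 12-byte offset inside S, get_inner_reference returns S as
   INNER with BITPOS == 96 and no variable OFFSET; the recursive call
   produces the address of S, and the result is then offset with
   plus_constant by 96 / BITS_PER_UNIT == 12.  A variable offset, e.g.
   from &a[i], instead goes through the OFFSET path and is added with
   simplify_gen_binary or expand_simple_binop, depending on MODIFIER.  */
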
8077 /* A subroutine of expand_expr.  Evaluate EXP, which is an ADDR_EXPR.
8078    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
8079 
8080 static rtx
8081 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8082 		       enum expand_modifier modifier)
8083 {
8084   addr_space_t as = ADDR_SPACE_GENERIC;
8085   scalar_int_mode address_mode = Pmode;
8086   scalar_int_mode pointer_mode = ptr_mode;
8087   machine_mode rmode;
8088   rtx result;
8089 
8090   /* Target mode of VOIDmode says "whatever's natural".  */
8091   if (tmode == VOIDmode)
8092     tmode = TYPE_MODE (TREE_TYPE (exp));
8093 
8094   if (POINTER_TYPE_P (TREE_TYPE (exp)))
8095     {
8096       as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8097       address_mode = targetm.addr_space.address_mode (as);
8098       pointer_mode = targetm.addr_space.pointer_mode (as);
8099     }
8100 
8101   /* We can get called with some Weird Things if the user does silliness
8102      like "(short) &a".  In that case, convert_memory_address won't do
8103      the right thing, so ignore the given target mode.  */
8104   scalar_int_mode new_tmode = (tmode == pointer_mode
8105 			       ? pointer_mode
8106 			       : address_mode);
8107 
8108   result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8109 				    new_tmode, modifier, as);
8110 
8111   /* Despite expand_expr's claims about ignoring TMODE when it is not
8112      strictly convenient, things break if we don't honor it.  Note
8113      that, combined with the above, we only do this for pointer modes.  */
8114   rmode = GET_MODE (result);
8115   if (rmode == VOIDmode)
8116     rmode = new_tmode;
8117   if (rmode != new_tmode)
8118     result = convert_memory_address_addr_space (new_tmode, result, as);
8119 
8120   return result;
8121 }
8122 
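/* Illustration: for silliness such as (short) &a, the requested TMODE
   can be HImode, which is neither pointer_mode nor address_mode of the
   address space; the address is then computed in address_mode and the
   requested mode is deliberately ignored, since convert_memory_address
   cannot produce a sensible 16-bit "address".  */
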
8123 /* Generate code for computing CONSTRUCTOR EXP.
8124    An rtx for the computed value is returned.  If AVOID_TEMP_MEM
8125    is TRUE, instead of creating a temporary variable in memory
8126    NULL is returned and the caller needs to handle it differently.  */
8127 
8128 static rtx
8129 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8130 		    bool avoid_temp_mem)
8131 {
8132   tree type = TREE_TYPE (exp);
8133   machine_mode mode = TYPE_MODE (type);
8134 
8135   /* Try to avoid creating a temporary at all.  This is possible
8136      if all of the initializer is zero.
8137      FIXME: try to handle all [0..255] initializers we can handle
8138      with memset.  */
8139   if (TREE_STATIC (exp)
8140       && !TREE_ADDRESSABLE (exp)
8141       && target != 0 && mode == BLKmode
8142       && all_zeros_p (exp))
8143     {
8144       clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8145       return target;
8146     }
8147 
8148   /* All elts simple constants => refer to a constant in memory.  But
8149      if this is a non-BLKmode mode, let it store a field at a time
8150      since that should make a CONST_INT, CONST_WIDE_INT or
8151      CONST_DOUBLE when we fold.  Likewise, if we have a target we can
8152      use, it is best to store directly into the target unless the type
8153      is large enough that memcpy will be used.  If we are making an
8154      initializer and all operands are constant, put it in memory as
8155      well.
8156 
8157      FIXME: try to handle all [0..255] initializers we can handle
8158      Output them with output_constant_def below unless we're sure
8159      they're zeros.  This should go away when vector initializers
8160      are treated like VECTOR_CST instead of arrays.  */
8161   if ((TREE_STATIC (exp)
8162        && ((mode == BLKmode
8163 	    && ! (target != 0 && safe_from_p (target, exp, 1)))
8164 		  || TREE_ADDRESSABLE (exp)
8165 		  || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8166 		      && (! can_move_by_pieces
8167 				     (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8168 				      TYPE_ALIGN (type)))
8169 		      && ! mostly_zeros_p (exp))))
8170       || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8171 	  && TREE_CONSTANT (exp)))
8172     {
8173       rtx constructor;
8174 
8175       if (avoid_temp_mem)
8176 	return NULL_RTX;
8177 
8178       constructor = expand_expr_constant (exp, 1, modifier);
8179 
8180       if (modifier != EXPAND_CONST_ADDRESS
8181 	  && modifier != EXPAND_INITIALIZER
8182 	  && modifier != EXPAND_SUM)
8183 	constructor = validize_mem (constructor);
8184 
8185       return constructor;
8186     }
8187 
8188   /* Handle calls that pass values in multiple non-contiguous
8189      locations.  The Irix 6 ABI has examples of this.  */
8190   if (target == 0 || ! safe_from_p (target, exp, 1)
8191       || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
8192     {
8193       if (avoid_temp_mem)
8194 	return NULL_RTX;
8195 
8196       target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8197     }
8198 
8199   store_constructor (exp, target, 0, int_expr_size (exp), false);
8200   return target;
8201 }
8202 
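/* Illustration: a constructor made entirely of zero constants, e.g. the
   one behind

       struct S s = { 0 };

   may skip building the value piecewise; when a BLKmode TARGET is
   available the object is simply cleared with clear_storage.  A
   constructor of other simple constants may instead be referenced as a
   constant in memory via expand_expr_constant, and only the remaining
   cases fall through to store_constructor.  */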
8203 
8204 /* expand_expr: generate code for computing expression EXP.
8205    An rtx for the computed value is returned.  The value is never null.
8206    In the case of a void EXP, const0_rtx is returned.
8207 
8208    The value may be stored in TARGET if TARGET is nonzero.
8209    TARGET is just a suggestion; callers must assume that
8210    the rtx returned may not be the same as TARGET.
8211 
8212    If TARGET is CONST0_RTX, it means that the value will be ignored.
8213 
8214    If TMODE is not VOIDmode, it suggests generating the
8215    result in mode TMODE.  But this is done only when convenient.
8216    Otherwise, TMODE is ignored and the value is generated in its natural mode.
8217    TMODE is just a suggestion; callers must assume that
8218    the rtx returned may not have mode TMODE.
8219 
8220    Note that TARGET may have neither TMODE nor MODE.  In that case, it
8221    probably will not be used.
8222 
8223    If MODIFIER is EXPAND_SUM then when EXP is an addition
8224    we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8225    or a nest of (PLUS ...) and (MINUS ...) where the terms are
8226    products as above, or REG or MEM, or constant.
8227    Ordinarily in such cases we would output mul or add instructions
8228    and then return a pseudo reg containing the sum.
8229 
8230    EXPAND_INITIALIZER is much like EXPAND_SUM except that
8231    it also marks a label as absolutely required (it can't be dead).
8232    It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8233    This is used for outputting expressions used in initializers.
8234 
8235    EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8236    with a constant address even if that address is not normally legitimate.
8237    EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8238 
8239    EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8240    a call parameter.  Such targets require special care as we haven't yet
8241    marked TARGET so that it's safe from being trashed by libcalls.  We
8242    don't want to use TARGET for anything but the final result;
8243    Intermediate values must go elsewhere.   Additionally, calls to
8244    emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8245 
8246    If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8247    address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8248    DECL_RTL of the VAR_DECL.  *ALT_RTL is also set if EXP is a
8249    COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8250    recursively.
8251 
8252    If INNER_REFERENCE_P is true, we are expanding an inner reference.
8253    In this case, we don't adjust a returned MEM rtx that wouldn't be
8254    sufficiently aligned for its mode; instead, it's up to the caller
8255    to deal with it afterwards.  This is used to make sure that unaligned
8256    base objects for which out-of-bounds accesses are supported, for
8257    example record types with trailing arrays, aren't realigned behind
8258    the back of the caller.
8259    The normal operating mode is to pass FALSE for this parameter.  */
8260 
8261 rtx
8262 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8263 		  enum expand_modifier modifier, rtx *alt_rtl,
8264 		  bool inner_reference_p)
8265 {
8266   rtx ret;
8267 
8268   /* Handle ERROR_MARK before anybody tries to access its type.  */
8269   if (TREE_CODE (exp) == ERROR_MARK
8270       || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8271     {
8272       ret = CONST0_RTX (tmode);
8273       return ret ? ret : const0_rtx;
8274     }
8275 
8276   ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8277 			    inner_reference_p);
8278   return ret;
8279 }
8280 
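/* Illustration of the EXPAND_SUM contract described above: for an
   addition such as i * 4 + 12 arising from an address computation, the
   expansion may legitimately come back as a bare sum of the form

       (plus (mult (reg) (const_int 4)) (const_int 12))

   rather than as a pseudo holding the final value, leaving it to the
   caller (typically address legitimization) to combine the terms.  */
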
8281 /* Try to expand the conditional expression which is represented by
8282    TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds
8283    return the rtl reg which represents the result.  Otherwise return
8284    NULL_RTX.  */
8285 
8286 static rtx
8287 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8288 			      tree treeop1 ATTRIBUTE_UNUSED,
8289 			      tree treeop2 ATTRIBUTE_UNUSED)
8290 {
8291   rtx insn;
8292   rtx op00, op01, op1, op2;
8293   enum rtx_code comparison_code;
8294   machine_mode comparison_mode;
8295   gimple *srcstmt;
8296   rtx temp;
8297   tree type = TREE_TYPE (treeop1);
8298   int unsignedp = TYPE_UNSIGNED (type);
8299   machine_mode mode = TYPE_MODE (type);
8300   machine_mode orig_mode = mode;
8301   static bool expanding_cond_expr_using_cmove = false;
8302 
8303   /* Conditional move expansion can end up TERing two operands which,
8304      when recursively hitting conditional expressions, can result in
8305      exponential behavior if the cmove expansion ultimately fails.
8306      It's hardly profitable to TER a cmove into a cmove, so avoid doing
8307      that by failing early if we end up recursing.  */
8308   if (expanding_cond_expr_using_cmove)
8309     return NULL_RTX;
8310 
8311   /* If we cannot do a conditional move on the mode, try doing it
8312      with the promoted mode. */
8313   if (!can_conditionally_move_p (mode))
8314     {
8315       mode = promote_mode (type, mode, &unsignedp);
8316       if (!can_conditionally_move_p (mode))
8317 	return NULL_RTX;
8318       temp = assign_temp (type, 0, 0); /* Use promoted mode for temp.  */
8319     }
8320   else
8321     temp = assign_temp (type, 0, 1);
8322 
8323   expanding_cond_expr_using_cmove = true;
8324   start_sequence ();
8325   expand_operands (treeop1, treeop2,
8326 		   temp, &op1, &op2, EXPAND_NORMAL);
8327 
8328   if (TREE_CODE (treeop0) == SSA_NAME
8329       && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8330     {
8331       tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8332       enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8333       op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8334       op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8335       comparison_mode = TYPE_MODE (type);
8336       unsignedp = TYPE_UNSIGNED (type);
8337       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8338     }
8339   else if (COMPARISON_CLASS_P (treeop0))
8340     {
8341       tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8342       enum tree_code cmpcode = TREE_CODE (treeop0);
8343       op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8344       op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8345       unsignedp = TYPE_UNSIGNED (type);
8346       comparison_mode = TYPE_MODE (type);
8347       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8348     }
8349   else
8350     {
8351       op00 = expand_normal (treeop0);
8352       op01 = const0_rtx;
8353       comparison_code = NE;
8354       comparison_mode = GET_MODE (op00);
8355       if (comparison_mode == VOIDmode)
8356 	comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8357     }
8358   expanding_cond_expr_using_cmove = false;
8359 
8360   if (GET_MODE (op1) != mode)
8361     op1 = gen_lowpart (mode, op1);
8362 
8363   if (GET_MODE (op2) != mode)
8364     op2 = gen_lowpart (mode, op2);
8365 
8366   /* Try to emit the conditional move.  */
8367   insn = emit_conditional_move (temp, comparison_code,
8368 				op00, op01, comparison_mode,
8369 				op1, op2, mode,
8370 				unsignedp);
8371 
8372   /* If we could do the conditional move, emit the sequence,
8373      and return.  */
8374   if (insn)
8375     {
8376       rtx_insn *seq = get_insns ();
8377       end_sequence ();
8378       emit_insn (seq);
8379       return convert_modes (orig_mode, mode, temp, 0);
8380     }
8381 
8382   /* Otherwise discard the sequence and fall back to code with
8383      branches.  */
8384   end_sequence ();
8385   return NULL_RTX;
8386 }
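
/* Illustration for expand_cond_expr_using_cmove above: it targets a
   gimple COND_EXPR right-hand side such as

       x_3 = a_1 < b_2 ? c_4 : d_5;

   Both arms and the comparison operands are expanded first, then a
   single conditional-move insn is attempted via emit_conditional_move;
   only if that fails is the recorded sequence discarded and NULL_RTX
   returned, so the caller falls back to a branch-based expansion.  */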
8387 
8388 rtx
8389 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8390 		    enum expand_modifier modifier)
8391 {
8392   rtx op0, op1, op2, temp;
8393   rtx_code_label *lab;
8394   tree type;
8395   int unsignedp;
8396   machine_mode mode;
8397   scalar_int_mode int_mode;
8398   enum tree_code code = ops->code;
8399   optab this_optab;
8400   rtx subtarget, original_target;
8401   int ignore;
8402   bool reduce_bit_field;
8403   location_t loc = ops->location;
8404   tree treeop0, treeop1, treeop2;
8405 #define REDUCE_BIT_FIELD(expr)	(reduce_bit_field			  \
8406 				 ? reduce_to_bit_field_precision ((expr), \
8407 								  target, \
8408 								  type)	  \
8409 				 : (expr))
8410 
8411   type = ops->type;
8412   mode = TYPE_MODE (type);
8413   unsignedp = TYPE_UNSIGNED (type);
8414 
8415   treeop0 = ops->op0;
8416   treeop1 = ops->op1;
8417   treeop2 = ops->op2;
8418 
8419   /* We should be called only on simple (binary or unary) expressions,
8420      exactly those that are valid in gimple expressions that aren't
8421      GIMPLE_SINGLE_RHS (or invalid).  */
8422   gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8423 	      || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8424 	      || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8425 
8426   ignore = (target == const0_rtx
8427 	    || ((CONVERT_EXPR_CODE_P (code)
8428 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8429 		&& TREE_CODE (type) == VOID_TYPE));
8430 
8431   /* We should be called only if we need the result.  */
8432   gcc_assert (!ignore);
8433 
8434   /* An operation in what may be a bit-field type needs the
8435      result to be reduced to the precision of the bit-field type,
8436      which is narrower than that of the type's mode.  */
8437   reduce_bit_field = (INTEGRAL_TYPE_P (type)
8438 		      && !type_has_mode_precision_p (type));
8439 
8440   if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
8441     target = 0;
8442 
8443   /* Use subtarget as the target for operand 0 of a binary operation.  */
8444   subtarget = get_subtarget (target);
8445   original_target = target;
8446 
8447   switch (code)
8448     {
8449     case NON_LVALUE_EXPR:
8450     case PAREN_EXPR:
8451     CASE_CONVERT:
8452       if (treeop0 == error_mark_node)
8453 	return const0_rtx;
8454 
8455       if (TREE_CODE (type) == UNION_TYPE)
8456 	{
8457 	  tree valtype = TREE_TYPE (treeop0);
8458 
8459 	  /* If both input and output are BLKmode, this conversion isn't doing
8460 	     anything except possibly changing memory attribute.  */
8461 	  if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
8462 	    {
8463 	      rtx result = expand_expr (treeop0, target, tmode,
8464 					modifier);
8465 
8466 	      result = copy_rtx (result);
8467 	      set_mem_attributes (result, type, 0);
8468 	      return result;
8469 	    }
8470 
8471 	  if (target == 0)
8472 	    {
8473 	      if (TYPE_MODE (type) != BLKmode)
8474 		target = gen_reg_rtx (TYPE_MODE (type));
8475 	      else
8476 		target = assign_temp (type, 1, 1);
8477 	    }
8478 
8479 	  if (MEM_P (target))
8480 	    /* Store data into beginning of memory target.  */
8481 	    store_expr (treeop0,
8482 			adjust_address (target, TYPE_MODE (valtype), 0),
8483 			modifier == EXPAND_STACK_PARM,
8484 			false, TYPE_REVERSE_STORAGE_ORDER (type));
8485 
8486 	  else
8487 	    {
8488 	      gcc_assert (REG_P (target)
8489 			  && !TYPE_REVERSE_STORAGE_ORDER (type));
8490 
8491 	      /* Store this field into a union of the proper type.  */
8492 	      poly_uint64 op0_size
8493 		= tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
8494 	      poly_uint64 union_size = GET_MODE_BITSIZE (mode);
8495 	      store_field (target,
8496 			   /* The conversion must be constructed so that
8497 			      we know at compile time how many bits
8498 			      to preserve.  */
8499 			   ordered_min (op0_size, union_size),
8500 			   0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
8501 			   false, false);
8502 	    }
8503 
8504 	  /* Return the entire union.  */
8505 	  return target;
8506 	}
8507 
8508       if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
8509 	{
8510 	  op0 = expand_expr (treeop0, target, VOIDmode,
8511 			     modifier);
8512 
8513 	  /* If the signedness of the conversion differs and OP0 is
8514 	     a promoted SUBREG, clear that indication since we now
8515 	     have to do the proper extension.  */
8516 	  if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
8517 	      && GET_CODE (op0) == SUBREG)
8518 	    SUBREG_PROMOTED_VAR_P (op0) = 0;
8519 
8520 	  return REDUCE_BIT_FIELD (op0);
8521 	}
8522 
8523       op0 = expand_expr (treeop0, NULL_RTX, mode,
8524 			 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
8525       if (GET_MODE (op0) == mode)
8526 	;
8527 
8528       /* If OP0 is a constant, just convert it into the proper mode.  */
8529       else if (CONSTANT_P (op0))
8530 	{
8531 	  tree inner_type = TREE_TYPE (treeop0);
8532 	  machine_mode inner_mode = GET_MODE (op0);
8533 
8534 	  if (inner_mode == VOIDmode)
8535 	    inner_mode = TYPE_MODE (inner_type);
8536 
8537 	  if (modifier == EXPAND_INITIALIZER)
8538 	    op0 = lowpart_subreg (mode, op0, inner_mode);
8539 	  else
8540 	    op0 = convert_modes (mode, inner_mode, op0,
8541 				 TYPE_UNSIGNED (inner_type));
8542 	}
8543 
8544       else if (modifier == EXPAND_INITIALIZER)
8545 	op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8546 			     ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
8547 
8548       else if (target == 0)
8549 	op0 = convert_to_mode (mode, op0,
8550 			       TYPE_UNSIGNED (TREE_TYPE
8551 					      (treeop0)));
8552       else
8553 	{
8554 	  convert_move (target, op0,
8555 			TYPE_UNSIGNED (TREE_TYPE (treeop0)));
8556 	  op0 = target;
8557 	}
8558 
8559       return REDUCE_BIT_FIELD (op0);
8560 
8561     case ADDR_SPACE_CONVERT_EXPR:
8562       {
8563 	tree treeop0_type = TREE_TYPE (treeop0);
8564 
8565 	gcc_assert (POINTER_TYPE_P (type));
8566 	gcc_assert (POINTER_TYPE_P (treeop0_type));
8567 
8568 	addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
8569 	addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
8570 
8571         /* Conversions between pointers to the same address space should
8572 	   have been implemented via CONVERT_EXPR / NOP_EXPR.  */
8573 	gcc_assert (as_to != as_from);
8574 
8575 	op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
8576 
8577         /* Ask target code to handle conversion between pointers
8578 	   to overlapping address spaces.  */
8579 	if (targetm.addr_space.subset_p (as_to, as_from)
8580 	    || targetm.addr_space.subset_p (as_from, as_to))
8581 	  {
8582 	    op0 = targetm.addr_space.convert (op0, treeop0_type, type);
8583 	  }
8584         else
8585           {
8586 	    /* For disjoint address spaces, converting anything but a null
8587 	       pointer invokes undefined behavior.  We truncate or extend the
8588 	       value as if we'd converted via integers, which handles 0 as
8589 	       required, and all others as the programmer likely expects.  */
8590 #ifndef POINTERS_EXTEND_UNSIGNED
8591 	    const int POINTERS_EXTEND_UNSIGNED = 1;
8592 #endif
8593 	    op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
8594 				 op0, POINTERS_EXTEND_UNSIGNED);
8595 	  }
8596 	gcc_assert (op0);
8597 	return op0;
8598       }
8599 
8600     case POINTER_PLUS_EXPR:
8601       /* Even though the sizetype mode and the pointer's mode can be different,
8602          expand is able to handle this correctly and get the correct result out
8603          of the PLUS_EXPR code.  */
8604       /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
8605          if sizetype precision is smaller than pointer precision.  */
8606       if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
8607 	treeop1 = fold_convert_loc (loc, type,
8608 				    fold_convert_loc (loc, ssizetype,
8609 						      treeop1));
8610       /* If sizetype precision is larger than pointer precision, truncate the
8611 	 offset to have matching modes.  */
8612       else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
8613 	treeop1 = fold_convert_loc (loc, type, treeop1);
8614       /* FALLTHRU */
8615 
8616     case PLUS_EXPR:
8617       /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8618 	 something else, make sure we add the register to the constant and
8619 	 then to the other thing.  This case can occur during strength
8620 	 reduction and doing it this way will produce better code if the
8621 	 frame pointer or argument pointer is eliminated.
8622 
8623 	 fold-const.c will ensure that the constant is always in the inner
8624 	 PLUS_EXPR, so the only case we need to do anything about is if
8625 	 sp, ap, or fp is our second argument, in which case we must swap
8626 	 the innermost first argument and our second argument.  */
8627 
8628       if (TREE_CODE (treeop0) == PLUS_EXPR
8629 	  && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
8630 	  && VAR_P (treeop1)
8631 	  && (DECL_RTL (treeop1) == frame_pointer_rtx
8632 	      || DECL_RTL (treeop1) == stack_pointer_rtx
8633 	      || DECL_RTL (treeop1) == arg_pointer_rtx))
8634 	{
8635 	  gcc_unreachable ();
8636 	}
8637 
8638       /* If the result is to be ptr_mode and we are adding an integer to
8639 	 something, we might be forming a constant.  So try to use
8640 	 plus_constant.  If it produces a sum and we can't accept it,
8641 	 use force_operand.  This allows P = &ARR[const] to generate
8642 	 efficient code on machines where a SYMBOL_REF is not a valid
8643 	 address.
8644 
8645 	 If this is an EXPAND_SUM call, always return the sum.  */
8646       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
8647 	  || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
8648 	{
8649 	  if (modifier == EXPAND_STACK_PARM)
8650 	    target = 0;
8651 	  if (TREE_CODE (treeop0) == INTEGER_CST
8652 	      && HWI_COMPUTABLE_MODE_P (mode)
8653 	      && TREE_CONSTANT (treeop1))
8654 	    {
8655 	      rtx constant_part;
8656 	      HOST_WIDE_INT wc;
8657 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
8658 
8659 	      op1 = expand_expr (treeop1, subtarget, VOIDmode,
8660 				 EXPAND_SUM);
8661 	      /* Use wi::shwi to ensure that the constant is
8662 		 truncated according to the mode of OP1, then sign extended
8663 		 to a HOST_WIDE_INT.  Using the constant directly can result
8664 		 in non-canonical RTL in a 64x32 cross compile.  */
8665 	      wc = TREE_INT_CST_LOW (treeop0);
8666 	      constant_part =
8667 		immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8668 	      op1 = plus_constant (mode, op1, INTVAL (constant_part));
8669 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8670 		op1 = force_operand (op1, target);
8671 	      return REDUCE_BIT_FIELD (op1);
8672 	    }
8673 
8674 	  else if (TREE_CODE (treeop1) == INTEGER_CST
8675 		   && HWI_COMPUTABLE_MODE_P (mode)
8676 		   && TREE_CONSTANT (treeop0))
8677 	    {
8678 	      rtx constant_part;
8679 	      HOST_WIDE_INT wc;
8680 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
8681 
8682 	      op0 = expand_expr (treeop0, subtarget, VOIDmode,
8683 				 (modifier == EXPAND_INITIALIZER
8684 				 ? EXPAND_INITIALIZER : EXPAND_SUM));
8685 	      if (! CONSTANT_P (op0))
8686 		{
8687 		  op1 = expand_expr (treeop1, NULL_RTX,
8688 				     VOIDmode, modifier);
8689 		  /* Return a PLUS if modifier says it's OK.  */
8690 		  if (modifier == EXPAND_SUM
8691 		      || modifier == EXPAND_INITIALIZER)
8692 		    return simplify_gen_binary (PLUS, mode, op0, op1);
8693 		  goto binop2;
8694 		}
8695 	      /* Use wi::shwi to ensure that the constant is
8696 		 truncated according to the mode of OP1, then sign extended
8697 		 to a HOST_WIDE_INT.  Using the constant directly can result
8698 		 in non-canonical RTL in a 64x32 cross compile.  */
8699 	      wc = TREE_INT_CST_LOW (treeop1);
8700 	      constant_part
8701 		= immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8702 	      op0 = plus_constant (mode, op0, INTVAL (constant_part));
8703 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8704 		op0 = force_operand (op0, target);
8705 	      return REDUCE_BIT_FIELD (op0);
8706 	    }
8707 	}
8708 
8709       /* Use TER to expand pointer addition of a negated value
8710 	 as pointer subtraction.  */
8711       if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
8712 	   || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
8713 	       && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
8714 	  && TREE_CODE (treeop1) == SSA_NAME
8715 	  && TYPE_MODE (TREE_TYPE (treeop0))
8716 	     == TYPE_MODE (TREE_TYPE (treeop1)))
8717 	{
8718 	  gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
8719 	  if (def)
8720 	    {
8721 	      treeop1 = gimple_assign_rhs1 (def);
8722 	      code = MINUS_EXPR;
8723 	      goto do_minus;
8724 	    }
8725 	}
8726 
8727       /* No sense saving up arithmetic to be done
8728 	 if it's all in the wrong mode to form part of an address.
8729 	 And force_operand won't know whether to sign-extend or
8730 	 zero-extend.  */
8731       if (modifier != EXPAND_INITIALIZER
8732 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
8733 	{
8734 	  expand_operands (treeop0, treeop1,
8735 			   subtarget, &op0, &op1, modifier);
8736 	  if (op0 == const0_rtx)
8737 	    return op1;
8738 	  if (op1 == const0_rtx)
8739 	    return op0;
8740 	  goto binop2;
8741 	}
8742 
8743       expand_operands (treeop0, treeop1,
8744 		       subtarget, &op0, &op1, modifier);
8745       return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8746 
8747     case MINUS_EXPR:
8748     case POINTER_DIFF_EXPR:
8749     do_minus:
8750       /* For initializers, we are allowed to return a MINUS of two
8751 	 symbolic constants.  Here we handle all cases when both operands
8752 	 are constant.  */
8753       /* Handle difference of two symbolic constants,
8754 	 for the sake of an initializer.  */
8755       if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8756 	  && really_constant_p (treeop0)
8757 	  && really_constant_p (treeop1))
8758 	{
8759 	  expand_operands (treeop0, treeop1,
8760 			   NULL_RTX, &op0, &op1, modifier);
8761 	  return simplify_gen_binary (MINUS, mode, op0, op1);
8762 	}
8763 
8764       /* No sense saving up arithmetic to be done
8765 	 if it's all in the wrong mode to form part of an address.
8766 	 And force_operand won't know whether to sign-extend or
8767 	 zero-extend.  */
8768       if (modifier != EXPAND_INITIALIZER
8769 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
8770 	goto binop;
8771 
8772       expand_operands (treeop0, treeop1,
8773 		       subtarget, &op0, &op1, modifier);
8774 
8775       /* Convert A - const to A + (-const).  */
8776       if (CONST_INT_P (op1))
8777 	{
8778 	  op1 = negate_rtx (mode, op1);
8779 	  return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8780 	}
8781 
8782       goto binop2;
8783 
8784     case WIDEN_MULT_PLUS_EXPR:
8785     case WIDEN_MULT_MINUS_EXPR:
8786       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
8787       op2 = expand_normal (treeop2);
8788       target = expand_widen_pattern_expr (ops, op0, op1, op2,
8789 					  target, unsignedp);
8790       return target;
8791 
8792     case WIDEN_MULT_EXPR:
8793       /* If first operand is constant, swap them.
8794 	 Thus the following special case checks need only
8795 	 check the second operand.  */
8796       if (TREE_CODE (treeop0) == INTEGER_CST)
8797 	std::swap (treeop0, treeop1);
8798 
8799       /* First, check if we have a multiplication of one signed and one
8800 	 unsigned operand.  */
8801       if (TREE_CODE (treeop1) != INTEGER_CST
8802 	  && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8803 	      != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
8804 	{
8805 	  machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
8806 	  this_optab = usmul_widen_optab;
8807 	  if (find_widening_optab_handler (this_optab, mode, innermode)
8808 		!= CODE_FOR_nothing)
8809 	    {
8810 	      if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
8811 		expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8812 				 EXPAND_NORMAL);
8813 	      else
8814 		expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
8815 				 EXPAND_NORMAL);
8816 	      /* op0 and op1 might still be constant, despite the above
8817 		 != INTEGER_CST check.  Handle it.  */
8818 	      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8819 		{
8820 		  op0 = convert_modes (mode, innermode, op0, true);
8821 		  op1 = convert_modes (mode, innermode, op1, false);
8822 		  return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8823 							target, unsignedp));
8824 		}
8825 	      goto binop3;
8826 	    }
8827 	}
8828       /* Check for a multiplication with matching signedness.  */
8829       else if ((TREE_CODE (treeop1) == INTEGER_CST
8830 		&& int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
8831 	       || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
8832 		   == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
8833 	{
8834 	  tree op0type = TREE_TYPE (treeop0);
8835 	  machine_mode innermode = TYPE_MODE (op0type);
8836 	  bool zextend_p = TYPE_UNSIGNED (op0type);
8837 	  optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
8838 	  this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
8839 
8840 	  if (TREE_CODE (treeop0) != INTEGER_CST)
8841 	    {
8842 	      if (find_widening_optab_handler (this_optab, mode, innermode)
8843 		  != CODE_FOR_nothing)
8844 		{
8845 		  expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8846 				   EXPAND_NORMAL);
8847 		  /* op0 and op1 might still be constant, despite the above
8848 		     != INTEGER_CST check.  Handle it.  */
8849 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8850 		    {
8851 		     widen_mult_const:
8852 		      op0 = convert_modes (mode, innermode, op0, zextend_p);
8853 		      op1
8854 			= convert_modes (mode, innermode, op1,
8855 					 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8856 		      return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8857 							    target,
8858 							    unsignedp));
8859 		    }
8860 		  temp = expand_widening_mult (mode, op0, op1, target,
8861 					       unsignedp, this_optab);
8862 		  return REDUCE_BIT_FIELD (temp);
8863 		}
8864 	      if (find_widening_optab_handler (other_optab, mode, innermode)
8865 		  != CODE_FOR_nothing
8866 		  && innermode == word_mode)
8867 		{
8868 		  rtx htem, hipart;
8869 		  op0 = expand_normal (treeop0);
8870 		  op1 = expand_normal (treeop1);
8871 		  /* op0 and op1 might be constants, despite the above
8872 		     != INTEGER_CST check.  Handle it.  */
8873 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8874 		    goto widen_mult_const;
8875 		  if (TREE_CODE (treeop1) == INTEGER_CST)
8876 		    op1 = convert_modes (mode, word_mode, op1,
8877 					 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8878 		  temp = expand_binop (mode, other_optab, op0, op1, target,
8879 				       unsignedp, OPTAB_LIB_WIDEN);
8880 		  hipart = gen_highpart (word_mode, temp);
8881 		  htem = expand_mult_highpart_adjust (word_mode, hipart,
8882 						      op0, op1, hipart,
8883 						      zextend_p);
8884 		  if (htem != hipart)
8885 		    emit_move_insn (hipart, htem);
8886 		  return REDUCE_BIT_FIELD (temp);
8887 		}
8888 	    }
8889 	}
8890       treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
8891       treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
8892       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8893       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8894 
8895     case FMA_EXPR:
8896       {
8897 	optab opt = fma_optab;
8898 	gimple *def0, *def2;
8899 
8900 	/* If there is no insn for FMA, emit it as __builtin_fma{,f,l}
8901 	   call.  */
8902 	if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
8903 	  {
8904 	    tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
8905 	    tree call_expr;
8906 
8907 	    gcc_assert (fn != NULL_TREE);
8908 	    call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
8909 	    return expand_builtin (call_expr, target, subtarget, mode, false);
8910 	  }
8911 
8912 	def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
8913 	/* The multiplication is commutative - look at its 2nd operand
8914 	   if the first isn't fed by a negate.  */
8915 	if (!def0)
8916 	  {
8917 	    def0 = get_def_for_expr (treeop1, NEGATE_EXPR);
8918 	    /* Swap operands if the 2nd operand is fed by a negate.  */
8919 	    if (def0)
8920 	      std::swap (treeop0, treeop1);
8921 	  }
8922 	def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
8923 
8924 	op0 = op2 = NULL;
8925 
8926 	if (def0 && def2
8927 	    && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
8928 	  {
8929 	    opt = fnms_optab;
8930 	    op0 = expand_normal (gimple_assign_rhs1 (def0));
8931 	    op2 = expand_normal (gimple_assign_rhs1 (def2));
8932 	  }
8933 	else if (def0
8934 		 && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
8935 	  {
8936 	    opt = fnma_optab;
8937 	    op0 = expand_normal (gimple_assign_rhs1 (def0));
8938 	  }
8939 	else if (def2
8940 		 && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
8941 	  {
8942 	    opt = fms_optab;
8943 	    op2 = expand_normal (gimple_assign_rhs1 (def2));
8944 	  }
8945 
8946 	if (op0 == NULL)
8947 	  op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
8948 	if (op2 == NULL)
8949 	  op2 = expand_normal (treeop2);
8950 	op1 = expand_normal (treeop1);
8951 
8952 	return expand_ternary_op (TYPE_MODE (type), opt,
8953 				  op0, op1, op2, target, 0);
8954       }
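
      /* Illustration of the FMA_EXPR expansion above: negations feeding
	 the multiply-add are folded into the insn when the target has
	 the matching pattern, roughly

	     (-a) * b + c      ->  fnma
	     a * b + (-c)      ->  fms
	     (-a) * b + (-c)   ->  fnms

	 and the plain fma pattern, or a __builtin_fma{,f,l} call when no
	 pattern exists at all, covers the rest.  */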
8955 
8956     case MULT_EXPR:
8957       /* If this is a fixed-point operation, then we cannot use the code
8958 	 below because "expand_mult" doesn't support sat/no-sat fixed-point
8959          multiplications.   */
8960       if (ALL_FIXED_POINT_MODE_P (mode))
8961 	goto binop;
8962 
8963       /* If first operand is constant, swap them.
8964 	 Thus the following special case checks need only
8965 	 check the second operand.  */
8966       if (TREE_CODE (treeop0) == INTEGER_CST)
8967 	std::swap (treeop0, treeop1);
8968 
8969       /* Attempt to return something suitable for generating an
8970 	 indexed address, for machines that support that.  */
8971 
8972       if (modifier == EXPAND_SUM && mode == ptr_mode
8973 	  && tree_fits_shwi_p (treeop1))
8974 	{
8975 	  tree exp1 = treeop1;
8976 
8977 	  op0 = expand_expr (treeop0, subtarget, VOIDmode,
8978 			     EXPAND_SUM);
8979 
8980 	  if (!REG_P (op0))
8981 	    op0 = force_operand (op0, NULL_RTX);
8982 	  if (!REG_P (op0))
8983 	    op0 = copy_to_mode_reg (mode, op0);
8984 
8985 	  return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
8986 			       gen_int_mode (tree_to_shwi (exp1),
8987 					     TYPE_MODE (TREE_TYPE (exp1)))));
8988 	}
8989 
8990       if (modifier == EXPAND_STACK_PARM)
8991 	target = 0;
8992 
8993       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8994       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8995 
8996     case TRUNC_MOD_EXPR:
8997     case FLOOR_MOD_EXPR:
8998     case CEIL_MOD_EXPR:
8999     case ROUND_MOD_EXPR:
9000 
9001     case TRUNC_DIV_EXPR:
9002     case FLOOR_DIV_EXPR:
9003     case CEIL_DIV_EXPR:
9004     case ROUND_DIV_EXPR:
9005     case EXACT_DIV_EXPR:
9006      {
9007        /* If this is a fixed-point operation, then we cannot use the code
9008 	  below because "expand_divmod" doesn't support sat/no-sat fixed-point
9009 	  divisions.   */
9010        if (ALL_FIXED_POINT_MODE_P (mode))
9011 	 goto binop;
9012 
9013        if (modifier == EXPAND_STACK_PARM)
9014 	 target = 0;
9015        /* Possible optimization: compute the dividend with EXPAND_SUM
9016 	  then, if the divisor is constant, we can optimize the case
9017 	  where some terms of the dividend have coefficients divisible by it.  */
9018        expand_operands (treeop0, treeop1,
9019 			subtarget, &op0, &op1, EXPAND_NORMAL);
9020        bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
9021 		    || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
9022        if (SCALAR_INT_MODE_P (mode)
9023 	   && optimize >= 2
9024 	   && get_range_pos_neg (treeop0) == 1
9025 	   && get_range_pos_neg (treeop1) == 1)
9026 	 {
9027 	   /* If both arguments are known to be positive when interpreted
9028 	      as signed, we can expand it as both signed and unsigned
9029 	      division or modulo.  Choose the cheaper sequence in that case.  */
9030 	   bool speed_p = optimize_insn_for_speed_p ();
9031 	   do_pending_stack_adjust ();
9032 	   start_sequence ();
9033 	   rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
9034 	   rtx_insn *uns_insns = get_insns ();
9035 	   end_sequence ();
9036 	   start_sequence ();
9037 	   rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
9038 	   rtx_insn *sgn_insns = get_insns ();
9039 	   end_sequence ();
9040 	   unsigned uns_cost = seq_cost (uns_insns, speed_p);
9041 	   unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
9042 
9043 	   /* If the costs are the same, then use the other factor
9044 	      as a tie breaker.  */
9045 	   if (uns_cost == sgn_cost)
9046 	     {
9047 		uns_cost = seq_cost (uns_insns, !speed_p);
9048 		sgn_cost = seq_cost (sgn_insns, !speed_p);
9049 	     }
9050 
9051 	   if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
9052 	     {
9053 	       emit_insn (uns_insns);
9054 	       return uns_ret;
9055 	     }
9056 	   emit_insn (sgn_insns);
9057 	   return sgn_ret;
9058 	 }
9059        return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
9060      }
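
    /* Illustration of the signed/unsigned choice above: with TREEOP0 and
       TREEOP1 known nonnegative, e.g. x / 2 after range information has
       proved x >= 0, the unsigned expansion is typically a single
       logical shift while the signed one needs a rounding fixup, so the
       unsigned sequence usually costs less and is the one emitted.  */
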
9061     case RDIV_EXPR:
9062       goto binop;
9063 
9064     case MULT_HIGHPART_EXPR:
9065       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9066       temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9067       gcc_assert (temp);
9068       return temp;
9069 
9070     case FIXED_CONVERT_EXPR:
9071       op0 = expand_normal (treeop0);
9072       if (target == 0 || modifier == EXPAND_STACK_PARM)
9073 	target = gen_reg_rtx (mode);
9074 
9075       if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9076 	   && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9077           || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9078 	expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9079       else
9080 	expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9081       return target;
9082 
9083     case FIX_TRUNC_EXPR:
9084       op0 = expand_normal (treeop0);
9085       if (target == 0 || modifier == EXPAND_STACK_PARM)
9086 	target = gen_reg_rtx (mode);
9087       expand_fix (target, op0, unsignedp);
9088       return target;
9089 
9090     case FLOAT_EXPR:
9091       op0 = expand_normal (treeop0);
9092       if (target == 0 || modifier == EXPAND_STACK_PARM)
9093 	target = gen_reg_rtx (mode);
9094       /* expand_float can't figure out what to do if FROM has VOIDmode.
9095 	 So give it the correct mode.  With -O, cse will optimize this.  */
9096       if (GET_MODE (op0) == VOIDmode)
9097 	op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9098 				op0);
9099       expand_float (target, op0,
9100 		    TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9101       return target;
9102 
9103     case NEGATE_EXPR:
9104       op0 = expand_expr (treeop0, subtarget,
9105 			 VOIDmode, EXPAND_NORMAL);
9106       if (modifier == EXPAND_STACK_PARM)
9107 	target = 0;
9108       temp = expand_unop (mode,
9109       			  optab_for_tree_code (NEGATE_EXPR, type,
9110 					       optab_default),
9111 			  op0, target, 0);
9112       gcc_assert (temp);
9113       return REDUCE_BIT_FIELD (temp);
9114 
9115     case ABS_EXPR:
9116       op0 = expand_expr (treeop0, subtarget,
9117 			 VOIDmode, EXPAND_NORMAL);
9118       if (modifier == EXPAND_STACK_PARM)
9119 	target = 0;
9120 
9121       /* ABS_EXPR is not valid for complex arguments.  */
9122       gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9123 		  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9124 
9125       /* Unsigned abs is simply the operand.  Testing here means we don't
9126 	 risk generating incorrect code below.  */
9127       if (TYPE_UNSIGNED (type))
9128 	return op0;
9129 
9130       return expand_abs (mode, op0, target, unsignedp,
9131 			 safe_from_p (target, treeop0, 1));
9132 
9133     case MAX_EXPR:
9134     case MIN_EXPR:
9135       target = original_target;
9136       if (target == 0
9137 	  || modifier == EXPAND_STACK_PARM
9138 	  || (MEM_P (target) && MEM_VOLATILE_P (target))
9139 	  || GET_MODE (target) != mode
9140 	  || (REG_P (target)
9141 	      && REGNO (target) < FIRST_PSEUDO_REGISTER))
9142 	target = gen_reg_rtx (mode);
9143       expand_operands (treeop0, treeop1,
9144 		       target, &op0, &op1, EXPAND_NORMAL);
9145 
9146       /* First try to do it with a special MIN or MAX instruction.
9147 	 If that does not win, use a conditional jump to select the proper
9148 	 value.  */
9149       this_optab = optab_for_tree_code (code, type, optab_default);
9150       temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9151 			   OPTAB_WIDEN);
9152       if (temp != 0)
9153 	return temp;
9154 
9155       /* For vector MIN <x, y>, expand it as VEC_COND_EXPR <x <= y, x, y>
9156 	 and similarly for MAX <x, y>.  */
9157       if (VECTOR_TYPE_P (type))
9158 	{
9159 	  tree t0 = make_tree (type, op0);
9160 	  tree t1 = make_tree (type, op1);
9161 	  tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
9162 				    type, t0, t1);
9163 	  return expand_vec_cond_expr (type, comparison, t0, t1,
9164 				       original_target);
9165 	}
9166 
9167       /* At this point, a MEM target is no longer useful; we will get better
9168 	 code without it.  */
9169 
9170       if (! REG_P (target))
9171 	target = gen_reg_rtx (mode);
9172 
9173       /* If op1 was placed in target, swap op0 and op1.  */
9174       if (target != op0 && target == op1)
9175 	std::swap (op0, op1);
9176 
9177       /* We generate better code and avoid problems with op1 mentioning
9178 	 target by forcing op1 into a pseudo if it isn't a constant.  */
9179       if (! CONSTANT_P (op1))
9180 	op1 = force_reg (mode, op1);
9181 
9182       {
9183 	enum rtx_code comparison_code;
9184 	rtx cmpop1 = op1;
9185 
9186 	if (code == MAX_EXPR)
9187 	  comparison_code = unsignedp ? GEU : GE;
9188 	else
9189 	  comparison_code = unsignedp ? LEU : LE;
9190 
9191 	/* Canonicalize to comparisons against 0.  */
9192 	if (op1 == const1_rtx)
9193 	  {
9194 	    /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9195 	       or (a != 0 ? a : 1) for unsigned.
9196 	       For MIN we are safe converting (a <= 1 ? a : 1)
9197 	       into (a <= 0 ? a : 1)  */
9198 	    cmpop1 = const0_rtx;
9199 	    if (code == MAX_EXPR)
9200 	      comparison_code = unsignedp ? NE : GT;
9201 	  }
9202 	if (op1 == constm1_rtx && !unsignedp)
9203 	  {
9204 	    /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9205 	       and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9206 	    cmpop1 = const0_rtx;
9207 	    if (code == MIN_EXPR)
9208 	      comparison_code = LT;
9209 	  }
9210 
9211 	/* Use a conditional move if possible.  */
9212 	if (can_conditionally_move_p (mode))
9213 	  {
9214 	    rtx insn;
9215 
9216 	    start_sequence ();
9217 
9218 	    /* Try to emit the conditional move.  */
9219 	    insn = emit_conditional_move (target, comparison_code,
9220 					  op0, cmpop1, mode,
9221 					  op0, op1, mode,
9222 					  unsignedp);
9223 
9224 	    /* If we could do the conditional move, emit the sequence,
9225 	       and return.  */
9226 	    if (insn)
9227 	      {
9228 		rtx_insn *seq = get_insns ();
9229 		end_sequence ();
9230 		emit_insn (seq);
9231 		return target;
9232 	      }
9233 
9234 	    /* Otherwise discard the sequence and fall back to code with
9235 	       branches.  */
9236 	    end_sequence ();
9237 	  }
9238 
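	/* No conditional move was available, so fall back to a branch.
	   The RTL emitted below corresponds roughly to this sketch
	   (illustrative only; comparison_code is GE/GEU for MAX_EXPR and
	   LE/LEU for MIN_EXPR, possibly canonicalized above):

	     target = op0;
	     if (target <comparison_code> cmpop1)
	       goto lab;
	     target = op1;
	   lab:;  */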
9239 	if (target != op0)
9240 	  emit_move_insn (target, op0);
9241 
9242 	lab = gen_label_rtx ();
9243 	do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9244 				 unsignedp, mode, NULL_RTX, NULL, lab,
9245 				 profile_probability::uninitialized ());
9246       }
9247       emit_move_insn (target, op1);
9248       emit_label (lab);
9249       return target;
9250 
9251     case BIT_NOT_EXPR:
9252       op0 = expand_expr (treeop0, subtarget,
9253 			 VOIDmode, EXPAND_NORMAL);
9254       if (modifier == EXPAND_STACK_PARM)
9255 	target = 0;
9256       /* In case we have to reduce the result to bitfield precision
9257 	 for unsigned bitfield expand this as XOR with a proper constant
9258 	 instead.  */
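      /* A minimal illustration (assuming a hypothetical 3-bit unsigned
	 bit-field type held in QImode): ~x must keep the upper five bits
	 zero, so instead of a full one's complement we emit

	     x ^ 0x07

	 i.e. an XOR with wi::mask (3, false, 8).  */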
9259       if (reduce_bit_field && TYPE_UNSIGNED (type))
9260 	{
9261 	  int_mode = SCALAR_INT_TYPE_MODE (type);
9262 	  wide_int mask = wi::mask (TYPE_PRECISION (type),
9263 				    false, GET_MODE_PRECISION (int_mode));
9264 
9265 	  temp = expand_binop (int_mode, xor_optab, op0,
9266 			       immed_wide_int_const (mask, int_mode),
9267 			       target, 1, OPTAB_LIB_WIDEN);
9268 	}
9269       else
9270 	temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9271       gcc_assert (temp);
9272       return temp;
9273 
9274       /* ??? Can optimize bitwise operations with one arg constant.
9275 	 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9276 	 and (a bitwise1 b) bitwise2 b (etc)
9277 	 but that is probably not worthwhile.  */
9278 
9279     case BIT_AND_EXPR:
9280     case BIT_IOR_EXPR:
9281     case BIT_XOR_EXPR:
9282       goto binop;
9283 
9284     case LROTATE_EXPR:
9285     case RROTATE_EXPR:
9286       gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9287 		  || type_has_mode_precision_p (type));
9288       /* fall through */
9289 
9290     case LSHIFT_EXPR:
9291     case RSHIFT_EXPR:
9292       {
9293 	/* If this is a fixed-point operation, then we cannot use the code
9294 	   below because "expand_shift" doesn't support sat/no-sat fixed-point
9295 	   shifts.  */
9296 	if (ALL_FIXED_POINT_MODE_P (mode))
9297 	  goto binop;
9298 
9299 	if (! safe_from_p (subtarget, treeop1, 1))
9300 	  subtarget = 0;
9301 	if (modifier == EXPAND_STACK_PARM)
9302 	  target = 0;
9303 	op0 = expand_expr (treeop0, subtarget,
9304 			   VOIDmode, EXPAND_NORMAL);
9305 
9306 	/* Left shift optimization when shifting across word_size boundary.
9307 
9308 	   If mode == GET_MODE_WIDER_MODE (word_mode), then normally
9309 	   there is no native instruction to support a left shift in this
9310 	   wide mode.  Consider the following scenario:
9311 
9312 	    Type A = (Type) B  << C
9313 
9314 	    |<		 T	    >|
9315 	    | dest_high  |  dest_low |
9316 
9317 			 | word_size |
9318 
9319 	   If the shift amount C causes B to be shifted across the word
9320 	   size boundary, i.e. part of B is shifted into the high half of
9321 	   the destination register while part of B remains in the low
9322 	   half, then GCC will use the following left shift expansion
9323 	   logic:
9324 
9325 	   1. Initialize dest_low to B.
9326 	   2. Initialize every bit of dest_high to the sign bit of B.
9327 	   3. Logically left shift dest_low by C bits to finalize dest_low.
9328 	      The value of dest_low before this shift is kept in a temp D.
9329 	   4. Logically left shift dest_high by C bits.
9330 	   5. Logically right shift D by (word_size - C) bits.
9331 	   6. Or the results of 4 and 5 to finalize dest_high.
9332 
9333 	   However, by checking the gimple statements, if operand B
9334 	   comes from a sign extension, then we can simplify the above
9335 	   expansion logic into:
9336 
9337 	      1. dest_high = src_low >> (word_size - C).
9338 	      2. dest_low = src_low << C.
9339 
9340 	   A single arithmetic right shift accomplishes the purpose of
9341 	   steps 2, 4, 5 and 6, so the number of steps needed drops
9342 	   from 6 to 2.
9343 
9344 	   The case is similar for zero extension, except that we
9345 	   initialize dest_high to zero rather than copies of the sign
9346 	   bit from B.  Furthermore, we need to use a logical right shift
9347 	   in this case.
9348 
9349 	   The choice of sign-extension versus zero-extension is
9350 	   determined entirely by whether or not B is signed and is
9351 	   independent of the current setting of unsignedp.  */
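	/* Hypothetical example, assuming a target whose word_mode is
	   64 bits wide: for

	       __int128 a = (__int128) b << 40;	// b is a 32-bit int

	   the shifted value crosses the word boundary (40 + 32 >= 64),
	   so the simplified sequence is used:

	       dest_high = src_low >> 24;	// arithmetic; 24 == 64 - 40
	       dest_low  = src_low << 40;  */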
9352 
9353 	temp = NULL_RTX;
9354 	if (code == LSHIFT_EXPR
9355 	    && target
9356 	    && REG_P (target)
9357 	    && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9358 	    && mode == int_mode
9359 	    && TREE_CONSTANT (treeop1)
9360 	    && TREE_CODE (treeop0) == SSA_NAME)
9361 	  {
9362 	    gimple *def = SSA_NAME_DEF_STMT (treeop0);
9363 	    if (is_gimple_assign (def)
9364 		&& gimple_assign_rhs_code (def) == NOP_EXPR)
9365 	      {
9366 		scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9367 		  (TREE_TYPE (gimple_assign_rhs1 (def)));
9368 
9369 		if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9370 		    && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9371 		    && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9372 			>= GET_MODE_BITSIZE (word_mode)))
9373 		  {
9374 		    rtx_insn *seq, *seq_old;
9375 		    poly_uint64 high_off = subreg_highpart_offset (word_mode,
9376 								   int_mode);
9377 		    bool extend_unsigned
9378 		      = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9379 		    rtx low = lowpart_subreg (word_mode, op0, int_mode);
9380 		    rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9381 		    rtx dest_high = simplify_gen_subreg (word_mode, target,
9382 							 int_mode, high_off);
9383 		    HOST_WIDE_INT ramount = (BITS_PER_WORD
9384 					     - TREE_INT_CST_LOW (treeop1));
9385 		    tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9386 
9387 		    start_sequence ();
9388 		    /* dest_high = src_low >> (word_size - C).  */
9389 		    temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9390 						  rshift, dest_high,
9391 						  extend_unsigned);
9392 		    if (temp != dest_high)
9393 		      emit_move_insn (dest_high, temp);
9394 
9395 		    /* dest_low = src_low << C.  */
9396 		    temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9397 						  treeop1, dest_low, unsignedp);
9398 		    if (temp != dest_low)
9399 		      emit_move_insn (dest_low, temp);
9400 
9401 		    seq = get_insns ();
9402 		    end_sequence ();
9403 		    temp = target;
9404 
9405 		    if (have_insn_for (ASHIFT, int_mode))
9406 		      {
9407 			bool speed_p = optimize_insn_for_speed_p ();
9408 			start_sequence ();
9409 			rtx ret_old = expand_variable_shift (code, int_mode,
9410 							     op0, treeop1,
9411 							     target,
9412 							     unsignedp);
9413 
9414 			seq_old = get_insns ();
9415 			end_sequence ();
9416 			if (seq_cost (seq, speed_p)
9417 			    >= seq_cost (seq_old, speed_p))
9418 			  {
9419 			    seq = seq_old;
9420 			    temp = ret_old;
9421 			  }
9422 		      }
9423 		    emit_insn (seq);
9424 		  }
9425 	      }
9426 	  }
9427 
9428 	if (temp == NULL_RTX)
9429 	  temp = expand_variable_shift (code, mode, op0, treeop1, target,
9430 					unsignedp);
9431 	if (code == LSHIFT_EXPR)
9432 	  temp = REDUCE_BIT_FIELD (temp);
9433 	return temp;
9434       }
9435 
9436       /* Could determine the answer when only additive constants differ.  Also,
9437 	 the addition of one can be handled by changing the condition.  */
9438     case LT_EXPR:
9439     case LE_EXPR:
9440     case GT_EXPR:
9441     case GE_EXPR:
9442     case EQ_EXPR:
9443     case NE_EXPR:
9444     case UNORDERED_EXPR:
9445     case ORDERED_EXPR:
9446     case UNLT_EXPR:
9447     case UNLE_EXPR:
9448     case UNGT_EXPR:
9449     case UNGE_EXPR:
9450     case UNEQ_EXPR:
9451     case LTGT_EXPR:
9452       {
9453 	temp = do_store_flag (ops,
9454 			      modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9455 			      tmode != VOIDmode ? tmode : mode);
9456 	if (temp)
9457 	  return temp;
9458 
9459 	/* Use a compare and a jump for BLKmode comparisons, or for function
9460 	   type comparisons if have_canonicalize_funcptr_for_compare.  */
9461 
9462 	if ((target == 0
9463 	     || modifier == EXPAND_STACK_PARM
9464 	     || ! safe_from_p (target, treeop0, 1)
9465 	     || ! safe_from_p (target, treeop1, 1)
9466 	     /* Make sure we don't have a hard reg (such as function's return
9467 		value) live across basic blocks, if not optimizing.  */
9468 	     || (!optimize && REG_P (target)
9469 		 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9470 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9471 
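	/* do_store_flag could not compute the flag value directly, so
	   emit a branchy fallback; roughly (illustrative only):

	     target = 0;
	     if (!(treeop0 <code> treeop1))
	       goto lab1;
	     target = 1;	// or -1 for signed 1-bit result types
	   lab1:;  */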
9472 	emit_move_insn (target, const0_rtx);
9473 
9474 	rtx_code_label *lab1 = gen_label_rtx ();
9475 	jumpifnot_1 (code, treeop0, treeop1, lab1,
9476 		     profile_probability::uninitialized ());
9477 
9478 	if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9479 	  emit_move_insn (target, constm1_rtx);
9480 	else
9481 	  emit_move_insn (target, const1_rtx);
9482 
9483 	emit_label (lab1);
9484 	return target;
9485       }
9486     case COMPLEX_EXPR:
9487       /* Get the rtx code of the operands.  */
9488       op0 = expand_normal (treeop0);
9489       op1 = expand_normal (treeop1);
9490 
9491       if (!target)
9492 	target = gen_reg_rtx (TYPE_MODE (type));
9493       else
9494 	/* If target overlaps with op1, then either we need to force
9495 	   op1 into a pseudo (if target also overlaps with op0),
9496 	   or write the complex parts in reverse order.  */
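	/* For instance (hypothetical source), in

	       c = __builtin_complex (2.0, __real__ c);

	   op1 still reads the real part of the target, so writing the
	   real part first would clobber it; the cases below either copy
	   op1 into a fresh pseudo or write the imaginary part first.  */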
9497 	switch (GET_CODE (target))
9498 	  {
9499 	  case CONCAT:
9500 	    if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
9501 	      {
9502 		if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
9503 		  {
9504 		  complex_expr_force_op1:
9505 		    temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
9506 		    emit_move_insn (temp, op1);
9507 		    op1 = temp;
9508 		    break;
9509 		  }
9510 	      complex_expr_swap_order:
9511 		/* Move the imaginary (op1) and real (op0) parts to their
9512 		   location.  */
9513 		write_complex_part (target, op1, true);
9514 		write_complex_part (target, op0, false);
9515 
9516 		return target;
9517 	      }
9518 	    break;
9519 	  case MEM:
9520 	    temp = adjust_address_nv (target,
9521 				      GET_MODE_INNER (GET_MODE (target)), 0);
9522 	    if (reg_overlap_mentioned_p (temp, op1))
9523 	      {
9524 		scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
9525 		temp = adjust_address_nv (target, imode,
9526 					  GET_MODE_SIZE (imode));
9527 		if (reg_overlap_mentioned_p (temp, op0))
9528 		  goto complex_expr_force_op1;
9529 		goto complex_expr_swap_order;
9530 	      }
9531 	    break;
9532 	  default:
9533 	    if (reg_overlap_mentioned_p (target, op1))
9534 	      {
9535 		if (reg_overlap_mentioned_p (target, op0))
9536 		  goto complex_expr_force_op1;
9537 		goto complex_expr_swap_order;
9538 	      }
9539 	    break;
9540 	  }
9541 
9542       /* Move the real (op0) and imaginary (op1) parts to their location.  */
9543       write_complex_part (target, op0, false);
9544       write_complex_part (target, op1, true);
9545 
9546       return target;
9547 
9548     case WIDEN_SUM_EXPR:
9549       {
9550         tree oprnd0 = treeop0;
9551         tree oprnd1 = treeop1;
9552 
9553         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9554         target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
9555                                             target, unsignedp);
9556         return target;
9557       }
9558 
9559     case VEC_UNPACK_HI_EXPR:
9560     case VEC_UNPACK_LO_EXPR:
9561       {
9562 	op0 = expand_normal (treeop0);
9563 	temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
9564 					  target, unsignedp);
9565 	gcc_assert (temp);
9566 	return temp;
9567       }
9568 
9569     case VEC_UNPACK_FLOAT_HI_EXPR:
9570     case VEC_UNPACK_FLOAT_LO_EXPR:
9571       {
9572 	op0 = expand_normal (treeop0);
9573 	/* The signedness is determined from the input operand.  */
9574 	temp = expand_widen_pattern_expr
9575 	  (ops, op0, NULL_RTX, NULL_RTX,
9576 	   target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9577 
9578 	gcc_assert (temp);
9579 	return temp;
9580       }
9581 
9582     case VEC_WIDEN_MULT_HI_EXPR:
9583     case VEC_WIDEN_MULT_LO_EXPR:
9584     case VEC_WIDEN_MULT_EVEN_EXPR:
9585     case VEC_WIDEN_MULT_ODD_EXPR:
9586     case VEC_WIDEN_LSHIFT_HI_EXPR:
9587     case VEC_WIDEN_LSHIFT_LO_EXPR:
9588       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9589       target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
9590 					  target, unsignedp);
9591       gcc_assert (target);
9592       return target;
9593 
9594     case VEC_PACK_TRUNC_EXPR:
9595     case VEC_PACK_SAT_EXPR:
9596     case VEC_PACK_FIX_TRUNC_EXPR:
9597       mode = TYPE_MODE (TREE_TYPE (treeop0));
9598       goto binop;
9599 
9600     case VEC_PERM_EXPR:
9601       {
9602 	expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
9603 	vec_perm_builder sel;
9604 	if (TREE_CODE (treeop2) == VECTOR_CST
9605 	    && tree_to_vec_perm_builder (&sel, treeop2))
9606 	  {
9607 	    machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
9608 	    temp = expand_vec_perm_const (mode, op0, op1, sel,
9609 					  sel_mode, target);
9610 	  }
9611 	else
9612 	  {
9613 	    op2 = expand_normal (treeop2);
9614 	    temp = expand_vec_perm_var (mode, op0, op1, op2, target);
9615 	  }
9616 	gcc_assert (temp);
9617 	return temp;
9618       }
9619 
9620     case DOT_PROD_EXPR:
9621       {
9622 	tree oprnd0 = treeop0;
9623 	tree oprnd1 = treeop1;
9624 	tree oprnd2 = treeop2;
9625 	rtx op2;
9626 
9627 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9628 	op2 = expand_normal (oprnd2);
9629 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
9630 					    target, unsignedp);
9631 	return target;
9632       }
9633 
9634       case SAD_EXPR:
9635       {
9636 	tree oprnd0 = treeop0;
9637 	tree oprnd1 = treeop1;
9638 	tree oprnd2 = treeop2;
9639 	rtx op2;
9640 
9641 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9642 	op2 = expand_normal (oprnd2);
9643 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
9644 					    target, unsignedp);
9645 	return target;
9646       }
9647 
9648     case REALIGN_LOAD_EXPR:
9649       {
9650         tree oprnd0 = treeop0;
9651         tree oprnd1 = treeop1;
9652         tree oprnd2 = treeop2;
9653         rtx op2;
9654 
9655         this_optab = optab_for_tree_code (code, type, optab_default);
9656         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9657         op2 = expand_normal (oprnd2);
9658         temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
9659 				  target, unsignedp);
9660         gcc_assert (temp);
9661         return temp;
9662       }
9663 
9664     case COND_EXPR:
9665       {
9666 	/* A COND_EXPR with its type being VOID_TYPE represents a
9667 	   conditional jump and is handled in
9668 	   expand_gimple_cond_expr.  */
9669 	gcc_assert (!VOID_TYPE_P (type));
9670 
9671 	/* Note that COND_EXPRs whose type is a structure or union
9672 	   are required to be constructed to contain assignments of
9673 	   a temporary variable, so that we can evaluate them here
9674 	   for side effect only.  If type is void, we must do likewise.  */
9675 
9676 	gcc_assert (!TREE_ADDRESSABLE (type)
9677 		    && !ignore
9678 		    && TREE_TYPE (treeop1) != void_type_node
9679 		    && TREE_TYPE (treeop2) != void_type_node);
9680 
9681 	temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
9682 	if (temp)
9683 	  return temp;
9684 
9685 	/* If we are not to produce a result, we have no target.  Otherwise,
9686 	   if a target was specified use it; it will not be used as an
9687 	   intermediate target unless it is safe.  If no target, use a
9688 	   temporary.  */
9689 
9690 	if (modifier != EXPAND_STACK_PARM
9691 	    && original_target
9692 	    && safe_from_p (original_target, treeop0, 1)
9693 	    && GET_MODE (original_target) == mode
9694 	    && !MEM_P (original_target))
9695 	  temp = original_target;
9696 	else
9697 	  temp = assign_temp (type, 0, 1);
9698 
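	/* The expansion below corresponds roughly to this sketch
	   (illustrative only):

	     if (!treeop0)
	       goto lab0;
	     temp = treeop1;
	     goto lab1;
	   lab0:
	     temp = treeop2;
	   lab1:;  */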
9699 	do_pending_stack_adjust ();
9700 	NO_DEFER_POP;
9701 	rtx_code_label *lab0 = gen_label_rtx ();
9702 	rtx_code_label *lab1 = gen_label_rtx ();
9703 	jumpifnot (treeop0, lab0,
9704 		   profile_probability::uninitialized ());
9705 	store_expr (treeop1, temp,
9706 		    modifier == EXPAND_STACK_PARM,
9707 		    false, false);
9708 
9709 	emit_jump_insn (targetm.gen_jump (lab1));
9710 	emit_barrier ();
9711 	emit_label (lab0);
9712 	store_expr (treeop2, temp,
9713 		    modifier == EXPAND_STACK_PARM,
9714 		    false, false);
9715 
9716 	emit_label (lab1);
9717 	OK_DEFER_POP;
9718 	return temp;
9719       }
9720 
9721     case VEC_COND_EXPR:
9722       target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
9723       return target;
9724 
9725     case VEC_DUPLICATE_EXPR:
9726       op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9727       target = expand_vector_broadcast (mode, op0);
9728       gcc_assert (target);
9729       return target;
9730 
9731     case VEC_SERIES_EXPR:
9732       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
9733       return expand_vec_series_expr (mode, op0, op1, target);
9734 
9735     case BIT_INSERT_EXPR:
9736       {
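	/* A minimal illustration (hypothetical operands): for
	   BIT_INSERT_EXPR <a, b, 32> with a 64-bit a and a 32-bit b,
	   the code below copies a and then stores b into bits [32, 64)
	   of the copy.  */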
9737 	unsigned bitpos = tree_to_uhwi (treeop2);
9738 	unsigned bitsize;
9739 	if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
9740 	  bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
9741 	else
9742 	  bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
9743 	rtx op0 = expand_normal (treeop0);
9744 	rtx op1 = expand_normal (treeop1);
9745 	rtx dst = gen_reg_rtx (mode);
9746 	emit_move_insn (dst, op0);
9747 	store_bit_field (dst, bitsize, bitpos, 0, 0,
9748 			 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
9749 	return dst;
9750       }
9751 
9752     default:
9753       gcc_unreachable ();
9754     }
9755 
9756   /* Here to do an ordinary binary operator.  */
9757  binop:
9758   expand_operands (treeop0, treeop1,
9759 		   subtarget, &op0, &op1, EXPAND_NORMAL);
9760  binop2:
9761   this_optab = optab_for_tree_code (code, type, optab_default);
9762  binop3:
9763   if (modifier == EXPAND_STACK_PARM)
9764     target = 0;
9765   temp = expand_binop (mode, this_optab, op0, op1, target,
9766 		       unsignedp, OPTAB_LIB_WIDEN);
9767   gcc_assert (temp);
9768   /* Bitwise operations do not need bitfield reduction as we expect their
9769      operands to be properly truncated.  */
9770   if (code == BIT_XOR_EXPR
9771       || code == BIT_AND_EXPR
9772       || code == BIT_IOR_EXPR)
9773     return temp;
9774   return REDUCE_BIT_FIELD (temp);
9775 }
9776 #undef REDUCE_BIT_FIELD
9777 
9778 
9779 /* Return TRUE if expression STMT is suitable for replacement.
9780    Never consider memory loads as replaceable, because those don't ever lead
9781    into constant expressions.  */
9782 
9783 static bool
9784 stmt_is_replaceable_p (gimple *stmt)
9785 {
9786   if (ssa_is_replaceable_p (stmt))
9787     {
9788       /* Don't move around loads.  */
9789       if (!gimple_assign_single_p (stmt)
9790 	  || is_gimple_val (gimple_assign_rhs1 (stmt)))
9791 	return true;
9792     }
9793   return false;
9794 }
9795 
9796 rtx
9797 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
9798 		    enum expand_modifier modifier, rtx *alt_rtl,
9799 		    bool inner_reference_p)
9800 {
9801   rtx op0, op1, temp, decl_rtl;
9802   tree type;
9803   int unsignedp;
9804   machine_mode mode, dmode;
9805   enum tree_code code = TREE_CODE (exp);
9806   rtx subtarget, original_target;
9807   int ignore;
9808   tree context;
9809   bool reduce_bit_field;
9810   location_t loc = EXPR_LOCATION (exp);
9811   struct separate_ops ops;
9812   tree treeop0, treeop1, treeop2;
9813   tree ssa_name = NULL_TREE;
9814   gimple *g;
9815 
9816   type = TREE_TYPE (exp);
9817   mode = TYPE_MODE (type);
9818   unsignedp = TYPE_UNSIGNED (type);
9819 
9820   treeop0 = treeop1 = treeop2 = NULL_TREE;
9821   if (!VL_EXP_CLASS_P (exp))
9822     switch (TREE_CODE_LENGTH (code))
9823       {
9824 	default:
9825 	case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
9826 	case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
9827 	case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
9828 	case 0: break;
9829       }
9830   ops.code = code;
9831   ops.type = type;
9832   ops.op0 = treeop0;
9833   ops.op1 = treeop1;
9834   ops.op2 = treeop2;
9835   ops.location = loc;
9836 
9837   ignore = (target == const0_rtx
9838 	    || ((CONVERT_EXPR_CODE_P (code)
9839 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
9840 		&& TREE_CODE (type) == VOID_TYPE));
9841 
9842   /* An operation in what may be a bit-field type needs the
9843      result to be reduced to the precision of the bit-field type,
9844      which is narrower than that of the type's mode.  */
9845   reduce_bit_field = (!ignore
9846 		      && INTEGRAL_TYPE_P (type)
9847 		      && !type_has_mode_precision_p (type));
9848 
9849   /* If we are going to ignore this result, we need only do something
9850      if there is a side-effect somewhere in the expression.  If there
9851      is, short-circuit the most common cases here.  Note that we must
9852      not call expand_expr with anything but const0_rtx in case this
9853      is an initial expansion of a size that contains a PLACEHOLDER_EXPR.  */
9854 
9855   if (ignore)
9856     {
9857       if (! TREE_SIDE_EFFECTS (exp))
9858 	return const0_rtx;
9859 
9860       /* Ensure we reference a volatile object even if value is ignored, but
9861 	 don't do this if all we are doing is taking its address.  */
9862       if (TREE_THIS_VOLATILE (exp)
9863 	  && TREE_CODE (exp) != FUNCTION_DECL
9864 	  && mode != VOIDmode && mode != BLKmode
9865 	  && modifier != EXPAND_CONST_ADDRESS)
9866 	{
9867 	  temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
9868 	  if (MEM_P (temp))
9869 	    copy_to_reg (temp);
9870 	  return const0_rtx;
9871 	}
9872 
9873       if (TREE_CODE_CLASS (code) == tcc_unary
9874 	  || code == BIT_FIELD_REF
9875 	  || code == COMPONENT_REF
9876 	  || code == INDIRECT_REF)
9877 	return expand_expr (treeop0, const0_rtx, VOIDmode,
9878 			    modifier);
9879 
9880       else if (TREE_CODE_CLASS (code) == tcc_binary
9881 	       || TREE_CODE_CLASS (code) == tcc_comparison
9882 	       || code == ARRAY_REF || code == ARRAY_RANGE_REF)
9883 	{
9884 	  expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
9885 	  expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
9886 	  return const0_rtx;
9887 	}
9888 
9889       target = 0;
9890     }
9891 
9892   if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
9893     target = 0;
9894 
9895   /* Use subtarget as the target for operand 0 of a binary operation.  */
9896   subtarget = get_subtarget (target);
9897   original_target = target;
9898 
9899   switch (code)
9900     {
9901     case LABEL_DECL:
9902       {
9903 	tree function = decl_function_context (exp);
9904 
9905 	temp = label_rtx (exp);
9906 	temp = gen_rtx_LABEL_REF (Pmode, temp);
9907 
9908 	if (function != current_function_decl
9909 	    && function != 0)
9910 	  LABEL_REF_NONLOCAL_P (temp) = 1;
9911 
9912 	temp = gen_rtx_MEM (FUNCTION_MODE, temp);
9913 	return temp;
9914       }
9915 
9916     case SSA_NAME:
9917       /* ??? ivopts calls the expander without any preparation from
9918          out-of-ssa.  So fake instructions as if this were an access to the
9919 	 base variable.  This unnecessarily allocates a pseudo; see how we can
9920 	 reuse it if partition base vars have it set already.  */
9921       if (!currently_expanding_to_rtl)
9922 	{
9923 	  tree var = SSA_NAME_VAR (exp);
9924 	  if (var && DECL_RTL_SET_P (var))
9925 	    return DECL_RTL (var);
9926 	  return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
9927 			      LAST_VIRTUAL_REGISTER + 1);
9928 	}
9929 
9930       g = get_gimple_for_ssa_name (exp);
9931       /* For EXPAND_INITIALIZER try harder to get something simpler.  */
9932       if (g == NULL
9933 	  && modifier == EXPAND_INITIALIZER
9934 	  && !SSA_NAME_IS_DEFAULT_DEF (exp)
9935 	  && (optimize || !SSA_NAME_VAR (exp)
9936 	      || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
9937 	  && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
9938 	g = SSA_NAME_DEF_STMT (exp);
9939       if (g)
9940 	{
9941 	  rtx r;
9942 	  location_t saved_loc = curr_insn_location ();
9943 	  location_t loc = gimple_location (g);
9944 	  if (loc != UNKNOWN_LOCATION)
9945 	    set_curr_insn_location (loc);
9946 	  ops.code = gimple_assign_rhs_code (g);
9947           switch (get_gimple_rhs_class (ops.code))
9948 	    {
9949 	    case GIMPLE_TERNARY_RHS:
9950 	      ops.op2 = gimple_assign_rhs3 (g);
9951 	      /* Fallthru */
9952 	    case GIMPLE_BINARY_RHS:
9953 	      ops.op1 = gimple_assign_rhs2 (g);
9954 
9955 	      /* Try to expand conditonal compare.  */
9956 	      /* Try to expand conditional compare.  */
9957 		{
9958 		  gcc_checking_assert (targetm.gen_ccmp_next != NULL);
9959 		  r = expand_ccmp_expr (g, mode);
9960 		  if (r)
9961 		    break;
9962 		}
9963 	      /* Fallthru */
9964 	    case GIMPLE_UNARY_RHS:
9965 	      ops.op0 = gimple_assign_rhs1 (g);
9966 	      ops.type = TREE_TYPE (gimple_assign_lhs (g));
9967 	      ops.location = loc;
9968 	      r = expand_expr_real_2 (&ops, target, tmode, modifier);
9969 	      break;
9970 	    case GIMPLE_SINGLE_RHS:
9971 	      {
9972 		r = expand_expr_real (gimple_assign_rhs1 (g), target,
9973 				      tmode, modifier, alt_rtl,
9974 				      inner_reference_p);
9975 		break;
9976 	      }
9977 	    default:
9978 	      gcc_unreachable ();
9979 	    }
9980 	  set_curr_insn_location (saved_loc);
9981 	  if (REG_P (r) && !REG_EXPR (r))
9982 	    set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
9983 	  return r;
9984 	}
9985 
9986       ssa_name = exp;
9987       decl_rtl = get_rtx_for_ssa_name (ssa_name);
9988       exp = SSA_NAME_VAR (ssa_name);
9989       goto expand_decl_rtl;
9990 
9991     case PARM_DECL:
9992     case VAR_DECL:
9993       /* If a static var's type was incomplete when the decl was written,
9994 	 but the type is complete now, lay out the decl now.  */
9995       if (DECL_SIZE (exp) == 0
9996 	  && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
9997 	  && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
9998 	layout_decl (exp, 0);
9999 
10000       /* fall through */
10001 
10002     case FUNCTION_DECL:
10003     case RESULT_DECL:
10004       decl_rtl = DECL_RTL (exp);
10005     expand_decl_rtl:
10006       gcc_assert (decl_rtl);
10007 
10008       /* DECL_MODE might change when TYPE_MODE depends on attribute target
10009 	 settings for VECTOR_TYPE_P that might switch for the function.  */
10010       if (currently_expanding_to_rtl
10011 	  && code == VAR_DECL && MEM_P (decl_rtl)
10012 	  && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
10013 	decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
10014       else
10015 	decl_rtl = copy_rtx (decl_rtl);
10016 
10017       /* Record writes to register variables.  */
10018       if (modifier == EXPAND_WRITE
10019 	  && REG_P (decl_rtl)
10020 	  && HARD_REGISTER_P (decl_rtl))
10021         add_to_hard_reg_set (&crtl->asm_clobbers,
10022 			     GET_MODE (decl_rtl), REGNO (decl_rtl));
10023 
10024       /* Ensure the variable is marked as used even if it doesn't go
10025 	 through a parser.  If it hasn't been used yet, write out an external
10026 	 definition.  */
10027       if (exp)
10028 	TREE_USED (exp) = 1;
10029 
10030       /* Show we haven't gotten RTL for this yet.  */
10031       temp = 0;
10032 
10033       /* Variables inherited from containing functions should have
10034 	 been lowered by this point.  */
10035       if (exp)
10036 	context = decl_function_context (exp);
10037       gcc_assert (!exp
10038 		  || SCOPE_FILE_SCOPE_P (context)
10039 		  || context == current_function_decl
10040 		  || TREE_STATIC (exp)
10041 		  || DECL_EXTERNAL (exp)
10042 		  /* ??? C++ creates functions that are not TREE_STATIC.  */
10043 		  || TREE_CODE (exp) == FUNCTION_DECL);
10044 
10045       /* This is the case of an array whose size is to be determined
10046 	 from its initializer, while the initializer is still being parsed.
10047 	 ??? We aren't parsing while expanding anymore.  */
10048 
10049       if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10050 	temp = validize_mem (decl_rtl);
10051 
10052       /* If DECL_RTL is memory, we are in the normal case and the
10053 	 address is not valid, get the address into a register.  */
10054 
10055       else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10056 	{
10057 	  if (alt_rtl)
10058 	    *alt_rtl = decl_rtl;
10059 	  decl_rtl = use_anchored_address (decl_rtl);
10060 	  if (modifier != EXPAND_CONST_ADDRESS
10061 	      && modifier != EXPAND_SUM
10062 	      && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10063 					       : GET_MODE (decl_rtl),
10064 					       XEXP (decl_rtl, 0),
10065 					       MEM_ADDR_SPACE (decl_rtl)))
10066 	    temp = replace_equiv_address (decl_rtl,
10067 					  copy_rtx (XEXP (decl_rtl, 0)));
10068 	}
10069 
10070       /* If we got something, return it.  But first, set the alignment
10071 	 if the address is a register.  */
10072       if (temp != 0)
10073 	{
10074 	  if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10075 	    mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10076 
10077 	  return temp;
10078 	}
10079 
10080       if (exp)
10081 	dmode = DECL_MODE (exp);
10082       else
10083 	dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10084 
10085       /* If the mode of DECL_RTL does not match that of the decl,
10086 	 there are two cases: we are dealing with a BLKmode value
10087 	 that is returned in a register, or we are dealing with
10088 	 a promoted value.  In the latter case, return a SUBREG
10089 	 of the wanted mode, but mark it so that we know that it
10090 	 was already extended.  */
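      /* For instance (target-dependent, illustrative only): a 'short'
	 PARM_DECL may be promoted and live in an SImode register even
	 though the decl's mode is HImode; reading it then returns a
	 lowpart SUBREG of that register with SUBREG_PROMOTED_VAR_P set,
	 so later code knows the value has already been extended.  */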
10091       if (REG_P (decl_rtl)
10092 	  && dmode != BLKmode
10093 	  && GET_MODE (decl_rtl) != dmode)
10094 	{
10095 	  machine_mode pmode;
10096 
10097 	  /* Get the signedness to be used for this variable.  Ensure we get
10098 	     the same mode we got when the variable was declared.  */
10099 	  if (code != SSA_NAME)
10100 	    pmode = promote_decl_mode (exp, &unsignedp);
10101 	  else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10102 		   && gimple_code (g) == GIMPLE_CALL
10103 		   && !gimple_call_internal_p (g))
10104 	    pmode = promote_function_mode (type, mode, &unsignedp,
10105 					   gimple_call_fntype (g),
10106 					   2);
10107 	  else
10108 	    pmode = promote_ssa_mode (ssa_name, &unsignedp);
10109 	  gcc_assert (GET_MODE (decl_rtl) == pmode);
10110 
10111 	  temp = gen_lowpart_SUBREG (mode, decl_rtl);
10112 	  SUBREG_PROMOTED_VAR_P (temp) = 1;
10113 	  SUBREG_PROMOTED_SET (temp, unsignedp);
10114 	  return temp;
10115 	}
10116 
10117       return decl_rtl;
10118 
10119     case INTEGER_CST:
10120       {
10121 	/* Given that TYPE_PRECISION (type) is not always equal to
10122 	   GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10123 	   the former to the latter according to the signedness of the
10124 	   type.  */
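	/* E.g. (illustrative) for a 3-bit bit-field type whose mode is
	   QImode, the constant 0b111 extends to 0xff when the type is
	   signed (value -1) but to 0x07 when it is unsigned (value 7).  */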
10125 	scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
10126 	temp = immed_wide_int_const
10127 	  (wi::to_wide (exp, GET_MODE_PRECISION (mode)), mode);
10128 	return temp;
10129       }
10130 
10131     case VECTOR_CST:
10132       {
10133 	tree tmp = NULL_TREE;
10134 	if (VECTOR_MODE_P (mode))
10135 	  return const_vector_from_tree (exp);
10136 	scalar_int_mode int_mode;
10137 	if (is_int_mode (mode, &int_mode))
10138 	  {
10139 	    if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
10140 	      return const_scalar_mask_from_tree (int_mode, exp);
10141 	    else
10142 	      {
10143 		tree type_for_mode
10144 		  = lang_hooks.types.type_for_mode (int_mode, 1);
10145 		if (type_for_mode)
10146 		  tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10147 					type_for_mode, exp);
10148 	      }
10149 	  }
10150 	if (!tmp)
10151 	  {
10152 	    vec<constructor_elt, va_gc> *v;
10153 	    /* Constructors need to be fixed-length.  FIXME.  */
10154 	    unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10155 	    vec_alloc (v, nunits);
10156 	    for (unsigned int i = 0; i < nunits; ++i)
10157 	      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10158 	    tmp = build_constructor (type, v);
10159 	  }
10160 	return expand_expr (tmp, ignore ? const0_rtx : target,
10161 			    tmode, modifier);
10162       }
10163 
10164     case CONST_DECL:
10165       if (modifier == EXPAND_WRITE)
10166 	{
10167 	  /* Writing into CONST_DECL is always invalid, but handle it
10168 	     gracefully.  */
10169 	  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10170 	  scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10171 	  op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10172 					 EXPAND_NORMAL, as);
10173 	  op0 = memory_address_addr_space (mode, op0, as);
10174 	  temp = gen_rtx_MEM (mode, op0);
10175 	  set_mem_addr_space (temp, as);
10176 	  return temp;
10177 	}
10178       return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10179 
10180     case REAL_CST:
10181       /* If optimized, generate immediate CONST_DOUBLE
10182 	 which will be turned into memory by reload if necessary.
10183 
10184 	 We used to force a register so that loop.c could see it.  But
10185 	 this does not allow gen_* patterns to perform optimizations with
10186 	 the constants.  It also produces two insns in cases like "x = 1.0;".
10187 	 On most machines, floating-point constants are not permitted in
10188 	 many insns, so we'd end up copying it to a register in any case.
10189 
10190 	 Now, we do the copying in expand_binop, if appropriate.  */
10191       return const_double_from_real_value (TREE_REAL_CST (exp),
10192 					   TYPE_MODE (TREE_TYPE (exp)));
10193 
10194     case FIXED_CST:
10195       return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10196 					   TYPE_MODE (TREE_TYPE (exp)));
10197 
10198     case COMPLEX_CST:
10199       /* Handle evaluating a complex constant in a CONCAT target.  */
10200       if (original_target && GET_CODE (original_target) == CONCAT)
10201 	{
10202 	  machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10203 	  rtx rtarg, itarg;
10204 
10205 	  rtarg = XEXP (original_target, 0);
10206 	  itarg = XEXP (original_target, 1);
10207 
10208 	  /* Move the real and imaginary parts separately.  */
10209 	  op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10210 	  op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10211 
10212 	  if (op0 != rtarg)
10213 	    emit_move_insn (rtarg, op0);
10214 	  if (op1 != itarg)
10215 	    emit_move_insn (itarg, op1);
10216 
10217 	  return original_target;
10218 	}
10219 
10220       /* fall through */
10221 
10222     case STRING_CST:
10223       temp = expand_expr_constant (exp, 1, modifier);
10224 
10225       /* temp contains a constant address.
10226 	 On RISC machines where a constant address isn't valid,
10227 	 make some insns to get that address into a register.  */
10228       if (modifier != EXPAND_CONST_ADDRESS
10229 	  && modifier != EXPAND_INITIALIZER
10230 	  && modifier != EXPAND_SUM
10231 	  && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10232 					    MEM_ADDR_SPACE (temp)))
10233 	return replace_equiv_address (temp,
10234 				      copy_rtx (XEXP (temp, 0)));
10235       return temp;
10236 
10237     case POLY_INT_CST:
10238       return immed_wide_int_const (poly_int_cst_value (exp), mode);
10239 
10240     case SAVE_EXPR:
10241       {
10242 	tree val = treeop0;
10243 	rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10244 				      inner_reference_p);
10245 
10246 	if (!SAVE_EXPR_RESOLVED_P (exp))
10247 	  {
10248 	    /* We can indeed still hit this case, typically via builtin
10249 	       expanders calling save_expr immediately before expanding
10250 	       something.  Assume this means that we only have to deal
10251 	       with non-BLKmode values.  */
10252 	    gcc_assert (GET_MODE (ret) != BLKmode);
10253 
10254 	    val = build_decl (curr_insn_location (),
10255 			      VAR_DECL, NULL, TREE_TYPE (exp));
10256 	    DECL_ARTIFICIAL (val) = 1;
10257 	    DECL_IGNORED_P (val) = 1;
10258 	    treeop0 = val;
10259 	    TREE_OPERAND (exp, 0) = treeop0;
10260 	    SAVE_EXPR_RESOLVED_P (exp) = 1;
10261 
10262 	    if (!CONSTANT_P (ret))
10263 	      ret = copy_to_reg (ret);
10264 	    SET_DECL_RTL (val, ret);
10265 	  }
10266 
10267         return ret;
10268       }
10269 
10270 
10271     case CONSTRUCTOR:
10272       /* If we don't need the result, just ensure we evaluate any
10273 	 subexpressions.  */
10274       if (ignore)
10275 	{
10276 	  unsigned HOST_WIDE_INT idx;
10277 	  tree value;
10278 
10279 	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10280 	    expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10281 
10282 	  return const0_rtx;
10283 	}
10284 
10285       return expand_constructor (exp, target, modifier, false);
10286 
10287     case TARGET_MEM_REF:
10288       {
10289 	addr_space_t as
10290 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10291 	enum insn_code icode;
10292 	unsigned int align;
10293 
10294 	op0 = addr_for_mem_ref (exp, as, true);
10295 	op0 = memory_address_addr_space (mode, op0, as);
10296 	temp = gen_rtx_MEM (mode, op0);
10297 	set_mem_attributes (temp, exp, 0);
10298 	set_mem_addr_space (temp, as);
10299 	align = get_object_alignment (exp);
10300 	if (modifier != EXPAND_WRITE
10301 	    && modifier != EXPAND_MEMORY
10302 	    && mode != BLKmode
10303 	    && align < GET_MODE_ALIGNMENT (mode)
10304 	    /* If the target does not have special handling for unaligned
10305 	       loads of mode then it can use regular moves for them.  */
10306 	    && ((icode = optab_handler (movmisalign_optab, mode))
10307 		!= CODE_FOR_nothing))
10308 	  {
10309 	    struct expand_operand ops[2];
10310 
10311 	    /* We've already validated the memory, and we're creating a
10312 	       new pseudo destination.  The predicates really can't fail,
10313 	       nor can the generator.  */
10314 	    create_output_operand (&ops[0], NULL_RTX, mode);
10315 	    create_fixed_operand (&ops[1], temp);
10316 	    expand_insn (icode, 2, ops);
10317 	    temp = ops[0].value;
10318 	  }
10319 	return temp;
10320       }
10321 
10322     case MEM_REF:
10323       {
10324 	const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10325 	addr_space_t as
10326 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10327 	machine_mode address_mode;
10328 	tree base = TREE_OPERAND (exp, 0);
10329 	gimple *def_stmt;
10330 	enum insn_code icode;
10331 	unsigned align;
10332 	/* Handle expansion of non-aliased memory with non-BLKmode.  That
10333 	   might end up in a register.  */
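	/* Typical instance (illustrative): a type-punning access such as
	   *(int *) &f, where f is a float that never got a stack slot,
	   may be expanded below as a VIEW_CONVERT_EXPR or BIT_FIELD_REF
	   of f's register instead of going through memory.  */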
10334 	if (mem_ref_refers_to_non_mem_p (exp))
10335 	  {
10336 	    poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10337 	    base = TREE_OPERAND (base, 0);
10338 	    if (known_eq (offset, 0)
10339 	        && !reverse
10340 		&& tree_fits_uhwi_p (TYPE_SIZE (type))
10341 		&& known_eq (GET_MODE_BITSIZE (DECL_MODE (base)),
10342 			     tree_to_uhwi (TYPE_SIZE (type))))
10343 	      return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10344 				  target, tmode, modifier);
10345 	    if (TYPE_MODE (type) == BLKmode)
10346 	      {
10347 		temp = assign_stack_temp (DECL_MODE (base),
10348 					  GET_MODE_SIZE (DECL_MODE (base)));
10349 		store_expr (base, temp, 0, false, false);
10350 		temp = adjust_address (temp, BLKmode, offset);
10351 		set_mem_size (temp, int_size_in_bytes (type));
10352 		return temp;
10353 	      }
10354 	    exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10355 			  bitsize_int (offset * BITS_PER_UNIT));
10356 	    REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10357 	    return expand_expr (exp, target, tmode, modifier);
10358 	  }
10359 	address_mode = targetm.addr_space.address_mode (as);
10360 	base = TREE_OPERAND (exp, 0);
10361 	if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10362 	  {
10363 	    tree mask = gimple_assign_rhs2 (def_stmt);
10364 	    base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10365 			   gimple_assign_rhs1 (def_stmt), mask);
10366 	    TREE_OPERAND (exp, 0) = base;
10367 	  }
10368 	align = get_object_alignment (exp);
10369 	op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10370 	op0 = memory_address_addr_space (mode, op0, as);
10371 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
10372 	  {
10373 	    rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10374 	    op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10375 	    op0 = memory_address_addr_space (mode, op0, as);
10376 	  }
10377 	temp = gen_rtx_MEM (mode, op0);
10378 	set_mem_attributes (temp, exp, 0);
10379 	set_mem_addr_space (temp, as);
10380 	if (TREE_THIS_VOLATILE (exp))
10381 	  MEM_VOLATILE_P (temp) = 1;
10382 	if (modifier != EXPAND_WRITE
10383 	    && modifier != EXPAND_MEMORY
10384 	    && !inner_reference_p
10385 	    && mode != BLKmode
10386 	    && align < GET_MODE_ALIGNMENT (mode))
10387 	  {
10388 	    if ((icode = optab_handler (movmisalign_optab, mode))
10389 		!= CODE_FOR_nothing)
10390 	      {
10391 		struct expand_operand ops[2];
10392 
10393 		/* We've already validated the memory, and we're creating a
10394 		   new pseudo destination.  The predicates really can't fail,
10395 		   nor can the generator.  */
10396 		create_output_operand (&ops[0], NULL_RTX, mode);
10397 		create_fixed_operand (&ops[1], temp);
10398 		expand_insn (icode, 2, ops);
10399 		temp = ops[0].value;
10400 	      }
10401 	    else if (targetm.slow_unaligned_access (mode, align))
10402 	      temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
10403 					0, TYPE_UNSIGNED (TREE_TYPE (exp)),
10404 					(modifier == EXPAND_STACK_PARM
10405 					 ? NULL_RTX : target),
10406 					mode, mode, false, alt_rtl);
10407 	  }
10408 	if (reverse
10409 	    && modifier != EXPAND_MEMORY
10410 	    && modifier != EXPAND_WRITE)
10411 	  temp = flip_storage_order (mode, temp);
10412 	return temp;
10413       }
10414 
10415     case ARRAY_REF:
10416 
10417       {
10418 	tree array = treeop0;
10419 	tree index = treeop1;
10420 	tree init;
10421 
10422 	/* Fold an expression like: "foo"[2].
10423 	   This is not done in fold so it won't happen inside &.
10424 	   Don't fold if this is for wide characters since it's too
10425 	   difficult to do correctly and this is a very rare case.  */
10426 
10427 	if (modifier != EXPAND_CONST_ADDRESS
10428 	    && modifier != EXPAND_INITIALIZER
10429 	    && modifier != EXPAND_MEMORY)
10430 	  {
10431 	    tree t = fold_read_from_constant_string (exp);
10432 
10433 	    if (t)
10434 	      return expand_expr (t, target, tmode, modifier);
10435 	  }
10436 
10437 	/* If this is a constant index into a constant array,
10438 	   just get the value from the array.  Handle both the cases when
10439 	   we have an explicit constructor and when our operand is a variable
10440 	   that was declared const.  */
10441 
10442 	if (modifier != EXPAND_CONST_ADDRESS
10443 	    && modifier != EXPAND_INITIALIZER
10444 	    && modifier != EXPAND_MEMORY
10445 	    && TREE_CODE (array) == CONSTRUCTOR
10446 	    && ! TREE_SIDE_EFFECTS (array)
10447 	    && TREE_CODE (index) == INTEGER_CST)
10448 	  {
10449 	    unsigned HOST_WIDE_INT ix;
10450 	    tree field, value;
10451 
10452 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10453 				      field, value)
10454 	      if (tree_int_cst_equal (field, index))
10455 		{
10456 		  if (!TREE_SIDE_EFFECTS (value))
10457 		    return expand_expr (fold (value), target, tmode, modifier);
10458 		  break;
10459 		}
10460 	  }
10461 
10462 	else if (optimize >= 1
10463 		 && modifier != EXPAND_CONST_ADDRESS
10464 		 && modifier != EXPAND_INITIALIZER
10465 		 && modifier != EXPAND_MEMORY
10466 		 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10467 		 && TREE_CODE (index) == INTEGER_CST
10468 		 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10469 		 && (init = ctor_for_folding (array)) != error_mark_node)
10470 	  {
10471 	    if (init == NULL_TREE)
10472 	      {
10473 		tree value = build_zero_cst (type);
10474 		if (TREE_CODE (value) == CONSTRUCTOR)
10475 		  {
10476 		    /* If VALUE is a CONSTRUCTOR, this optimization is only
10477 		       useful if this doesn't store the CONSTRUCTOR into
10478 		       memory.  If it does, it is more efficient to just
10479 		       load the data from the array directly.  */
10480 		    rtx ret = expand_constructor (value, target,
10481 						  modifier, true);
10482 		    if (ret == NULL_RTX)
10483 		      value = NULL_TREE;
10484 		  }
10485 
10486 		if (value)
10487 		  return expand_expr (value, target, tmode, modifier);
10488 	      }
10489 	    else if (TREE_CODE (init) == CONSTRUCTOR)
10490 	      {
10491 		unsigned HOST_WIDE_INT ix;
10492 		tree field, value;
10493 
10494 		FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
10495 					  field, value)
10496 		  if (tree_int_cst_equal (field, index))
10497 		    {
10498 		      if (TREE_SIDE_EFFECTS (value))
10499 			break;
10500 
10501 		      if (TREE_CODE (value) == CONSTRUCTOR)
10502 			{
10503 			  /* If VALUE is a CONSTRUCTOR, this
10504 			     optimization is only useful if
10505 			     this doesn't store the CONSTRUCTOR
10506 			     into memory.  If it does, it is more
10507 			     efficient to just load the data from
10508 			     the array directly.  */
10509 			  rtx ret = expand_constructor (value, target,
10510 							modifier, true);
10511 			  if (ret == NULL_RTX)
10512 			    break;
10513 			}
10514 
10515 		      return
10516 		        expand_expr (fold (value), target, tmode, modifier);
10517 		    }
10518 	      }
10519 	    else if (TREE_CODE (init) == STRING_CST)
10520 	      {
10521 		tree low_bound = array_ref_low_bound (exp);
10522 		tree index1 = fold_convert_loc (loc, sizetype, treeop1);
10523 
10524 		/* Optimize the special case of a zero lower bound.
10525 
10526 		   We convert the lower bound to sizetype to avoid problems
10527 		   with constant folding.  E.g. suppose the lower bound is
10528 		   1 and its mode is QI.  Without the conversion
10529 		      (ARRAY + (INDEX - (unsigned char)1))
10530 		   becomes
10531 		      (ARRAY + (-(unsigned char)1) + INDEX)
10532 		   which becomes
10533 		      (ARRAY + 255 + INDEX).  Oops!  */
10534 		if (!integer_zerop (low_bound))
10535 		  index1 = size_diffop_loc (loc, index1,
10536 					    fold_convert_loc (loc, sizetype,
10537 							      low_bound));
10538 
10539 		if (tree_fits_uhwi_p (index1)
10540 		    && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
10541 		  {
10542 		    tree type = TREE_TYPE (TREE_TYPE (init));
10543 		    scalar_int_mode mode;
10544 
10545 		    if (is_int_mode (TYPE_MODE (type), &mode)
10546 			&& GET_MODE_SIZE (mode) == 1)
10547 		      return gen_int_mode (TREE_STRING_POINTER (init)
10548 					   [TREE_INT_CST_LOW (index1)],
10549 					   mode);
10550 		  }
10551 	      }
10552 	  }
10553       }
10554       goto normal_inner_ref;
10555 
10556     case COMPONENT_REF:
10557       /* If the operand is a CONSTRUCTOR, we can just extract the
10558 	 appropriate field if it is present.  */
10559       if (TREE_CODE (treeop0) == CONSTRUCTOR)
10560 	{
10561 	  unsigned HOST_WIDE_INT idx;
10562 	  tree field, value;
10563 	  scalar_int_mode field_mode;
10564 
10565 	  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
10566 				    idx, field, value)
10567 	    if (field == treeop1
10568 		/* We can normally use the value of the field in the
10569 		   CONSTRUCTOR.  However, if this is a bitfield in
10570 		   an integral mode that we can fit in a HOST_WIDE_INT,
10571 		   we must mask only the number of bits in the bitfield,
10572 		   since this is done implicitly by the constructor.  If
10573 		   the bitfield does not meet either of those conditions,
10574 		   we can't do this optimization.  */
10575 		&& (! DECL_BIT_FIELD (field)
10576 		    || (is_int_mode (DECL_MODE (field), &field_mode)
10577 			&& (GET_MODE_PRECISION (field_mode)
10578 			    <= HOST_BITS_PER_WIDE_INT))))
10579 	      {
10580 		if (DECL_BIT_FIELD (field)
10581 		    && modifier == EXPAND_STACK_PARM)
10582 		  target = 0;
10583 		op0 = expand_expr (value, target, tmode, modifier);
10584 		if (DECL_BIT_FIELD (field))
10585 		  {
10586 		    HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
10587 		    scalar_int_mode imode
10588 		      = SCALAR_INT_TYPE_MODE (TREE_TYPE (field));
10589 
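		    /* Illustrative: for a hypothetical 5-bit bit-field
		       whose type mode is QImode, an unsigned field is
		       masked with (1 << 5) - 1 == 0x1f, while a signed
		       field is shifted left and then arithmetically
		       right by 8 - 5 == 3 bits to sign-extend bit 4.  */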
10590 		    if (TYPE_UNSIGNED (TREE_TYPE (field)))
10591 		      {
10592 			op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1,
10593 					    imode);
10594 			op0 = expand_and (imode, op0, op1, target);
10595 		      }
10596 		    else
10597 		      {
10598 			int count = GET_MODE_PRECISION (imode) - bitsize;
10599 
10600 			op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
10601 					    target, 0);
10602 			op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
10603 					    target, 0);
10604 		      }
10605 		  }
10606 
10607 		return op0;
10608 	      }
10609 	}
10610       goto normal_inner_ref;
10611 
10612     case BIT_FIELD_REF:
10613     case ARRAY_RANGE_REF:
10614     normal_inner_ref:
10615       {
10616 	machine_mode mode1, mode2;
10617 	poly_int64 bitsize, bitpos, bytepos;
10618 	tree offset;
10619 	int reversep, volatilep = 0, must_force_mem;
10620 	tree tem
10621 	  = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
10622 				 &unsignedp, &reversep, &volatilep);
10623 	rtx orig_op0, memloc;
10624 	bool clear_mem_expr = false;
10625 
10626 	/* If we got back the original object, something is wrong.  Perhaps
10627 	   we are evaluating an expression too early.  In any event, don't
10628 	   infinitely recurse.  */
10629 	gcc_assert (tem != exp);
10630 
10631 	/* If TEM's type is a union of variable size, pass TARGET to the inner
10632 	   computation, since it will need a temporary and TARGET is known
10633 	   to suffice.  This occurs in unchecked conversion in Ada.  */
10634 	orig_op0 = op0
10635 	  = expand_expr_real (tem,
10636 			      (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
10637 			       && COMPLETE_TYPE_P (TREE_TYPE (tem))
10638 			       && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
10639 				   != INTEGER_CST)
10640 			       && modifier != EXPAND_STACK_PARM
10641 			       ? target : NULL_RTX),
10642 			      VOIDmode,
10643 			      modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
10644 			      NULL, true);
10645 
10646 	/* If the field has a mode, we want to access it in the
10647 	   field's mode, not the computed mode.
10648 	   If a MEM has VOIDmode (external with incomplete type),
10649 	   use BLKmode for it instead.  */
10650 	if (MEM_P (op0))
10651 	  {
10652 	    if (mode1 != VOIDmode)
10653 	      op0 = adjust_address (op0, mode1, 0);
10654 	    else if (GET_MODE (op0) == VOIDmode)
10655 	      op0 = adjust_address (op0, BLKmode, 0);
10656 	  }
10657 
10658 	mode2
10659 	  = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
10660 
10661 	/* If we have either an offset, a BLKmode result, or a reference
10662 	   outside the underlying object, we must force it to memory.
10663 	   Such a case can occur in Ada if we have unchecked conversion
10664 	   of an expression from a scalar type to an aggregate type or
10665 	   for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10666 	   passed a partially uninitialized object or a view-conversion
10667 	   to a larger size.  */
10668 	must_force_mem = (offset
10669 			  || mode1 == BLKmode
10670 			  || maybe_gt (bitpos + bitsize,
10671 				       GET_MODE_BITSIZE (mode2)));
10672 
10673 	/* Handle CONCAT first.  */
10674 	if (GET_CODE (op0) == CONCAT && !must_force_mem)
10675 	  {
10676 	    if (known_eq (bitpos, 0)
10677 		&& known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
10678 		&& COMPLEX_MODE_P (mode1)
10679 		&& COMPLEX_MODE_P (GET_MODE (op0))
10680 		&& (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
10681 		    == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
10682 	      {
10683 		if (reversep)
10684 		  op0 = flip_storage_order (GET_MODE (op0), op0);
10685 		if (mode1 != GET_MODE (op0))
10686 		  {
10687 		    rtx parts[2];
10688 		    for (int i = 0; i < 2; i++)
10689 		      {
10690 			rtx op = read_complex_part (op0, i != 0);
10691 			if (GET_CODE (op) == SUBREG)
10692 			  op = force_reg (GET_MODE (op), op);
10693 			rtx temp = gen_lowpart_common (GET_MODE_INNER (mode1),
10694 						       op);
10695 			if (temp)
10696 			  op = temp;
10697 			else
10698 			  {
10699 			    if (!REG_P (op) && !MEM_P (op))
10700 			      op = force_reg (GET_MODE (op), op);
10701 			    op = gen_lowpart (GET_MODE_INNER (mode1), op);
10702 			  }
10703 			parts[i] = op;
10704 		      }
10705 		    op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
10706 		  }
10707 		return op0;
10708 	      }
10709 	    if (known_eq (bitpos, 0)
10710 		&& known_eq (bitsize,
10711 			     GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10712 		&& maybe_ne (bitsize, 0))
10713 	      {
10714 		op0 = XEXP (op0, 0);
10715 		mode2 = GET_MODE (op0);
10716 	      }
10717 	    else if (known_eq (bitpos,
10718 			       GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10719 		     && known_eq (bitsize,
10720 				  GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
10721 		     && maybe_ne (bitpos, 0)
10722 		     && maybe_ne (bitsize, 0))
10723 	      {
10724 		op0 = XEXP (op0, 1);
10725 		bitpos = 0;
10726 		mode2 = GET_MODE (op0);
10727 	      }
10728 	    else
10729 	      /* Otherwise force into memory.  */
10730 	      must_force_mem = 1;
10731 	  }
10732 
10733 	/* If this is a constant, put it in a register if it is a legitimate
10734 	   constant and we don't need a memory reference.  */
10735 	if (CONSTANT_P (op0)
10736 	    && mode2 != BLKmode
10737 	    && targetm.legitimate_constant_p (mode2, op0)
10738 	    && !must_force_mem)
10739 	  op0 = force_reg (mode2, op0);
10740 
10741 	/* Otherwise, if this is a constant, try to force it to the constant
10742 	   pool.  Note that back-ends, e.g. MIPS, may refuse to do so if it
10743 	   is a legitimate constant.  */
10744 	else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
10745 	  op0 = validize_mem (memloc);
10746 
10747 	/* Otherwise, if this is a constant or the object is not in memory
10748 	   and needs to be, put it there.  */
10749 	else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
10750 	  {
10751 	    memloc = assign_temp (TREE_TYPE (tem), 1, 1);
10752 	    emit_move_insn (memloc, op0);
10753 	    op0 = memloc;
10754 	    clear_mem_expr = true;
10755 	  }
10756 
10757 	if (offset)
10758 	  {
10759 	    machine_mode address_mode;
10760 	    rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
10761 					  EXPAND_SUM);
10762 
10763 	    gcc_assert (MEM_P (op0));
10764 
10765 	    address_mode = get_address_mode (op0);
10766 	    if (GET_MODE (offset_rtx) != address_mode)
10767 	      {
10768 		/* We cannot be sure that the RTL in offset_rtx is valid outside
10769 		   of a memory address context, so force it into a register
10770 		   before attempting to convert it to the desired mode.  */
10771 		offset_rtx = force_operand (offset_rtx, NULL_RTX);
10772 		offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
10773 	      }
10774 
10775 	    /* See the comment in expand_assignment for the rationale.  */
10776 	    if (mode1 != VOIDmode
10777 		&& maybe_ne (bitpos, 0)
10778 		&& maybe_gt (bitsize, 0)
10779 		&& multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
10780 		&& multiple_p (bitpos, bitsize)
10781 		&& multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
10782 		&& MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
10783 	      {
10784 		op0 = adjust_address (op0, mode1, bytepos);
10785 		bitpos = 0;
10786 	      }
10787 
10788 	    op0 = offset_address (op0, offset_rtx,
10789 				  highest_pow2_factor (offset));
10790 	  }
10791 
10792 	/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10793 	   record its alignment as BIGGEST_ALIGNMENT.  */
10794 	if (MEM_P (op0)
10795 	    && known_eq (bitpos, 0)
10796 	    && offset != 0
10797 	    && is_aligning_offset (offset, tem))
10798 	  set_mem_align (op0, BIGGEST_ALIGNMENT);
10799 
10800 	/* Don't forget about volatility even if this is a bitfield.  */
10801 	if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
10802 	  {
10803 	    if (op0 == orig_op0)
10804 	      op0 = copy_rtx (op0);
10805 
10806 	    MEM_VOLATILE_P (op0) = 1;
10807 	  }
10808 
10809 	/* In cases where an aligned union has an unaligned object
10810 	   as a field, we might be extracting a BLKmode value from
10811 	   an integer-mode (e.g., SImode) object.  Handle this case
10812 	   by doing the extract into an object as wide as the field
10813 	   (which we know to be the width of a basic mode), then
10814 	   storing into memory, and changing the mode to BLKmode.  */
10815 	if (mode1 == VOIDmode
10816 	    || REG_P (op0) || GET_CODE (op0) == SUBREG
10817 	    || (mode1 != BLKmode && ! direct_load[(int) mode1]
10818 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
10819 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
10820 		&& modifier != EXPAND_CONST_ADDRESS
10821 		&& modifier != EXPAND_INITIALIZER
10822 		&& modifier != EXPAND_MEMORY)
10823 	    /* If the bitfield is volatile and the bitsize
10824 	       is narrower than the access size of the bitfield,
10825 	       we need to extract bitfields from the access.  */
10826 	    || (volatilep && TREE_CODE (exp) == COMPONENT_REF
10827 		&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
10828 		&& mode1 != BLKmode
10829 		&& maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
10830 	    /* If the field isn't aligned enough to fetch as a memref,
10831 	       fetch it as a bit field.  */
10832 	    || (mode1 != BLKmode
10833 		&& (((MEM_P (op0)
10834 		      ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
10835 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
10836 		      : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
10837 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
10838 		     && modifier != EXPAND_MEMORY
10839 		     && ((modifier == EXPAND_CONST_ADDRESS
10840 			  || modifier == EXPAND_INITIALIZER)
10841 			 ? STRICT_ALIGNMENT
10842 			 : targetm.slow_unaligned_access (mode1,
10843 							  MEM_ALIGN (op0))))
10844 		    || !multiple_p (bitpos, BITS_PER_UNIT)))
10845 	    /* If the type and the field are a constant size and the
10846 	       size of the type isn't the same size as the bitfield,
10847 	       we must use bitfield operations.  */
10848 	    || (known_size_p (bitsize)
10849 		&& TYPE_SIZE (TREE_TYPE (exp))
10850 		&& poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
10851 		&& maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
10852 			     bitsize)))
10853 	  {
10854 	    machine_mode ext_mode = mode;
10855 
10856 	    if (ext_mode == BLKmode
10857 		&& ! (target != 0 && MEM_P (op0)
10858 		      && MEM_P (target)
10859 		      && multiple_p (bitpos, BITS_PER_UNIT)))
10860 	      ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
10861 
10862 	    if (ext_mode == BLKmode)
10863 	      {
10864 		if (target == 0)
10865 		  target = assign_temp (type, 1, 1);
10866 
10867 		/* ??? Unlike the similar test a few lines below, this one is
10868 		   very likely obsolete.  */
10869 		if (known_eq (bitsize, 0))
10870 		  return target;
10871 
10872 		/* In this case, BITPOS must start at a byte boundary and
10873 		   TARGET, if specified, must be a MEM.  */
10874 		gcc_assert (MEM_P (op0)
10875 			    && (!target || MEM_P (target)));
10876 
10877 		bytepos = exact_div (bitpos, BITS_PER_UNIT);
10878 		poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
10879 		emit_block_move (target,
10880 				 adjust_address (op0, VOIDmode, bytepos),
10881 				 gen_int_mode (bytesize, Pmode),
10882 				 (modifier == EXPAND_STACK_PARM
10883 				  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
10884 
10885 		return target;
10886 	      }
10887 
10888 	    /* If we have nothing to extract, the result will be 0 for targets
10889 	       with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise.  Always
10890 	       return 0 for the sake of consistency, as reading a zero-sized
10891 	       bitfield is valid in Ada and the value is fully specified.  */
10892 	    if (known_eq (bitsize, 0))
10893 	      return const0_rtx;
10894 
10895 	    op0 = validize_mem (op0);
10896 
10897 	    if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
10898 	      mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10899 
10900 	    /* If the result has a record type and the extraction is done in
10901 	       an integral mode, then the field may be not aligned on a byte
10902 	       boundary; in this case, if it has reverse storage order, it
10903 	       needs to be extracted as a scalar field with reverse storage
10904 	       order and put back into memory order afterwards.  */
10905 	    if (TREE_CODE (type) == RECORD_TYPE
10906 		&& GET_MODE_CLASS (ext_mode) == MODE_INT)
10907 	      reversep = TYPE_REVERSE_STORAGE_ORDER (type);
10908 
10909 	    op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
10910 				     (modifier == EXPAND_STACK_PARM
10911 				      ? NULL_RTX : target),
10912 				     ext_mode, ext_mode, reversep, alt_rtl);
10913 
10914 	    /* If the result has a record type and the mode of OP0 is an
10915 	       integral mode then, if BITSIZE is narrower than this mode
10916 	       and this is for big-endian data, we must put the field
10917 	       into the high-order bits.  And we must also put it back
10918 	       into memory order if it has been previously reversed.  */
10919 	    scalar_int_mode op0_mode;
10920 	    if (TREE_CODE (type) == RECORD_TYPE
10921 		&& is_int_mode (GET_MODE (op0), &op0_mode))
10922 	      {
10923 		HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
10924 
10925 		gcc_checking_assert (known_le (bitsize, size));
10926 		if (maybe_lt (bitsize, size)
10927 		    && reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
10928 		  op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
10929 				      size - bitsize, op0, 1);
10930 
10931 		if (reversep)
10932 		  op0 = flip_storage_order (op0_mode, op0);
10933 	      }
10934 
10935 	    /* If the result type is BLKmode, store the data into a temporary
10936 	       of the appropriate type, but with the mode corresponding to the
10937 	       mode for the data we have (op0's mode).  */
10938 	    if (mode == BLKmode)
10939 	      {
10940 		rtx new_rtx
10941 		  = assign_stack_temp_for_type (ext_mode,
10942 						GET_MODE_BITSIZE (ext_mode),
10943 						type);
10944 		emit_move_insn (new_rtx, op0);
10945 		op0 = copy_rtx (new_rtx);
10946 		PUT_MODE (op0, BLKmode);
10947 	      }
10948 
10949 	    return op0;
10950 	  }
10951 
10952 	/* If the result is BLKmode, use that to access the object
10953 	   now as well.  */
10954 	if (mode == BLKmode)
10955 	  mode1 = BLKmode;
10956 
10957 	/* Get a reference to just this component.  */
10958 	bytepos = bits_to_bytes_round_down (bitpos);
10959 	if (modifier == EXPAND_CONST_ADDRESS
10960 	    || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
10961 	  op0 = adjust_address_nv (op0, mode1, bytepos);
10962 	else
10963 	  op0 = adjust_address (op0, mode1, bytepos);
10964 
10965 	if (op0 == orig_op0)
10966 	  op0 = copy_rtx (op0);
10967 
10968 	/* Don't set memory attributes if the base expression is
10969 	   SSA_NAME that got expanded as a MEM.  In that case, we should
10970 	   just honor its original memory attributes.  */
10971 	if (TREE_CODE (tem) != SSA_NAME || !MEM_P (orig_op0))
10972 	  set_mem_attributes (op0, exp, 0);
10973 
10974 	if (REG_P (XEXP (op0, 0)))
10975 	  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10976 
10977 	/* If op0 is a temporary because the original expression was forced
10978 	   to memory, clear MEM_EXPR so that the original expression cannot
10979 	   be marked as addressable through MEM_EXPR of the temporary.  */
10980 	if (clear_mem_expr)
10981 	  set_mem_expr (op0, NULL_TREE);
10982 
10983 	MEM_VOLATILE_P (op0) |= volatilep;
10984 
10985         if (reversep
10986 	    && modifier != EXPAND_MEMORY
10987 	    && modifier != EXPAND_WRITE)
10988 	  op0 = flip_storage_order (mode1, op0);
10989 
10990 	if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
10991 	    || modifier == EXPAND_CONST_ADDRESS
10992 	    || modifier == EXPAND_INITIALIZER)
10993 	  return op0;
10994 
10995 	if (target == 0)
10996 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
10997 
10998 	convert_move (target, op0, unsignedp);
10999 	return target;
11000       }
11001 
11002     case OBJ_TYPE_REF:
11003       return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
11004 
11005     case CALL_EXPR:
11006       /* All valid uses of __builtin_va_arg_pack () are removed during
11007 	 inlining.  */
11008       if (CALL_EXPR_VA_ARG_PACK (exp))
11009 	error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
11010       {
11011 	tree fndecl = get_callee_fndecl (exp), attr;
11012 
11013 	if (fndecl
11014 	    /* Don't diagnose the error attribute in thunks, those are
11015 	       artificially created.  */
11016 	    && !CALL_FROM_THUNK_P (exp)
11017 	    && (attr = lookup_attribute ("error",
11018 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11019 	  {
11020 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11021 	    error ("%Kcall to %qs declared with attribute error: %s", exp,
11022 		   identifier_to_locale (ident),
11023 		   TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11024 	  }
11025 	if (fndecl
11026 	    /* Don't diagnose the warning attribute in thunks, those are
11027 	       artificially created.  */
11028 	    && !CALL_FROM_THUNK_P (exp)
11029 	    && (attr = lookup_attribute ("warning",
11030 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11031 	  {
11032 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11033 	    warning_at (tree_nonartificial_location (exp), 0,
11034 			"%Kcall to %qs declared with attribute warning: %s",
11035 			exp, identifier_to_locale (ident),
11036 			TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11037 	  }
11038 
11039 	/* Check for a built-in function.  */
11040 	if (fndecl && DECL_BUILT_IN (fndecl))
11041 	  {
11042 	    gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
11043 	    if (CALL_WITH_BOUNDS_P (exp))
11044 	      return expand_builtin_with_bounds (exp, target, subtarget,
11045 						 tmode, ignore);
11046 	    else
11047 	      return expand_builtin (exp, target, subtarget, tmode, ignore);
11048 	  }
11049       }
11050       return expand_call (exp, target, ignore);
11051 
11052     case VIEW_CONVERT_EXPR:
11053       op0 = NULL_RTX;
11054 
11055       /* If we are converting to BLKmode, try to avoid an intermediate
11056 	 temporary by fetching an inner memory reference.  */
11057       if (mode == BLKmode
11058 	  && poly_int_tree_p (TYPE_SIZE (type))
11059 	  && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11060 	  && handled_component_p (treeop0))
11061       {
11062 	machine_mode mode1;
11063 	poly_int64 bitsize, bitpos, bytepos;
11064 	tree offset;
11065 	int unsignedp, reversep, volatilep = 0;
11066 	tree tem
11067 	  = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11068 				 &unsignedp, &reversep, &volatilep);
11069 	rtx orig_op0;
11070 
11071 	/* ??? We should work harder and deal with non-zero offsets.  */
11072 	if (!offset
11073 	    && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11074 	    && !reversep
11075 	    && known_size_p (bitsize)
11076 	    && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11077 	  {
11078 	    /* See the normal_inner_ref case for the rationale.  */
11079 	    orig_op0
11080 	      = expand_expr_real (tem,
11081 				  (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11082 				   && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11083 				       != INTEGER_CST)
11084 				   && modifier != EXPAND_STACK_PARM
11085 				   ? target : NULL_RTX),
11086 				  VOIDmode,
11087 				  modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11088 				  NULL, true);
11089 
11090 	    if (MEM_P (orig_op0))
11091 	      {
11092 		op0 = orig_op0;
11093 
11094 		/* Get a reference to just this component.  */
11095 		if (modifier == EXPAND_CONST_ADDRESS
11096 		    || modifier == EXPAND_SUM
11097 		    || modifier == EXPAND_INITIALIZER)
11098 		  op0 = adjust_address_nv (op0, mode, bytepos);
11099 		else
11100 		  op0 = adjust_address (op0, mode, bytepos);
11101 
11102 		if (op0 == orig_op0)
11103 		  op0 = copy_rtx (op0);
11104 
11105 		set_mem_attributes (op0, treeop0, 0);
11106 		if (REG_P (XEXP (op0, 0)))
11107 		  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11108 
11109 		MEM_VOLATILE_P (op0) |= volatilep;
11110 	      }
11111 	  }
11112       }
11113 
11114       if (!op0)
11115 	op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11116 				NULL, inner_reference_p);
11117 
11118       /* If the input and output modes are both the same, we are done.  */
11119       if (mode == GET_MODE (op0))
11120 	;
11121       /* If neither mode is BLKmode, and both modes are the same size
11122 	 then we can use gen_lowpart.  */
11123       else if (mode != BLKmode
11124 	       && GET_MODE (op0) != BLKmode
11125 	       && known_eq (GET_MODE_PRECISION (mode),
11126 			    GET_MODE_PRECISION (GET_MODE (op0)))
11127 	       && !COMPLEX_MODE_P (GET_MODE (op0)))
11128 	{
11129 	  if (GET_CODE (op0) == SUBREG)
11130 	    op0 = force_reg (GET_MODE (op0), op0);
11131 	  temp = gen_lowpart_common (mode, op0);
11132 	  if (temp)
11133 	    op0 = temp;
11134 	  else
11135 	    {
11136 	      if (!REG_P (op0) && !MEM_P (op0))
11137 		op0 = force_reg (GET_MODE (op0), op0);
11138 	      op0 = gen_lowpart (mode, op0);
11139 	    }
11140 	}
11141       /* If both types are integral, convert from one mode to the other.  */
11142       else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11143 	op0 = convert_modes (mode, GET_MODE (op0), op0,
11144 			     TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11145       /* If the output type is a bit-field type, do an extraction.  */
11146       else if (reduce_bit_field)
11147 	return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11148 				  TYPE_UNSIGNED (type), NULL_RTX,
11149 				  mode, mode, false, NULL);
11150       /* As a last resort, spill op0 to memory, and reload it in a
11151 	 different mode.  */
11152       else if (!MEM_P (op0))
11153 	{
11154 	  /* If the operand is not a MEM, force it into memory.  Since we
11155 	     are going to be changing the mode of the MEM, don't call
11156 	     force_const_mem for constants because we don't allow pool
11157 	     constants to change mode.  */
11158 	  tree inner_type = TREE_TYPE (treeop0);
11159 
11160 	  gcc_assert (!TREE_ADDRESSABLE (exp));
11161 
11162 	  if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11163 	    target
11164 	      = assign_stack_temp_for_type
11165 		(TYPE_MODE (inner_type),
11166 		 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11167 
11168 	  emit_move_insn (target, op0);
11169 	  op0 = target;
11170 	}
11171 
11172       /* If OP0 is (now) a MEM, we need to deal with alignment issues.  If the
11173 	 output type is such that the operand is known to be aligned, indicate
11174 	 that it is.  Otherwise, we need only be concerned about alignment for
11175 	 non-BLKmode results.  */
11176       if (MEM_P (op0))
11177 	{
11178 	  enum insn_code icode;
11179 
11180 	  if (modifier != EXPAND_WRITE
11181 	      && modifier != EXPAND_MEMORY
11182 	      && !inner_reference_p
11183 	      && mode != BLKmode
11184 	      && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11185 	    {
11186 	      /* If the target has special handling for unaligned
11187 		 loads of this mode, use it.  */
11188 	      if ((icode = optab_handler (movmisalign_optab, mode))
11189 		  != CODE_FOR_nothing)
11190 		{
11191 		  rtx reg;
11192 
11193 		  op0 = adjust_address (op0, mode, 0);
11194 		  /* We've already validated the memory, and we're creating a
11195 		     new pseudo destination.  The predicates really can't
11196 		     fail.  */
11197 		  reg = gen_reg_rtx (mode);
11198 
11199 		  /* Nor can the insn generator.  */
11200 		  rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11201 		  emit_insn (insn);
11202 		  return reg;
11203 		}
11204 	      else if (STRICT_ALIGNMENT)
11205 		{
11206 		  poly_uint64 mode_size = GET_MODE_SIZE (mode);
11207 		  poly_uint64 temp_size = mode_size;
11208 		  if (GET_MODE (op0) != BLKmode)
11209 		    temp_size = upper_bound (temp_size,
11210 					     GET_MODE_SIZE (GET_MODE (op0)));
11211 		  rtx new_rtx
11212 		    = assign_stack_temp_for_type (mode, temp_size, type);
11213 		  rtx new_with_op0_mode
11214 		    = adjust_address (new_rtx, GET_MODE (op0), 0);
11215 
11216 		  gcc_assert (!TREE_ADDRESSABLE (exp));
11217 
11218 		  if (GET_MODE (op0) == BLKmode)
11219 		    {
11220 		      rtx size_rtx = gen_int_mode (mode_size, Pmode);
11221 		      emit_block_move (new_with_op0_mode, op0, size_rtx,
11222 				       (modifier == EXPAND_STACK_PARM
11223 					? BLOCK_OP_CALL_PARM
11224 					: BLOCK_OP_NORMAL));
11225 		    }
11226 		  else
11227 		    emit_move_insn (new_with_op0_mode, op0);
11228 
11229 		  op0 = new_rtx;
11230 		}
11231 	    }
11232 
11233 	  op0 = adjust_address (op0, mode, 0);
11234 	}
11235 
11236       return op0;
11237 
11238     case MODIFY_EXPR:
11239       {
11240 	tree lhs = treeop0;
11241 	tree rhs = treeop1;
11242 	gcc_assert (ignore);
11243 
11244 	/* Check for |= or &= of a bitfield of size one into another bitfield
11245 	   of size 1.  In this case, (unless we need the result of the
11246 	   assignment) we can do this more efficiently with a
11247 	   test followed by an assignment, if necessary.
11248 
11249 	   ??? At this point, we can't get a BIT_FIELD_REF here.  But if
11250 	   things change so we do, this code should be enhanced to
11251 	   support it.  */
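
	/* Concretely, with hypothetical 1-bit fields A and B, the code
	   below turns

	     a |= b;   into   if (b) a = 1;
	     a &= b;   into   if (!b) a = 0;

	   so the bitfield store is skipped whenever it could not change A.  */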
11252 	if (TREE_CODE (lhs) == COMPONENT_REF
11253 	    && (TREE_CODE (rhs) == BIT_IOR_EXPR
11254 		|| TREE_CODE (rhs) == BIT_AND_EXPR)
11255 	    && TREE_OPERAND (rhs, 0) == lhs
11256 	    && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11257 	    && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11258 	    && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11259 	  {
11260 	    rtx_code_label *label = gen_label_rtx ();
11261 	    int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11262 	    do_jump (TREE_OPERAND (rhs, 1),
11263 		     value ? label : 0,
11264 		     value ? 0 : label,
11265 		     profile_probability::uninitialized ());
11266 	    expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11267 			       false);
11268 	    do_pending_stack_adjust ();
11269 	    emit_label (label);
11270 	    return const0_rtx;
11271 	  }
11272 
11273 	expand_assignment (lhs, rhs, false);
11274 	return const0_rtx;
11275       }
11276 
11277     case ADDR_EXPR:
11278       return expand_expr_addr_expr (exp, target, tmode, modifier);
11279 
11280     case REALPART_EXPR:
11281       op0 = expand_normal (treeop0);
11282       return read_complex_part (op0, false);
11283 
11284     case IMAGPART_EXPR:
11285       op0 = expand_normal (treeop0);
11286       return read_complex_part (op0, true);
11287 
11288     case RETURN_EXPR:
11289     case LABEL_EXPR:
11290     case GOTO_EXPR:
11291     case SWITCH_EXPR:
11292     case ASM_EXPR:
11293       /* Expanded in cfgexpand.c.  */
11294       gcc_unreachable ();
11295 
11296     case TRY_CATCH_EXPR:
11297     case CATCH_EXPR:
11298     case EH_FILTER_EXPR:
11299     case TRY_FINALLY_EXPR:
11300       /* Lowered by tree-eh.c.  */
11301       gcc_unreachable ();
11302 
11303     case WITH_CLEANUP_EXPR:
11304     case CLEANUP_POINT_EXPR:
11305     case TARGET_EXPR:
11306     case CASE_LABEL_EXPR:
11307     case VA_ARG_EXPR:
11308     case BIND_EXPR:
11309     case INIT_EXPR:
11310     case CONJ_EXPR:
11311     case COMPOUND_EXPR:
11312     case PREINCREMENT_EXPR:
11313     case PREDECREMENT_EXPR:
11314     case POSTINCREMENT_EXPR:
11315     case POSTDECREMENT_EXPR:
11316     case LOOP_EXPR:
11317     case EXIT_EXPR:
11318     case COMPOUND_LITERAL_EXPR:
11319       /* Lowered by gimplify.c.  */
11320       gcc_unreachable ();
11321 
11322     case FDESC_EXPR:
11323       /* Function descriptors are not valid except as
11324 	 initialization constants, and should not be expanded.  */
11325       gcc_unreachable ();
11326 
11327     case WITH_SIZE_EXPR:
11328       /* WITH_SIZE_EXPR expands to its first argument.  The caller should
11329 	 have pulled out the size to use in whatever context it needed.  */
11330       return expand_expr_real (treeop0, original_target, tmode,
11331 			       modifier, alt_rtl, inner_reference_p);
11332 
11333     default:
11334       return expand_expr_real_2 (&ops, target, tmode, modifier);
11335     }
11336 }
11337 
11338 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11339    signedness of TYPE), possibly returning the result in TARGET.
11340    TYPE is known to be a partial integer type.  */
11341 static rtx
11342 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11343 {
11344   HOST_WIDE_INT prec = TYPE_PRECISION (type);
11345   if (target && GET_MODE (target) != GET_MODE (exp))
11346     target = 0;
11347   /* For constant values, reduce using build_int_cst_type. */
11348   if (CONST_INT_P (exp))
11349     {
11350       HOST_WIDE_INT value = INTVAL (exp);
11351       tree t = build_int_cst_type (type, value);
11352       return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11353     }
11354   else if (TYPE_UNSIGNED (type))
11355     {
11356       scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11357       rtx mask = immed_wide_int_const
11358 	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11359       return expand_and (mode, exp, mask, target);
11360     }
11361   else
11362     {
11363       scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11364       int count = GET_MODE_PRECISION (mode) - prec;
11365       exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11366       return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11367     }
11368 }
11369 
11370 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11371    when applied to the address of EXP produces an address known to be
11372    aligned more than BIGGEST_ALIGNMENT.  */
11373 
11374 static int
11375 is_aligning_offset (const_tree offset, const_tree exp)
11376 {
11377   /* Strip off any conversions.  */
11378   while (CONVERT_EXPR_P (offset))
11379     offset = TREE_OPERAND (offset, 0);
11380 
11381   /* We must now have a BIT_AND_EXPR with a constant that is one less than
11382      power of 2 and which is larger than BIGGEST_ALIGNMENT.  */
11383   if (TREE_CODE (offset) != BIT_AND_EXPR
11384       || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11385       || compare_tree_int (TREE_OPERAND (offset, 1),
11386 			   BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11387       || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11388     return 0;
11389 
11390   /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11391      It must be NEGATE_EXPR.  Then strip any more conversions.  */
11392   offset = TREE_OPERAND (offset, 0);
11393   while (CONVERT_EXPR_P (offset))
11394     offset = TREE_OPERAND (offset, 0);
11395 
11396   if (TREE_CODE (offset) != NEGATE_EXPR)
11397     return 0;
11398 
11399   offset = TREE_OPERAND (offset, 0);
11400   while (CONVERT_EXPR_P (offset))
11401     offset = TREE_OPERAND (offset, 0);
11402 
11403   /* This must now be the address of EXP.  */
11404   return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11405 }
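
/* The shape accepted above corresponds to rounding the address of EXP up
   to an N-byte boundary, for a power of two N exceeding
   BIGGEST_ALIGNMENT / BITS_PER_UNIT, e.g. a source idiom along the lines
   of (hypothetical names, N == 64):

     uintptr_t offset = -(uintptr_t) &exp & (64 - 1);
     char *p = (char *) &exp + offset;

   P is then 64-byte aligned, which is why the caller may record
   BIGGEST_ALIGNMENT for the resulting access.  */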
11406 
11407 /* Return the tree node if an ARG corresponds to a string constant or zero
11408    if it doesn't.  If we return nonzero, set *PTR_OFFSET to the offset
11409    in bytes within the string that ARG is accessing.  The type of the
11410    offset will be `sizetype'.  */
11411 
11412 tree
11413 string_constant (tree arg, tree *ptr_offset)
11414 {
11415   tree array, offset, lower_bound;
11416   STRIP_NOPS (arg);
11417 
11418   if (TREE_CODE (arg) == ADDR_EXPR)
11419     {
11420       if (TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
11421 	{
11422 	  *ptr_offset = size_zero_node;
11423 	  return TREE_OPERAND (arg, 0);
11424 	}
11425       else if (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL)
11426 	{
11427 	  array = TREE_OPERAND (arg, 0);
11428 	  offset = size_zero_node;
11429 	}
11430       else if (TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF)
11431 	{
11432 	  array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
11433 	  offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
11434 	  if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
11435 	    return 0;
11436 
11437 	  /* Check if the array has a nonzero lower bound.  */
11438 	  lower_bound = array_ref_low_bound (TREE_OPERAND (arg, 0));
11439 	  if (!integer_zerop (lower_bound))
11440 	    {
11441 	      /* If the offset and base aren't both constants, return 0.  */
11442 	      if (TREE_CODE (lower_bound) != INTEGER_CST)
11443 	        return 0;
11444 	      if (TREE_CODE (offset) != INTEGER_CST)
11445 		return 0;
11446 	      /* Adjust offset by the lower bound.  */
11447 	      offset = size_diffop (fold_convert (sizetype, offset),
11448 				    fold_convert (sizetype, lower_bound));
11449 	    }
11450 	}
11451       else if (TREE_CODE (TREE_OPERAND (arg, 0)) == MEM_REF)
11452 	{
11453 	  array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
11454 	  offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
11455 	  if (TREE_CODE (array) != ADDR_EXPR)
11456 	    return 0;
11457 	  array = TREE_OPERAND (array, 0);
11458 	  if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
11459 	    return 0;
11460 	}
11461       else
11462 	return 0;
11463     }
11464   else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11465     {
11466       tree arg0 = TREE_OPERAND (arg, 0);
11467       tree arg1 = TREE_OPERAND (arg, 1);
11468 
11469       STRIP_NOPS (arg0);
11470       STRIP_NOPS (arg1);
11471 
11472       if (TREE_CODE (arg0) == ADDR_EXPR
11473 	  && (TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST
11474 	      || TREE_CODE (TREE_OPERAND (arg0, 0)) == VAR_DECL))
11475 	{
11476 	  array = TREE_OPERAND (arg0, 0);
11477 	  offset = arg1;
11478 	}
11479       else if (TREE_CODE (arg1) == ADDR_EXPR
11480 	       && (TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST
11481 		   || TREE_CODE (TREE_OPERAND (arg1, 0)) == VAR_DECL))
11482 	{
11483 	  array = TREE_OPERAND (arg1, 0);
11484 	  offset = arg0;
11485 	}
11486       else
11487 	return 0;
11488     }
11489   else
11490     return 0;
11491 
11492   if (TREE_CODE (array) == STRING_CST)
11493     {
11494       *ptr_offset = fold_convert (sizetype, offset);
11495       return array;
11496     }
11497   else if (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
11498     {
11499       int length;
11500       tree init = ctor_for_folding (array);
11501 
11502       /* Variables initialized to string literals can be handled too.  */
11503       if (init == error_mark_node
11504 	  || !init
11505 	  || TREE_CODE (init) != STRING_CST)
11506 	return 0;
11507 
11508       /* Avoid const char foo[4] = "abcde";  */
11509       if (DECL_SIZE_UNIT (array) == NULL_TREE
11510 	  || TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
11511 	  || (length = TREE_STRING_LENGTH (init)) <= 0
11512 	  || compare_tree_int (DECL_SIZE_UNIT (array), length) < 0)
11513 	return 0;
11514 
11515       /* If the variable is bigger than the string literal, OFFSET must be
11516 	 constant and inside the bounds of the string literal.  */
11517       offset = fold_convert (sizetype, offset);
11518       if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0
11519 	  && (! tree_fits_uhwi_p (offset)
11520 	      || compare_tree_int (offset, length) >= 0))
11521 	return 0;
11522 
11523       *ptr_offset = offset;
11524       return init;
11525     }
11526 
11527   return 0;
11528 }
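
/* For illustration, calls of roughly the following shape are recognized
   (hypothetical arguments):

     string_constant (&"hello, world"[7], &off)
     string_constant ("hello, world" + 7, &off)

   both return the STRING_CST "hello, world" with *PTR_OFFSET set to 7,
   and given

     static const char msg[] = "abc";

   string_constant (msg + 1, &off) returns the initializer "abc" with
   *PTR_OFFSET == 1, provided the offset is constant and within the
   bounds checked above.  */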
11529 
11530 /* Generate code to calculate OPS, an exploded expression,
11531    using a store-flag instruction and return an rtx for the result.
11532    OPS reflects a comparison.
11533 
11534    If TARGET is nonzero, store the result there if convenient.
11535 
11536    Return zero if there is no suitable set-flag instruction
11537    available on this machine.
11538 
11539    Once expand_expr has been called on the arguments of the comparison,
11540    we are committed to doing the store flag, since it is not safe to
11541    re-evaluate the expression.  We emit the store-flag insn by calling
11542    emit_store_flag, but only expand the arguments if we have a reason
11543    to believe that emit_store_flag will be successful.  If we think that
11544    it will, but it isn't, we have to simulate the store-flag with a
11545    set/jump/set sequence.  */
11546 
11547 static rtx
11548 do_store_flag (sepops ops, rtx target, machine_mode mode)
11549 {
11550   enum rtx_code code;
11551   tree arg0, arg1, type;
11552   machine_mode operand_mode;
11553   int unsignedp;
11554   rtx op0, op1;
11555   rtx subtarget = target;
11556   location_t loc = ops->location;
11557 
11558   arg0 = ops->op0;
11559   arg1 = ops->op1;
11560 
11561   /* Don't crash if the comparison was erroneous.  */
11562   if (arg0 == error_mark_node || arg1 == error_mark_node)
11563     return const0_rtx;
11564 
11565   type = TREE_TYPE (arg0);
11566   operand_mode = TYPE_MODE (type);
11567   unsignedp = TYPE_UNSIGNED (type);
11568 
11569   /* We won't bother with BLKmode store-flag operations because it would mean
11570      passing a lot of information to emit_store_flag.  */
11571   if (operand_mode == BLKmode)
11572     return 0;
11573 
11574   /* We won't bother with store-flag operations involving function pointers
11575      when function pointers must be canonicalized before comparisons.  */
11576   if (targetm.have_canonicalize_funcptr_for_compare ()
11577       && ((POINTER_TYPE_P (TREE_TYPE (arg0))
11578 	   && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
11579 	  || (POINTER_TYPE_P (TREE_TYPE (arg1))
11580 	      && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
11581     return 0;
11582 
11583   STRIP_NOPS (arg0);
11584   STRIP_NOPS (arg1);
11585 
11586   /* For vector typed comparisons emit code to generate the desired
11587      all-ones or all-zeros mask.  Conveniently use the VEC_COND_EXPR
11588      expander for this.  */
11589   if (TREE_CODE (ops->type) == VECTOR_TYPE)
11590     {
11591       tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
11592       if (VECTOR_BOOLEAN_TYPE_P (ops->type)
11593 	  && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
11594 	return expand_vec_cmp_expr (ops->type, ifexp, target);
11595       else
11596 	{
11597 	  tree if_true = constant_boolean_node (true, ops->type);
11598 	  tree if_false = constant_boolean_node (false, ops->type);
11599 	  return expand_vec_cond_expr (ops->type, ifexp, if_true,
11600 				       if_false, target);
11601 	}
11602     }
11603 
11604   /* Get the rtx comparison code to use.  We know that EXP is a comparison
11605      operation of some type.  Some comparisons against 1 and -1 can be
11606      converted to comparisons with zero.  Do so here so that the tests
11607      below will be aware that we have a comparison with zero.   These
11608      tests will not catch constants in the first operand, but constants
11609      are rarely passed as the first operand.  */
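
  /* For illustration, the rewrites performed by the switch below are:

       x <  1    becomes   x <= 0
       x >= 1    becomes   x >  0
       x <= -1   becomes   x <  0	(signed operands only)
       x >  -1   becomes   x >= 0	(signed operands only)  */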
11610 
11611   switch (ops->code)
11612     {
11613     case EQ_EXPR:
11614       code = EQ;
11615       break;
11616     case NE_EXPR:
11617       code = NE;
11618       break;
11619     case LT_EXPR:
11620       if (integer_onep (arg1))
11621 	arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
11622       else
11623 	code = unsignedp ? LTU : LT;
11624       break;
11625     case LE_EXPR:
11626       if (! unsignedp && integer_all_onesp (arg1))
11627 	arg1 = integer_zero_node, code = LT;
11628       else
11629 	code = unsignedp ? LEU : LE;
11630       break;
11631     case GT_EXPR:
11632       if (! unsignedp && integer_all_onesp (arg1))
11633 	arg1 = integer_zero_node, code = GE;
11634       else
11635 	code = unsignedp ? GTU : GT;
11636       break;
11637     case GE_EXPR:
11638       if (integer_onep (arg1))
11639 	arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
11640       else
11641 	code = unsignedp ? GEU : GE;
11642       break;
11643 
11644     case UNORDERED_EXPR:
11645       code = UNORDERED;
11646       break;
11647     case ORDERED_EXPR:
11648       code = ORDERED;
11649       break;
11650     case UNLT_EXPR:
11651       code = UNLT;
11652       break;
11653     case UNLE_EXPR:
11654       code = UNLE;
11655       break;
11656     case UNGT_EXPR:
11657       code = UNGT;
11658       break;
11659     case UNGE_EXPR:
11660       code = UNGE;
11661       break;
11662     case UNEQ_EXPR:
11663       code = UNEQ;
11664       break;
11665     case LTGT_EXPR:
11666       code = LTGT;
11667       break;
11668 
11669     default:
11670       gcc_unreachable ();
11671     }
11672 
11673   /* Put a constant second.  */
11674   if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
11675       || TREE_CODE (arg0) == FIXED_CST)
11676     {
11677       std::swap (arg0, arg1);
11678       code = swap_condition (code);
11679     }
11680 
11681   /* If this is an equality or inequality test of a single bit, we can
11682      do this by shifting the bit being tested to the low-order bit and
11683      masking the result with the constant 1.  If the condition was EQ,
11684      we xor it with 1.  This does not require an scc insn and is faster
11685      than an scc insn even if we have it.
11686 
11687      The code to make this transformation was moved into fold_single_bit_test,
11688      so we just call into the folder and expand its result.  */
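
  /* For example, assuming fold_single_bit_test succeeds:

       (x & 8) != 0   becomes   (x >> 3) & 1
       (x & 8) == 0   becomes   ((x >> 3) & 1) ^ 1

     so no scc instruction is required.  */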
11689 
11690   if ((code == NE || code == EQ)
11691       && integer_zerop (arg1)
11692       && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
11693     {
11694       gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
11695       if (srcstmt
11696 	  && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
11697 	{
11698 	  enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
11699 	  tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
11700 	  tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
11701 				       gimple_assign_rhs1 (srcstmt),
11702 				       gimple_assign_rhs2 (srcstmt));
11703 	  temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
11704 	  if (temp)
11705 	    return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
11706 	}
11707     }
11708 
11709   if (! get_subtarget (target)
11710       || GET_MODE (subtarget) != operand_mode)
11711     subtarget = 0;
11712 
11713   expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
11714 
11715   if (target == 0)
11716     target = gen_reg_rtx (mode);
11717 
11718   /* Try a cstore if possible.  */
11719   return emit_store_flag_force (target, code, op0, op1,
11720 				operand_mode, unsignedp,
11721 				(TYPE_PRECISION (ops->type) == 1
11722 				 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
11723 }
11724 
11725 /* Attempt to generate a casesi instruction.  Returns 1 if successful,
11726    0 otherwise (i.e. if there is no casesi instruction).
11727 
11728    DEFAULT_PROBABILITY is the probability of jumping to the default
11729    label.  */
11730 int
11731 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
11732 	    rtx table_label, rtx default_label, rtx fallback_label,
11733             profile_probability default_probability)
11734 {
11735   struct expand_operand ops[5];
11736   scalar_int_mode index_mode = SImode;
11737   rtx op1, op2, index;
11738 
11739   if (! targetm.have_casesi ())
11740     return 0;
11741 
11742   /* The index must be some form of integer.  Convert it to SImode.  */
11743   scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
11744   if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
11745     {
11746       rtx rangertx = expand_normal (range);
11747 
11748       /* We must handle the endpoints in the original mode.  */
11749       index_expr = build2 (MINUS_EXPR, index_type,
11750 			   index_expr, minval);
11751       minval = integer_zero_node;
11752       index = expand_normal (index_expr);
11753       if (default_label)
11754         emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
11755 				 omode, 1, default_label,
11756                                  default_probability);
11757       /* Now we can safely truncate.  */
11758       index = convert_to_mode (index_mode, index, 0);
11759     }
11760   else
11761     {
11762       if (omode != index_mode)
11763 	{
11764 	  index_type = lang_hooks.types.type_for_mode (index_mode, 0);
11765 	  index_expr = fold_convert (index_type, index_expr);
11766 	}
11767 
11768       index = expand_normal (index_expr);
11769     }
11770 
11771   do_pending_stack_adjust ();
11772 
11773   op1 = expand_normal (minval);
11774   op2 = expand_normal (range);
11775 
11776   create_input_operand (&ops[0], index, index_mode);
11777   create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
11778   create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
11779   create_fixed_operand (&ops[3], table_label);
11780   create_fixed_operand (&ops[4], (default_label
11781 				  ? default_label
11782 				  : fallback_label));
11783   expand_jump_insn (targetm.code_for_casesi, 5, ops);
11784   return 1;
11785 }
11786 
11787 /* Attempt to generate a tablejump instruction; same concept.  */
11788 /* Subroutine of the next function.
11789 
11790    INDEX is the value being switched on, with the lowest value
11791    in the table already subtracted.
11792    MODE is its expected mode (needed if INDEX is constant).
11793    RANGE is the length of the jump table.
11794    TABLE_LABEL is a CODE_LABEL rtx for the table itself.
11795 
11796    DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
11797    index value is out of range.
11798    DEFAULT_PROBABILITY is the probability of jumping to
11799    the default label.  */
11800 
11801 static void
11802 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
11803 	      rtx default_label, profile_probability default_probability)
11804 {
11805   rtx temp, vector;
11806 
11807   if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
11808     cfun->cfg->max_jumptable_ents = INTVAL (range);
11809 
11810   /* Do an unsigned comparison (in the proper mode) between the index
11811      expression and the value which represents the length of the range.
11812      Since we just finished subtracting the lower bound of the range
11813      from the index expression, this comparison allows us to simultaneously
11814      check that the original index expression value is both greater than
11815      or equal to the minimum value of the range and less than or equal to
11816      the maximum value of the range.  */
11817 
11818   if (default_label)
11819     emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
11820 			     default_label, default_probability);
11821 
11822 
11823   /* If index is in range, it must fit in Pmode.
11824      Convert to Pmode so we can index with it.  */
11825   if (mode != Pmode)
11826     index = convert_to_mode (Pmode, index, 1);
11827 
11828   /* Don't let a MEM slip through, because then INDEX that comes
11829      out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
11830      and break_out_memory_refs will go to work on it and mess it up.  */
11831 #ifdef PIC_CASE_VECTOR_ADDRESS
11832   if (flag_pic && !REG_P (index))
11833     index = copy_to_mode_reg (Pmode, index);
11834 #endif
11835 
11836   /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
11837      GET_MODE_SIZE, because this indicates how large insns are.  The other
11838      uses should all be Pmode, because they are addresses.  This code
11839      could fail if addresses and insns are not the same size.  */
11840   index = simplify_gen_binary (MULT, Pmode, index,
11841 			       gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
11842 					     Pmode));
11843   index = simplify_gen_binary (PLUS, Pmode, index,
11844 			       gen_rtx_LABEL_REF (Pmode, table_label));
11845 
11846 #ifdef PIC_CASE_VECTOR_ADDRESS
11847   if (flag_pic)
11848     index = PIC_CASE_VECTOR_ADDRESS (index);
11849   else
11850 #endif
11851     index = memory_address (CASE_VECTOR_MODE, index);
11852   temp = gen_reg_rtx (CASE_VECTOR_MODE);
11853   vector = gen_const_mem (CASE_VECTOR_MODE, index);
11854   convert_move (temp, vector, 0);
11855 
11856   emit_jump_insn (targetm.gen_tablejump (temp, table_label));
11857 
11858   /* If we are generating PIC code or if the table is PC-relative, the
11859      table and JUMP_INSN must be adjacent, so don't output a BARRIER.  */
11860   if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
11861     emit_barrier ();
11862 }
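
/* In GNU C the sequence emitted above is roughly equivalent to the
   following dispatch, with hypothetical names and INDEX already having
   the table's lowest value subtracted; the single unsigned comparison
   covers both the below-minimum and above-maximum cases:

     if ((unsigned long) index > (unsigned long) range)
       goto default_case;
     goto *table[index];

   where each table entry lives at TABLE_LABEL plus INDEX times the size
   of a CASE_VECTOR_MODE element.  */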
11863 
11864 int
11865 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
11866 	       rtx table_label, rtx default_label,
11867 	       profile_probability default_probability)
11868 {
11869   rtx index;
11870 
11871   if (! targetm.have_tablejump ())
11872     return 0;
11873 
11874   index_expr = fold_build2 (MINUS_EXPR, index_type,
11875 			    fold_convert (index_type, index_expr),
11876 			    fold_convert (index_type, minval));
11877   index = expand_normal (index_expr);
11878   do_pending_stack_adjust ();
11879 
11880   do_tablejump (index, TYPE_MODE (index_type),
11881 		convert_modes (TYPE_MODE (index_type),
11882 			       TYPE_MODE (TREE_TYPE (range)),
11883 			       expand_normal (range),
11884 			       TYPE_UNSIGNED (TREE_TYPE (range))),
11885 		table_label, default_label, default_probability);
11886   return 1;
11887 }
11888 
11889 /* Return a CONST_VECTOR rtx representing a vector mask for
11890    a VECTOR_CST of booleans.  */
11891 static rtx
11892 const_vector_mask_from_tree (tree exp)
11893 {
11894   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
11895   machine_mode inner = GET_MODE_INNER (mode);
11896 
11897   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
11898 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
11899   unsigned int count = builder.encoded_nelts ();
11900   for (unsigned int i = 0; i < count; ++i)
11901     {
11902       tree elt = VECTOR_CST_ELT (exp, i);
11903       gcc_assert (TREE_CODE (elt) == INTEGER_CST);
11904       if (integer_zerop (elt))
11905 	builder.quick_push (CONST0_RTX (inner));
11906       else if (integer_onep (elt)
11907 	       || integer_minus_onep (elt))
11908 	builder.quick_push (CONSTM1_RTX (inner));
11909       else
11910 	gcc_unreachable ();
11911     }
11912   return builder.build ();
11913 }
11914 
11915 /* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
11916    Return a constant scalar rtx of mode MODE in which bit X is set if element
11917    X of EXP is nonzero.  */
11918 static rtx
11919 const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
11920 {
11921   wide_int res = wi::zero (GET_MODE_PRECISION (mode));
11922   tree elt;
11923 
11924   /* The result has a fixed number of bits so the input must too.  */
11925   unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
11926   for (unsigned int i = 0; i < nunits; ++i)
11927     {
11928       elt = VECTOR_CST_ELT (exp, i);
11929       gcc_assert (TREE_CODE (elt) == INTEGER_CST);
11930       if (integer_all_onesp (elt))
11931 	res = wi::set_bit (res, i);
11932       else
11933 	gcc_assert (integer_zerop (elt));
11934     }
11935 
11936   return immed_wide_int_const (res, mode);
11937 }
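
/* For example, under the checks above a 4-element boolean VECTOR_CST
   { -1, 0, -1, 0 } yields the scalar constant 0b0101 (i.e. 5) in MODE:
   bits 0 and 2 are set because elements 0 and 2 are all-ones.  */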
11938 
11939 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree.  */
11940 static rtx
11941 const_vector_from_tree (tree exp)
11942 {
11943   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
11944 
11945   if (initializer_zerop (exp))
11946     return CONST0_RTX (mode);
11947 
11948   if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
11949     return const_vector_mask_from_tree (exp);
11950 
11951   machine_mode inner = GET_MODE_INNER (mode);
11952 
11953   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
11954 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
11955   unsigned int count = builder.encoded_nelts ();
11956   for (unsigned int i = 0; i < count; ++i)
11957     {
11958       tree elt = VECTOR_CST_ELT (exp, i);
11959       if (TREE_CODE (elt) == REAL_CST)
11960 	builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
11961 							  inner));
11962       else if (TREE_CODE (elt) == FIXED_CST)
11963 	builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
11964 							  inner));
11965       else
11966 	builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
11967 						  inner));
11968     }
11969   return builder.build ();
11970 }
11971 
11972 /* Build a decl for a personality function given a language prefix.  */
11973 
11974 tree
11975 build_personality_function (const char *lang)
11976 {
11977   const char *unwind_and_version;
11978   tree decl, type;
11979   char *name;
11980 
11981   switch (targetm_common.except_unwind_info (&global_options))
11982     {
11983     case UI_NONE:
11984       return NULL;
11985     case UI_SJLJ:
11986       unwind_and_version = "_sj0";
11987       break;
11988     case UI_DWARF2:
11989     case UI_TARGET:
11990       unwind_and_version = "_v0";
11991       break;
11992     case UI_SEH:
11993       unwind_and_version = "_seh0";
11994       break;
11995     default:
11996       gcc_unreachable ();
11997     }
11998 
11999   name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
12000 
12001   type = build_function_type_list (integer_type_node, integer_type_node,
12002 				   long_long_unsigned_type_node,
12003 				   ptr_type_node, ptr_type_node, NULL_TREE);
12004   decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
12005 		     get_identifier (name), type);
12006   DECL_ARTIFICIAL (decl) = 1;
12007   DECL_EXTERNAL (decl) = 1;
12008   TREE_PUBLIC (decl) = 1;
12009 
12010   /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
12011      are the flags assigned by targetm.encode_section_info.  */
12012   SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
12013 
12014   return decl;
12015 }
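
/* For instance, a hypothetical call build_personality_function ("gxx")
   yields a declaration named "__gxx_personality_v0" when DWARF2 or
   target unwind info is in use, and "__gxx_personality_sj0" under SJLJ
   exceptions.  */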
12016 
12017 /* Extracts the personality function of DECL and returns the corresponding
12018    libfunc.  */
12019 
12020 rtx
12021 get_personality_function (tree decl)
12022 {
12023   tree personality = DECL_FUNCTION_PERSONALITY (decl);
12024   enum eh_personality_kind pk;
12025 
12026   pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
12027   if (pk == eh_personality_none)
12028     return NULL;
12029 
12030   if (!personality
12031       && pk == eh_personality_any)
12032     personality = lang_hooks.eh_personality ();
12033 
12034   if (pk == eh_personality_lang)
12035     gcc_assert (personality != NULL_TREE);
12036 
12037   return XEXP (DECL_RTL (personality), 0);
12038 }
12039 
12040 /* Returns a tree for the size of EXP in bytes.  */
12041 
12042 static tree
12043 tree_expr_size (const_tree exp)
12044 {
12045   if (DECL_P (exp)
12046       && DECL_SIZE_UNIT (exp) != 0)
12047     return DECL_SIZE_UNIT (exp);
12048   else
12049     return size_in_bytes (TREE_TYPE (exp));
12050 }
12051 
12052 /* Return an rtx for the size in bytes of the value of EXP.  */
12053 
12054 rtx
12055 expr_size (tree exp)
12056 {
12057   tree size;
12058 
12059   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12060     size = TREE_OPERAND (exp, 1);
12061   else
12062     {
12063       size = tree_expr_size (exp);
12064       gcc_assert (size);
12065       gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
12066     }
12067 
12068   return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
12069 }
12070 
12071 /* Return a wide integer for the size in bytes of the value of EXP, or -1
12072    if the size can vary or is larger than an integer.  */
12073 
12074 static HOST_WIDE_INT
12075 int_expr_size (tree exp)
12076 {
12077   tree size;
12078 
12079   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12080     size = TREE_OPERAND (exp, 1);
12081   else
12082     {
12083       size = tree_expr_size (exp);
12084       gcc_assert (size);
12085     }
12086 
12087   if (size == 0 || !tree_fits_shwi_p (size))
12088     return -1;
12089 
12090   return tree_to_shwi (size);
12091 }
12092