1 /* Convert tree expression to rtl instructions, for GNU compiler.
2    Copyright (C) 1988-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "ssa.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "attribs.h"
43 #include "varasm.h"
44 #include "except.h"
45 #include "insn-attr.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "stmt.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
51 #include "expr.h"
52 #include "optabs-tree.h"
53 #include "libfuncs.h"
54 #include "reload.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-dfa.h"
58 #include "tree-ssa-live.h"
59 #include "tree-outof-ssa.h"
60 #include "tree-ssa-address.h"
61 #include "builtins.h"
62 #include "ccmp.h"
63 #include "gimple-fold.h"
64 #include "rtx-vector-builder.h"
65 
66 
67 /* If this is nonzero, we do not bother generating VOLATILE
68    around volatile memory references, and we are willing to
69    output indirect addresses.  If cse is to follow, we reject
70    indirect addresses so a useful potential cse is generated;
71    if it is used only once, instruction combination will produce
72    the same indirect address eventually.  */
73 int cse_not_expected;
74 
75 static bool block_move_libcall_safe_for_call_parm (void);
76 static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT,
77 					unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
78 					unsigned HOST_WIDE_INT);
79 static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
80 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
81 static rtx_insn *compress_float_constant (rtx, rtx);
82 static rtx get_subtarget (rtx);
83 static void store_constructor (tree, rtx, int, poly_int64, bool);
84 static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
85 			machine_mode, tree, alias_set_type, bool, bool);
86 
87 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
88 
89 static int is_aligning_offset (const_tree, const_tree);
90 static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
91 static rtx do_store_flag (sepops, rtx, machine_mode);
92 #ifdef PUSH_ROUNDING
93 static void emit_single_push_insn (machine_mode, rtx, tree);
94 #endif
95 static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
96 			  profile_probability);
97 static rtx const_vector_from_tree (tree);
98 static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
99 static tree tree_expr_size (const_tree);
100 static HOST_WIDE_INT int_expr_size (tree);
101 static void convert_mode_scalar (rtx, rtx, int);
102 
103 
104 /* This is run to set up which modes can be used directly in memory and to
105    fill the direct_load, direct_store and float_extend_from_mem tables.  It
106    is run at the beginning of compilation and when the target is reinitialized.  */
107 
108 void
109 init_expr_target (void)
110 {
111   rtx pat;
112   int num_clobbers;
113   rtx mem, mem1;
114   rtx reg;
115 
116   /* Try indexing by frame ptr and try by stack ptr.
117      It is known that on the Convex the stack ptr isn't a valid index.
118      With luck, one or the other is valid on any machine.  */
119   mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
120   mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);
121 
122   /* A scratch register we can modify in-place below to avoid
123      useless RTL allocations.  */
124   reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
125 
126   rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
127   pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
128   PATTERN (insn) = pat;
129 
130   for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
131        mode = (machine_mode) ((int) mode + 1))
132     {
133       int regno;
134 
135       direct_load[(int) mode] = direct_store[(int) mode] = 0;
136       PUT_MODE (mem, mode);
137       PUT_MODE (mem1, mode);
138 
139       /* See if there is some register that can be used in this mode and
140 	 directly loaded or stored from memory.  */
141 
142       if (mode != VOIDmode && mode != BLKmode)
143 	for (regno = 0; regno < FIRST_PSEUDO_REGISTER
144 	     && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
145 	     regno++)
146 	  {
147 	    if (!targetm.hard_regno_mode_ok (regno, mode))
148 	      continue;
149 
150 	    set_mode_and_regno (reg, mode, regno);
151 
152 	    SET_SRC (pat) = mem;
153 	    SET_DEST (pat) = reg;
154 	    if (recog (pat, insn, &num_clobbers) >= 0)
155 	      direct_load[(int) mode] = 1;
156 
157 	    SET_SRC (pat) = mem1;
158 	    SET_DEST (pat) = reg;
159 	    if (recog (pat, insn, &num_clobbers) >= 0)
160 	      direct_load[(int) mode] = 1;
161 
162 	    SET_SRC (pat) = reg;
163 	    SET_DEST (pat) = mem;
164 	    if (recog (pat, insn, &num_clobbers) >= 0)
165 	      direct_store[(int) mode] = 1;
166 
167 	    SET_SRC (pat) = reg;
168 	    SET_DEST (pat) = mem1;
169 	    if (recog (pat, insn, &num_clobbers) >= 0)
170 	      direct_store[(int) mode] = 1;
171 	  }
172     }
173 
174   mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
175 
176   opt_scalar_float_mode mode_iter;
177   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
178     {
179       scalar_float_mode mode = mode_iter.require ();
180       scalar_float_mode srcmode;
181       FOR_EACH_MODE_UNTIL (srcmode, mode)
182 	{
183 	  enum insn_code ic;
184 
185 	  ic = can_extend_p (mode, srcmode, 0);
186 	  if (ic == CODE_FOR_nothing)
187 	    continue;
188 
189 	  PUT_MODE (mem, srcmode);
190 
191 	  if (insn_operand_matches (ic, 1, mem))
192 	    float_extend_from_mem[mode][srcmode] = true;
193 	}
194     }
195 }
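/* Editorial note (not part of the original source): the direct_load and
   direct_store tables filled in above are consulted later in this file when
   deciding whether a MEM can be referred to directly in a given mode, e.g.
   in convert_mode_scalar below:

     if (MEM_P (from)
	 && ! MEM_VOLATILE_P (from)
	 && direct_load[(int) to_mode]
	 && ! mode_dependent_address_p (XEXP (from, 0), MEM_ADDR_SPACE (from)))
       ...refer to FROM in the narrower mode in place...  */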
196 
197 /* This is run at the start of compiling a function.  */
198 
199 void
200 init_expr (void)
201 {
202   memset (&crtl->expr, 0, sizeof (crtl->expr));
203 }
204 
205 /* Copy data from FROM to TO, where the machine modes are not the same.
206    Both modes may be integer, or both may be floating, or both may be
207    fixed-point.
208    UNSIGNEDP should be nonzero if FROM is an unsigned type.
209    This causes zero-extension instead of sign-extension.  */
210 
211 void
212 convert_move (rtx to, rtx from, int unsignedp)
213 {
214   machine_mode to_mode = GET_MODE (to);
215   machine_mode from_mode = GET_MODE (from);
216 
217   gcc_assert (to_mode != BLKmode);
218   gcc_assert (from_mode != BLKmode);
219 
220   /* If the source and destination are already the same, then there's
221      nothing to do.  */
222   if (to == from)
223     return;
224 
225   /* If FROM is a SUBREG that indicates that we have already done at least
226      the required extension, strip it.  We don't handle such SUBREGs as
227      TO here.  */
228 
229   scalar_int_mode to_int_mode;
230   if (GET_CODE (from) == SUBREG
231       && SUBREG_PROMOTED_VAR_P (from)
232       && is_a <scalar_int_mode> (to_mode, &to_int_mode)
233       && (GET_MODE_PRECISION (subreg_promoted_mode (from))
234 	  >= GET_MODE_PRECISION (to_int_mode))
235       && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
236     {
237       from = gen_lowpart (to_int_mode, SUBREG_REG (from));
238       from_mode = to_int_mode;
239     }
240 
241   gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
242 
243   if (to_mode == from_mode
244       || (from_mode == VOIDmode && CONSTANT_P (from)))
245     {
246       emit_move_insn (to, from);
247       return;
248     }
249 
250   if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
251     {
252       gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
253 			    GET_MODE_BITSIZE (to_mode)));
254 
255       if (VECTOR_MODE_P (to_mode))
256 	from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
257       else
258 	to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
259 
260       emit_move_insn (to, from);
261       return;
262     }
263 
264   if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
265     {
266       convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
267       convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
268       return;
269     }
270 
271   convert_mode_scalar (to, from, unsignedp);
272 }
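/* Illustrative sketch (editorial, not part of the original source): a
   typical use of convert_move is to widen a narrow value into a fresh wider
   pseudo.  The helper below is hypothetical and guarded out so it is never
   compiled.  */
#if 0
/* Zero-extend the QImode value SRC into a new SImode pseudo.
   UNSIGNEDP == 1 requests zero- rather than sign-extension.  */
static rtx
example_widen_byte (rtx src)
{
  rtx dest = gen_reg_rtx (SImode);
  convert_move (dest, src, 1);
  return dest;
}
#endif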
273 
274 /* Like convert_move, but deals only with scalar modes.  */
275 
276 static void
277 convert_mode_scalar (rtx to, rtx from, int unsignedp)
278 {
279   /* Both modes should be scalar types.  */
280   scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
281   scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
282   bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
283   bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
284   enum insn_code code;
285   rtx libcall;
286 
287   gcc_assert (to_real == from_real);
288 
289   /* rtx code for making an equivalent value.  */
290   enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
291 			      : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
292 
293   if (to_real)
294     {
295       rtx value;
296       rtx_insn *insns;
297       convert_optab tab;
298 
299       gcc_assert ((GET_MODE_PRECISION (from_mode)
300 		   != GET_MODE_PRECISION (to_mode))
301 		  || (DECIMAL_FLOAT_MODE_P (from_mode)
302 		      != DECIMAL_FLOAT_MODE_P (to_mode)));
303 
304       if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
305 	/* Conversion between decimal float and binary float, same size.  */
306 	tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
307       else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
308 	tab = sext_optab;
309       else
310 	tab = trunc_optab;
311 
312       /* Try converting directly if the insn is supported.  */
313 
314       code = convert_optab_handler (tab, to_mode, from_mode);
315       if (code != CODE_FOR_nothing)
316 	{
317 	  emit_unop_insn (code, to, from,
318 			  tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
319 	  return;
320 	}
321 
322       /* Otherwise use a libcall.  */
323       libcall = convert_optab_libfunc (tab, to_mode, from_mode);
324 
325       /* Is this conversion implemented yet?  */
326       gcc_assert (libcall);
327 
328       start_sequence ();
329       value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
330 				       from, from_mode);
331       insns = get_insns ();
332       end_sequence ();
333       emit_libcall_block (insns, to, value,
334 			  tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
335 								       from)
336 			  : gen_rtx_FLOAT_EXTEND (to_mode, from));
337       return;
338     }
339 
340   /* Handle pointer conversion.  */			/* SPEE 900220.  */
341   /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
342   {
343     convert_optab ctab;
344 
345     if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
346       ctab = trunc_optab;
347     else if (unsignedp)
348       ctab = zext_optab;
349     else
350       ctab = sext_optab;
351 
352     if (convert_optab_handler (ctab, to_mode, from_mode)
353 	!= CODE_FOR_nothing)
354       {
355 	emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
356 			to, from, UNKNOWN);
357 	return;
358       }
359   }
360 
361   /* Targets are expected to provide conversion insns between PxImode and
362      xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
363   if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
364     {
365       scalar_int_mode full_mode
366 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
367 
368       gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
369 		  != CODE_FOR_nothing);
370 
371       if (full_mode != from_mode)
372 	from = convert_to_mode (full_mode, from, unsignedp);
373       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
374 		      to, from, UNKNOWN);
375       return;
376     }
377   if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
378     {
379       rtx new_from;
380       scalar_int_mode full_mode
381 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
382       convert_optab ctab = unsignedp ? zext_optab : sext_optab;
383       enum insn_code icode;
384 
385       icode = convert_optab_handler (ctab, full_mode, from_mode);
386       gcc_assert (icode != CODE_FOR_nothing);
387 
388       if (to_mode == full_mode)
389 	{
390 	  emit_unop_insn (icode, to, from, UNKNOWN);
391 	  return;
392 	}
393 
394       new_from = gen_reg_rtx (full_mode);
395       emit_unop_insn (icode, new_from, from, UNKNOWN);
396 
397       /* else proceed to integer conversions below.  */
398       from_mode = full_mode;
399       from = new_from;
400     }
401 
402    /* Make sure both are fixed-point modes or both are not.  */
403    gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
404 	       ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
405    if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
406     {
407       /* If we widen from_mode to to_mode and they are in the same class,
408 	 we won't saturate the result.
409 	 Otherwise, always saturate the result to play safe.  */
410       if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
411 	  && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
412 	expand_fixed_convert (to, from, 0, 0);
413       else
414 	expand_fixed_convert (to, from, 0, 1);
415       return;
416     }
417 
418   /* Now both modes are integers.  */
419 
420   /* Handle expanding beyond a word.  */
421   if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
422       && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
423     {
424       rtx_insn *insns;
425       rtx lowpart;
426       rtx fill_value;
427       rtx lowfrom;
428       int i;
429       scalar_mode lowpart_mode;
430       int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
431 
432       /* Try converting directly if the insn is supported.  */
433       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
434 	  != CODE_FOR_nothing)
435 	{
436 	  /* If FROM is a SUBREG, put it into a register.  Do this
437 	     so that we always generate the same set of insns for
438 	     better cse'ing; if an intermediate assignment occurred,
439 	     we won't be doing the operation directly on the SUBREG.  */
440 	  if (optimize > 0 && GET_CODE (from) == SUBREG)
441 	    from = force_reg (from_mode, from);
442 	  emit_unop_insn (code, to, from, equiv_code);
443 	  return;
444 	}
445       /* Next, try converting via full word.  */
446       else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
447 	       && ((code = can_extend_p (to_mode, word_mode, unsignedp))
448 		   != CODE_FOR_nothing))
449 	{
450 	  rtx word_to = gen_reg_rtx (word_mode);
451 	  if (REG_P (to))
452 	    {
453 	      if (reg_overlap_mentioned_p (to, from))
454 		from = force_reg (from_mode, from);
455 	      emit_clobber (to);
456 	    }
457 	  convert_move (word_to, from, unsignedp);
458 	  emit_unop_insn (code, to, word_to, equiv_code);
459 	  return;
460 	}
461 
462       /* No special multiword conversion insn; do it by hand.  */
463       start_sequence ();
464 
465       /* Since we will turn this into a no conflict block, we must ensure
466          the source does not overlap the target, so force it into an
467          isolated register if it might.  Likewise for any MEM input, since the
468          conversion sequence might require several references to it and we
469          must ensure we're getting the same value every time.  */
470 
471       if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
472 	from = force_reg (from_mode, from);
473 
474       /* Get a copy of FROM widened to a word, if necessary.  */
475       if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
476 	lowpart_mode = word_mode;
477       else
478 	lowpart_mode = from_mode;
479 
480       lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
481 
482       lowpart = gen_lowpart (lowpart_mode, to);
483       emit_move_insn (lowpart, lowfrom);
484 
485       /* Compute the value to put in each remaining word.  */
486       if (unsignedp)
487 	fill_value = const0_rtx;
488       else
489 	fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
490 					    LT, lowfrom, const0_rtx,
491 					    lowpart_mode, 0, -1);
492 
493       /* Fill the remaining words.  */
494       for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
495 	{
496 	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
497 	  rtx subword = operand_subword (to, index, 1, to_mode);
498 
499 	  gcc_assert (subword);
500 
501 	  if (fill_value != subword)
502 	    emit_move_insn (subword, fill_value);
503 	}
504 
505       insns = get_insns ();
506       end_sequence ();
507 
508       emit_insn (insns);
509       return;
510     }
511 
512   /* Truncating multi-word to a word or less.  */
513   if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
514       && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
515     {
516       if (!((MEM_P (from)
517 	     && ! MEM_VOLATILE_P (from)
518 	     && direct_load[(int) to_mode]
519 	     && ! mode_dependent_address_p (XEXP (from, 0),
520 					    MEM_ADDR_SPACE (from)))
521 	    || REG_P (from)
522 	    || GET_CODE (from) == SUBREG))
523 	from = force_reg (from_mode, from);
524       convert_move (to, gen_lowpart (word_mode, from), 0);
525       return;
526     }
527 
528   /* Now follow all the conversions between integers
529      no more than a word long.  */
530 
531   /* For truncation, usually we can just refer to FROM in a narrower mode.  */
532   if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
533       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
534     {
535       if (!((MEM_P (from)
536 	     && ! MEM_VOLATILE_P (from)
537 	     && direct_load[(int) to_mode]
538 	     && ! mode_dependent_address_p (XEXP (from, 0),
539 					    MEM_ADDR_SPACE (from)))
540 	    || REG_P (from)
541 	    || GET_CODE (from) == SUBREG))
542 	from = force_reg (from_mode, from);
543       if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
544 	  && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
545 	from = copy_to_reg (from);
546       emit_move_insn (to, gen_lowpart (to_mode, from));
547       return;
548     }
549 
550   /* Handle extension.  */
551   if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
552     {
553       /* Convert directly if that works.  */
554       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
555 	  != CODE_FOR_nothing)
556 	{
557 	  emit_unop_insn (code, to, from, equiv_code);
558 	  return;
559 	}
560       else
561 	{
562 	  scalar_mode intermediate;
563 	  rtx tmp;
564 	  int shift_amount;
565 
566 	  /* Search for a mode to convert via.  */
567 	  opt_scalar_mode intermediate_iter;
568 	  FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
569 	    {
570 	      scalar_mode intermediate = intermediate_iter.require ();
571 	      if (((can_extend_p (to_mode, intermediate, unsignedp)
572 		    != CODE_FOR_nothing)
573 		   || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
574 		       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
575 							 intermediate)))
576 		  && (can_extend_p (intermediate, from_mode, unsignedp)
577 		      != CODE_FOR_nothing))
578 		{
579 		  convert_move (to, convert_to_mode (intermediate, from,
580 						     unsignedp), unsignedp);
581 		  return;
582 		}
583 	    }
584 
585 	  /* No suitable intermediate mode.
586 	     Generate what we need with	shifts.  */
587 	  shift_amount = (GET_MODE_PRECISION (to_mode)
588 			  - GET_MODE_PRECISION (from_mode));
589 	  from = gen_lowpart (to_mode, force_reg (from_mode, from));
590 	  tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
591 			      to, unsignedp);
592 	  tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
593 			      to, unsignedp);
594 	  if (tmp != to)
595 	    emit_move_insn (to, tmp);
596 	  return;
597 	}
598     }
599 
600   /* Support special truncate insns for certain modes.  */
601   if (convert_optab_handler (trunc_optab, to_mode,
602 			     from_mode) != CODE_FOR_nothing)
603     {
604       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
605 		      to, from, UNKNOWN);
606       return;
607     }
608 
609   /* Handle truncation of volatile memrefs, and so on;
610      the things that couldn't be truncated directly,
611      and for which there was no special instruction.
612 
613      ??? Code above formerly short-circuited this, for most integer
614      mode pairs, with a force_reg in from_mode followed by a recursive
615      call to this routine.  Appears always to have been wrong.  */
616   if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
617     {
618       rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
619       emit_move_insn (to, temp);
620       return;
621     }
622 
623   /* Mode combination is not recognized.  */
624   gcc_unreachable ();
625 }
626 
627 /* Return an rtx for a value that would result
628    from converting X to mode MODE.
629    The mode of X and MODE may both be floating point, or both integer.
630    UNSIGNEDP is nonzero if X is an unsigned value.
631    This can be done by referring to a part of X in place
632    or by copying to a new temporary with conversion.  */
633 
634 rtx
635 convert_to_mode (machine_mode mode, rtx x, int unsignedp)
636 {
637   return convert_modes (mode, VOIDmode, x, unsignedp);
638 }
639 
640 /* Return an rtx for a value that would result
641    from converting X from mode OLDMODE to mode MODE.
642    Both modes may be floating, or both integer.
643    UNSIGNEDP is nonzero if X is an unsigned value.
644 
645    This can be done by referring to a part of X in place
646    or by copying to a new temporary with conversion.
647 
648    You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.  */
649 
650 rtx
651 convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
652 {
653   rtx temp;
654   scalar_int_mode int_mode;
655 
656   /* If FROM is a SUBREG that indicates that we have already done at least
657      the required extension, strip it.  */
658 
659   if (GET_CODE (x) == SUBREG
660       && SUBREG_PROMOTED_VAR_P (x)
661       && is_a <scalar_int_mode> (mode, &int_mode)
662       && (GET_MODE_PRECISION (subreg_promoted_mode (x))
663 	  >= GET_MODE_PRECISION (int_mode))
664       && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
665     x = gen_lowpart (int_mode, SUBREG_REG (x));
666 
667   if (GET_MODE (x) != VOIDmode)
668     oldmode = GET_MODE (x);
669 
670   if (mode == oldmode)
671     return x;
672 
673   if (CONST_SCALAR_INT_P (x)
674       && is_int_mode (mode, &int_mode))
675     {
676       /* If the caller did not tell us the old mode, then there is not
677 	 much to do with respect to canonicalization.  We have to
678 	 assume that all the bits are significant.  */
679       if (GET_MODE_CLASS (oldmode) != MODE_INT)
680 	oldmode = MAX_MODE_INT;
681       wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
682 				   GET_MODE_PRECISION (int_mode),
683 				   unsignedp ? UNSIGNED : SIGNED);
684       return immed_wide_int_const (w, int_mode);
685     }
686 
687   /* We can do this with a gen_lowpart if both desired and current modes
688      are integer, and this is either a constant integer, a register, or a
689      non-volatile MEM. */
690   scalar_int_mode int_oldmode;
691   if (is_int_mode (mode, &int_mode)
692       && is_int_mode (oldmode, &int_oldmode)
693       && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
694       && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
695 	  || CONST_POLY_INT_P (x)
696           || (REG_P (x)
697               && (!HARD_REGISTER_P (x)
698 		  || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
699               && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
700    return gen_lowpart (int_mode, x);
701 
702   /* Converting an integer constant into a vector mode is always
703      equivalent to a subreg operation.  */
704   if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
705     {
706       gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
707 			    GET_MODE_BITSIZE (oldmode)));
708       return simplify_gen_subreg (mode, x, oldmode, 0);
709     }
710 
711   temp = gen_reg_rtx (mode);
712   convert_move (temp, x, unsignedp);
713   return temp;
714 }
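/* Illustrative sketch (editorial, not part of the original source): when X
   is a CONST_INT, convert_modes folds the conversion instead of emitting
   insns.  The helper and the values below are only an example; guarded out.  */
#if 0
/* Truncate the SImode constant 300 to QImode.  Because the source is a
   CONST_INT, the result is the QImode constant 44 (300 truncated to 8 bits)
   and no insns are generated.  */
static rtx
example_truncate_constant (void)
{
  return convert_modes (QImode, SImode, GEN_INT (300), 1);
}
#endif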
715 
716 /* Return the largest alignment we can use for doing a move (or store)
717    of MAX_PIECES.  ALIGN is the largest alignment we could use.  */
718 
719 static unsigned int
720 alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
721 {
722   scalar_int_mode tmode
723     = int_mode_for_size (max_pieces * BITS_PER_UNIT, 1).require ();
724 
725   if (align >= GET_MODE_ALIGNMENT (tmode))
726     align = GET_MODE_ALIGNMENT (tmode);
727   else
728     {
729       scalar_int_mode xmode = NARROWEST_INT_MODE;
730       opt_scalar_int_mode mode_iter;
731       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
732 	{
733 	  tmode = mode_iter.require ();
734 	  if (GET_MODE_SIZE (tmode) > max_pieces
735 	      || targetm.slow_unaligned_access (tmode, align))
736 	    break;
737 	  xmode = tmode;
738 	}
739 
740       align = MAX (align, GET_MODE_ALIGNMENT (xmode));
741     }
742 
743   return align;
744 }
745 
746 /* Return the widest integer mode that is narrower than SIZE bytes.  */
747 
748 static scalar_int_mode
749 widest_int_mode_for_size (unsigned int size)
750 {
751   scalar_int_mode result = NARROWEST_INT_MODE;
752 
753   gcc_checking_assert (size > 1);
754 
755   opt_scalar_int_mode tmode;
756   FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
757     if (GET_MODE_SIZE (tmode.require ()) < size)
758       result = tmode.require ();
759 
760   return result;
761 }
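/* Editorial example (an assumption about a typical 64-bit target, not part
   of the original source): widest_int_mode_for_size (5) returns SImode,
   since DImode (8 bytes) is not narrower than 5 bytes, while
   widest_int_mode_for_size (9) returns DImode.  */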
762 
763 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
764    and should be performed piecewise.  */
765 
766 static bool
767 can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
768 		  enum by_pieces_operation op)
769 {
770   return targetm.use_by_pieces_infrastructure_p (len, align, op,
771 						 optimize_insn_for_speed_p ());
772 }
773 
774 /* Determine whether the LEN bytes can be moved by using several move
775    instructions.  Return nonzero if a call to move_by_pieces should
776    succeed.  */
777 
778 bool
779 can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
780 {
781   return can_do_by_pieces (len, align, MOVE_BY_PIECES);
782 }
783 
784 /* Return number of insns required to perform operation OP by pieces
785    for L bytes.  ALIGN (in bits) is maximum alignment we can assume.  */
786 
787 unsigned HOST_WIDE_INT
788 by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
789 		  unsigned int max_size, by_pieces_operation op)
790 {
791   unsigned HOST_WIDE_INT n_insns = 0;
792 
793   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
794 
795   while (max_size > 1 && l > 0)
796     {
797       scalar_int_mode mode = widest_int_mode_for_size (max_size);
798       enum insn_code icode;
799 
800       unsigned int modesize = GET_MODE_SIZE (mode);
801 
802       icode = optab_handler (mov_optab, mode);
803       if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
804 	{
805 	  unsigned HOST_WIDE_INT n_pieces = l / modesize;
806 	  l %= modesize;
807 	  switch (op)
808 	    {
809 	    default:
810 	      n_insns += n_pieces;
811 	      break;
812 
813 	    case COMPARE_BY_PIECES:
814 	      int batch = targetm.compare_by_pieces_branch_ratio (mode);
815 	      int batch_ops = 4 * batch - 1;
816 	      unsigned HOST_WIDE_INT full = n_pieces / batch;
817 	      n_insns += full * batch_ops;
818 	      if (n_pieces % batch != 0)
819 		n_insns++;
820 	      break;
821 
822 	    }
823 	}
824       max_size = modesize;
825     }
826 
827   gcc_assert (!l);
828   return n_insns;
829 }
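/* Illustrative sketch (editorial, not part of the original source): a worked
   count under stated assumptions, guarded out.  */
#if 0
/* Hypothetical: on a typical 64-bit target with word-aligned operands and
   all integer move patterns available, a 15-byte MOVE_BY_PIECES copy is
   counted as one DImode, one SImode, one HImode and one QImode move.  */
static unsigned HOST_WIDE_INT
example_count_15_byte_copy (void)
{
  return by_pieces_ninsns (15, BITS_PER_WORD, MOVE_MAX_PIECES + 1,
			   MOVE_BY_PIECES);	/* == 4 under the above.  */
}
#endif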
830 
831 /* Used when performing piecewise block operations, holds information
832    about one of the memory objects involved.  The member functions
833    can be used to generate code for loading from the object and
834    updating the address when iterating.  */
835 
836 class pieces_addr
837 {
838   /* The object being referenced, a MEM.  Can be NULL_RTX to indicate
839      stack pushes.  */
840   rtx m_obj;
841   /* The address of the object.  Can differ from that seen in the
842      MEM rtx if we copied the address to a register.  */
843   rtx m_addr;
844   /* Nonzero if the address of the object already has autoincrement form;
845      the sign says whether that is an increment or a decrement.  */
846   signed char m_addr_inc;
847   /* Nonzero if we intend to use autoinc without the address already
848      having autoinc form.  We will insert add insns around each memory
849      reference, expecting later passes to form autoinc addressing modes.
850      The only supported options are predecrement and postincrement.  */
851   signed char m_explicit_inc;
852   /* True if we have either of the two possible cases of using
853      autoincrement.  */
854   bool m_auto;
855   /* True if this is an address to be used for load operations rather
856      than stores.  */
857   bool m_is_load;
858 
859   /* Optionally, a function to obtain constants for any given offset into
860      the objects, and data associated with it.  */
861   by_pieces_constfn m_constfn;
862   void *m_cfndata;
863 public:
864   pieces_addr (rtx, bool, by_pieces_constfn, void *);
865   rtx adjust (scalar_int_mode, HOST_WIDE_INT);
866   void increment_address (HOST_WIDE_INT);
867   void maybe_predec (HOST_WIDE_INT);
868   void maybe_postinc (HOST_WIDE_INT);
869   void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
870   int get_addr_inc ()
871   {
872     return m_addr_inc;
873   }
874 };
875 
876 /* Initialize a pieces_addr structure from an object OBJ.  IS_LOAD is
877    true if the operation to be performed on this object is a load
878    rather than a store.  For stores, OBJ can be NULL, in which case we
879    assume the operation is a stack push.  For loads, the optional
880    CONSTFN and its associated CFNDATA can be used in place of the
881    memory load.  */
882 
883 pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
884 			  void *cfndata)
885   : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
886 {
887   m_addr_inc = 0;
888   m_auto = false;
889   if (obj)
890     {
891       rtx addr = XEXP (obj, 0);
892       rtx_code code = GET_CODE (addr);
893       m_addr = addr;
894       bool dec = code == PRE_DEC || code == POST_DEC;
895       bool inc = code == PRE_INC || code == POST_INC;
896       m_auto = inc || dec;
897       if (m_auto)
898 	m_addr_inc = dec ? -1 : 1;
899 
900       /* While we have always looked for these codes here, the code
901 	 implementing the memory operation has never handled them.
902 	 Support could be added later if necessary or beneficial.  */
903       gcc_assert (code != PRE_INC && code != POST_DEC);
904     }
905   else
906     {
907       m_addr = NULL_RTX;
908       if (!is_load)
909 	{
910 	  m_auto = true;
911 	  if (STACK_GROWS_DOWNWARD)
912 	    m_addr_inc = -1;
913 	  else
914 	    m_addr_inc = 1;
915 	}
916       else
917 	gcc_assert (constfn != NULL);
918     }
919   m_explicit_inc = 0;
920   if (constfn)
921     gcc_assert (is_load);
922 }
923 
924 /* Decide whether to use autoinc for an address involved in a memory op.
925    MODE is the mode of the accesses, REVERSE is true if we've decided to
926    perform the operation starting from the end, and LEN is the length of
927    the operation.  Don't override an earlier decision to set m_auto.  */
928 
929 void
930 pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
931 			     HOST_WIDE_INT len)
932 {
933   if (m_auto || m_obj == NULL_RTX)
934     return;
935 
936   bool use_predec = (m_is_load
937 		     ? USE_LOAD_PRE_DECREMENT (mode)
938 		     : USE_STORE_PRE_DECREMENT (mode));
939   bool use_postinc = (m_is_load
940 		      ? USE_LOAD_POST_INCREMENT (mode)
941 		      : USE_STORE_POST_INCREMENT (mode));
942   machine_mode addr_mode = get_address_mode (m_obj);
943 
944   if (use_predec && reverse)
945     {
946       m_addr = copy_to_mode_reg (addr_mode,
947 				 plus_constant (addr_mode,
948 						m_addr, len));
949       m_auto = true;
950       m_explicit_inc = -1;
951     }
952   else if (use_postinc && !reverse)
953     {
954       m_addr = copy_to_mode_reg (addr_mode, m_addr);
955       m_auto = true;
956       m_explicit_inc = 1;
957     }
958   else if (CONSTANT_P (m_addr))
959     m_addr = copy_to_mode_reg (addr_mode, m_addr);
960 }
961 
962 /* Adjust the address to refer to the data at OFFSET in MODE.  If we
963    are using autoincrement for this address, we don't add the offset,
964    but we still modify the MEM's properties.  */
965 
966 rtx
967 pieces_addr::adjust (scalar_int_mode mode, HOST_WIDE_INT offset)
968 {
969   if (m_constfn)
970     return m_constfn (m_cfndata, offset, mode);
971   if (m_obj == NULL_RTX)
972     return NULL_RTX;
973   if (m_auto)
974     return adjust_automodify_address (m_obj, mode, m_addr, offset);
975   else
976     return adjust_address (m_obj, mode, offset);
977 }
978 
979 /* Emit an add instruction to increment the address by SIZE.  */
980 
981 void
982 pieces_addr::increment_address (HOST_WIDE_INT size)
983 {
984   rtx amount = gen_int_mode (size, GET_MODE (m_addr));
985   emit_insn (gen_add2_insn (m_addr, amount));
986 }
987 
988 /* If we are supposed to decrement the address after each access, emit code
989    to do so now.  Increment by SIZE (which should already have the
990    correct sign).  */
991 
992 void
993 pieces_addr::maybe_predec (HOST_WIDE_INT size)
994 {
995   if (m_explicit_inc >= 0)
996     return;
997   gcc_assert (HAVE_PRE_DECREMENT);
998   increment_address (size);
999 }
1000 
1001 /* If we are supposed to increment the address after each access, emit code
1002    to do so now.  Increment by SIZE.  */
1003 
1004 void
1005 pieces_addr::maybe_postinc (HOST_WIDE_INT size)
1006 {
1007   if (m_explicit_inc <= 0)
1008     return;
1009   gcc_assert (HAVE_POST_INCREMENT);
1010   increment_address (size);
1011 }
1012 
1013 /* This structure is used by do_op_by_pieces to describe the operation
1014    to be performed.  */
1015 
1016 class op_by_pieces_d
1017 {
1018  protected:
1019   pieces_addr m_to, m_from;
1020   unsigned HOST_WIDE_INT m_len;
1021   HOST_WIDE_INT m_offset;
1022   unsigned int m_align;
1023   unsigned int m_max_size;
1024   bool m_reverse;
1025 
1026   /* Virtual functions, overriden by derived classes for the specific
1027      operation.  */
1028   virtual void generate (rtx, rtx, machine_mode) = 0;
1029   virtual bool prepare_mode (machine_mode, unsigned int) = 0;
1030   virtual void finish_mode (machine_mode)
1031   {
1032   }
1033 
1034  public:
1035   op_by_pieces_d (rtx, bool, rtx, bool, by_pieces_constfn, void *,
1036 		  unsigned HOST_WIDE_INT, unsigned int);
1037   void run ();
1038 };
1039 
1040 /* The constructor for an op_by_pieces_d structure.  We require two
1041    objects named TO and FROM, which are identified as loads or stores
1042    by TO_LOAD and FROM_LOAD.  If FROM is a load, the optional FROM_CFN
1043    and its associated FROM_CFN_DATA can be used to replace loads with
1044    constant values.  LEN describes the length of the operation.  */
1045 
1046 op_by_pieces_d::op_by_pieces_d (rtx to, bool to_load,
1047 				rtx from, bool from_load,
1048 				by_pieces_constfn from_cfn,
1049 				void *from_cfn_data,
1050 				unsigned HOST_WIDE_INT len,
1051 				unsigned int align)
1052   : m_to (to, to_load, NULL, NULL),
1053     m_from (from, from_load, from_cfn, from_cfn_data),
1054     m_len (len), m_max_size (MOVE_MAX_PIECES + 1)
1055 {
1056   int toi = m_to.get_addr_inc ();
1057   int fromi = m_from.get_addr_inc ();
1058   if (toi >= 0 && fromi >= 0)
1059     m_reverse = false;
1060   else if (toi <= 0 && fromi <= 0)
1061     m_reverse = true;
1062   else
1063     gcc_unreachable ();
1064 
1065   m_offset = m_reverse ? len : 0;
1066   align = MIN (to ? MEM_ALIGN (to) : align,
1067 	       from ? MEM_ALIGN (from) : align);
1068 
1069   /* If copying requires more than two move insns,
1070      copy addresses to registers (to make displacements shorter)
1071      and use post-increment if available.  */
1072   if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
1073     {
1074       /* Find the widest mode the pieces might use.  */
1075       scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1076 
1077       m_from.decide_autoinc (mode, m_reverse, len);
1078       m_to.decide_autoinc (mode, m_reverse, len);
1079     }
1080 
1081   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
1082   m_align = align;
1083 }
1084 
1085 /* This function contains the main loop used for expanding a block
1086    operation.  First move what we can in the largest integer mode,
1087    then go to successively smaller modes.  For every access, call
1088    the generate method with the two operands for that piece.  */
1089 
1090 void
1091 op_by_pieces_d::run ()
1092 {
1093   while (m_max_size > 1 && m_len > 0)
1094     {
1095       scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1096 
1097       if (prepare_mode (mode, m_align))
1098 	{
1099 	  unsigned int size = GET_MODE_SIZE (mode);
1100 	  rtx to1 = NULL_RTX, from1;
1101 
1102 	  while (m_len >= size)
1103 	    {
1104 	      if (m_reverse)
1105 		m_offset -= size;
1106 
1107 	      to1 = m_to.adjust (mode, m_offset);
1108 	      from1 = m_from.adjust (mode, m_offset);
1109 
1110 	      m_to.maybe_predec (-(HOST_WIDE_INT)size);
1111 	      m_from.maybe_predec (-(HOST_WIDE_INT)size);
1112 
1113 	      generate (to1, from1, mode);
1114 
1115 	      m_to.maybe_postinc (size);
1116 	      m_from.maybe_postinc (size);
1117 
1118 	      if (!m_reverse)
1119 		m_offset += size;
1120 
1121 	      m_len -= size;
1122 	    }
1123 
1124 	  finish_mode (mode);
1125 	}
1126 
1127       m_max_size = GET_MODE_SIZE (mode);
1128     }
1129 
1130   /* The code above should have handled everything.  */
1131   gcc_assert (!m_len);
1132 }
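/* Editorial example (not part of the original source): for a 13-byte
   operation with MOVE_MAX_PIECES == 8 and sufficient alignment, the loop
   above generates one piece in DImode at offset 0, one in SImode at
   offset 8 and one in QImode at offset 12; with m_reverse set, the same
   pieces are generated at the mirrored offsets, largest mode last-to-first.  */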
1133 
1134 /* Derived class from op_by_pieces_d, providing support for block move
1135    operations.  */
1136 
1137 class move_by_pieces_d : public op_by_pieces_d
1138 {
1139   insn_gen_fn m_gen_fun;
1140   void generate (rtx, rtx, machine_mode);
1141   bool prepare_mode (machine_mode, unsigned int);
1142 
1143  public:
1144   move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1145 		    unsigned int align)
1146     : op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
1147   {
1148   }
1149   rtx finish_retmode (memop_ret);
1150 };
1151 
1152 /* Return true if MODE can be used for a set of copies, given an
1153    alignment ALIGN.  Prepare whatever data is necessary for later
1154    calls to generate.  */
1155 
1156 bool
1157 move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1158 {
1159   insn_code icode = optab_handler (mov_optab, mode);
1160   m_gen_fun = GEN_FCN (icode);
1161   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1162 }
1163 
1164 /* A callback used when iterating for a move_by_pieces operation.
1165    OP0 and OP1 are the destination and source pieces to be copied in
1166    MODE.  If OP0 is NULL, generate a push of OP1 instead (only possible
1167    when PUSH_ROUNDING is defined); otherwise emit the move using the
1168    insn generator chosen by prepare_mode.  */
1169 
1170 void
1171 move_by_pieces_d::generate (rtx op0, rtx op1,
1172 			    machine_mode mode ATTRIBUTE_UNUSED)
1173 {
1174 #ifdef PUSH_ROUNDING
1175   if (op0 == NULL_RTX)
1176     {
1177       emit_single_push_insn (mode, op1, NULL);
1178       return;
1179     }
1180 #endif
1181   emit_insn (m_gen_fun (op0, op1));
1182 }
1183 
1184 /* Perform the final adjustment at the end of a string to obtain the
1185    correct return value for the block operation.
1186    Return value is based on RETMODE argument.  */
1187 
1188 rtx
1189 move_by_pieces_d::finish_retmode (memop_ret retmode)
1190 {
1191   gcc_assert (!m_reverse);
1192   if (retmode == RETURN_END_MINUS_ONE)
1193     {
1194       m_to.maybe_postinc (-1);
1195       --m_offset;
1196     }
1197   return m_to.adjust (QImode, m_offset);
1198 }
1199 
1200 /* Generate several move instructions to copy LEN bytes from block FROM to
1201    block TO.  (These are MEM rtx's with BLKmode).
1202 
1203    If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1204    used to push FROM to the stack.
1205 
1206    ALIGN is maximum stack alignment we can assume.
1207 
1208    Return value is based on RETMODE argument.  */
1209 
1210 rtx
1211 move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1212 		unsigned int align, memop_ret retmode)
1213 {
1214 #ifndef PUSH_ROUNDING
1215   if (to == NULL)
1216     gcc_unreachable ();
1217 #endif
1218 
1219   move_by_pieces_d data (to, from, len, align);
1220 
1221   data.run ();
1222 
1223   if (retmode != RETURN_BEGIN)
1224     return data.finish_retmode (retmode);
1225   else
1226     return to;
1227 }
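/* Illustrative sketch (editorial, not part of the original source): how a
   caller pairs can_move_by_pieces with move_by_pieces for a small
   constant-size copy, mirroring what emit_block_move_hints does below.
   The helper and its arguments are hypothetical; guarded out.  */
#if 0
/* Copy 16 bytes from SRC_ADDR to DST_ADDR, pseudo registers holding the
   addresses of two word-aligned buffers.  */
static void
example_copy_16_bytes (rtx dst_addr, rtx src_addr)
{
  rtx dst = gen_rtx_MEM (BLKmode, dst_addr);
  rtx src = gen_rtx_MEM (BLKmode, src_addr);
  if (can_move_by_pieces (16, BITS_PER_WORD))
    move_by_pieces (dst, src, 16, BITS_PER_WORD, RETURN_BEGIN);
}
#endif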
1228 
1229 /* Derived class from op_by_pieces_d, providing support for block store
1230    operations.  */
1231 
1232 class store_by_pieces_d : public op_by_pieces_d
1233 {
1234   insn_gen_fn m_gen_fun;
1235   void generate (rtx, rtx, machine_mode);
1236   bool prepare_mode (machine_mode, unsigned int);
1237 
1238  public:
1239   store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
1240 		     unsigned HOST_WIDE_INT len, unsigned int align)
1241     : op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
1242   {
1243   }
1244   rtx finish_retmode (memop_ret);
1245 };
1246 
1247 /* Return true if MODE can be used for a set of stores, given an
1248    alignment ALIGN.  Prepare whatever data is necessary for later
1249    calls to generate.  */
1250 
1251 bool
1252 store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1253 {
1254   insn_code icode = optab_handler (mov_optab, mode);
1255   m_gen_fun = GEN_FCN (icode);
1256   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1257 }
1258 
1259 /* A callback used when iterating for a store_by_pieces operation.
1260    OP0 is the destination piece and OP1 is the constant value to be
1261    stored in MODE, obtained from the constfn supplied to the
1262    constructor.  Emit the store using the insn generator chosen by
1263    prepare_mode.  */
1264 
1265 void
1266 store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
1267 {
1268   emit_insn (m_gen_fun (op0, op1));
1269 }
1270 
1271 /* Perform the final adjustment at the end of a string to obtain the
1272    correct return value for the block operation.
1273    Return value is based on RETMODE argument.  */
1274 
1275 rtx
1276 store_by_pieces_d::finish_retmode (memop_ret retmode)
1277 {
1278   gcc_assert (!m_reverse);
1279   if (retmode == RETURN_END_MINUS_ONE)
1280     {
1281       m_to.maybe_postinc (-1);
1282       --m_offset;
1283     }
1284   return m_to.adjust (QImode, m_offset);
1285 }
1286 
1287 /* Determine whether the LEN bytes generated by CONSTFUN can be
1288    stored to memory using several move instructions.  CONSTFUNDATA is
1289    a pointer which will be passed as argument in every CONSTFUN call.
1290    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1291    a memset operation and false if it's a copy of a constant string.
1292    Return nonzero if a call to store_by_pieces should succeed.  */
1293 
1294 int
1295 can_store_by_pieces (unsigned HOST_WIDE_INT len,
1296 		     rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1297 		     void *constfundata, unsigned int align, bool memsetp)
1298 {
1299   unsigned HOST_WIDE_INT l;
1300   unsigned int max_size;
1301   HOST_WIDE_INT offset = 0;
1302   enum insn_code icode;
1303   int reverse;
1304   /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it.  */
1305   rtx cst ATTRIBUTE_UNUSED;
1306 
1307   if (len == 0)
1308     return 1;
1309 
1310   if (!targetm.use_by_pieces_infrastructure_p (len, align,
1311 					       memsetp
1312 						 ? SET_BY_PIECES
1313 						 : STORE_BY_PIECES,
1314 					       optimize_insn_for_speed_p ()))
1315     return 0;
1316 
1317   align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
1318 
1319   /* We would first store what we can in the largest integer mode, then go to
1320      successively smaller modes.  */
1321 
1322   for (reverse = 0;
1323        reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
1324        reverse++)
1325     {
1326       l = len;
1327       max_size = STORE_MAX_PIECES + 1;
1328       while (max_size > 1 && l > 0)
1329 	{
1330 	  scalar_int_mode mode = widest_int_mode_for_size (max_size);
1331 
1332 	  icode = optab_handler (mov_optab, mode);
1333 	  if (icode != CODE_FOR_nothing
1334 	      && align >= GET_MODE_ALIGNMENT (mode))
1335 	    {
1336 	      unsigned int size = GET_MODE_SIZE (mode);
1337 
1338 	      while (l >= size)
1339 		{
1340 		  if (reverse)
1341 		    offset -= size;
1342 
1343 		  cst = (*constfun) (constfundata, offset, mode);
1344 		  if (!targetm.legitimate_constant_p (mode, cst))
1345 		    return 0;
1346 
1347 		  if (!reverse)
1348 		    offset += size;
1349 
1350 		  l -= size;
1351 		}
1352 	    }
1353 
1354 	  max_size = GET_MODE_SIZE (mode);
1355 	}
1356 
1357       /* The code above should have handled everything.  */
1358       gcc_assert (!l);
1359     }
1360 
1361   return 1;
1362 }
1363 
1364 /* Generate several move instructions to store LEN bytes generated by
1365    CONSTFUN to block TO.  (A MEM rtx with BLKmode).  CONSTFUNDATA is a
1366    pointer which will be passed as argument in every CONSTFUN call.
1367    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1368    a memset operation and false if it's a copy of a constant string.
1369    Return value is based on RETMODE argument.  */
1370 
1371 rtx
1372 store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
1373 		 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1374 		 void *constfundata, unsigned int align, bool memsetp,
1375 		 memop_ret retmode)
1376 {
1377   if (len == 0)
1378     {
1379       gcc_assert (retmode != RETURN_END_MINUS_ONE);
1380       return to;
1381     }
1382 
1383   gcc_assert (targetm.use_by_pieces_infrastructure_p
1384 		(len, align,
1385 		 memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
1386 		 optimize_insn_for_speed_p ()));
1387 
1388   store_by_pieces_d data (to, constfun, constfundata, len, align);
1389   data.run ();
1390 
1391   if (retmode != RETURN_BEGIN)
1392     return data.finish_retmode (retmode);
1393   else
1394     return to;
1395 }
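/* Illustrative sketch (editorial, not part of the original source): a
   minimal constfn and its use with the routines above, in the spirit of a
   memset expansion.  Both helpers are hypothetical; guarded out.  */
#if 0
/* Hypothetical constfn: every piece is an all-ones constant, i.e. every
   byte stored is 0xff.  */
static rtx
example_all_ones_cst (void *data ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT offset ATTRIBUTE_UNUSED,
		      scalar_int_mode mode)
{
  return gen_int_mode (-1, mode);
}

/* Hypothetical caller: fill LEN bytes of DEST (a BLKmode MEM with known
   alignment ALIGN, in bits) with 0xff bytes, if the target thinks a
   piecewise expansion is worthwhile.  */
static bool
example_fill_with_ones (rtx dest, unsigned HOST_WIDE_INT len,
			unsigned int align)
{
  if (!can_store_by_pieces (len, example_all_ones_cst, NULL, align, true))
    return false;
  store_by_pieces (dest, len, example_all_ones_cst, NULL, align, true,
		   RETURN_BEGIN);
  return true;
}
#endif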
1396 
1397 /* Callback routine for clear_by_pieces.
1398    Return const0_rtx unconditionally.  */
1399 
1400 static rtx
1401 clear_by_pieces_1 (void *, HOST_WIDE_INT, scalar_int_mode)
1402 {
1403   return const0_rtx;
1404 }
1405 
1406 /* Generate several move instructions to clear LEN bytes of block TO.  (A MEM
1407    rtx with BLKmode).  ALIGN is maximum alignment we can assume.  */
1408 
1409 static void
1410 clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
1411 {
1412   if (len == 0)
1413     return;
1414 
1415   store_by_pieces_d data (to, clear_by_pieces_1, NULL, len, align);
1416   data.run ();
1417 }
1418 
1419 /* Context used by compare_by_pieces_d.  It stores the fail label
1420    to jump to in case of miscomparison, and for branch ratios greater than 1,
1421    it stores an accumulator and the current and maximum counts before
1422    emitting another branch.  */
1423 
1424 class compare_by_pieces_d : public op_by_pieces_d
1425 {
1426   rtx_code_label *m_fail_label;
1427   rtx m_accumulator;
1428   int m_count, m_batch;
1429 
1430   void generate (rtx, rtx, machine_mode);
1431   bool prepare_mode (machine_mode, unsigned int);
1432   void finish_mode (machine_mode);
1433  public:
1434   compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
1435 		       void *op1_cfn_data, HOST_WIDE_INT len, int align,
1436 		       rtx_code_label *fail_label)
1437     : op_by_pieces_d (op0, true, op1, true, op1_cfn, op1_cfn_data, len, align)
1438   {
1439     m_fail_label = fail_label;
1440   }
1441 };
1442 
1443 /* A callback used when iterating for a compare_by_pieces_operation.
1444    OP0 and OP1 are the values that have been loaded and should be
1445    compared in MODE.  DATA holds a pointer to the compare_by_pieces_data
1446    context structure.  */
1447 
1448 void
1449 compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
1450 {
1451   if (m_batch > 1)
1452     {
1453       rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
1454 			       true, OPTAB_LIB_WIDEN);
1455       if (m_count != 0)
1456 	temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
1457 			     true, OPTAB_LIB_WIDEN);
1458       m_accumulator = temp;
1459 
1460       if (++m_count < m_batch)
1461 	return;
1462 
1463       m_count = 0;
1464       op0 = m_accumulator;
1465       op1 = const0_rtx;
1466       m_accumulator = NULL_RTX;
1467     }
1468   do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
1469 			   m_fail_label, profile_probability::uninitialized ());
1470 }
1471 
1472 /* Return true if MODE can be used for a set of moves and comparisons,
1473    given an alignment ALIGN.  Prepare whatever data is necessary for
1474    later calls to generate.  */
1475 
1476 bool
1477 compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1478 {
1479   insn_code icode = optab_handler (mov_optab, mode);
1480   if (icode == CODE_FOR_nothing
1481       || align < GET_MODE_ALIGNMENT (mode)
1482       || !can_compare_p (EQ, mode, ccp_jump))
1483     return false;
1484   m_batch = targetm.compare_by_pieces_branch_ratio (mode);
1485   if (m_batch < 0)
1486     return false;
1487   m_accumulator = NULL_RTX;
1488   m_count = 0;
1489   return true;
1490 }
1491 
1492 /* Called after expanding a series of comparisons in MODE.  If we have
1493    accumulated results for which we haven't emitted a branch yet, do
1494    so now.  */
1495 
1496 void
1497 compare_by_pieces_d::finish_mode (machine_mode mode)
1498 {
1499   if (m_accumulator != NULL_RTX)
1500     do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
1501 			     NULL_RTX, NULL, m_fail_label,
1502 			     profile_probability::uninitialized ());
1503 }
1504 
1505 /* Generate several move instructions to compare LEN bytes from blocks
1506    ARG0 and ARG1.  (These are MEM rtx's with BLKmode).
1507 
1508    TARGET, if non-null, suggests a register for the result, which is 0
1509    if the blocks compare equal and 1 if they differ.
1510 
1511    ALIGN is the maximum alignment we can assume the blocks have.
1512 
1513    Optionally, the caller can pass a constfn and associated data in A1_CFN
1514    and A1_CFN_DATA, describing that the second operand being compared is a
1515    known constant and how to obtain its data.  */
1516 
1517 static rtx
1518 compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
1519 		   rtx target, unsigned int align,
1520 		   by_pieces_constfn a1_cfn, void *a1_cfn_data)
1521 {
1522   rtx_code_label *fail_label = gen_label_rtx ();
1523   rtx_code_label *end_label = gen_label_rtx ();
1524 
1525   if (target == NULL_RTX
1526       || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
1527     target = gen_reg_rtx (TYPE_MODE (integer_type_node));
1528 
1529   compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
1530 			    fail_label);
1531 
1532   data.run ();
1533 
1534   emit_move_insn (target, const0_rtx);
1535   emit_jump (end_label);
1536   emit_barrier ();
1537   emit_label (fail_label);
1538   emit_move_insn (target, const1_rtx);
1539   emit_label (end_label);
1540 
1541   return target;
1542 }
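/* Editorial sketch (not part of the original source) of the code emitted by
   compare_by_pieces above:

       <piecewise loads and compares, branching to FAIL_LABEL on inequality>
       TARGET = 0
       goto END_LABEL
     FAIL_LABEL:
       TARGET = 1
     END_LABEL:
 */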
1543 
1544 /* Emit code to move a block Y to a block X.  This may be done with
1545    string-move instructions, with multiple scalar move instructions,
1546    or with a library call.
1547 
1548    Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1549    SIZE is an rtx that says how long they are.
1550    ALIGN is the maximum alignment we can assume they have.
1551    METHOD describes what kind of copy this is, and what mechanisms may be used.
1552    MIN_SIZE is the minimal size of the block to move.
1553    MAX_SIZE is the maximal size of the block to move; if it cannot be
1554    represented in unsigned HOST_WIDE_INT, it is a mask of all ones.
1555 
1556    Return the address of the new block, if memcpy is called and returns it,
1557    0 otherwise.  */
1558 
1559 rtx
1560 emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
1561 		       unsigned int expected_align, HOST_WIDE_INT expected_size,
1562 		       unsigned HOST_WIDE_INT min_size,
1563 		       unsigned HOST_WIDE_INT max_size,
1564 		       unsigned HOST_WIDE_INT probable_max_size)
1565 {
1566   int may_use_call;
1567   rtx retval = 0;
1568   unsigned int align;
1569 
1570   gcc_assert (size);
1571   if (CONST_INT_P (size) && INTVAL (size) == 0)
1572     return 0;
1573 
1574   switch (method)
1575     {
1576     case BLOCK_OP_NORMAL:
1577     case BLOCK_OP_TAILCALL:
1578       may_use_call = 1;
1579       break;
1580 
1581     case BLOCK_OP_CALL_PARM:
1582       may_use_call = block_move_libcall_safe_for_call_parm ();
1583 
1584       /* Make inhibit_defer_pop nonzero around the library call
1585 	 to force it to pop the arguments right away.  */
1586       NO_DEFER_POP;
1587       break;
1588 
1589     case BLOCK_OP_NO_LIBCALL:
1590       may_use_call = 0;
1591       break;
1592 
1593     case BLOCK_OP_NO_LIBCALL_RET:
1594       may_use_call = -1;
1595       break;
1596 
1597     default:
1598       gcc_unreachable ();
1599     }
1600 
1601   gcc_assert (MEM_P (x) && MEM_P (y));
1602   align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1603   gcc_assert (align >= BITS_PER_UNIT);
1604 
1605   /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1606      block copy is more efficient for other large modes, e.g. DCmode.  */
1607   x = adjust_address (x, BLKmode, 0);
1608   y = adjust_address (y, BLKmode, 0);
1609 
1610   /* Set MEM_SIZE as appropriate for this block copy.  The main place this
1611      can be incorrect is coming from __builtin_memcpy.  */
1612   poly_int64 const_size;
1613   if (poly_int_rtx_p (size, &const_size))
1614     {
1615       x = shallow_copy_rtx (x);
1616       y = shallow_copy_rtx (y);
1617       set_mem_size (x, const_size);
1618       set_mem_size (y, const_size);
1619     }
1620 
1621   if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
1622     move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
1623   else if (emit_block_move_via_movmem (x, y, size, align,
1624 				       expected_align, expected_size,
1625 				       min_size, max_size, probable_max_size))
1626     ;
1627   else if (may_use_call
1628 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
1629 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
1630     {
1631       if (may_use_call < 0)
1632 	return pc_rtx;
1633 
1634       retval = emit_block_copy_via_libcall (x, y, size,
1635 					    method == BLOCK_OP_TAILCALL);
1636     }
1637 
1638   else
1639     emit_block_move_via_loop (x, y, size, align);
1640 
1641   if (method == BLOCK_OP_CALL_PARM)
1642     OK_DEFER_POP;
1643 
1644   return retval;
1645 }
1646 
1647 rtx
1648 emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
1649 {
1650   unsigned HOST_WIDE_INT max, min = 0;
1651   if (GET_CODE (size) == CONST_INT)
1652     min = max = UINTVAL (size);
1653   else
1654     max = GET_MODE_MASK (GET_MODE (size));
1655   return emit_block_move_hints (x, y, size, method, 0, -1,
1656 				min, max, max);
1657 }
1658 
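/* Example (an illustrative sketch, not code used elsewhere in this file):
   a caller expanding an aggregate copy of a tree type might do roughly

     rtx size = GEN_INT (int_size_in_bytes (type));
     emit_block_move (to_rtx, from_rtx, size, BLOCK_OP_NORMAL);

   where TO_RTX and FROM_RTX are BLKmode MEMs whose MEM_ALIGN has been
   set up beforehand (e.g. via set_mem_attributes).  */
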
1659 /* A subroutine of emit_block_move.  Returns true if calling the
1660    block move libcall will not clobber any parameters which may have
1661    already been placed on the stack.  */
1662 
1663 static bool
1664 block_move_libcall_safe_for_call_parm (void)
1665 {
1666 #if defined (REG_PARM_STACK_SPACE)
1667   tree fn;
1668 #endif
1669 
1670   /* If arguments are pushed on the stack, then they're safe.  */
1671   if (PUSH_ARGS)
1672     return true;
1673 
1674   /* If registers go on the stack anyway, any argument is sure to clobber
1675      an outgoing argument.  */
1676 #if defined (REG_PARM_STACK_SPACE)
1677   fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1678   /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1679      depend on its argument.  */
1680   (void) fn;
1681   if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
1682       && REG_PARM_STACK_SPACE (fn) != 0)
1683     return false;
1684 #endif
1685 
1686   /* If any argument goes in memory, then it might clobber an outgoing
1687      argument.  */
1688   {
1689     CUMULATIVE_ARGS args_so_far_v;
1690     cumulative_args_t args_so_far;
1691     tree fn, arg;
1692 
1693     fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1694     INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
1695     args_so_far = pack_cumulative_args (&args_so_far_v);
1696 
1697     arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
1698     for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
1699       {
1700 	machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
1701 	rtx tmp = targetm.calls.function_arg (args_so_far, mode,
1702 					      NULL_TREE, true);
1703 	if (!tmp || !REG_P (tmp))
1704 	  return false;
1705 	if (targetm.calls.arg_partial_bytes (args_so_far, mode, NULL, 1))
1706 	  return false;
1707 	targetm.calls.function_arg_advance (args_so_far, mode,
1708 					    NULL_TREE, true);
1709       }
1710   }
1711   return true;
1712 }
1713 
1714 /* A subroutine of emit_block_move.  Expand a movmem pattern;
1715    return true if successful.  */
1716 
1717 static bool
1718 emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
1719 			    unsigned int expected_align, HOST_WIDE_INT expected_size,
1720 			    unsigned HOST_WIDE_INT min_size,
1721 			    unsigned HOST_WIDE_INT max_size,
1722 			    unsigned HOST_WIDE_INT probable_max_size)
1723 {
1724   int save_volatile_ok = volatile_ok;
1725 
1726   if (expected_align < align)
1727     expected_align = align;
1728   if (expected_size != -1)
1729     {
1730       if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
1731 	expected_size = probable_max_size;
1732       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
1733 	expected_size = min_size;
1734     }
1735 
1736   /* Since this is a move insn, we don't care about volatility.  */
1737   volatile_ok = 1;
1738 
1739   /* Try the most limited insn first, because there's no point
1740      including more than one in the machine description unless
1741      the more limited one has some advantage.  */
1742 
1743   opt_scalar_int_mode mode_iter;
1744   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
1745     {
1746       scalar_int_mode mode = mode_iter.require ();
1747       enum insn_code code = direct_optab_handler (movmem_optab, mode);
1748 
1749       if (code != CODE_FOR_nothing
1750 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1751 	     here because if SIZE is less than the mode mask, as it is
1752 	     returned by the macro, it will definitely be less than the
1753 	     actual mode mask.  Since SIZE is within the Pmode address
1754 	     space, we limit MODE to Pmode.  */
1755 	  && ((CONST_INT_P (size)
1756 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
1757 		   <= (GET_MODE_MASK (mode) >> 1)))
1758 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
1759 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
1760 	{
1761 	  struct expand_operand ops[9];
1762 	  unsigned int nops;
1763 
1764 	  /* ??? When called via emit_block_move_for_call, it'd be
1765 	     nice if there were some way to inform the backend, so
1766 	     that it doesn't fail the expansion because it thinks
1767 	     emitting the libcall would be more efficient.  */
1768 	  nops = insn_data[(int) code].n_generator_args;
1769 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
1770 
1771 	  create_fixed_operand (&ops[0], x);
1772 	  create_fixed_operand (&ops[1], y);
1773 	  /* The check above guarantees that this size conversion is valid.  */
1774 	  create_convert_operand_to (&ops[2], size, mode, true);
1775 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
1776 	  if (nops >= 6)
1777 	    {
1778 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
1779 	      create_integer_operand (&ops[5], expected_size);
1780 	    }
1781 	  if (nops >= 8)
1782 	    {
1783 	      create_integer_operand (&ops[6], min_size);
1784 	      /* If we cannot represent the maximal size,
1785 		 make parameter NULL.  */
1786 	      if ((HOST_WIDE_INT) max_size != -1)
1787 	        create_integer_operand (&ops[7], max_size);
1788 	      else
1789 		create_fixed_operand (&ops[7], NULL);
1790 	    }
1791 	  if (nops == 9)
1792 	    {
1793 	      /* If we cannot represent the maximal size,
1794 		 make parameter NULL.  */
1795 	      if ((HOST_WIDE_INT) probable_max_size != -1)
1796 	        create_integer_operand (&ops[8], probable_max_size);
1797 	      else
1798 		create_fixed_operand (&ops[8], NULL);
1799 	    }
1800 	  if (maybe_expand_insn (code, nops, ops))
1801 	    {
1802 	      volatile_ok = save_volatile_ok;
1803 	      return true;
1804 	    }
1805 	}
1806     }
1807 
1808   volatile_ok = save_volatile_ok;
1809   return false;
1810 }
1811 
1812 /* A subroutine of emit_block_move.  Copy the data via an explicit
1813    loop.  This is used only when libcalls are forbidden.  */
1814 /* ??? It'd be nice to copy in hunks larger than QImode.  */
1815 
1816 static void
1817 emit_block_move_via_loop (rtx x, rtx y, rtx size,
1818 			  unsigned int align ATTRIBUTE_UNUSED)
1819 {
1820   rtx_code_label *cmp_label, *top_label;
1821   rtx iter, x_addr, y_addr, tmp;
1822   machine_mode x_addr_mode = get_address_mode (x);
1823   machine_mode y_addr_mode = get_address_mode (y);
1824   machine_mode iter_mode;
1825 
1826   iter_mode = GET_MODE (size);
1827   if (iter_mode == VOIDmode)
1828     iter_mode = word_mode;
1829 
1830   top_label = gen_label_rtx ();
1831   cmp_label = gen_label_rtx ();
1832   iter = gen_reg_rtx (iter_mode);
1833 
1834   emit_move_insn (iter, const0_rtx);
1835 
1836   x_addr = force_operand (XEXP (x, 0), NULL_RTX);
1837   y_addr = force_operand (XEXP (y, 0), NULL_RTX);
1838   do_pending_stack_adjust ();
1839 
1840   emit_jump (cmp_label);
1841   emit_label (top_label);
1842 
1843   tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
1844   x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
1845 
1846   if (x_addr_mode != y_addr_mode)
1847     tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
1848   y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
1849 
1850   x = change_address (x, QImode, x_addr);
1851   y = change_address (y, QImode, y_addr);
1852 
1853   emit_move_insn (x, y);
1854 
1855   tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
1856 			     true, OPTAB_LIB_WIDEN);
1857   if (tmp != iter)
1858     emit_move_insn (iter, tmp);
1859 
1860   emit_label (cmp_label);
1861 
1862   emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
1863 			   true, top_label,
1864 			   profile_probability::guessed_always ()
1865 				.apply_scale (9, 10));
1866 }
1867 
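/* The loop emitted above is the moral equivalent of this byte copy
   (illustrative sketch only; ITER, X_ADDR and Y_ADDR are pseudo
   registers in the actual RTL, and the loop is entered through the
   comparison label):

     static void
     copy_bytes (unsigned char *x, const unsigned char *y, size_t size)
     {
       for (size_t i = 0; i < size; i++)
         x[i] = y[i];
     }
 */
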
1868 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1869    TAILCALL is true if this is a tail call.  */
1870 
1871 rtx
1872 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
1873 			   rtx size, bool tailcall)
1874 {
1875   rtx dst_addr, src_addr;
1876   tree call_expr, dst_tree, src_tree, size_tree;
1877   machine_mode size_mode;
1878 
1879   /* Since dst and src are passed to a libcall, mark the corresponding
1880      tree EXPR as addressable.  */
1881   tree dst_expr = MEM_EXPR (dst);
1882   tree src_expr = MEM_EXPR (src);
1883   if (dst_expr)
1884     mark_addressable (dst_expr);
1885   if (src_expr)
1886     mark_addressable (src_expr);
1887 
1888   dst_addr = copy_addr_to_reg (XEXP (dst, 0));
1889   dst_addr = convert_memory_address (ptr_mode, dst_addr);
1890   dst_tree = make_tree (ptr_type_node, dst_addr);
1891 
1892   src_addr = copy_addr_to_reg (XEXP (src, 0));
1893   src_addr = convert_memory_address (ptr_mode, src_addr);
1894   src_tree = make_tree (ptr_type_node, src_addr);
1895 
1896   size_mode = TYPE_MODE (sizetype);
1897   size = convert_to_mode (size_mode, size, 1);
1898   size = copy_to_mode_reg (size_mode, size);
1899   size_tree = make_tree (sizetype, size);
1900 
1901   /* It is incorrect to use the libcall calling conventions for calls to
1902      memcpy/memmove/memcmp because they can be provided by the user.  */
1903   tree fn = builtin_decl_implicit (fncode);
1904   call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
1905   CALL_EXPR_TAILCALL (call_expr) = tailcall;
1906 
1907   return expand_call (call_expr, NULL_RTX, false);
1908 }
1909 
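/* Example (illustrative sketch): forcing a copy through the memmove
   library routine could look like

     rtx ret = emit_block_op_via_libcall (BUILT_IN_MEMMOVE, dst, src,
                                          size, false);

   with DST and SRC being BLKmode MEMs, SIZE an rtx holding the byte
   count, and the final FALSE requesting a normal (non-tail) call.  */
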
1910 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1911    ARG3_TYPE is the type of ARG3_RTX.  Return the result rtx on success,
1912    otherwise return null.  */
1913 
1914 rtx
1915 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
1916 			  rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
1917 			  HOST_WIDE_INT align)
1918 {
1919   machine_mode insn_mode = insn_data[icode].operand[0].mode;
1920 
1921   if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
1922     target = NULL_RTX;
1923 
1924   struct expand_operand ops[5];
1925   create_output_operand (&ops[0], target, insn_mode);
1926   create_fixed_operand (&ops[1], arg1_rtx);
1927   create_fixed_operand (&ops[2], arg2_rtx);
1928   create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
1929 			       TYPE_UNSIGNED (arg3_type));
1930   create_integer_operand (&ops[4], align);
1931   if (maybe_expand_insn (icode, 5, ops))
1932     return ops[0].value;
1933   return NULL_RTX;
1934 }
1935 
1936 /* Expand a block compare between X and Y with length LEN using the
1937    cmpmem optab, placing the result in TARGET.  LEN_TYPE is the type
1938    of the expression that was used to calculate the length.  ALIGN
1939    gives the known minimum common alignment.  */
1940 
1941 static rtx
1942 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
1943 			   unsigned align)
1944 {
1945   /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1946      implementing memcmp because it will stop if it encounters two
1947      zero bytes.  */
1948   insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
1949 
1950   if (icode == CODE_FOR_nothing)
1951     return NULL_RTX;
1952 
1953   return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
1954 }
1955 
1956 /* Emit code to compare a block Y to a block X.  This may be done with
1957    string-compare instructions, with multiple scalar instructions,
1958    or with a library call.
1959 
1960    Both X and Y must be MEM rtx's.  LEN is an rtx that says how long
1961    they are.  LEN_TYPE is the type of the expression that was used to
1962    calculate it.
1963 
1964    If EQUALITY_ONLY is true, it means we don't have to return the tri-state
1965    value of a normal memcmp call; instead we can just compare for equality.
1966 
1967    TARGET, if nonnull, is a suggestion for where to place the result.
1968 
1969    Optionally, the caller can pass a constfn and associated data in Y_CFN
1970    and Y_CFN_DATA, describing that the second operand being compared is a
1971    known constant and how to obtain its data.
1972    Return the result of the comparison, or NULL_RTX if we failed to
1973    perform the operation.  */
1974 
1975 rtx
1976 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
1977 		      bool equality_only, by_pieces_constfn y_cfn,
1978 		      void *y_cfndata)
1979 {
1980   rtx result = 0;
1981 
1982   if (CONST_INT_P (len) && INTVAL (len) == 0)
1983     return const0_rtx;
1984 
1985   gcc_assert (MEM_P (x) && MEM_P (y));
1986   unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1987   gcc_assert (align >= BITS_PER_UNIT);
1988 
1989   x = adjust_address (x, BLKmode, 0);
1990   y = adjust_address (y, BLKmode, 0);
1991 
1992   if (equality_only
1993       && CONST_INT_P (len)
1994       && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
1995     result = compare_by_pieces (x, y, INTVAL (len), target, align,
1996 				y_cfn, y_cfndata);
1997   else
1998     result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
1999 
2000   return result;
2001 }
2002 
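/* Example (illustrative sketch; the constfn pair is a hypothetical
   placeholder): an expander for an equality-only memcmp might call

     rtx res = emit_block_cmp_hints (x, y, len_rtx, size_type_node,
                                     NULL_RTX, true,
                                     my_read_str_fn, my_read_str_data);

   where TRUE selects the equality-only form.  Passing a null constfn
   simply means the second block is not known to be constant.  */
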
2003 /* Copy all or part of a value X into registers starting at REGNO.
2004    The number of registers to be filled is NREGS.  */
2005 
2006 void
2007 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2008 {
2009   if (nregs == 0)
2010     return;
2011 
2012   if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2013     x = validize_mem (force_const_mem (mode, x));
2014 
2015   /* See if the machine can do this with a load multiple insn.  */
2016   if (targetm.have_load_multiple ())
2017     {
2018       rtx_insn *last = get_last_insn ();
2019       rtx first = gen_rtx_REG (word_mode, regno);
2020       if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2021 						     GEN_INT (nregs)))
2022 	{
2023 	  emit_insn (pat);
2024 	  return;
2025 	}
2026       else
2027 	delete_insns_since (last);
2028     }
2029 
2030   for (int i = 0; i < nregs; i++)
2031     emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2032 		    operand_subword_force (x, i, mode));
2033 }
2034 
2035 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2036    The number of registers to be filled is NREGS.  */
2037 
2038 void
2039 move_block_from_reg (int regno, rtx x, int nregs)
2040 {
2041   if (nregs == 0)
2042     return;
2043 
2044   /* See if the machine can do this with a store multiple insn.  */
2045   if (targetm.have_store_multiple ())
2046     {
2047       rtx_insn *last = get_last_insn ();
2048       rtx first = gen_rtx_REG (word_mode, regno);
2049       if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2050 						      GEN_INT (nregs)))
2051 	{
2052 	  emit_insn (pat);
2053 	  return;
2054 	}
2055       else
2056 	delete_insns_since (last);
2057     }
2058 
2059   for (int i = 0; i < nregs; i++)
2060     {
2061       rtx tem = operand_subword (x, i, 1, BLKmode);
2062 
2063       gcc_assert (tem);
2064 
2065       emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2066     }
2067 }
2068 
2069 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2070    ORIG, where ORIG is a non-consecutive group of registers represented by
2071    a PARALLEL.  The clone is identical to the original except in that the
2072    original set of registers is replaced by a new set of pseudo registers.
2073    The new set has the same modes as the original set.  */
2074 
2075 rtx
2076 gen_group_rtx (rtx orig)
2077 {
2078   int i, length;
2079   rtx *tmps;
2080 
2081   gcc_assert (GET_CODE (orig) == PARALLEL);
2082 
2083   length = XVECLEN (orig, 0);
2084   tmps = XALLOCAVEC (rtx, length);
2085 
2086   /* Skip a NULL entry in first slot.  */
2087   i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2088 
2089   if (i)
2090     tmps[0] = 0;
2091 
2092   for (; i < length; i++)
2093     {
2094       machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2095       rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2096 
2097       tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2098     }
2099 
2100   return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2101 }
2102 
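/* For reference, a register group of the kind handled here can be built
   by hand roughly as follows (illustrative sketch; register numbers,
   modes and offsets are arbitrary examples):

     rtx elt0 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 3),
                                   GEN_INT (0));
     rtx elt1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, 4),
                                   GEN_INT (8));
     rtx group = gen_rtx_PARALLEL (BLKmode, gen_rtvec (2, elt0, elt1));

   Each element pairs a register with its byte offset into the value.  */
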
2103 /* A subroutine of emit_group_load.  Arguments as for emit_group_load,
2104    except that values are placed in TMPS[i], and must later be moved
2105    into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
2106 
2107 static void
2108 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2109 		   poly_int64 ssize)
2110 {
2111   rtx src;
2112   int start, i;
2113   machine_mode m = GET_MODE (orig_src);
2114 
2115   gcc_assert (GET_CODE (dst) == PARALLEL);
2116 
2117   if (m != VOIDmode
2118       && !SCALAR_INT_MODE_P (m)
2119       && !MEM_P (orig_src)
2120       && GET_CODE (orig_src) != CONCAT)
2121     {
2122       scalar_int_mode imode;
2123       if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2124 	{
2125 	  src = gen_reg_rtx (imode);
2126 	  emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2127 	}
2128       else
2129 	{
2130 	  src = assign_stack_temp (GET_MODE (orig_src), ssize);
2131 	  emit_move_insn (src, orig_src);
2132 	}
2133       emit_group_load_1 (tmps, dst, src, type, ssize);
2134       return;
2135     }
2136 
2137   /* Check for a NULL entry, used to indicate that the parameter goes
2138      both on the stack and in registers.  */
2139   if (XEXP (XVECEXP (dst, 0, 0), 0))
2140     start = 0;
2141   else
2142     start = 1;
2143 
2144   /* Process the pieces.  */
2145   for (i = start; i < XVECLEN (dst, 0); i++)
2146     {
2147       machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2148       poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
2149       poly_int64 bytelen = GET_MODE_SIZE (mode);
2150       poly_int64 shift = 0;
2151 
2152       /* Handle trailing fragments that run over the size of the struct.
2153 	 It's the target's responsibility to make sure that the fragment
2154 	 cannot be strictly smaller in some cases and strictly larger
2155 	 in others.  */
2156       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2157       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2158 	{
2159 	  /* Arrange to shift the fragment to where it belongs.
2160 	     extract_bit_field loads to the lsb of the reg.  */
2161 	  if (
2162 #ifdef BLOCK_REG_PADDING
2163 	      BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2164 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2165 #else
2166 	      BYTES_BIG_ENDIAN
2167 #endif
2168 	      )
2169 	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2170 	  bytelen = ssize - bytepos;
2171 	  gcc_assert (maybe_gt (bytelen, 0));
2172 	}
2173 
2174       /* If we won't be loading directly from memory, protect the real source
2175 	 from strange tricks we might play; but make sure that the source can
2176 	 be loaded directly into the destination.  */
2177       src = orig_src;
2178       if (!MEM_P (orig_src)
2179 	  && (!CONSTANT_P (orig_src)
2180 	      || (GET_MODE (orig_src) != mode
2181 		  && GET_MODE (orig_src) != VOIDmode)))
2182 	{
2183 	  if (GET_MODE (orig_src) == VOIDmode)
2184 	    src = gen_reg_rtx (mode);
2185 	  else
2186 	    src = gen_reg_rtx (GET_MODE (orig_src));
2187 
2188 	  emit_move_insn (src, orig_src);
2189 	}
2190 
2191       /* Optimize the access just a bit.  */
2192       if (MEM_P (src)
2193 	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2194 	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2195 	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2196 	  && known_eq (bytelen, GET_MODE_SIZE (mode)))
2197 	{
2198 	  tmps[i] = gen_reg_rtx (mode);
2199 	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2200 	}
2201       else if (COMPLEX_MODE_P (mode)
2202 	       && GET_MODE (src) == mode
2203 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2204 	/* Let emit_move_complex do the bulk of the work.  */
2205 	tmps[i] = src;
2206       else if (GET_CODE (src) == CONCAT)
2207 	{
2208 	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2209 	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2210 	  unsigned int elt;
2211 	  poly_int64 subpos;
2212 
2213 	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2214 	      && known_le (subpos + bytelen, slen0))
2215 	    {
2216 	      /* The following assumes that the concatenated objects all
2217 		 have the same size.  In this case, a simple calculation
2218 		 can be used to determine the object and the bit field
2219 		 to be extracted.  */
2220 	      tmps[i] = XEXP (src, elt);
2221 	      if (maybe_ne (subpos, 0)
2222 		  || maybe_ne (subpos + bytelen, slen0)
2223 		  || (!CONSTANT_P (tmps[i])
2224 		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2225 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2226 					     subpos * BITS_PER_UNIT,
2227 					     1, NULL_RTX, mode, mode, false,
2228 					     NULL);
2229 	    }
2230 	  else
2231 	    {
2232 	      rtx mem;
2233 
2234 	      gcc_assert (known_eq (bytepos, 0));
2235 	      mem = assign_stack_temp (GET_MODE (src), slen);
2236 	      emit_move_insn (mem, src);
2237 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2238 					   0, 1, NULL_RTX, mode, mode, false,
2239 					   NULL);
2240 	    }
2241 	}
2242       /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2243 	 SIMD register, which is currently broken.  While we get GCC
2244 	 to emit proper RTL for these cases, let's dump to memory.  */
2245       else if (VECTOR_MODE_P (GET_MODE (dst))
2246 	       && REG_P (src))
2247 	{
2248 	  poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
2249 	  rtx mem;
2250 
2251 	  mem = assign_stack_temp (GET_MODE (src), slen);
2252 	  emit_move_insn (mem, src);
2253 	  tmps[i] = adjust_address (mem, mode, bytepos);
2254 	}
2255       else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2256                && XVECLEN (dst, 0) > 1)
2257         tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2258       else if (CONSTANT_P (src))
2259 	{
2260 	  if (known_eq (bytelen, ssize))
2261 	    tmps[i] = src;
2262 	  else
2263 	    {
2264 	      rtx first, second;
2265 
2266 	      /* TODO: const_wide_int can have sizes other than this...  */
2267 	      gcc_assert (known_eq (2 * bytelen, ssize));
2268 	      split_double (src, &first, &second);
2269 	      if (i)
2270 		tmps[i] = second;
2271 	      else
2272 		tmps[i] = first;
2273 	    }
2274 	}
2275       else if (REG_P (src) && GET_MODE (src) == mode)
2276 	tmps[i] = src;
2277       else
2278 	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2279 				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2280 				     mode, mode, false, NULL);
2281 
2282       if (maybe_ne (shift, 0))
2283 	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2284 				shift, tmps[i], 0);
2285     }
2286 }
2287 
2288 /* Emit code to move a block SRC of type TYPE to a block DST,
2289    where DST is non-consecutive registers represented by a PARALLEL.
2290    SSIZE represents the total size of block SRC in bytes, or -1
2291    if not known.  */
2292 
2293 void
2294 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2295 {
2296   rtx *tmps;
2297   int i;
2298 
2299   tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2300   emit_group_load_1 (tmps, dst, src, type, ssize);
2301 
2302   /* Copy the extracted pieces into the proper (probable) hard regs.  */
2303   for (i = 0; i < XVECLEN (dst, 0); i++)
2304     {
2305       rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2306       if (d == NULL)
2307 	continue;
2308       emit_move_insn (d, tmps[i]);
2309     }
2310 }
2311 
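/* Example (illustrative sketch): loading a BLKmode value into the group
   of registers an ABI assigns to it might look like

     emit_group_load (arg_parallel, arg_mem, TREE_TYPE (exp),
                      int_size_in_bytes (TREE_TYPE (exp)));

   where ARG_PARALLEL is a PARALLEL of the shape shown earlier and
   ARG_MEM is a BLKmode MEM holding the value.  */
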
2312 /* Similar, but load SRC into new pseudos in a format that looks like
2313    PARALLEL.  This can later be fed to emit_group_move to get things
2314    in the right place.  */
2315 
2316 rtx
2317 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2318 {
2319   rtvec vec;
2320   int i;
2321 
2322   vec = rtvec_alloc (XVECLEN (parallel, 0));
2323   emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2324 
2325   /* Convert the vector to look just like the original PARALLEL, except
2326      with the computed values.  */
2327   for (i = 0; i < XVECLEN (parallel, 0); i++)
2328     {
2329       rtx e = XVECEXP (parallel, 0, i);
2330       rtx d = XEXP (e, 0);
2331 
2332       if (d)
2333 	{
2334 	  d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2335 	  e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2336 	}
2337       RTVEC_ELT (vec, i) = e;
2338     }
2339 
2340   return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2341 }
2342 
2343 /* Emit code to move a block SRC to block DST, where SRC and DST are
2344    non-consecutive groups of registers, each represented by a PARALLEL.  */
2345 
2346 void
2347 emit_group_move (rtx dst, rtx src)
2348 {
2349   int i;
2350 
2351   gcc_assert (GET_CODE (src) == PARALLEL
2352 	      && GET_CODE (dst) == PARALLEL
2353 	      && XVECLEN (src, 0) == XVECLEN (dst, 0));
2354 
2355   /* Skip first entry if NULL.  */
2356   for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2357     emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2358 		    XEXP (XVECEXP (src, 0, i), 0));
2359 }
2360 
2361 /* Move a group of registers represented by a PARALLEL into pseudos.  */
2362 
2363 rtx
2364 emit_group_move_into_temps (rtx src)
2365 {
2366   rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2367   int i;
2368 
2369   for (i = 0; i < XVECLEN (src, 0); i++)
2370     {
2371       rtx e = XVECEXP (src, 0, i);
2372       rtx d = XEXP (e, 0);
2373 
2374       if (d)
2375 	e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2376       RTVEC_ELT (vec, i) = e;
2377     }
2378 
2379   return gen_rtx_PARALLEL (GET_MODE (src), vec);
2380 }
2381 
2382 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2383    where SRC is non-consecutive registers represented by a PARALLEL.
2384    SSIZE represents the total size of block ORIG_DST, or -1 if not
2385    known.  */
2386 
2387 void
2388 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2389 		  poly_int64 ssize)
2390 {
2391   rtx *tmps, dst;
2392   int start, finish, i;
2393   machine_mode m = GET_MODE (orig_dst);
2394 
2395   gcc_assert (GET_CODE (src) == PARALLEL);
2396 
2397   if (!SCALAR_INT_MODE_P (m)
2398       && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2399     {
2400       scalar_int_mode imode;
2401       if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2402 	{
2403 	  dst = gen_reg_rtx (imode);
2404 	  emit_group_store (dst, src, type, ssize);
2405 	  dst = gen_lowpart (GET_MODE (orig_dst), dst);
2406 	}
2407       else
2408 	{
2409 	  dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2410 	  emit_group_store (dst, src, type, ssize);
2411 	}
2412       emit_move_insn (orig_dst, dst);
2413       return;
2414     }
2415 
2416   /* Check for a NULL entry, used to indicate that the parameter goes
2417      both on the stack and in registers.  */
2418   if (XEXP (XVECEXP (src, 0, 0), 0))
2419     start = 0;
2420   else
2421     start = 1;
2422   finish = XVECLEN (src, 0);
2423 
2424   tmps = XALLOCAVEC (rtx, finish);
2425 
2426   /* Copy the (probable) hard regs into pseudos.  */
2427   for (i = start; i < finish; i++)
2428     {
2429       rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2430       if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2431 	{
2432 	  tmps[i] = gen_reg_rtx (GET_MODE (reg));
2433 	  emit_move_insn (tmps[i], reg);
2434 	}
2435       else
2436 	tmps[i] = reg;
2437     }
2438 
2439   /* If we won't be storing directly into memory, protect the real destination
2440      from strange tricks we might play.  */
2441   dst = orig_dst;
2442   if (GET_CODE (dst) == PARALLEL)
2443     {
2444       rtx temp;
2445 
2446       /* We can get a PARALLEL dst if there is a conditional expression in
2447 	 a return statement.  In that case, the dst and src are the same,
2448 	 so no action is necessary.  */
2449       if (rtx_equal_p (dst, src))
2450 	return;
2451 
2452       /* It is unclear if we can ever reach here, but we may as well handle
2453 	 it.  Allocate a temporary, and split this into a store/load to/from
2454 	 the temporary.  */
2455       temp = assign_stack_temp (GET_MODE (dst), ssize);
2456       emit_group_store (temp, src, type, ssize);
2457       emit_group_load (dst, temp, type, ssize);
2458       return;
2459     }
2460   else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2461     {
2462       machine_mode outer = GET_MODE (dst);
2463       machine_mode inner;
2464       poly_int64 bytepos;
2465       bool done = false;
2466       rtx temp;
2467 
2468       if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2469 	dst = gen_reg_rtx (outer);
2470 
2471       /* Make life a bit easier for combine.  */
2472       /* If the first element of the vector is the low part
2473 	 of the destination mode, use a paradoxical subreg to
2474 	 initialize the destination.  */
2475       if (start < finish)
2476 	{
2477 	  inner = GET_MODE (tmps[start]);
2478 	  bytepos = subreg_lowpart_offset (inner, outer);
2479 	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
2480 			bytepos))
2481 	    {
2482 	      temp = simplify_gen_subreg (outer, tmps[start],
2483 					  inner, 0);
2484 	      if (temp)
2485 		{
2486 		  emit_move_insn (dst, temp);
2487 		  done = true;
2488 		  start++;
2489 		}
2490 	    }
2491 	}
2492 
2493       /* If the first element wasn't the low part, try the last.  */
2494       if (!done
2495 	  && start < finish - 1)
2496 	{
2497 	  inner = GET_MODE (tmps[finish - 1]);
2498 	  bytepos = subreg_lowpart_offset (inner, outer);
2499 	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
2500 							  finish - 1), 1)),
2501 			bytepos))
2502 	    {
2503 	      temp = simplify_gen_subreg (outer, tmps[finish - 1],
2504 					  inner, 0);
2505 	      if (temp)
2506 		{
2507 		  emit_move_insn (dst, temp);
2508 		  done = true;
2509 		  finish--;
2510 		}
2511 	    }
2512 	}
2513 
2514       /* Otherwise, simply initialize the result to zero.  */
2515       if (!done)
2516         emit_move_insn (dst, CONST0_RTX (outer));
2517     }
2518 
2519   /* Process the pieces.  */
2520   for (i = start; i < finish; i++)
2521     {
2522       poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
2523       machine_mode mode = GET_MODE (tmps[i]);
2524       poly_int64 bytelen = GET_MODE_SIZE (mode);
2525       poly_uint64 adj_bytelen;
2526       rtx dest = dst;
2527 
2528       /* Handle trailing fragments that run over the size of the struct.
2529 	 It's the target's responsibility to make sure that the fragment
2530 	 cannot be strictly smaller in some cases and strictly larger
2531 	 in others.  */
2532       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2533       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2534 	adj_bytelen = ssize - bytepos;
2535       else
2536 	adj_bytelen = bytelen;
2537 
2538       if (GET_CODE (dst) == CONCAT)
2539 	{
2540 	  if (known_le (bytepos + adj_bytelen,
2541 			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2542 	    dest = XEXP (dst, 0);
2543 	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2544 	    {
2545 	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2546 	      dest = XEXP (dst, 1);
2547 	    }
2548 	  else
2549 	    {
2550 	      machine_mode dest_mode = GET_MODE (dest);
2551 	      machine_mode tmp_mode = GET_MODE (tmps[i]);
2552 
2553 	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2554 
2555 	      if (GET_MODE_ALIGNMENT (dest_mode)
2556 		  >= GET_MODE_ALIGNMENT (tmp_mode))
2557 		{
2558 		  dest = assign_stack_temp (dest_mode,
2559 					    GET_MODE_SIZE (dest_mode));
2560 		  emit_move_insn (adjust_address (dest,
2561 						  tmp_mode,
2562 						  bytepos),
2563 				  tmps[i]);
2564 		  dst = dest;
2565 		}
2566 	      else
2567 		{
2568 		  dest = assign_stack_temp (tmp_mode,
2569 					    GET_MODE_SIZE (tmp_mode));
2570 		  emit_move_insn (dest, tmps[i]);
2571 		  dst = adjust_address (dest, dest_mode, bytepos);
2572 		}
2573 	      break;
2574 	    }
2575 	}
2576 
2577       /* Handle trailing fragments that run over the size of the struct.  */
2578       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2579 	{
2580 	  /* store_bit_field always takes its value from the lsb.
2581 	     Move the fragment to the lsb if it's not already there.  */
2582 	  if (
2583 #ifdef BLOCK_REG_PADDING
2584 	      BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2585 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2586 #else
2587 	      BYTES_BIG_ENDIAN
2588 #endif
2589 	      )
2590 	    {
2591 	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2592 	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2593 				      shift, tmps[i], 0);
2594 	    }
2595 
2596 	  /* Make sure not to write past the end of the struct.  */
2597 	  store_bit_field (dest,
2598 			   adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2599 			   bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2600 			   VOIDmode, tmps[i], false);
2601 	}
2602 
2603       /* Optimize the access just a bit.  */
2604       else if (MEM_P (dest)
2605 	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2606 		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2607 	       && multiple_p (bytepos * BITS_PER_UNIT,
2608 			      GET_MODE_ALIGNMENT (mode))
2609 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2610 	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2611 
2612       else
2613 	store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2614 			 0, 0, mode, tmps[i], false);
2615     }
2616 
2617   /* Copy from the pseudo into the (probable) hard reg.  */
2618   if (orig_dst != dst)
2619     emit_move_insn (orig_dst, dst);
2620 }
2621 
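/* Example (illustrative sketch): spilling a value returned in a register
   group to a stack temporary might be written as

     HOST_WIDE_INT bytes = int_size_in_bytes (type);
     rtx mem = assign_stack_temp (BLKmode, bytes);
     emit_group_store (mem, retval_parallel, type, bytes);

   after which MEM can be accessed like any other BLKmode object.  */
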
2622 /* Return a form of X that does not use a PARALLEL.  TYPE is the type
2623    of the value stored in X.  */
2624 
2625 rtx
2626 maybe_emit_group_store (rtx x, tree type)
2627 {
2628   machine_mode mode = TYPE_MODE (type);
2629   gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2630   if (GET_CODE (x) == PARALLEL)
2631     {
2632       rtx result = gen_reg_rtx (mode);
2633       emit_group_store (result, x, type, int_size_in_bytes (type));
2634       return result;
2635     }
2636   return x;
2637 }
2638 
2639 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2640 
2641    This is used on targets that return BLKmode values in registers.  */
2642 
2643 static void
2644 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2645 {
2646   unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2647   rtx src = NULL, dst = NULL;
2648   unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2649   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2650   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
2651   fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2652   fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2653   fixed_size_mode copy_mode;
2654 
2655   /* BLKmode registers created in the back-end shouldn't have survived.  */
2656   gcc_assert (mode != BLKmode);
2657 
2658   /* If the structure doesn't take up a whole number of words, see whether
2659      SRCREG is padded on the left or on the right.  If it's on the left,
2660      set PADDING_CORRECTION to the number of bits to skip.
2661 
2662      In most ABIs, the structure will be returned at the least significant
2663      end of the register, which translates to right padding on little-endian
2664      targets and left padding on big-endian targets.  The opposite
2665      holds if the structure is returned at the most significant
2666      end of the register.  */
2667   if (bytes % UNITS_PER_WORD != 0
2668       && (targetm.calls.return_in_msb (type)
2669 	  ? !BYTES_BIG_ENDIAN
2670 	  : BYTES_BIG_ENDIAN))
2671     padding_correction
2672       = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2673 
2674   /* We can use a single move if we have an exact mode for the size.  */
2675   else if (MEM_P (target)
2676 	   && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2677 	       || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2678 	   && bytes == GET_MODE_SIZE (mode))
2679   {
2680     emit_move_insn (adjust_address (target, mode, 0), srcreg);
2681     return;
2682   }
2683 
2684   /* And if we additionally have the same mode for a register.  */
2685   else if (REG_P (target)
2686 	   && GET_MODE (target) == mode
2687 	   && bytes == GET_MODE_SIZE (mode))
2688   {
2689     emit_move_insn (target, srcreg);
2690     return;
2691   }
2692 
2693   /* This code assumes srcreg is at least a full word.  If it isn't, copy it
2694      into a new pseudo which is a full word.  */
2695   if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2696     {
2697       srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2698       mode = word_mode;
2699     }
2700 
2701   /* Copy the structure BITSIZE bits at a time.  If the target lives in
2702      memory, take care of not reading/writing past its end by selecting
2703      a copy mode suited to BITSIZE.  This should always be possible given
2704      how it is computed.
2705 
2706      If the target lives in register, make sure not to select a copy mode
2707      larger than the mode of the register.
2708 
2709      We could probably emit more efficient code for machines which do not use
2710      strict alignment, but it doesn't seem worth the effort at the current
2711      time.  */
2712 
2713   copy_mode = word_mode;
2714   if (MEM_P (target))
2715     {
2716       opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2717       if (mem_mode.exists ())
2718 	copy_mode = mem_mode.require ();
2719     }
2720   else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2721     copy_mode = tmode;
2722 
2723   for (bitpos = 0, xbitpos = padding_correction;
2724        bitpos < bytes * BITS_PER_UNIT;
2725        bitpos += bitsize, xbitpos += bitsize)
2726     {
2727       /* We need a new source operand each time xbitpos is on a
2728 	 word boundary and when xbitpos == padding_correction
2729 	 (the first time through).  */
2730       if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2731 	src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2732 
2733       /* We need a new destination operand each time bitpos is on
2734 	 a word boundary.  */
2735       if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2736 	dst = target;
2737       else if (bitpos % BITS_PER_WORD == 0)
2738 	dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2739 
2740       /* Use xbitpos for the source extraction (right justified) and
2741 	 bitpos for the destination store (left justified).  */
2742       store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2743 		       extract_bit_field (src, bitsize,
2744 					  xbitpos % BITS_PER_WORD, 1,
2745 					  NULL_RTX, copy_mode, copy_mode,
2746 					  false, NULL),
2747 		       false);
2748     }
2749 }
2750 
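/* Worked example for the padding logic above (illustrative): for a
   6-byte structure returned at the least significant end of a 64-bit
   register on a big-endian target,

     bytes % UNITS_PER_WORD = 6, so
     padding_correction = 64 - 6 * 8 = 16 bits,

   i.e. the first 16 bits of the register are skipped before copying.  */
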
2751 /* Copy BLKmode value SRC into a register of mode MODE_IN.  Return the
2752    register if it contains any data, otherwise return null.
2753 
2754    This is used on targets that return BLKmode values in registers.  */
2755 
2756 rtx
2757 copy_blkmode_to_reg (machine_mode mode_in, tree src)
2758 {
2759   int i, n_regs;
2760   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
2761   unsigned int bitsize;
2762   rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
2763   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
2764   fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
2765   fixed_size_mode dst_mode;
2766   scalar_int_mode min_mode;
2767 
2768   gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
2769 
2770   x = expand_normal (src);
2771 
2772   bytes = arg_int_size_in_bytes (TREE_TYPE (src));
2773   if (bytes == 0)
2774     return NULL_RTX;
2775 
2776   /* If the structure doesn't take up a whole number of words, see
2777      whether the register value should be padded on the left or on
2778      the right.  Set PADDING_CORRECTION to the number of padding
2779      bits needed on the left side.
2780 
2781      In most ABIs, the structure will be returned at the least significant
2782      end of the register, which translates to right padding on little-endian
2783      targets and left padding on big-endian targets.  The opposite
2784      holds if the structure is returned at the most significant
2785      end of the register.  */
2786   if (bytes % UNITS_PER_WORD != 0
2787       && (targetm.calls.return_in_msb (TREE_TYPE (src))
2788 	  ? !BYTES_BIG_ENDIAN
2789 	  : BYTES_BIG_ENDIAN))
2790     padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
2791 					   * BITS_PER_UNIT));
2792 
2793   n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2794   dst_words = XALLOCAVEC (rtx, n_regs);
2795   bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
2796   min_mode = smallest_int_mode_for_size (bitsize);
2797 
2798   /* Copy the structure BITSIZE bits at a time.  */
2799   for (bitpos = 0, xbitpos = padding_correction;
2800        bitpos < bytes * BITS_PER_UNIT;
2801        bitpos += bitsize, xbitpos += bitsize)
2802     {
2803       /* We need a new destination pseudo each time xbitpos is
2804 	 on a word boundary and when xbitpos == padding_correction
2805 	 (the first time through).  */
2806       if (xbitpos % BITS_PER_WORD == 0
2807 	  || xbitpos == padding_correction)
2808 	{
2809 	  /* Generate an appropriate register.  */
2810 	  dst_word = gen_reg_rtx (word_mode);
2811 	  dst_words[xbitpos / BITS_PER_WORD] = dst_word;
2812 
2813 	  /* Clear the destination before we move anything into it.  */
2814 	  emit_move_insn (dst_word, CONST0_RTX (word_mode));
2815 	}
2816 
2817       /* Find the largest integer mode that can be used to copy all or as
2818 	 many bits as possible of the structure if the target supports larger
2819 	 copies.  There are too many corner cases here w.r.t. alignments on
2820 	 the read/writes.  So if there is any padding just use single byte
2821 	 operations.  */
2822       opt_scalar_int_mode mode_iter;
2823       if (padding_correction == 0 && !STRICT_ALIGNMENT)
2824 	{
2825 	  FOR_EACH_MODE_FROM (mode_iter, min_mode)
2826 	    {
2827 	      unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
2828 	      if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
2829 		  && msize <= BITS_PER_WORD)
2830 		bitsize = msize;
2831 	      else
2832 		break;
2833 	    }
2834 	}
2835 
2836       /* We need a new source operand each time bitpos is on a word
2837 	 boundary.  */
2838       if (bitpos % BITS_PER_WORD == 0)
2839 	src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
2840 
2841       /* Use bitpos for the source extraction (left justified) and
2842 	 xbitpos for the destination store (right justified).  */
2843       store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
2844 		       0, 0, word_mode,
2845 		       extract_bit_field (src_word, bitsize,
2846 					  bitpos % BITS_PER_WORD, 1,
2847 					  NULL_RTX, word_mode, word_mode,
2848 					  false, NULL),
2849 		       false);
2850     }
2851 
2852   if (mode == BLKmode)
2853     {
2854       /* Find the smallest integer mode large enough to hold the
2855 	 entire structure.  */
2856       opt_scalar_int_mode mode_iter;
2857       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2858 	if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
2859 	  break;
2860 
2861       /* A suitable mode should have been found.  */
2862       mode = mode_iter.require ();
2863     }
2864 
2865   if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
2866     dst_mode = word_mode;
2867   else
2868     dst_mode = mode;
2869   dst = gen_reg_rtx (dst_mode);
2870 
2871   for (i = 0; i < n_regs; i++)
2872     emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
2873 
2874   if (mode != dst_mode)
2875     dst = gen_lowpart (mode, dst);
2876 
2877   return dst;
2878 }
2879 
2880 /* Add a USE expression for REG to the (possibly empty) list pointed
2881    to by CALL_FUSAGE.  REG must denote a hard register.  */
2882 
2883 void
2884 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2885 {
2886   gcc_assert (REG_P (reg));
2887 
2888   if (!HARD_REGISTER_P (reg))
2889     return;
2890 
2891   *call_fusage
2892     = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
2893 }
2894 
2895 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
2896    to by CALL_FUSAGE.  REG must denote a hard register.  */
2897 
2898 void
2899 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2900 {
2901   gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
2902 
2903   *call_fusage
2904     = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
2905 }
2906 
2907 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
2908    starting at REGNO.  All of these registers must be hard registers.  */
2909 
2910 void
2911 use_regs (rtx *call_fusage, int regno, int nregs)
2912 {
2913   int i;
2914 
2915   gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
2916 
2917   for (i = 0; i < nregs; i++)
2918     use_reg (call_fusage, regno_reg_rtx[regno + i]);
2919 }
2920 
2921 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
2922    PARALLEL REGS.  This is for calls that pass values in multiple
2923    non-contiguous locations.  The Irix 6 ABI has examples of this.  */
2924 
2925 void
2926 use_group_regs (rtx *call_fusage, rtx regs)
2927 {
2928   int i;
2929 
2930   for (i = 0; i < XVECLEN (regs, 0); i++)
2931     {
2932       rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
2933 
2934       /* A NULL entry means the parameter goes both on the stack and in
2935 	 registers.  This can also be a MEM for targets that pass values
2936 	 partially on the stack and partially in registers.  */
2937       if (reg != 0 && REG_P (reg))
2938 	use_reg (call_fusage, reg);
2939     }
2940 }
2941 
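/* Example (illustrative sketch; ARG_REGNO stands for some
   target-specific hard register number): a call expander recording that
   a hard register carries an argument might do

     rtx call_fusage = NULL_RTX;
     use_reg (&call_fusage, gen_rtx_REG (SImode, ARG_REGNO));

   and later attach CALL_FUSAGE to the CALL_INSN it emits.  */
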
2942 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2943    assignment and the code of the expression on the RHS is CODE.  Return
2944    NULL otherwise.  */
2945 
2946 static gimple *
2947 get_def_for_expr (tree name, enum tree_code code)
2948 {
2949   gimple *def_stmt;
2950 
2951   if (TREE_CODE (name) != SSA_NAME)
2952     return NULL;
2953 
2954   def_stmt = get_gimple_for_ssa_name (name);
2955   if (!def_stmt
2956       || gimple_assign_rhs_code (def_stmt) != code)
2957     return NULL;
2958 
2959   return def_stmt;
2960 }
2961 
2962 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2963    assignment and the class of the expression on the RHS is CLASS.  Return
2964    NULL otherwise.  */
2965 
2966 static gimple *
2967 get_def_for_expr_class (tree name, enum tree_code_class tclass)
2968 {
2969   gimple *def_stmt;
2970 
2971   if (TREE_CODE (name) != SSA_NAME)
2972     return NULL;
2973 
2974   def_stmt = get_gimple_for_ssa_name (name);
2975   if (!def_stmt
2976       || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
2977     return NULL;
2978 
2979   return def_stmt;
2980 }
2981 
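/* Example (illustrative sketch): an expander looking through the SSA
   name it was handed for a multiplication might do

     gimple *def = get_def_for_expr (name, MULT_EXPR);
     if (def)
       {
         tree op0 = gimple_assign_rhs1 (def);
         tree op1 = gimple_assign_rhs2 (def);
         ...
       }

   and fall back to the plain expansion when DEF is null.  */
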
2982 /* Write zeros through the storage of OBJECT.  If OBJECT has BLKmode, SIZE is
2983    its length in bytes.  */
2984 
2985 rtx
2986 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
2987 		     unsigned int expected_align, HOST_WIDE_INT expected_size,
2988 		     unsigned HOST_WIDE_INT min_size,
2989 		     unsigned HOST_WIDE_INT max_size,
2990 		     unsigned HOST_WIDE_INT probable_max_size)
2991 {
2992   machine_mode mode = GET_MODE (object);
2993   unsigned int align;
2994 
2995   gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
2996 
2997   /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
2998      just move a zero.  Otherwise, do this a piece at a time.  */
2999   poly_int64 size_val;
3000   if (mode != BLKmode
3001       && poly_int_rtx_p (size, &size_val)
3002       && known_eq (size_val, GET_MODE_SIZE (mode)))
3003     {
3004       rtx zero = CONST0_RTX (mode);
3005       if (zero != NULL)
3006 	{
3007 	  emit_move_insn (object, zero);
3008 	  return NULL;
3009 	}
3010 
3011       if (COMPLEX_MODE_P (mode))
3012 	{
3013 	  zero = CONST0_RTX (GET_MODE_INNER (mode));
3014 	  if (zero != NULL)
3015 	    {
3016 	      write_complex_part (object, zero, 0);
3017 	      write_complex_part (object, zero, 1);
3018 	      return NULL;
3019 	    }
3020 	}
3021     }
3022 
3023   if (size == const0_rtx)
3024     return NULL;
3025 
3026   align = MEM_ALIGN (object);
3027 
3028   if (CONST_INT_P (size)
3029       && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3030 						 CLEAR_BY_PIECES,
3031 						 optimize_insn_for_speed_p ()))
3032     clear_by_pieces (object, INTVAL (size), align);
3033   else if (set_storage_via_setmem (object, size, const0_rtx, align,
3034 				   expected_align, expected_size,
3035 				   min_size, max_size, probable_max_size))
3036     ;
3037   else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3038     return set_storage_via_libcall (object, size, const0_rtx,
3039 				    method == BLOCK_OP_TAILCALL);
3040   else
3041     gcc_unreachable ();
3042 
3043   return NULL;
3044 }
3045 
3046 rtx
3047 clear_storage (rtx object, rtx size, enum block_op_methods method)
3048 {
3049   unsigned HOST_WIDE_INT max, min = 0;
3050   if (GET_CODE (size) == CONST_INT)
3051     min = max = UINTVAL (size);
3052   else
3053     max = GET_MODE_MASK (GET_MODE (size));
3054   return clear_storage_hints (object, size, method, 0, -1, min, max, max);
3055 }
3056 
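/* Example (illustrative sketch): zeroing a BLKmode object during
   expansion might be done with

     clear_storage (target_mem, GEN_INT (int_size_in_bytes (type)),
                    BLOCK_OP_NORMAL);

   where TARGET_MEM is the MEM for the object and TYPE its tree type.  */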
3057 
3058 /* A subroutine of clear_storage.  Expand a call to memset.
3059    Return the return value of the memset call.  */
3060 
3061 rtx
3062 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3063 {
3064   tree call_expr, fn, object_tree, size_tree, val_tree;
3065   machine_mode size_mode;
3066 
3067   object = copy_addr_to_reg (XEXP (object, 0));
3068   object_tree = make_tree (ptr_type_node, object);
3069 
3070   if (!CONST_INT_P (val))
3071     val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3072   val_tree = make_tree (integer_type_node, val);
3073 
3074   size_mode = TYPE_MODE (sizetype);
3075   size = convert_to_mode (size_mode, size, 1);
3076   size = copy_to_mode_reg (size_mode, size);
3077   size_tree = make_tree (sizetype, size);
3078 
3079   /* It is incorrect to use the libcall calling conventions for calls to
3080      memset because it can be provided by the user.  */
3081   fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3082   call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3083   CALL_EXPR_TAILCALL (call_expr) = tailcall;
3084 
3085   return expand_call (call_expr, NULL_RTX, false);
3086 }
3087 
3088 /* Expand a setmem pattern; return true if successful.  */
3089 
3090 bool
3091 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3092 			unsigned int expected_align, HOST_WIDE_INT expected_size,
3093 			unsigned HOST_WIDE_INT min_size,
3094 			unsigned HOST_WIDE_INT max_size,
3095 			unsigned HOST_WIDE_INT probable_max_size)
3096 {
3097   /* Try the most limited insn first, because there's no point
3098      including more than one in the machine description unless
3099      the more limited one has some advantage.  */
3100 
3101   if (expected_align < align)
3102     expected_align = align;
3103   if (expected_size != -1)
3104     {
3105       if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3106 	expected_size = max_size;
3107       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3108 	expected_size = min_size;
3109     }
3110 
3111   opt_scalar_int_mode mode_iter;
3112   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3113     {
3114       scalar_int_mode mode = mode_iter.require ();
3115       enum insn_code code = direct_optab_handler (setmem_optab, mode);
3116 
3117       if (code != CODE_FOR_nothing
3118 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3119 	     here because if SIZE is less than the mode mask, as it is
3120 	     returned by the macro, it will definitely be less than the
3121 	     actual mode mask.  Since SIZE is within the Pmode address
3122 	     space, we limit MODE to Pmode.  */
3123 	  && ((CONST_INT_P (size)
3124 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
3125 		   <= (GET_MODE_MASK (mode) >> 1)))
3126 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
3127 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3128 	{
3129 	  struct expand_operand ops[9];
3130 	  unsigned int nops;
3131 
3132 	  nops = insn_data[(int) code].n_generator_args;
3133 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
3134 
3135 	  create_fixed_operand (&ops[0], object);
3136 	  /* The check above guarantees that this size conversion is valid.  */
3137 	  create_convert_operand_to (&ops[1], size, mode, true);
3138 	  create_convert_operand_from (&ops[2], val, byte_mode, true);
3139 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3140 	  if (nops >= 6)
3141 	    {
3142 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3143 	      create_integer_operand (&ops[5], expected_size);
3144 	    }
3145 	  if (nops >= 8)
3146 	    {
3147 	      create_integer_operand (&ops[6], min_size);
3148 	      /* If we cannot represent the maximal size,
3149 		 make parameter NULL.  */
3150 	      if ((HOST_WIDE_INT) max_size != -1)
3151 	        create_integer_operand (&ops[7], max_size);
3152 	      else
3153 		create_fixed_operand (&ops[7], NULL);
3154 	    }
3155 	  if (nops == 9)
3156 	    {
3157 	      /* If we cannot represent the maximal size,
3158 		 make parameter NULL.  */
3159 	      if ((HOST_WIDE_INT) probable_max_size != -1)
3160 	        create_integer_operand (&ops[8], probable_max_size);
3161 	      else
3162 		create_fixed_operand (&ops[8], NULL);
3163 	    }
3164 	  if (maybe_expand_insn (code, nops, ops))
3165 	    return true;
3166 	}
3167     }
3168 
3169   return false;
3170 }
3171 
3172 
3173 /* Write to one of the components of the complex value CPLX.  Write VAL to
3174    the real part if IMAG_P is false, and the imaginary part if it's true.  */
3175 
3176 void
3177 write_complex_part (rtx cplx, rtx val, bool imag_p)
3178 {
3179   machine_mode cmode;
3180   scalar_mode imode;
3181   unsigned ibitsize;
3182 
3183   if (GET_CODE (cplx) == CONCAT)
3184     {
3185       emit_move_insn (XEXP (cplx, imag_p), val);
3186       return;
3187     }
3188 
3189   cmode = GET_MODE (cplx);
3190   imode = GET_MODE_INNER (cmode);
3191   ibitsize = GET_MODE_BITSIZE (imode);
3192 
3193   /* For MEMs simplify_gen_subreg may generate an invalid new address
3194      because, e.g., the original address is considered mode-dependent
3195      by the target, which restricts simplify_subreg from invoking
3196      adjust_address_nv.  Instead of preparing fallback support for an
3197      invalid address, we call adjust_address_nv directly.  */
3198   if (MEM_P (cplx))
3199     {
3200       emit_move_insn (adjust_address_nv (cplx, imode,
3201 					 imag_p ? GET_MODE_SIZE (imode) : 0),
3202 		      val);
3203       return;
3204     }
3205 
3206   /* If the sub-object is at least word sized, then we know that subregging
3207      will work.  This special case is important, since store_bit_field
3208      wants to operate on integer modes, and there's rarely an OImode to
3209      correspond to TCmode.  */
3210   if (ibitsize >= BITS_PER_WORD
3211       /* For hard regs we have exact predicates.  Assume we can split
3212 	 the original object if it spans an even number of hard regs.
3213 	 This special case is important for SCmode on 64-bit platforms
3214 	 where the natural size of floating-point regs is 32-bit.  */
3215       || (REG_P (cplx)
3216 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3217 	  && REG_NREGS (cplx) % 2 == 0))
3218     {
3219       rtx part = simplify_gen_subreg (imode, cplx, cmode,
3220 				      imag_p ? GET_MODE_SIZE (imode) : 0);
3221       if (part)
3222         {
3223 	  emit_move_insn (part, val);
3224 	  return;
3225 	}
3226       else
3227 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3228 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3229     }
3230 
3231   store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3232 		   false);
3233 }
3234 
3235 /* Extract one of the components of the complex value CPLX.  Extract the
3236    real part if IMAG_P is false, and the imaginary part if it's true.  */
3237 
3238 rtx
3239 read_complex_part (rtx cplx, bool imag_p)
3240 {
3241   machine_mode cmode;
3242   scalar_mode imode;
3243   unsigned ibitsize;
3244 
3245   if (GET_CODE (cplx) == CONCAT)
3246     return XEXP (cplx, imag_p);
3247 
3248   cmode = GET_MODE (cplx);
3249   imode = GET_MODE_INNER (cmode);
3250   ibitsize = GET_MODE_BITSIZE (imode);
3251 
3252   /* Special case reads from complex constants that got spilled to memory.  */
3253   if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3254     {
3255       tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3256       if (decl && TREE_CODE (decl) == COMPLEX_CST)
3257 	{
3258 	  tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3259 	  if (CONSTANT_CLASS_P (part))
3260 	    return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3261 	}
3262     }
3263 
3264   /* For MEMs simplify_gen_subreg may generate an invalid new address
3265      because, e.g., the original address is considered mode-dependent
3266      by the target, which restricts simplify_subreg from invoking
3267      adjust_address_nv.  Instead of preparing fallback support for an
3268      invalid address, we call adjust_address_nv directly.  */
3269   if (MEM_P (cplx))
3270     return adjust_address_nv (cplx, imode,
3271 			      imag_p ? GET_MODE_SIZE (imode) : 0);
3272 
3273   /* If the sub-object is at least word sized, then we know that subregging
3274      will work.  This special case is important, since extract_bit_field
3275      wants to operate on integer modes, and there's rarely an OImode to
3276      correspond to TCmode.  */
3277   if (ibitsize >= BITS_PER_WORD
3278       /* For hard regs we have exact predicates.  Assume we can split
3279 	 the original object if it spans an even number of hard regs.
3280 	 This special case is important for SCmode on 64-bit platforms
3281 	 where the natural size of floating-point regs is 32-bit.  */
3282       || (REG_P (cplx)
3283 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3284 	  && REG_NREGS (cplx) % 2 == 0))
3285     {
3286       rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3287 				     imag_p ? GET_MODE_SIZE (imode) : 0);
3288       if (ret)
3289         return ret;
3290       else
3291 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3292 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3293     }
3294 
3295   return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3296 			    true, NULL_RTX, imode, imode, false, NULL);
3297 }
3298 
3299 /* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
3300    NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
3301    represented in NEW_MODE.  If FORCE is true, this will never happen, as
3302    we'll force-create a SUBREG if needed.  */
3303 
3304 static rtx
3305 emit_move_change_mode (machine_mode new_mode,
3306 		       machine_mode old_mode, rtx x, bool force)
3307 {
3308   rtx ret;
3309 
3310   if (push_operand (x, GET_MODE (x)))
3311     {
3312       ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3313       MEM_COPY_ATTRIBUTES (ret, x);
3314     }
3315   else if (MEM_P (x))
3316     {
3317       /* We don't have to worry about changing the address since the
3318 	 size in bytes is supposed to be the same.  */
3319       if (reload_in_progress)
3320 	{
3321 	  /* Copy the MEM to change the mode and move any
3322 	     substitutions from the old MEM to the new one.  */
3323 	  ret = adjust_address_nv (x, new_mode, 0);
3324 	  copy_replacements (x, ret);
3325 	}
3326       else
3327 	ret = adjust_address (x, new_mode, 0);
3328     }
3329   else
3330     {
3331       /* Note that we do want simplify_subreg's behavior of validating
3332 	 that the new mode is ok for a hard register.  If we were to use
3333 	 simplify_gen_subreg, we would create the subreg, but would
3334 	 probably run into the target not being able to implement it.  */
3335       /* Except, of course, when FORCE is true, when this is exactly what
3336 	 we want.  Which is needed for CCmodes on some targets.  */
3337       if (force)
3338 	ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3339       else
3340 	ret = simplify_subreg (new_mode, x, old_mode, 0);
3341     }
3342 
3343   return ret;
3344 }
3345 
3346 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
3347    an integer mode of the same size as MODE.  Returns the instruction
3348    emitted, or NULL if such a move could not be generated.  */
3349 
3350 static rtx_insn *
3351 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3352 {
3353   scalar_int_mode imode;
3354   enum insn_code code;
3355 
3356   /* There must exist a mode of the exact size we require.  */
3357   if (!int_mode_for_mode (mode).exists (&imode))
3358     return NULL;
3359 
3360   /* The target must support moves in this mode.  */
3361   code = optab_handler (mov_optab, imode);
3362   if (code == CODE_FOR_nothing)
3363     return NULL;
3364 
3365   x = emit_move_change_mode (imode, mode, x, force);
3366   if (x == NULL_RTX)
3367     return NULL;
3368   y = emit_move_change_mode (imode, mode, y, force);
3369   if (y == NULL_RTX)
3370     return NULL;
3371   return emit_insn (GEN_FCN (code) (x, y));
3372 }
3373 
3374 /* A subroutine of emit_move_insn_1.  X is a push_operand in MODE.
3375    Return an equivalent MEM that does not use an auto-increment.  */
3376 
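/* For example, assuming a 4-byte SImode push with no PUSH_ROUNDING
   adjustment, a destination such as

	(mem:SI (pre_dec (reg sp)))

   is handled by emitting an explicit "sp = sp - 4" and returning
   (mem:SI (reg sp)), which an ordinary move can then store to.  */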
3377 rtx
3378 emit_move_resolve_push (machine_mode mode, rtx x)
3379 {
3380   enum rtx_code code = GET_CODE (XEXP (x, 0));
3381   rtx temp;
3382 
3383   poly_int64 adjust = GET_MODE_SIZE (mode);
3384 #ifdef PUSH_ROUNDING
3385   adjust = PUSH_ROUNDING (adjust);
3386 #endif
3387   if (code == PRE_DEC || code == POST_DEC)
3388     adjust = -adjust;
3389   else if (code == PRE_MODIFY || code == POST_MODIFY)
3390     {
3391       rtx expr = XEXP (XEXP (x, 0), 1);
3392 
3393       gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3394       poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3395       if (GET_CODE (expr) == MINUS)
3396 	val = -val;
3397       gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3398       adjust = val;
3399     }
3400 
3401   /* Do not use anti_adjust_stack, since we don't want to update
3402      stack_pointer_delta.  */
3403   temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3404 			      gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3405 			      0, OPTAB_LIB_WIDEN);
3406   if (temp != stack_pointer_rtx)
3407     emit_move_insn (stack_pointer_rtx, temp);
3408 
3409   switch (code)
3410     {
3411     case PRE_INC:
3412     case PRE_DEC:
3413     case PRE_MODIFY:
3414       temp = stack_pointer_rtx;
3415       break;
3416     case POST_INC:
3417     case POST_DEC:
3418     case POST_MODIFY:
3419       temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3420       break;
3421     default:
3422       gcc_unreachable ();
3423     }
3424 
3425   return replace_equiv_address (x, temp);
3426 }
3427 
3428 /* A subroutine of emit_move_complex.  Generate a move from Y into X.
3429    X is known to satisfy push_operand, and MODE is known to be complex.
3430    Returns the last instruction emitted.  */
3431 
3432 rtx_insn *
3433 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3434 {
3435   scalar_mode submode = GET_MODE_INNER (mode);
3436   bool imag_first;
3437 
3438 #ifdef PUSH_ROUNDING
3439   poly_int64 submodesize = GET_MODE_SIZE (submode);
3440 
3441   /* In case we output to the stack, but the size is smaller than the
3442      machine can push exactly, we need to use move instructions.  */
3443   if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3444     {
3445       x = emit_move_resolve_push (mode, x);
3446       return emit_move_insn (x, y);
3447     }
3448 #endif
3449 
3450   /* Note that the real part always precedes the imag part in memory
3451      regardless of machine's endianness.  */
3452   switch (GET_CODE (XEXP (x, 0)))
3453     {
3454     case PRE_DEC:
3455     case POST_DEC:
3456       imag_first = true;
3457       break;
3458     case PRE_INC:
3459     case POST_INC:
3460       imag_first = false;
3461       break;
3462     default:
3463       gcc_unreachable ();
3464     }
3465 
3466   emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3467 		  read_complex_part (y, imag_first));
3468   return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3469 			 read_complex_part (y, !imag_first));
3470 }
3471 
3472 /* A subroutine of emit_move_complex.  Perform the move from Y to X
3473    via two moves of the parts.  Returns the last instruction emitted.  */
3474 
3475 rtx_insn *
3476 emit_move_complex_parts (rtx x, rtx y)
3477 {
3478   /* Show the output dies here.  This is necessary for SUBREGs
3479      of pseudos since we cannot track their lifetimes correctly;
3480      hard regs shouldn't appear here except as return values.  */
3481   if (!reload_completed && !reload_in_progress
3482       && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3483     emit_clobber (x);
3484 
3485   write_complex_part (x, read_complex_part (y, false), false);
3486   write_complex_part (x, read_complex_part (y, true), true);
3487 
3488   return get_last_insn ();
3489 }
3490 
3491 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3492    MODE is known to be complex.  Returns the last instruction emitted.  */
3493 
3494 static rtx_insn *
3495 emit_move_complex (machine_mode mode, rtx x, rtx y)
3496 {
3497   bool try_int;
3498 
3499   /* Need to take special care for pushes, to maintain proper ordering
3500      of the data, and possibly extra padding.  */
3501   if (push_operand (x, mode))
3502     return emit_move_complex_push (mode, x, y);
3503 
3504   /* See if we can coerce the target into moving both values at once, except
3505      for floating point where we favor moving as parts if this is easy.  */
3506   if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3507       && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3508       && !(REG_P (x)
3509 	   && HARD_REGISTER_P (x)
3510 	   && REG_NREGS (x) == 1)
3511       && !(REG_P (y)
3512 	   && HARD_REGISTER_P (y)
3513 	   && REG_NREGS (y) == 1))
3514     try_int = false;
3515   /* Not possible if the values are inherently not adjacent.  */
3516   else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3517     try_int = false;
3518   /* Is possible if both are registers (or subregs of registers).  */
3519   else if (register_operand (x, mode) && register_operand (y, mode))
3520     try_int = true;
3521   /* If one of the operands is a memory, and alignment constraints
3522      are friendly enough, we may be able to do combined memory operations.
3523      We do not attempt this if Y is a constant because that combination is
3524      usually better with the by-parts thing below.  */
3525   else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3526 	   && (!STRICT_ALIGNMENT
3527 	       || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3528     try_int = true;
3529   else
3530     try_int = false;
3531 
3532   if (try_int)
3533     {
3534       rtx_insn *ret;
3535 
3536       /* For memory to memory moves, optimal behavior can be had with the
3537 	 existing block move logic.  */
3538       if (MEM_P (x) && MEM_P (y))
3539 	{
3540 	  emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3541 			   BLOCK_OP_NO_LIBCALL);
3542 	  return get_last_insn ();
3543 	}
3544 
3545       ret = emit_move_via_integer (mode, x, y, true);
3546       if (ret)
3547 	return ret;
3548     }
3549 
3550   return emit_move_complex_parts (x, y);
3551 }
3552 
3553 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3554    MODE is known to be MODE_CC.  Returns the last instruction emitted.  */
3555 
3556 static rtx_insn *
3557 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3558 {
3559   rtx_insn *ret;
3560 
3561   /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
3562   if (mode != CCmode)
3563     {
3564       enum insn_code code = optab_handler (mov_optab, CCmode);
3565       if (code != CODE_FOR_nothing)
3566 	{
3567 	  x = emit_move_change_mode (CCmode, mode, x, true);
3568 	  y = emit_move_change_mode (CCmode, mode, y, true);
3569 	  return emit_insn (GEN_FCN (code) (x, y));
3570 	}
3571     }
3572 
3573   /* Otherwise, find the MODE_INT mode of the same width.  */
3574   ret = emit_move_via_integer (mode, x, y, false);
3575   gcc_assert (ret != NULL);
3576   return ret;
3577 }
3578 
3579 /* Return true if word I of OP lies entirely in the
3580    undefined bits of a paradoxical subreg.  */
3581 
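/* For instance, assuming 8-byte words on a little-endian target, word 1
   of the paradoxical subreg (subreg:TI (reg:DI x) 0) starts at byte
   offset 8, past the 8 bytes of the inner DImode value, so its
   contents are undefined.  */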
3582 static bool
3583 undefined_operand_subword_p (const_rtx op, int i)
3584 {
3585   if (GET_CODE (op) != SUBREG)
3586     return false;
3587   machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3588   poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3589   return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3590 	  || known_le (offset, -UNITS_PER_WORD));
3591 }
3592 
3593 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3594    MODE is any multi-word or full-word mode that lacks a move_insn
3595    pattern.  Note that you will get better code if you define such
3596    patterns, even if they must turn into multiple assembler instructions.  */
3597 
3598 static rtx_insn *
3599 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3600 {
3601   rtx_insn *last_insn = 0;
3602   rtx_insn *seq;
3603   rtx inner;
3604   bool need_clobber;
3605   int i, mode_size;
3606 
3607   /* This function can only handle cases where the number of words is
3608      known at compile time.  */
3609   mode_size = GET_MODE_SIZE (mode).to_constant ();
3610   gcc_assert (mode_size >= UNITS_PER_WORD);
3611 
3612   /* If X is a push on the stack, do the push now and replace
3613      X with a reference to the stack pointer.  */
3614   if (push_operand (x, mode))
3615     x = emit_move_resolve_push (mode, x);
3616 
3617   /* If we are in reload, see if either operand is a MEM whose address
3618      is scheduled for replacement.  */
3619   if (reload_in_progress && MEM_P (x)
3620       && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3621     x = replace_equiv_address_nv (x, inner);
3622   if (reload_in_progress && MEM_P (y)
3623       && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3624     y = replace_equiv_address_nv (y, inner);
3625 
3626   start_sequence ();
3627 
3628   need_clobber = false;
3629   for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3630     {
3631       rtx xpart = operand_subword (x, i, 1, mode);
3632       rtx ypart;
3633 
3634       /* Do not generate code for a move if it would come entirely
3635 	 from the undefined bits of a paradoxical subreg.  */
3636       if (undefined_operand_subword_p (y, i))
3637 	continue;
3638 
3639       ypart = operand_subword (y, i, 1, mode);
3640 
3641       /* If we can't get a part of Y, put Y into memory if it is a
3642 	 constant.  Otherwise, force it into a register.  Then we must
3643 	 be able to get a part of Y.  */
3644       if (ypart == 0 && CONSTANT_P (y))
3645 	{
3646 	  y = use_anchored_address (force_const_mem (mode, y));
3647 	  ypart = operand_subword (y, i, 1, mode);
3648 	}
3649       else if (ypart == 0)
3650 	ypart = operand_subword_force (y, i, mode);
3651 
3652       gcc_assert (xpart && ypart);
3653 
3654       need_clobber |= (GET_CODE (xpart) == SUBREG);
3655 
3656       last_insn = emit_move_insn (xpart, ypart);
3657     }
3658 
3659   seq = get_insns ();
3660   end_sequence ();
3661 
3662   /* Show the output dies here.  This is necessary for SUBREGs
3663      of pseudos since we cannot track their lifetimes correctly;
3664      hard regs shouldn't appear here except as return values.
3665      We never want to emit such a clobber after reload.  */
3666   if (x != y
3667       && ! (reload_in_progress || reload_completed)
3668       && need_clobber != 0)
3669     emit_clobber (x);
3670 
3671   emit_insn (seq);
3672 
3673   return last_insn;
3674 }
3675 
3676 /* Low level part of emit_move_insn.
3677    Called just like emit_move_insn, but assumes X and Y
3678    are basically valid.  */
3679 
3680 rtx_insn *
3681 emit_move_insn_1 (rtx x, rtx y)
3682 {
3683   machine_mode mode = GET_MODE (x);
3684   enum insn_code code;
3685 
3686   gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3687 
3688   code = optab_handler (mov_optab, mode);
3689   if (code != CODE_FOR_nothing)
3690     return emit_insn (GEN_FCN (code) (x, y));
3691 
3692   /* Expand complex moves by moving real part and imag part.  */
3693   if (COMPLEX_MODE_P (mode))
3694     return emit_move_complex (mode, x, y);
3695 
3696   if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3697       || ALL_FIXED_POINT_MODE_P (mode))
3698     {
3699       rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3700 
3701       /* If we can't find an integer mode, use multi words.  */
3702       if (result)
3703 	return result;
3704       else
3705 	return emit_move_multi_word (mode, x, y);
3706     }
3707 
3708   if (GET_MODE_CLASS (mode) == MODE_CC)
3709     return emit_move_ccmode (mode, x, y);
3710 
3711   /* Try using a move pattern for the corresponding integer mode.  This is
3712      only safe when simplify_subreg can convert MODE constants into integer
3713      constants.  At present, it can only do this reliably if the value
3714      fits within a HOST_WIDE_INT.  */
3715   if (!CONSTANT_P (y)
3716       || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3717     {
3718       rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3719 
3720       if (ret)
3721 	{
3722 	  if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3723 	    return ret;
3724 	}
3725     }
3726 
3727   return emit_move_multi_word (mode, x, y);
3728 }
3729 
3730 /* Generate code to copy Y into X.
3731    Both Y and X must have the same mode, except that
3732    Y can be a constant with VOIDmode.
3733    This mode cannot be BLKmode; use emit_block_move for that.
3734 
3735    Return the last instruction emitted.  */
3736 
3737 rtx_insn *
3738 emit_move_insn (rtx x, rtx y)
3739 {
3740   machine_mode mode = GET_MODE (x);
3741   rtx y_cst = NULL_RTX;
3742   rtx_insn *last_insn;
3743   rtx set;
3744 
3745   gcc_assert (mode != BLKmode
3746 	      && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
3747 
3748   if (CONSTANT_P (y))
3749     {
3750       if (optimize
3751 	  && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3752 	  && (last_insn = compress_float_constant (x, y)))
3753 	return last_insn;
3754 
3755       y_cst = y;
3756 
3757       if (!targetm.legitimate_constant_p (mode, y))
3758 	{
3759 	  y = force_const_mem (mode, y);
3760 
3761 	  /* If the target's cannot_force_const_mem prevented the spill,
3762 	     assume that the target's move expanders will also take care
3763 	     of the non-legitimate constant.  */
3764 	  if (!y)
3765 	    y = y_cst;
3766 	  else
3767 	    y = use_anchored_address (y);
3768 	}
3769     }
3770 
3771   /* If X or Y are memory references, verify that their addresses are valid
3772      for the machine.  */
3773   if (MEM_P (x)
3774       && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
3775 					 MEM_ADDR_SPACE (x))
3776 	  && ! push_operand (x, GET_MODE (x))))
3777     x = validize_mem (x);
3778 
3779   if (MEM_P (y)
3780       && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
3781 					MEM_ADDR_SPACE (y)))
3782     y = validize_mem (y);
3783 
3784   gcc_assert (mode != BLKmode);
3785 
3786   last_insn = emit_move_insn_1 (x, y);
3787 
3788   if (y_cst && REG_P (x)
3789       && (set = single_set (last_insn)) != NULL_RTX
3790       && SET_DEST (set) == x
3791       && ! rtx_equal_p (y_cst, SET_SRC (set)))
3792     set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
3793 
3794   return last_insn;
3795 }
3796 
3797 /* Generate the body of an instruction to copy Y into X.
3798    It may be a list of insns, if one insn isn't enough.  */
3799 
3800 rtx_insn *
3801 gen_move_insn (rtx x, rtx y)
3802 {
3803   rtx_insn *seq;
3804 
3805   start_sequence ();
3806   emit_move_insn_1 (x, y);
3807   seq = get_insns ();
3808   end_sequence ();
3809   return seq;
3810 }
3811 
3812 /* If Y is representable exactly in a narrower mode, and the target can
3813    perform the extension directly from constant or memory, then emit the
3814    move as an extension.  */
3815 
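/* Typical example: moving the DFmode constant 1.0 into a register can
   often be done more cheaply by loading the exactly representable
   SFmode 1.0 and emitting a float_extend:DF of it, provided the target
   can extend directly from a constant or from memory.  */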
3816 static rtx_insn *
3817 compress_float_constant (rtx x, rtx y)
3818 {
3819   machine_mode dstmode = GET_MODE (x);
3820   machine_mode orig_srcmode = GET_MODE (y);
3821   machine_mode srcmode;
3822   const REAL_VALUE_TYPE *r;
3823   int oldcost, newcost;
3824   bool speed = optimize_insn_for_speed_p ();
3825 
3826   r = CONST_DOUBLE_REAL_VALUE (y);
3827 
3828   if (targetm.legitimate_constant_p (dstmode, y))
3829     oldcost = set_src_cost (y, orig_srcmode, speed);
3830   else
3831     oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
3832 
3833   FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
3834     {
3835       enum insn_code ic;
3836       rtx trunc_y;
3837       rtx_insn *last_insn;
3838 
3839       /* Skip if the target can't extend this way.  */
3840       ic = can_extend_p (dstmode, srcmode, 0);
3841       if (ic == CODE_FOR_nothing)
3842 	continue;
3843 
3844       /* Skip if the narrowed value isn't exact.  */
3845       if (! exact_real_truncate (srcmode, r))
3846 	continue;
3847 
3848       trunc_y = const_double_from_real_value (*r, srcmode);
3849 
3850       if (targetm.legitimate_constant_p (srcmode, trunc_y))
3851 	{
3852 	  /* Skip if the target needs extra instructions to perform
3853 	     the extension.  */
3854 	  if (!insn_operand_matches (ic, 1, trunc_y))
3855 	    continue;
3856 	  /* This is valid, but may not be cheaper than the original. */
3857 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3858 				  dstmode, speed);
3859 	  if (oldcost < newcost)
3860 	    continue;
3861 	}
3862       else if (float_extend_from_mem[dstmode][srcmode])
3863 	{
3864 	  trunc_y = force_const_mem (srcmode, trunc_y);
3865 	  /* This is valid, but may not be cheaper than the original. */
3866 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3867 				  dstmode, speed);
3868 	  if (oldcost < newcost)
3869 	    continue;
3870 	  trunc_y = validize_mem (trunc_y);
3871 	}
3872       else
3873 	continue;
3874 
3875       /* For CSE's benefit, force the compressed constant pool entry
3876 	 into a new pseudo.  This constant may be used in different modes,
3877 	 and if not, combine will put things back together for us.  */
3878       trunc_y = force_reg (srcmode, trunc_y);
3879 
3880       /* If x is a hard register, perform the extension into a pseudo,
3881 	 so that e.g. stack realignment code is aware of it.  */
3882       rtx target = x;
3883       if (REG_P (x) && HARD_REGISTER_P (x))
3884 	target = gen_reg_rtx (dstmode);
3885 
3886       emit_unop_insn (ic, target, trunc_y, UNKNOWN);
3887       last_insn = get_last_insn ();
3888 
3889       if (REG_P (target))
3890 	set_unique_reg_note (last_insn, REG_EQUAL, y);
3891 
3892       if (target != x)
3893 	return emit_move_insn (x, target);
3894       return last_insn;
3895     }
3896 
3897   return NULL;
3898 }
3899 
3900 /* Pushing data onto the stack.  */
3901 
3902 /* Push a block of length SIZE (perhaps variable)
3903    and return an rtx to address the beginning of the block.
3904    The value may be virtual_outgoing_args_rtx.
3905 
3906    EXTRA is the number of bytes of padding to push in addition to SIZE.
3907    BELOW nonzero means this padding comes at low addresses;
3908    otherwise, the padding comes at high addresses.  */
3909 
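/* For instance, on a stack-grows-down target with a constant SIZE and
   EXTRA == 0, this simply emits an anti_adjust_stack of SIZE and
   returns an address based on virtual_outgoing_args_rtx.  */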
3910 rtx
3911 push_block (rtx size, poly_int64 extra, int below)
3912 {
3913   rtx temp;
3914 
3915   size = convert_modes (Pmode, ptr_mode, size, 1);
3916   if (CONSTANT_P (size))
3917     anti_adjust_stack (plus_constant (Pmode, size, extra));
3918   else if (REG_P (size) && known_eq (extra, 0))
3919     anti_adjust_stack (size);
3920   else
3921     {
3922       temp = copy_to_mode_reg (Pmode, size);
3923       if (maybe_ne (extra, 0))
3924 	temp = expand_binop (Pmode, add_optab, temp,
3925 			     gen_int_mode (extra, Pmode),
3926 			     temp, 0, OPTAB_LIB_WIDEN);
3927       anti_adjust_stack (temp);
3928     }
3929 
3930   if (STACK_GROWS_DOWNWARD)
3931     {
3932       temp = virtual_outgoing_args_rtx;
3933       if (maybe_ne (extra, 0) && below)
3934 	temp = plus_constant (Pmode, temp, extra);
3935     }
3936   else
3937     {
3938       poly_int64 csize;
3939       if (poly_int_rtx_p (size, &csize))
3940 	temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
3941 			      -csize - (below ? 0 : extra));
3942       else if (maybe_ne (extra, 0) && !below)
3943 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3944 			     negate_rtx (Pmode, plus_constant (Pmode, size,
3945 							       extra)));
3946       else
3947 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3948 			     negate_rtx (Pmode, size));
3949     }
3950 
3951   return memory_address (NARROWEST_INT_MODE, temp);
3952 }
3953 
3954 /* A utility routine that returns the base of an auto-inc memory, or NULL.  */
3955 
3956 static rtx
3957 mem_autoinc_base (rtx mem)
3958 {
3959   if (MEM_P (mem))
3960     {
3961       rtx addr = XEXP (mem, 0);
3962       if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3963 	return XEXP (addr, 0);
3964     }
3965   return NULL;
3966 }
3967 
3968 /* A utility routine used here, in reload, and in try_split.  The insns
3969    after PREV up to and including LAST are known to adjust the stack,
3970    with a final value of END_ARGS_SIZE.  Iterate backward from LAST
3971    placing notes as appropriate.  PREV may be NULL, indicating the
3972    entire insn sequence prior to LAST should be scanned.
3973 
3974    The set of allowed stack pointer modifications is small:
3975      (1) One or more auto-inc style memory references (aka pushes),
3976      (2) One or more addition/subtraction with the SP as destination,
3977      (3) A single move insn with the SP as destination,
3978      (4) A call_pop insn,
3979      (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
3980 
3981    Insns in the sequence that do not modify the SP are ignored,
3982    except for noreturn calls.
3983 
3984    The return value is the amount of adjustment that can be trivially
3985    verified, via immediate operand or auto-inc.  If the adjustment
3986    cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN.  */
3987 
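/* Illustrative examples of the trivially verified forms, assuming a
   4-byte SImode push on a stack-grows-down target:

	(set (reg sp) (plus (reg sp) (const_int -16)))   => -16
	(set (mem:SI (pre_dec (reg sp))) (reg r))        => -4  */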
3988 poly_int64
3989 find_args_size_adjust (rtx_insn *insn)
3990 {
3991   rtx dest, set, pat;
3992   int i;
3993 
3994   pat = PATTERN (insn);
3995   set = NULL;
3996 
3997   /* Look for a call_pop pattern.  */
3998   if (CALL_P (insn))
3999     {
4000       /* We have to allow non-call_pop patterns for the case
4001 	 of emit_single_push_insn of a TLS address.  */
4002       if (GET_CODE (pat) != PARALLEL)
4003 	return 0;
4004 
4005       /* All call_pop have a stack pointer adjust in the parallel.
4006 	 The call itself is always first, and the stack adjust is
4007 	 usually last, so search from the end.  */
4008       for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
4009 	{
4010 	  set = XVECEXP (pat, 0, i);
4011 	  if (GET_CODE (set) != SET)
4012 	    continue;
4013 	  dest = SET_DEST (set);
4014 	  if (dest == stack_pointer_rtx)
4015 	    break;
4016 	}
4017       /* We'd better have found the stack pointer adjust.  */
4018       if (i == 0)
4019 	return 0;
4020       /* Fall through to process the extracted SET and DEST
4021 	 as if it was a standalone insn.  */
4022     }
4023   else if (GET_CODE (pat) == SET)
4024     set = pat;
4025   else if ((set = single_set (insn)) != NULL)
4026     ;
4027   else if (GET_CODE (pat) == PARALLEL)
4028     {
4029       /* ??? Some older ports use a parallel with a stack adjust
4030 	 and a store for a PUSH_ROUNDING pattern, rather than a
4031 	 PRE/POST_MODIFY rtx.  Don't force them to update yet...  */
4032       /* ??? See h8300 and m68k, pushqi1.  */
4033       for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4034 	{
4035 	  set = XVECEXP (pat, 0, i);
4036 	  if (GET_CODE (set) != SET)
4037 	    continue;
4038 	  dest = SET_DEST (set);
4039 	  if (dest == stack_pointer_rtx)
4040 	    break;
4041 
4042 	  /* We do not expect an auto-inc of the sp in the parallel.  */
4043 	  gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4044 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4045 			       != stack_pointer_rtx);
4046 	}
4047       if (i < 0)
4048 	return 0;
4049     }
4050   else
4051     return 0;
4052 
4053   dest = SET_DEST (set);
4054 
4055   /* Look for direct modifications of the stack pointer.  */
4056   if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4057     {
4058       /* Look for a trivial adjustment, otherwise assume nothing.  */
4059       /* Note that the SPU restore_stack_block pattern refers to
4060 	 the stack pointer in V4SImode.  Consider that non-trivial.  */
4061       poly_int64 offset;
4062       if (SCALAR_INT_MODE_P (GET_MODE (dest))
4063 	  && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
4064 	return offset;
4065       /* ??? Reload can generate no-op moves, which will be cleaned
4066 	 up later.  Recognize it and continue searching.  */
4067       else if (rtx_equal_p (dest, SET_SRC (set)))
4068 	return 0;
4069       else
4070 	return HOST_WIDE_INT_MIN;
4071     }
4072   else
4073     {
4074       rtx mem, addr;
4075 
4076       /* Otherwise only think about autoinc patterns.  */
4077       if (mem_autoinc_base (dest) == stack_pointer_rtx)
4078 	{
4079 	  mem = dest;
4080 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4081 			       != stack_pointer_rtx);
4082 	}
4083       else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4084 	mem = SET_SRC (set);
4085       else
4086 	return 0;
4087 
4088       addr = XEXP (mem, 0);
4089       switch (GET_CODE (addr))
4090 	{
4091 	case PRE_INC:
4092 	case POST_INC:
4093 	  return GET_MODE_SIZE (GET_MODE (mem));
4094 	case PRE_DEC:
4095 	case POST_DEC:
4096 	  return -GET_MODE_SIZE (GET_MODE (mem));
4097 	case PRE_MODIFY:
4098 	case POST_MODIFY:
4099 	  addr = XEXP (addr, 1);
4100 	  gcc_assert (GET_CODE (addr) == PLUS);
4101 	  gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4102 	  return rtx_to_poly_int64 (XEXP (addr, 1));
4103 	default:
4104 	  gcc_unreachable ();
4105 	}
4106     }
4107 }
4108 
4109 poly_int64
4110 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4111 		       poly_int64 end_args_size)
4112 {
4113   poly_int64 args_size = end_args_size;
4114   bool saw_unknown = false;
4115   rtx_insn *insn;
4116 
4117   for (insn = last; insn != prev; insn = PREV_INSN (insn))
4118     {
4119       if (!NONDEBUG_INSN_P (insn))
4120 	continue;
4121 
4122       /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4123 	 a call argument containing a TLS address that itself requires
4124 	 a call to __tls_get_addr.  The handling of stack_pointer_delta
4125 	 in emit_single_push_insn is supposed to ensure that any such
4126 	 notes are already correct.  */
4127       rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4128       gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4129 
4130       poly_int64 this_delta = find_args_size_adjust (insn);
4131       if (known_eq (this_delta, 0))
4132 	{
4133 	  if (!CALL_P (insn)
4134 	      || ACCUMULATE_OUTGOING_ARGS
4135 	      || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4136 	    continue;
4137 	}
4138 
4139       gcc_assert (!saw_unknown);
4140       if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4141 	saw_unknown = true;
4142 
4143       if (!note)
4144 	add_args_size_note (insn, args_size);
4145       if (STACK_GROWS_DOWNWARD)
4146 	this_delta = -poly_uint64 (this_delta);
4147 
4148       if (saw_unknown)
4149 	args_size = HOST_WIDE_INT_MIN;
4150       else
4151 	args_size -= this_delta;
4152     }
4153 
4154   return args_size;
4155 }
4156 
4157 #ifdef PUSH_ROUNDING
4158 /* Emit single push insn.  */
4159 
4160 static void
4161 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4162 {
4163   rtx dest_addr;
4164   poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4165   rtx dest;
4166   enum insn_code icode;
4167 
4168   /* If there is push pattern, use it.  Otherwise try old way of throwing
4169      MEM representing push operation to move expander.  */
4170   icode = optab_handler (push_optab, mode);
4171   if (icode != CODE_FOR_nothing)
4172     {
4173       struct expand_operand ops[1];
4174 
4175       create_input_operand (&ops[0], x, mode);
4176       if (maybe_expand_insn (icode, 1, ops))
4177 	return;
4178     }
4179   if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4180     dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4181   /* If we are to pad downward, adjust the stack pointer first and
4182      then store X into the stack location using an offset.  This is
4183      because emit_move_insn does not know how to pad; it does not have
4184      access to type.  */
4185   else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4186     {
4187       emit_move_insn (stack_pointer_rtx,
4188 		      expand_binop (Pmode,
4189 				    STACK_GROWS_DOWNWARD ? sub_optab
4190 				    : add_optab,
4191 				    stack_pointer_rtx,
4192 				    gen_int_mode (rounded_size, Pmode),
4193 				    NULL_RTX, 0, OPTAB_LIB_WIDEN));
4194 
4195       poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4196       if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4197 	/* We have already decremented the stack pointer, so get the
4198 	   previous value.  */
4199 	offset += rounded_size;
4200 
4201       if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4202 	/* We have already incremented the stack pointer, so get the
4203 	   previous value.  */
4204 	offset -= rounded_size;
4205 
4206       dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4207     }
4208   else
4209     {
4210       if (STACK_GROWS_DOWNWARD)
4211 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
4212 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4213       else
4214 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
4215 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4216 
4217       dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4218     }
4219 
4220   dest = gen_rtx_MEM (mode, dest_addr);
4221 
4222   if (type != 0)
4223     {
4224       set_mem_attributes (dest, type, 1);
4225 
4226       if (cfun->tail_call_marked)
4227 	/* Function incoming arguments may overlap with sibling call
4228 	   outgoing arguments and we cannot allow reordering of reads
4229 	   from function arguments with stores to outgoing arguments
4230 	   of sibling calls.  */
4231 	set_mem_alias_set (dest, 0);
4232     }
4233   emit_move_insn (dest, x);
4234 }
4235 
4236 /* Emit and annotate a single push insn.  */
4237 
4238 static void
4239 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4240 {
4241   poly_int64 delta, old_delta = stack_pointer_delta;
4242   rtx_insn *prev = get_last_insn ();
4243   rtx_insn *last;
4244 
4245   emit_single_push_insn_1 (mode, x, type);
4246 
4247   /* Adjust stack_pointer_delta to describe the situation after the push
4248      we just performed.  Note that we must do this after the push rather
4249      than before the push in case calculating X needs pushes and pops of
4250      its own (e.g. if calling __tls_get_addr).  The REG_ARGS_SIZE notes
4251      for such pushes and pops must not include the effect of the future
4252      push of X.  */
4253   stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4254 
4255   last = get_last_insn ();
4256 
4257   /* Notice the common case where we emitted exactly one insn.  */
4258   if (PREV_INSN (last) == prev)
4259     {
4260       add_args_size_note (last, stack_pointer_delta);
4261       return;
4262     }
4263 
4264   delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4265   gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4266 	      || known_eq (delta, old_delta));
4267 }
4268 #endif
4269 
4270 /* If reading SIZE bytes from X will end up reading from
4271    Y return the number of bytes that overlap.  Return -1
4272    if there is no overlap or -2 if we can't determine
4273    (for example when X and Y have different base registers).  */
4274 
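/* Small worked example with hypothetical addresses: for X = sp + 8,
   Y = sp + 12 and SIZE = 8, the read from X covers bytes
   [sp + 8, sp + 16), so the last 4 bytes come from Y and the result
   is 4.  */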
4275 static int
4276 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4277 {
4278   rtx tmp = plus_constant (Pmode, x, size);
4279   rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4280 
4281   if (!CONST_INT_P (sub))
4282     return -2;
4283 
4284   HOST_WIDE_INT val = INTVAL (sub);
4285 
4286   return IN_RANGE (val, 1, size) ? val : -1;
4287 }
4288 
4289 /* Generate code to push X onto the stack, assuming it has mode MODE and
4290    type TYPE.
4291    MODE is redundant except when X is a CONST_INT (since they don't
4292    carry mode info).
4293    SIZE is an rtx for the size of data to be copied (in bytes),
4294    needed only if X is BLKmode.
4295    Return true if successful.  May return false if asked to push a
4296    partial argument during a sibcall optimization (as specified by
4297    SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4298    to not overlap.
4299 
4300    ALIGN (in bits) is maximum alignment we can assume.
4301 
4302    If PARTIAL and REG are both nonzero, then copy that many of the first
4303    bytes of X into registers starting with REG, and push the rest of X.
4304    The amount of space pushed is decreased by PARTIAL bytes.
4305    REG must be a hard register in this case.
4306    If REG is zero but PARTIAL is not, take all other actions for an
4307    argument partially in registers, but do not actually load any
4308    registers.
4309 
4310    EXTRA is the amount in bytes of extra space to leave next to this arg.
4311    This is ignored if an argument block has already been allocated.
4312 
4313    On a machine that lacks real push insns, ARGS_ADDR is the address of
4314    the bottom of the argument block for this call.  We use indexing off there
4315    to store the arg.  On machines with push insns, ARGS_ADDR is 0 when a
4316    to store the arg.  On machines with push insns, ARGS_ADDR is 0 when an
4317 
4318    ARGS_SO_FAR is the size of args previously pushed for this call.
4319 
4320    REG_PARM_STACK_SPACE is nonzero if functions require stack space
4321    for arguments passed in registers.  If nonzero, it will be the number
4322    of bytes required.  */
4323 
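/* Sketch of the PARTIAL/REG interaction, assuming 4-byte words and a
   32-bit PARM_BOUNDARY: pushing a 12-byte BLKmode argument with
   PARTIAL == 8 and REG set loads the first two words into REG and
   REG + 1 and pushes only the remaining 4 bytes onto the stack.  */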
4324 bool
4325 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4326 		unsigned int align, int partial, rtx reg, poly_int64 extra,
4327 		rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4328 		rtx alignment_pad, bool sibcall_p)
4329 {
4330   rtx xinner;
4331   pad_direction stack_direction
4332     = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4333 
4334   /* Decide where to pad the argument: PAD_DOWNWARD for below,
4335      PAD_UPWARD for above, or PAD_NONE for don't pad it.
4336      Default is below for small data on big-endian machines; else above.  */
4337   pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4338 
4339   /* Invert direction if stack is post-decrement.
4340      FIXME: why?  */
4341   if (STACK_PUSH_CODE == POST_DEC)
4342     if (where_pad != PAD_NONE)
4343       where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4344 
4345   xinner = x;
4346 
4347   int nregs = partial / UNITS_PER_WORD;
4348   rtx *tmp_regs = NULL;
4349   int overlapping = 0;
4350 
4351   if (mode == BLKmode
4352       || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
4353     {
4354       /* Copy a block into the stack, entirely or partially.  */
4355 
4356       rtx temp;
4357       int used;
4358       int offset;
4359       int skip;
4360 
4361       offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4362       used = partial - offset;
4363 
4364       if (mode != BLKmode)
4365 	{
4366 	  /* A value is to be stored in an insufficiently aligned
4367 	     stack slot; copy via a suitably aligned slot if
4368 	     necessary.  */
4369 	  size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4370 	  if (!MEM_P (xinner))
4371 	    {
4372 	      temp = assign_temp (type, 1, 1);
4373 	      emit_move_insn (temp, xinner);
4374 	      xinner = temp;
4375 	    }
4376 	}
4377 
4378       gcc_assert (size);
4379 
4380       /* USED is now the # of bytes we need not copy to the stack
4381 	 because registers will take care of them.  */
4382 
4383       if (partial != 0)
4384 	xinner = adjust_address (xinner, BLKmode, used);
4385 
4386       /* If the partial register-part of the arg counts in its stack size,
4387 	 skip the part of stack space corresponding to the registers.
4388 	 Otherwise, start copying to the beginning of the stack space,
4389 	 by setting SKIP to 0.  */
4390       skip = (reg_parm_stack_space == 0) ? 0 : used;
4391 
4392 #ifdef PUSH_ROUNDING
4393       /* Do it with several push insns if that doesn't take lots of insns
4394 	 and if there is no difficulty with push insns that skip bytes
4395 	 on the stack for alignment purposes.  */
4396       if (args_addr == 0
4397 	  && PUSH_ARGS
4398 	  && CONST_INT_P (size)
4399 	  && skip == 0
4400 	  && MEM_ALIGN (xinner) >= align
4401 	  && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4402 	  /* Here we avoid the case of a structure whose weak alignment
4403 	     forces many pushes of a small amount of data,
4404 	     and such small pushes do rounding that causes trouble.  */
4405 	  && ((!targetm.slow_unaligned_access (word_mode, align))
4406 	      || align >= BIGGEST_ALIGNMENT
4407 	      || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4408 			   align / BITS_PER_UNIT))
4409 	  && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4410 	{
4411 	  /* Push padding now if padding above and stack grows down,
4412 	     or if padding below and stack grows up.
4413 	     But if space already allocated, this has already been done.  */
4414 	  if (maybe_ne (extra, 0)
4415 	      && args_addr == 0
4416 	      && where_pad != PAD_NONE
4417 	      && where_pad != stack_direction)
4418 	    anti_adjust_stack (gen_int_mode (extra, Pmode));
4419 
4420 	  move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
4421 			  RETURN_BEGIN);
4422 	}
4423       else
4424 #endif /* PUSH_ROUNDING  */
4425 	{
4426 	  rtx target;
4427 
4428 	  /* Otherwise make space on the stack and copy the data
4429 	     to the address of that space.  */
4430 
4431 	  /* Deduct words put into registers from the size we must copy.  */
4432 	  if (partial != 0)
4433 	    {
4434 	      if (CONST_INT_P (size))
4435 		size = GEN_INT (INTVAL (size) - used);
4436 	      else
4437 		size = expand_binop (GET_MODE (size), sub_optab, size,
4438 				     gen_int_mode (used, GET_MODE (size)),
4439 				     NULL_RTX, 0, OPTAB_LIB_WIDEN);
4440 	    }
4441 
4442 	  /* Get the address of the stack space.
4443 	     In this case, we do not deal with EXTRA separately.
4444 	     A single stack adjust will do.  */
4445 	  poly_int64 offset;
4446 	  if (! args_addr)
4447 	    {
4448 	      temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4449 	      extra = 0;
4450 	    }
4451 	  else if (poly_int_rtx_p (args_so_far, &offset))
4452 	    temp = memory_address (BLKmode,
4453 				   plus_constant (Pmode, args_addr,
4454 						  skip + offset));
4455 	  else
4456 	    temp = memory_address (BLKmode,
4457 				   plus_constant (Pmode,
4458 						  gen_rtx_PLUS (Pmode,
4459 								args_addr,
4460 								args_so_far),
4461 						  skip));
4462 
4463 	  if (!ACCUMULATE_OUTGOING_ARGS)
4464 	    {
4465 	      /* If the source is referenced relative to the stack pointer,
4466 		 copy it to another register to stabilize it.  We do not need
4467 		 to do this if we know that we won't be changing sp.  */
4468 
4469 	      if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4470 		  || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4471 		temp = copy_to_reg (temp);
4472 	    }
4473 
4474 	  target = gen_rtx_MEM (BLKmode, temp);
4475 
4476 	  /* We do *not* set_mem_attributes here, because incoming arguments
4477 	     may overlap with sibling call outgoing arguments and we cannot
4478 	     allow reordering of reads from function arguments with stores
4479 	     to outgoing arguments of sibling calls.  We do, however, want
4480 	     to record the alignment of the stack slot.  */
4481 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4482 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4483 	  set_mem_align (target, align);
4484 
4485 	  /* If part should go in registers and pushing to that part would
4486 	     overwrite some of the values that need to go into regs, load the
4487 	     overlapping values into temporary pseudos to be moved into the hard
4488 	     regs at the end after the stack pushing has completed.
4489 	     We cannot load them directly into the hard regs here because
4490 	     they can be clobbered by the block move expansions.
4491 	     See PR 65358.  */
4492 
4493 	  if (partial > 0 && reg != 0 && mode == BLKmode
4494 	      && GET_CODE (reg) != PARALLEL)
4495 	    {
4496 	      overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4497 	      if (overlapping > 0)
4498 	        {
4499 		  gcc_assert (overlapping % UNITS_PER_WORD == 0);
4500 		  overlapping /= UNITS_PER_WORD;
4501 
4502 		  tmp_regs = XALLOCAVEC (rtx, overlapping);
4503 
4504 		  for (int i = 0; i < overlapping; i++)
4505 		    tmp_regs[i] = gen_reg_rtx (word_mode);
4506 
4507 		  for (int i = 0; i < overlapping; i++)
4508 		    emit_move_insn (tmp_regs[i],
4509 				    operand_subword_force (target, i, mode));
4510 	        }
4511 	      else if (overlapping == -1)
4512 		overlapping = 0;
4513 	      /* Could not determine whether there is overlap.
4514 	         Fail the sibcall.  */
4515 	      else
4516 		{
4517 		  overlapping = 0;
4518 		  if (sibcall_p)
4519 		    return false;
4520 		}
4521 	    }
4522 	  emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4523 	}
4524     }
4525   else if (partial > 0)
4526     {
4527       /* Scalar partly in registers.  This case is only supported
4528 	 for fixed-width modes.  */
4529       int size = GET_MODE_SIZE (mode).to_constant ();
4530       size /= UNITS_PER_WORD;
4531       int i;
4532       int not_stack;
4533       /* # bytes of start of argument
4534 	 that we must make space for but need not store.  */
4535       int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4536       int args_offset = INTVAL (args_so_far);
4537       int skip;
4538 
4539       /* Push padding now if padding above and stack grows down,
4540 	 or if padding below and stack grows up.
4541 	 But if space already allocated, this has already been done.  */
4542       if (maybe_ne (extra, 0)
4543 	  && args_addr == 0
4544 	  && where_pad != PAD_NONE
4545 	  && where_pad != stack_direction)
4546 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4547 
4548       /* If we make space by pushing it, we might as well push
4549 	 the real data.  Otherwise, we can leave OFFSET nonzero
4550 	 and leave the space uninitialized.  */
4551       if (args_addr == 0)
4552 	offset = 0;
4553 
4554       /* Now NOT_STACK gets the number of words that we don't need to
4555 	 allocate on the stack.  Convert OFFSET to words too.  */
4556       not_stack = (partial - offset) / UNITS_PER_WORD;
4557       offset /= UNITS_PER_WORD;
4558 
4559       /* If the partial register-part of the arg counts in its stack size,
4560 	 skip the part of stack space corresponding to the registers.
4561 	 Otherwise, start copying to the beginning of the stack space,
4562 	 by setting SKIP to 0.  */
4563       skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4564 
4565       if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4566 	x = validize_mem (force_const_mem (mode, x));
4567 
4568       /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4569 	 SUBREGs of such registers are not allowed.  */
4570       if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4571 	   && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4572 	x = copy_to_reg (x);
4573 
4574       /* Loop over all the words allocated on the stack for this arg.  */
4575       /* We can do it by words, because any scalar bigger than a word
4576 	 has a size a multiple of a word.  */
4577       for (i = size - 1; i >= not_stack; i--)
4578 	if (i >= not_stack + offset)
4579 	  if (!emit_push_insn (operand_subword_force (x, i, mode),
4580 			  word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4581 			  0, args_addr,
4582 			  GEN_INT (args_offset + ((i - not_stack + skip)
4583 						  * UNITS_PER_WORD)),
4584 			  reg_parm_stack_space, alignment_pad, sibcall_p))
4585 	    return false;
4586     }
4587   else
4588     {
4589       rtx addr;
4590       rtx dest;
4591 
4592       /* Push padding now if padding above and stack grows down,
4593 	 or if padding below and stack grows up.
4594 	 But if space already allocated, this has already been done.  */
4595       if (maybe_ne (extra, 0)
4596 	  && args_addr == 0
4597 	  && where_pad != PAD_NONE
4598 	  && where_pad != stack_direction)
4599 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4600 
4601 #ifdef PUSH_ROUNDING
4602       if (args_addr == 0 && PUSH_ARGS)
4603 	emit_single_push_insn (mode, x, type);
4604       else
4605 #endif
4606 	{
4607 	  addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4608 	  dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4609 
4610 	  /* We do *not* set_mem_attributes here, because incoming arguments
4611 	     may overlap with sibling call outgoing arguments and we cannot
4612 	     allow reordering of reads from function arguments with stores
4613 	     to outgoing arguments of sibling calls.  We do, however, want
4614 	     to record the alignment of the stack slot.  */
4615 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4616 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4617 	  set_mem_align (dest, align);
4618 
4619 	  emit_move_insn (dest, x);
4620 	}
4621     }
4622 
4623   /* Move the partial arguments into the registers and any overlapping
4624      values that we moved into the pseudos in tmp_regs.  */
4625   if (partial > 0 && reg != 0)
4626     {
4627       /* Handle calls that pass values in multiple non-contiguous locations.
4628 	 The Irix 6 ABI has examples of this.  */
4629       if (GET_CODE (reg) == PARALLEL)
4630 	emit_group_load (reg, x, type, -1);
4631       else
4632         {
4633 	  gcc_assert (partial % UNITS_PER_WORD == 0);
4634 	  move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4635 
4636 	  for (int i = 0; i < overlapping; i++)
4637 	    emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4638 						    + nregs - overlapping + i),
4639 			    tmp_regs[i]);
4640 
4641 	}
4642     }
4643 
4644   if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4645     anti_adjust_stack (gen_int_mode (extra, Pmode));
4646 
4647   if (alignment_pad && args_addr == 0)
4648     anti_adjust_stack (alignment_pad);
4649 
4650   return true;
4651 }
4652 
4653 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4654    operations.  */
4655 
4656 static rtx
4657 get_subtarget (rtx x)
4658 {
4659   return (optimize
4660           || x == 0
4661 	   /* Only registers can be subtargets.  */
4662 	   || !REG_P (x)
4663 	   /* Don't use hard regs to avoid extending their life.  */
4664 	   || REGNO (x) < FIRST_PSEUDO_REGISTER
4665 	  ? 0 : x);
4666 }
4667 
4668 /* A subroutine of expand_assignment.  Optimize FIELD op= VAL, where
4669    FIELD is a bitfield.  Returns true if the optimization was successful,
4670    and there's nothing else to do.  */
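/* An illustrative case (hypothetical source, not taken from this file):

     struct S { unsigned lo : 27; unsigned flag : 1; unsigned hi : 4; } s;
     s.flag ^= 1;

   can be expanded as an XOR of the containing word with a shifted
   constant, avoiding a separate extract and insert of the bit field;
   the cases handled below are of that flavor.  */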
4671 
4672 static bool
4673 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
4674 				 poly_uint64 pbitpos,
4675 				 poly_uint64 pbitregion_start,
4676 				 poly_uint64 pbitregion_end,
4677 				 machine_mode mode1, rtx str_rtx,
4678 				 tree to, tree src, bool reverse)
4679 {
4680   /* str_mode is not guaranteed to be a scalar type.  */
4681   machine_mode str_mode = GET_MODE (str_rtx);
4682   unsigned int str_bitsize;
4683   tree op0, op1;
4684   rtx value, result;
4685   optab binop;
4686   gimple *srcstmt;
4687   enum tree_code code;
4688 
4689   unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4690   if (mode1 != VOIDmode
4691       || !pbitsize.is_constant (&bitsize)
4692       || !pbitpos.is_constant (&bitpos)
4693       || !pbitregion_start.is_constant (&bitregion_start)
4694       || !pbitregion_end.is_constant (&bitregion_end)
4695       || bitsize >= BITS_PER_WORD
4696       || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
4697       || str_bitsize > BITS_PER_WORD
4698       || TREE_SIDE_EFFECTS (to)
4699       || TREE_THIS_VOLATILE (to))
4700     return false;
4701 
4702   STRIP_NOPS (src);
4703   if (TREE_CODE (src) != SSA_NAME)
4704     return false;
4705   if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
4706     return false;
4707 
4708   srcstmt = get_gimple_for_ssa_name (src);
4709   if (!srcstmt
4710       || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
4711     return false;
4712 
4713   code = gimple_assign_rhs_code (srcstmt);
4714 
4715   op0 = gimple_assign_rhs1 (srcstmt);
4716 
4717   /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4718      to find its initialization.  Hopefully the initialization will
4719      be from a bitfield load.  */
4720   if (TREE_CODE (op0) == SSA_NAME)
4721     {
4722       gimple *op0stmt = get_gimple_for_ssa_name (op0);
4723 
4724       /* We want to eventually have OP0 be the same as TO, which
4725 	 should be a bitfield.  */
4726       if (!op0stmt
4727 	  || !is_gimple_assign (op0stmt)
4728 	  || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
4729 	return false;
4730       op0 = gimple_assign_rhs1 (op0stmt);
4731     }
4732 
4733   op1 = gimple_assign_rhs2 (srcstmt);
4734 
4735   if (!operand_equal_p (to, op0, 0))
4736     return false;
4737 
4738   if (MEM_P (str_rtx))
4739     {
4740       unsigned HOST_WIDE_INT offset1;
4741 
4742       if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
4743 	str_bitsize = BITS_PER_WORD;
4744 
4745       scalar_int_mode best_mode;
4746       if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
4747 			  MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
4748 	return false;
4749       str_mode = best_mode;
4750       str_bitsize = GET_MODE_BITSIZE (best_mode);
4751 
4752       offset1 = bitpos;
4753       bitpos %= str_bitsize;
4754       offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
4755       str_rtx = adjust_address (str_rtx, str_mode, offset1);
4756     }
4757   else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
4758     return false;
4759 
4760   /* If the bit field covers the whole REG/MEM, store_field
4761      will likely generate better code.  */
4762   if (bitsize >= str_bitsize)
4763     return false;
4764 
4765   /* We can't handle fields split across multiple entities.  */
4766   if (bitpos + bitsize > str_bitsize)
4767     return false;
4768 
4769   if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
4770     bitpos = str_bitsize - bitpos - bitsize;
4771 
4772   switch (code)
4773     {
4774     case PLUS_EXPR:
4775     case MINUS_EXPR:
4776       /* For now, just optimize the case of the topmost bitfield
4777 	 where we don't need to do any masking and also
4778 	 1 bit bitfields where xor can be used.
4779 	 We might win by one instruction for the other bitfields
4780 	 too if insv/extv instructions aren't used, so that
4781 	 can be added later.  */
4782       if ((reverse || bitpos + bitsize != str_bitsize)
4783 	  && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
4784 	break;
4785 
4786       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4787       value = convert_modes (str_mode,
4788 			     TYPE_MODE (TREE_TYPE (op1)), value,
4789 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
4790 
4791       /* We may be accessing data outside the field, which means
4792 	 we can alias adjacent data.  */
4793       if (MEM_P (str_rtx))
4794 	{
4795 	  str_rtx = shallow_copy_rtx (str_rtx);
4796 	  set_mem_alias_set (str_rtx, 0);
4797 	  set_mem_expr (str_rtx, 0);
4798 	}
4799 
4800       if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
4801 	{
4802 	  value = expand_and (str_mode, value, const1_rtx, NULL);
4803 	  binop = xor_optab;
4804 	}
4805       else
4806 	binop = code == PLUS_EXPR ? add_optab : sub_optab;
4807 
4808       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4809       if (reverse)
4810 	value = flip_storage_order (str_mode, value);
4811       result = expand_binop (str_mode, binop, str_rtx,
4812 			     value, str_rtx, 1, OPTAB_WIDEN);
4813       if (result != str_rtx)
4814 	emit_move_insn (str_rtx, result);
4815       return true;
4816 
4817     case BIT_IOR_EXPR:
4818     case BIT_XOR_EXPR:
4819       if (TREE_CODE (op1) != INTEGER_CST)
4820 	break;
4821       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4822       value = convert_modes (str_mode,
4823 			     TYPE_MODE (TREE_TYPE (op1)), value,
4824 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
4825 
4826       /* We may be accessing data outside the field, which means
4827 	 we can alias adjacent data.  */
4828       if (MEM_P (str_rtx))
4829 	{
4830 	  str_rtx = shallow_copy_rtx (str_rtx);
4831 	  set_mem_alias_set (str_rtx, 0);
4832 	  set_mem_expr (str_rtx, 0);
4833 	}
4834 
4835       binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
4836       if (bitpos + bitsize != str_bitsize)
4837 	{
4838 	  rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
4839 				   str_mode);
4840 	  value = expand_and (str_mode, value, mask, NULL_RTX);
4841 	}
4842       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4843       if (reverse)
4844 	value = flip_storage_order (str_mode, value);
4845       result = expand_binop (str_mode, binop, str_rtx,
4846 			     value, str_rtx, 1, OPTAB_WIDEN);
4847       if (result != str_rtx)
4848 	emit_move_insn (str_rtx, result);
4849       return true;
4850 
4851     default:
4852       break;
4853     }
4854 
4855   return false;
4856 }
4857 
4858 /* In the C++ memory model, consecutive bit fields in a structure are
4859    considered one memory location.
4860 
4861    Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
4862    returns the bit range of consecutive bits in which this COMPONENT_REF
4863    belongs.  The values are returned in *BITSTART and *BITEND.  *BITPOS
4864    and *OFFSET may be adjusted in the process.
4865 
4866    If the access does not need to be restricted, 0 is returned in both
4867    *BITSTART and *BITEND.  */
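/* Illustrative example (hypothetical layout): in

     struct S { char c; int f1 : 3; int f2 : 11; char d; };

   F1 and F2 typically share one DECL_BIT_FIELD_REPRESENTATIVE, so a
   store to F1 may be done as a read-modify-write of that representative
   but must not touch C or D; *BITSTART and *BITEND delimit the bits the
   store is allowed to cover.  */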
4868 
4869 void
4870 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
4871 	       poly_int64_pod *bitpos, tree *offset)
4872 {
4873   poly_int64 bitoffset;
4874   tree field, repr;
4875 
4876   gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
4877 
4878   field = TREE_OPERAND (exp, 1);
4879   repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
4880   /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
4881      need to limit the range we can access.  */
4882   if (!repr)
4883     {
4884       *bitstart = *bitend = 0;
4885       return;
4886     }
4887 
4888   /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
4889      part of a larger bit field, then the representative does not serve any
4890      useful purpose.  This can occur in Ada.  */
4891   if (handled_component_p (TREE_OPERAND (exp, 0)))
4892     {
4893       machine_mode rmode;
4894       poly_int64 rbitsize, rbitpos;
4895       tree roffset;
4896       int unsignedp, reversep, volatilep = 0;
4897       get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
4898 			   &roffset, &rmode, &unsignedp, &reversep,
4899 			   &volatilep);
4900       if (!multiple_p (rbitpos, BITS_PER_UNIT))
4901 	{
4902 	  *bitstart = *bitend = 0;
4903 	  return;
4904 	}
4905     }
4906 
4907   /* Compute the adjustment to bitpos from the offset of the field
4908      relative to the representative.  DECL_FIELD_OFFSET of field and
4909      repr are the same by construction if they are not constants,
4910      see finish_bitfield_layout.  */
4911   poly_uint64 field_offset, repr_offset;
4912   if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
4913       && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
4914     bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
4915   else
4916     bitoffset = 0;
4917   bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
4918 		- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
4919 
4920   /* If the adjustment is larger than bitpos, we would have a negative bit
4921      position for the lower bound and this may wreak havoc later.  Adjust
4922      offset and bitpos to make the lower bound non-negative in that case.  */
4923   if (maybe_gt (bitoffset, *bitpos))
4924     {
4925       poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
4926       poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
4927 
4928       *bitpos += adjust_bits;
4929       if (*offset == NULL_TREE)
4930 	*offset = size_int (-adjust_bytes);
4931       else
4932 	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
4933       *bitstart = 0;
4934     }
4935   else
4936     *bitstart = *bitpos - bitoffset;
4937 
4938   *bitend = *bitstart + tree_to_poly_uint64 (DECL_SIZE (repr)) - 1;
4939 }
4940 
4941 /* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
4942    in memory and has non-BLKmode.  DECL_RTL must not be a MEM; if
4943    DECL_RTL was not set yet, return NORTL.  */
4944 
4945 static inline bool
4946 addr_expr_of_non_mem_decl_p_1 (tree addr, bool nortl)
4947 {
4948   if (TREE_CODE (addr) != ADDR_EXPR)
4949     return false;
4950 
4951   tree base = TREE_OPERAND (addr, 0);
4952 
4953   if (!DECL_P (base)
4954       || TREE_ADDRESSABLE (base)
4955       || DECL_MODE (base) == BLKmode)
4956     return false;
4957 
4958   if (!DECL_RTL_SET_P (base))
4959     return nortl;
4960 
4961   return (!MEM_P (DECL_RTL (base)));
4962 }
4963 
4964 /* Returns true if the MEM_REF REF refers to an object that does not
4965    reside in memory and has non-BLKmode.  */
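/* For instance (an illustration, not taken from this file), a store such
   as MEM[(int *)&x] = 1 where X is a local, non-addressable variable
   whose DECL_RTL is a pseudo register refers to an object that never
   lives in memory.  */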
4966 
4967 static inline bool
4968 mem_ref_refers_to_non_mem_p (tree ref)
4969 {
4970   tree base = TREE_OPERAND (ref, 0);
4971   return addr_expr_of_non_mem_decl_p_1 (base, false);
4972 }
4973 
4974 /* Expand an assignment that stores the value of FROM into TO.  If NONTEMPORAL
4975    is true, try generating a nontemporal store.  */
4976 
4977 void
4978 expand_assignment (tree to, tree from, bool nontemporal)
4979 {
4980   rtx to_rtx = 0;
4981   rtx result;
4982   machine_mode mode;
4983   unsigned int align;
4984   enum insn_code icode;
4985 
4986   /* Don't crash if the lhs of the assignment was erroneous.  */
4987   if (TREE_CODE (to) == ERROR_MARK)
4988     {
4989       expand_normal (from);
4990       return;
4991     }
4992 
4993   /* Optimize away no-op moves without side-effects.  */
4994   if (operand_equal_p (to, from, 0))
4995     return;
4996 
4997   /* Handle misaligned stores.  */
4998   mode = TYPE_MODE (TREE_TYPE (to));
4999   if ((TREE_CODE (to) == MEM_REF
5000        || TREE_CODE (to) == TARGET_MEM_REF)
5001       && mode != BLKmode
5002       && !mem_ref_refers_to_non_mem_p (to)
5003       && ((align = get_object_alignment (to))
5004 	  < GET_MODE_ALIGNMENT (mode))
5005       && (((icode = optab_handler (movmisalign_optab, mode))
5006 	   != CODE_FOR_nothing)
5007 	  || targetm.slow_unaligned_access (mode, align)))
5008     {
5009       rtx reg, mem;
5010 
5011       reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
5012       reg = force_not_mem (reg);
5013       mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5014       if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
5015 	reg = flip_storage_order (mode, reg);
5016 
5017       if (icode != CODE_FOR_nothing)
5018 	{
5019 	  struct expand_operand ops[2];
5020 
5021 	  create_fixed_operand (&ops[0], mem);
5022 	  create_input_operand (&ops[1], reg, mode);
5023 	  /* The movmisalign<mode> pattern cannot fail, else the assignment
5024 	     would silently be omitted.  */
5025 	  expand_insn (icode, 2, ops);
5026 	}
5027       else
5028 	store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5029 			 false);
5030       return;
5031     }
5032 
5033   /* Assignment of a structure component needs special treatment
5034      if the structure component's rtx is not simply a MEM.
5035      Assignment of an array element at a constant index, and assignment of
5036      an array element in an unaligned packed structure field, has the same
5037      problem.  Same for (partially) storing into a non-memory object.  */
5038   if (handled_component_p (to)
5039       || (TREE_CODE (to) == MEM_REF
5040 	  && (REF_REVERSE_STORAGE_ORDER (to)
5041 	      || mem_ref_refers_to_non_mem_p (to)))
5042       || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5043     {
5044       machine_mode mode1;
5045       poly_int64 bitsize, bitpos;
5046       poly_uint64 bitregion_start = 0;
5047       poly_uint64 bitregion_end = 0;
5048       tree offset;
5049       int unsignedp, reversep, volatilep = 0;
5050       tree tem;
5051 
5052       push_temp_slots ();
5053       tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5054 				 &unsignedp, &reversep, &volatilep);
5055 
5056       /* Make sure bitpos is not negative, it can wreak havoc later.  */
5057       if (maybe_lt (bitpos, 0))
5058 	{
5059 	  gcc_assert (offset == NULL_TREE);
5060 	  offset = size_int (bits_to_bytes_round_down (bitpos));
5061 	  bitpos = num_trailing_bits (bitpos);
5062 	}
5063 
5064       if (TREE_CODE (to) == COMPONENT_REF
5065 	  && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5066 	get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5067       /* The C++ memory model naturally applies to byte-aligned fields.
5068 	 However, if we do not have a DECL_BIT_FIELD_TYPE and BITPOS or
5069 	 BITSIZE is not byte-aligned, there is no need to limit the range
5070 	 we can access.  This can occur with packed structures in Ada.  */
5071       else if (maybe_gt (bitsize, 0)
5072 	       && multiple_p (bitsize, BITS_PER_UNIT)
5073 	       && multiple_p (bitpos, BITS_PER_UNIT))
5074 	{
5075 	  bitregion_start = bitpos;
5076 	  bitregion_end = bitpos + bitsize - 1;
5077 	}
5078 
5079       to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5080 
5081       /* If the field has a mode, we want to access it in the
5082 	 field's mode, not the computed mode.
5083 	 If a MEM has VOIDmode (external with incomplete type),
5084 	 use BLKmode for it instead.  */
5085       if (MEM_P (to_rtx))
5086 	{
5087 	  if (mode1 != VOIDmode)
5088 	    to_rtx = adjust_address (to_rtx, mode1, 0);
5089 	  else if (GET_MODE (to_rtx) == VOIDmode)
5090 	    to_rtx = adjust_address (to_rtx, BLKmode, 0);
5091 	}
5092 
5093       if (offset != 0)
5094 	{
5095 	  machine_mode address_mode;
5096 	  rtx offset_rtx;
5097 
5098 	  if (!MEM_P (to_rtx))
5099 	    {
5100 	      /* We can get constant negative offsets into arrays with broken
5101 		 user code.  Translate this to a trap instead of ICEing.  */
5102 	      gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5103 	      expand_builtin_trap ();
5104 	      to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5105 	    }
5106 
5107 	  offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5108 	  address_mode = get_address_mode (to_rtx);
5109 	  if (GET_MODE (offset_rtx) != address_mode)
5110 	    {
5111 		/* We cannot be sure that the RTL in offset_rtx is valid outside
5112 		   of a memory address context, so force it into a register
5113 		   before attempting to convert it to the desired mode.  */
5114 	      offset_rtx = force_operand (offset_rtx, NULL_RTX);
5115 	      offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5116 	    }
5117 
5118 	  /* If we have an expression in OFFSET_RTX and a non-zero
5119 	     byte offset in BITPOS, adding the byte offset before the
5120 	     OFFSET_RTX results in better intermediate code, which makes
5121 	     later rtl optimization passes perform better.
5122 
5123 	     We prefer intermediate code like this:
5124 
5125 	     r124:DI=r123:DI+0x18
5126 	     [r124:DI]=r121:DI
5127 
5128 	     ... instead of ...
5129 
5130 	     r124:DI=r123:DI+0x10
5131 	     [r124:DI+0x8]=r121:DI
5132 
5133 	     This is only done for aligned data values, as these can
5134 	     be expected to result in single move instructions.  */
5135 	  poly_int64 bytepos;
5136 	  if (mode1 != VOIDmode
5137 	      && maybe_ne (bitpos, 0)
5138 	      && maybe_gt (bitsize, 0)
5139 	      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5140 	      && multiple_p (bitpos, bitsize)
5141 	      && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5142 	      && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5143 	    {
5144 	      to_rtx = adjust_address (to_rtx, mode1, bytepos);
5145 	      bitregion_start = 0;
5146 	      if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5147 		bitregion_end -= bitpos;
5148 	      bitpos = 0;
5149 	    }
5150 
5151 	  to_rtx = offset_address (to_rtx, offset_rtx,
5152 				   highest_pow2_factor_for_target (to,
5153 				   				   offset));
5154 	}
5155 
5156       /* No action is needed if the target is not a memory and the field
5157 	 lies completely outside that target.  This can occur if the source
5158 	 code contains an out-of-bounds access to a small array.  */
5159       if (!MEM_P (to_rtx)
5160 	  && GET_MODE (to_rtx) != BLKmode
5161 	  && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5162 	{
5163 	  expand_normal (from);
5164 	  result = NULL;
5165 	}
5166       /* Handle expand_expr of a complex value returning a CONCAT.  */
5167       else if (GET_CODE (to_rtx) == CONCAT)
5168 	{
5169 	  machine_mode to_mode = GET_MODE (to_rtx);
5170 	  gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5171 	  poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5172 	  unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5173 	  if (TYPE_MODE (TREE_TYPE (from)) == to_mode
5174 	      && known_eq (bitpos, 0)
5175 	      && known_eq (bitsize, mode_bitsize))
5176 	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
5177 	  else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
5178 		   && known_eq (bitsize, inner_bitsize)
5179 		   && (known_eq (bitpos, 0)
5180 		       || known_eq (bitpos, inner_bitsize)))
5181 	    result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5182 				 false, nontemporal, reversep);
5183 	  else if (known_le (bitpos + bitsize, inner_bitsize))
5184 	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5185 				  bitregion_start, bitregion_end,
5186 				  mode1, from, get_alias_set (to),
5187 				  nontemporal, reversep);
5188 	  else if (known_ge (bitpos, inner_bitsize))
5189 	    result = store_field (XEXP (to_rtx, 1), bitsize,
5190 				  bitpos - inner_bitsize,
5191 				  bitregion_start, bitregion_end,
5192 				  mode1, from, get_alias_set (to),
5193 				  nontemporal, reversep);
5194 	  else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5195 	    {
5196 	      result = expand_normal (from);
5197 	      if (GET_CODE (result) == CONCAT)
5198 		{
5199 		  to_mode = GET_MODE_INNER (to_mode);
5200 		  machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5201 		  rtx from_real
5202 		    = simplify_gen_subreg (to_mode, XEXP (result, 0),
5203 					   from_mode, 0);
5204 		  rtx from_imag
5205 		    = simplify_gen_subreg (to_mode, XEXP (result, 1),
5206 					   from_mode, 0);
5207 		  if (!from_real || !from_imag)
5208 		    goto concat_store_slow;
5209 		  emit_move_insn (XEXP (to_rtx, 0), from_real);
5210 		  emit_move_insn (XEXP (to_rtx, 1), from_imag);
5211 		}
5212 	      else
5213 		{
5214 		  rtx from_rtx;
5215 		  if (MEM_P (result))
5216 		    from_rtx = change_address (result, to_mode, NULL_RTX);
5217 		  else
5218 		    from_rtx
5219 		      = simplify_gen_subreg (to_mode, result,
5220 					     TYPE_MODE (TREE_TYPE (from)), 0);
5221 		  if (from_rtx)
5222 		    {
5223 		      emit_move_insn (XEXP (to_rtx, 0),
5224 				      read_complex_part (from_rtx, false));
5225 		      emit_move_insn (XEXP (to_rtx, 1),
5226 				      read_complex_part (from_rtx, true));
5227 		    }
5228 		  else
5229 		    {
5230 		      machine_mode to_mode
5231 			= GET_MODE_INNER (GET_MODE (to_rtx));
5232 		      rtx from_real
5233 			= simplify_gen_subreg (to_mode, result,
5234 					       TYPE_MODE (TREE_TYPE (from)),
5235 					       0);
5236 		      rtx from_imag
5237 			= simplify_gen_subreg (to_mode, result,
5238 					       TYPE_MODE (TREE_TYPE (from)),
5239 					       GET_MODE_SIZE (to_mode));
5240 		      if (!from_real || !from_imag)
5241 			goto concat_store_slow;
5242 		      emit_move_insn (XEXP (to_rtx, 0), from_real);
5243 		      emit_move_insn (XEXP (to_rtx, 1), from_imag);
5244 		    }
5245 		}
5246 	    }
5247 	  else
5248 	    {
5249 	    concat_store_slow:;
5250 	      rtx temp = assign_stack_temp (to_mode,
5251 					    GET_MODE_SIZE (GET_MODE (to_rtx)));
5252 	      write_complex_part (temp, XEXP (to_rtx, 0), false);
5253 	      write_complex_part (temp, XEXP (to_rtx, 1), true);
5254 	      result = store_field (temp, bitsize, bitpos,
5255 				    bitregion_start, bitregion_end,
5256 				    mode1, from, get_alias_set (to),
5257 				    nontemporal, reversep);
5258 	      emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5259 	      emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5260 	    }
5261 	}
5262       /* For calls to functions returning variable length structures, if TO_RTX
5263 	 is not a MEM, go through a MEM because we must not create temporaries
5264 	 of the VLA type.  */
5265       else if (!MEM_P (to_rtx)
5266 	       && TREE_CODE (from) == CALL_EXPR
5267 	       && COMPLETE_TYPE_P (TREE_TYPE (from))
5268 	       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
5269 	{
5270 	  rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5271 					GET_MODE_SIZE (GET_MODE (to_rtx)));
5272 	  result = store_field (temp, bitsize, bitpos, bitregion_start,
5273 				bitregion_end, mode1, from, get_alias_set (to),
5274 				nontemporal, reversep);
5275 	  emit_move_insn (to_rtx, temp);
5276 	}
5277       else
5278 	{
5279 	  if (MEM_P (to_rtx))
5280 	    {
5281 	      /* If the field is at offset zero, we could have been given the
5282 		 DECL_RTX of the parent struct.  Don't munge it.  */
5283 	      to_rtx = shallow_copy_rtx (to_rtx);
5284 	      set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5285 	      if (volatilep)
5286 		MEM_VOLATILE_P (to_rtx) = 1;
5287 	    }
5288 
5289 	  gcc_checking_assert (known_ge (bitpos, 0));
5290 	  if (optimize_bitfield_assignment_op (bitsize, bitpos,
5291 					       bitregion_start, bitregion_end,
5292 					       mode1, to_rtx, to, from,
5293 					       reversep))
5294 	    result = NULL;
5295 	  else
5296 	    result = store_field (to_rtx, bitsize, bitpos,
5297 				  bitregion_start, bitregion_end,
5298 				  mode1, from, get_alias_set (to),
5299 				  nontemporal, reversep);
5300 	}
5301 
5302       if (result)
5303 	preserve_temp_slots (result);
5304       pop_temp_slots ();
5305       return;
5306     }
5307 
5308   /* If the rhs is a function call and its value is not an aggregate,
5309      call the function before we start to compute the lhs.
5310      This is needed for correct code for cases such as
5311      val = setjmp (buf) on machines where reference to val
5312      requires loading up part of an address in a separate insn.
5313 
5314      Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5315      since it might be a promoted variable where the zero- or sign-extension
5316      needs to be done.  Handling this in the normal way is safe because no
5317      computation is done before the call.  The same is true for SSA names.  */
5318   if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5319       && COMPLETE_TYPE_P (TREE_TYPE (from))
5320       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5321       && ! (((VAR_P (to)
5322 	      || TREE_CODE (to) == PARM_DECL
5323 	      || TREE_CODE (to) == RESULT_DECL)
5324 	     && REG_P (DECL_RTL (to)))
5325 	    || TREE_CODE (to) == SSA_NAME))
5326     {
5327       rtx value;
5328 
5329       push_temp_slots ();
5330       value = expand_normal (from);
5331 
5332       if (to_rtx == 0)
5333 	to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5334 
5335       /* Handle calls that return values in multiple non-contiguous locations.
5336 	 The Irix 6 ABI has examples of this.  */
5337       if (GET_CODE (to_rtx) == PARALLEL)
5338 	{
5339 	  if (GET_CODE (value) == PARALLEL)
5340 	    emit_group_move (to_rtx, value);
5341 	  else
5342 	    emit_group_load (to_rtx, value, TREE_TYPE (from),
5343 			     int_size_in_bytes (TREE_TYPE (from)));
5344 	}
5345       else if (GET_CODE (value) == PARALLEL)
5346 	emit_group_store (to_rtx, value, TREE_TYPE (from),
5347 			  int_size_in_bytes (TREE_TYPE (from)));
5348       else if (GET_MODE (to_rtx) == BLKmode)
5349 	{
5350 	  /* Handle calls that return BLKmode values in registers.  */
5351 	  if (REG_P (value))
5352 	    copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5353 	  else
5354 	    emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5355 	}
5356       else
5357 	{
5358 	  if (POINTER_TYPE_P (TREE_TYPE (to)))
5359 	    value = convert_memory_address_addr_space
5360 	      (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5361 	       TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5362 
5363 	  emit_move_insn (to_rtx, value);
5364 	}
5365 
5366       preserve_temp_slots (to_rtx);
5367       pop_temp_slots ();
5368       return;
5369     }
5370 
5371   /* Ordinary treatment.  Expand TO to get a REG or MEM rtx.  */
5372   to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5373 
5374   /* Don't move directly into a return register.  */
5375   if (TREE_CODE (to) == RESULT_DECL
5376       && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5377     {
5378       rtx temp;
5379 
5380       push_temp_slots ();
5381 
5382       /* If the source is itself a return value, it still is in a pseudo at
5383 	 this point so we can move it back to the return register directly.  */
5384       if (REG_P (to_rtx)
5385 	  && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5386 	  && TREE_CODE (from) != CALL_EXPR)
5387 	temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5388       else
5389 	temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5390 
5391       /* Handle calls that return values in multiple non-contiguous locations.
5392 	 The Irix 6 ABI has examples of this.  */
5393       if (GET_CODE (to_rtx) == PARALLEL)
5394 	{
5395 	  if (GET_CODE (temp) == PARALLEL)
5396 	    emit_group_move (to_rtx, temp);
5397 	  else
5398 	    emit_group_load (to_rtx, temp, TREE_TYPE (from),
5399 			     int_size_in_bytes (TREE_TYPE (from)));
5400 	}
5401       else if (temp)
5402 	emit_move_insn (to_rtx, temp);
5403 
5404       preserve_temp_slots (to_rtx);
5405       pop_temp_slots ();
5406       return;
5407     }
5408 
5409   /* In case we are returning the contents of an object which overlaps
5410      the place the value is being stored, use a safe function when copying
5411      a value through a pointer into a structure value return block.  */
5412   if (TREE_CODE (to) == RESULT_DECL
5413       && TREE_CODE (from) == INDIRECT_REF
5414       && ADDR_SPACE_GENERIC_P
5415 	   (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5416       && refs_may_alias_p (to, from)
5417       && cfun->returns_struct
5418       && !cfun->returns_pcc_struct)
5419     {
5420       rtx from_rtx, size;
5421 
5422       push_temp_slots ();
5423       size = expr_size (from);
5424       from_rtx = expand_normal (from);
5425 
5426       emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5427 
5428       preserve_temp_slots (to_rtx);
5429       pop_temp_slots ();
5430       return;
5431     }
5432 
5433   /* Compute FROM and store the value in the rtx we got.  */
5434 
5435   push_temp_slots ();
5436   result = store_expr (from, to_rtx, 0, nontemporal, false);
5437   preserve_temp_slots (result);
5438   pop_temp_slots ();
5439   return;
5440 }
5441 
5442 /* Emits nontemporal store insn that moves FROM to TO.  Returns true if this
5443    succeeded, false otherwise.  */
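/* Usage note: store_expr below tries this when a store is flagged
   nontemporal; success depends on the target providing a storent<mode>
   pattern (typically a streaming-store instruction), otherwise the
   caller falls back to an ordinary store.  */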
5444 
5445 bool
5446 emit_storent_insn (rtx to, rtx from)
5447 {
5448   struct expand_operand ops[2];
5449   machine_mode mode = GET_MODE (to);
5450   enum insn_code code = optab_handler (storent_optab, mode);
5451 
5452   if (code == CODE_FOR_nothing)
5453     return false;
5454 
5455   create_fixed_operand (&ops[0], to);
5456   create_input_operand (&ops[1], from, mode);
5457   return maybe_expand_insn (code, 2, ops);
5458 }
5459 
5460 /* Helper function for store_expr storing of STRING_CST.  */
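/* For example (illustrative only): with STR being the STRING_CST
   "example" (TREE_STRING_LENGTH 8, counting the terminating nul),
   reading a 4-byte chunk at offset 6 copies the remaining bytes 'e' and
   '\0' into a local buffer, zero-fills the other two and hands the
   buffer to c_readstr; chunks lying entirely within the string are read
   directly from TREE_STRING_POINTER.  */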
5461 
5462 static rtx
5463 string_cst_read_str (void *data, HOST_WIDE_INT offset, scalar_int_mode mode)
5464 {
5465   tree str = (tree) data;
5466 
5467   gcc_assert (offset >= 0);
5468   if (offset >= TREE_STRING_LENGTH (str))
5469     return const0_rtx;
5470 
5471   if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
5472       > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
5473     {
5474       char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
5475       size_t l = TREE_STRING_LENGTH (str) - offset;
5476       memcpy (p, TREE_STRING_POINTER (str) + offset, l);
5477       memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
5478       return c_readstr (p, mode, false);
5479     }
5480 
5481   return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
5482 }
5483 
5484 /* Generate code for computing expression EXP,
5485    and storing the value into TARGET.
5486 
5487    If the mode is BLKmode then we may return TARGET itself.
5488    It turns out that in BLKmode it doesn't cause a problem,
5489    because C has no operators that could combine two different
5490    assignments into the same BLKmode object with different values
5491    with no sequence point.  Will other languages need this to
5492    be more thorough?
5493 
5494    If CALL_PARAM_P is nonzero, this is a store into a call param on the
5495    stack, and block moves may need to be treated specially.
5496 
5497    If NONTEMPORAL is true, try using a nontemporal store instruction.
5498 
5499    If REVERSE is true, the store is to be done in reverse order.  */
5500 
5501 rtx
5502 store_expr (tree exp, rtx target, int call_param_p,
5503 	    bool nontemporal, bool reverse)
5504 {
5505   rtx temp;
5506   rtx alt_rtl = NULL_RTX;
5507   location_t loc = curr_insn_location ();
5508 
5509   if (VOID_TYPE_P (TREE_TYPE (exp)))
5510     {
5511       /* C++ can generate ?: expressions with a throw expression in one
5512 	 branch and an rvalue in the other. Here, we resolve attempts to
5513 	 store the throw expression's nonexistent result.  */
5514       gcc_assert (!call_param_p);
5515       expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5516       return NULL_RTX;
5517     }
5518   if (TREE_CODE (exp) == COMPOUND_EXPR)
5519     {
5520       /* Perform first part of compound expression, then assign from second
5521 	 part.  */
5522       expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5523 		   call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5524       return store_expr (TREE_OPERAND (exp, 1), target,
5525 				     call_param_p, nontemporal, reverse);
5526     }
5527   else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5528     {
5529       /* For conditional expression, get safe form of the target.  Then
5530 	 test the condition, doing the appropriate assignment on either
5531 	 side.  This avoids the creation of unnecessary temporaries.
5532 	 For non-BLKmode, it is more efficient not to do this.  */
5533 
5534       rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5535 
5536       do_pending_stack_adjust ();
5537       NO_DEFER_POP;
5538       jumpifnot (TREE_OPERAND (exp, 0), lab1,
5539 		 profile_probability::uninitialized ());
5540       store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
5541 		  nontemporal, reverse);
5542       emit_jump_insn (targetm.gen_jump (lab2));
5543       emit_barrier ();
5544       emit_label (lab1);
5545       store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
5546 		  nontemporal, reverse);
5547       emit_label (lab2);
5548       OK_DEFER_POP;
5549 
5550       return NULL_RTX;
5551     }
5552   else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5553     /* If this is a scalar in a register that is stored in a wider mode
5554        than the declared mode, compute the result into its declared mode
5555        and then convert to the wider mode.  Our value is the computed
5556        expression.  */
5557     {
5558       rtx inner_target = 0;
5559       scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5560       scalar_int_mode inner_mode = subreg_promoted_mode (target);
5561 
5562       /* We can do the conversion inside EXP, which will often result
5563 	 in some optimizations.  Do the conversion in two steps: first
5564 	 change the signedness, if needed, then the extend.  But don't
5565 	 do this if the type of EXP is a subtype of something else
5566 	 since then the conversion might involve more than just
5567 	 converting modes.  */
5568       if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5569 	  && TREE_TYPE (TREE_TYPE (exp)) == 0
5570 	  && GET_MODE_PRECISION (outer_mode)
5571 	     == TYPE_PRECISION (TREE_TYPE (exp)))
5572 	{
5573 	  if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5574 					  TYPE_UNSIGNED (TREE_TYPE (exp))))
5575 	    {
5576 	      /* Some types, e.g. Fortran's logical*4, won't have a signed
5577 		 version, so use the mode instead.  */
5578 	      tree ntype
5579 		= (signed_or_unsigned_type_for
5580 		   (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5581 	      if (ntype == NULL)
5582 		ntype = lang_hooks.types.type_for_mode
5583 		  (TYPE_MODE (TREE_TYPE (exp)),
5584 		   SUBREG_PROMOTED_SIGN (target));
5585 
5586 	      exp = fold_convert_loc (loc, ntype, exp);
5587 	    }
5588 
5589 	  exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5590 				  (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5591 				  exp);
5592 
5593 	  inner_target = SUBREG_REG (target);
5594 	}
5595 
5596       temp = expand_expr (exp, inner_target, VOIDmode,
5597 			  call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5598 
5599 
5600       /* If TEMP is a VOIDmode constant, use convert_modes to make
5601 	 sure that we properly convert it.  */
5602       if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5603 	{
5604 	  temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5605 				temp, SUBREG_PROMOTED_SIGN (target));
5606 	  temp = convert_modes (inner_mode, outer_mode, temp,
5607 				SUBREG_PROMOTED_SIGN (target));
5608 	}
5609 
5610       convert_move (SUBREG_REG (target), temp,
5611 		    SUBREG_PROMOTED_SIGN (target));
5612 
5613       return NULL_RTX;
5614     }
5615   else if ((TREE_CODE (exp) == STRING_CST
5616 	    || (TREE_CODE (exp) == MEM_REF
5617 		&& TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
5618 		&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5619 		   == STRING_CST
5620 		&& integer_zerop (TREE_OPERAND (exp, 1))))
5621 	   && !nontemporal && !call_param_p
5622 	   && MEM_P (target))
5623     {
5624       /* Optimize initialization of an array with a STRING_CST.  */
5625       HOST_WIDE_INT exp_len, str_copy_len;
5626       rtx dest_mem;
5627       tree str = TREE_CODE (exp) == STRING_CST
5628 		 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5629 
5630       exp_len = int_expr_size (exp);
5631       if (exp_len <= 0)
5632 	goto normal_expr;
5633 
5634       if (TREE_STRING_LENGTH (str) <= 0)
5635 	goto normal_expr;
5636 
5637       if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
5638 			       MEM_ALIGN (target), false))
5639 	{
5640 	  store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
5641 			   MEM_ALIGN (target), false, RETURN_BEGIN);
5642 	  return NULL_RTX;
5643 	}
5644 
5645       str_copy_len = TREE_STRING_LENGTH (str);
5646       if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
5647 	{
5648 	  str_copy_len += STORE_MAX_PIECES - 1;
5649 	  str_copy_len &= ~(STORE_MAX_PIECES - 1);
5650 	}
5651       if (str_copy_len >= exp_len)
5652 	goto normal_expr;
5653 
5654       if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
5655 				(void *) str, MEM_ALIGN (target), false))
5656 	goto normal_expr;
5657 
5658       dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
5659 				  (void *) str, MEM_ALIGN (target), false,
5660 				  RETURN_END);
5661       clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
5662 				       exp_len - str_copy_len),
5663 		     GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
5664       return NULL_RTX;
5665     }
5666   else
5667     {
5668       rtx tmp_target;
5669 
5670   normal_expr:
5671       /* If we want to use a nontemporal or a reverse order store, force the
5672 	 value into a register first.  */
5673       tmp_target = nontemporal || reverse ? NULL_RTX : target;
5674       temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
5675 			       (call_param_p
5676 				? EXPAND_STACK_PARM : EXPAND_NORMAL),
5677 			       &alt_rtl, false);
5678     }
5679 
5680   /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5681      the same as that of TARGET, adjust the constant.  This is needed, for
5682      example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5683      only a word-sized value.  */
5684   if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
5685       && TREE_CODE (exp) != ERROR_MARK
5686       && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
5687     {
5688       if (GET_MODE_CLASS (GET_MODE (target))
5689 	  != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
5690 	  && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
5691 		       GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
5692 	{
5693 	  rtx t = simplify_gen_subreg (GET_MODE (target), temp,
5694 				       TYPE_MODE (TREE_TYPE (exp)), 0);
5695 	  if (t)
5696 	    temp = t;
5697 	}
5698       if (GET_MODE (temp) == VOIDmode)
5699 	temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
5700 			      temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5701     }
5702 
5703   /* If value was not generated in the target, store it there.
5704      Convert the value to TARGET's type first if necessary and emit the
5705      pending incrementations that have been queued when expanding EXP.
5706      Note that we cannot emit the whole queue blindly because this will
5707      effectively disable the POST_INC optimization later.
5708 
5709      If TEMP and TARGET compare equal according to rtx_equal_p, but
5710      one or both of them are volatile memory refs, we have to distinguish
5711      two cases:
5712      - expand_expr has used TARGET.  In this case, we must not generate
5713        another copy.  This can be detected by TARGET being equal according
5714        to == .
5715      - expand_expr has not used TARGET - that means that the source just
5716        happens to have the same RTX form.  Since temp will have been created
5717        by expand_expr, it will compare unequal according to == .
5718        We must generate a copy in this case, to reach the correct number
5719        of volatile memory references.  */
5720 
5721   if ((! rtx_equal_p (temp, target)
5722        || (temp != target && (side_effects_p (temp)
5723 			      || side_effects_p (target))))
5724       && TREE_CODE (exp) != ERROR_MARK
5725       /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5726 	 but TARGET is not valid memory reference, TEMP will differ
5727 	 from TARGET although it is really the same location.  */
5728       && !(alt_rtl
5729 	   && rtx_equal_p (alt_rtl, target)
5730 	   && !side_effects_p (alt_rtl)
5731 	   && !side_effects_p (target))
5732       /* If there's nothing to copy, don't bother.  Don't call
5733 	 expr_size unless necessary, because the expr_size hook of some
5734 	 front-ends (C++) must not be given objects that are not
5735 	 supposed to be bit-copied or bit-initialized.  */
5736       && expr_size (exp) != const0_rtx)
5737     {
5738       if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
5739 	{
5740 	  if (GET_MODE (target) == BLKmode)
5741 	    {
5742 	      /* Handle calls that return BLKmode values in registers.  */
5743 	      if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
5744 		copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
5745 	      else
5746 		store_bit_field (target,
5747 				 INTVAL (expr_size (exp)) * BITS_PER_UNIT,
5748 				 0, 0, 0, GET_MODE (temp), temp, reverse);
5749 	    }
5750 	  else
5751 	    convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5752 	}
5753 
5754       else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
5755 	{
5756 	  /* Handle copying a string constant into an array.  The string
5757 	     constant may be shorter than the array.  So copy just the string's
5758 	     actual length, and clear the rest.  First get the size of the data
5759 	     type of the string, which is actually the size of the target.  */
5760 	  rtx size = expr_size (exp);
5761 
5762 	  if (CONST_INT_P (size)
5763 	      && INTVAL (size) < TREE_STRING_LENGTH (exp))
5764 	    emit_block_move (target, temp, size,
5765 			     (call_param_p
5766 			      ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5767 	  else
5768 	    {
5769 	      machine_mode pointer_mode
5770 		= targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
5771 	      machine_mode address_mode = get_address_mode (target);
5772 
5773 	      /* Compute the size of the data to copy from the string.  */
5774 	      tree copy_size
5775 		= size_binop_loc (loc, MIN_EXPR,
5776 				  make_tree (sizetype, size),
5777 				  size_int (TREE_STRING_LENGTH (exp)));
5778 	      rtx copy_size_rtx
5779 		= expand_expr (copy_size, NULL_RTX, VOIDmode,
5780 			       (call_param_p
5781 				? EXPAND_STACK_PARM : EXPAND_NORMAL));
5782 	      rtx_code_label *label = 0;
5783 
5784 	      /* Copy that much.  */
5785 	      copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
5786 					       TYPE_UNSIGNED (sizetype));
5787 	      emit_block_move (target, temp, copy_size_rtx,
5788 			       (call_param_p
5789 				? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5790 
5791 	      /* Figure out how much is left in TARGET that we have to clear.
5792 		 Do all calculations in pointer_mode.  */
5793 	      poly_int64 const_copy_size;
5794 	      if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
5795 		{
5796 		  size = plus_constant (address_mode, size, -const_copy_size);
5797 		  target = adjust_address (target, BLKmode, const_copy_size);
5798 		}
5799 	      else
5800 		{
5801 		  size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
5802 				       copy_size_rtx, NULL_RTX, 0,
5803 				       OPTAB_LIB_WIDEN);
5804 
5805 		  if (GET_MODE (copy_size_rtx) != address_mode)
5806 		    copy_size_rtx = convert_to_mode (address_mode,
5807 						     copy_size_rtx,
5808 						     TYPE_UNSIGNED (sizetype));
5809 
5810 		  target = offset_address (target, copy_size_rtx,
5811 					   highest_pow2_factor (copy_size));
5812 		  label = gen_label_rtx ();
5813 		  emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
5814 					   GET_MODE (size), 0, label);
5815 		}
5816 
5817 	      if (size != const0_rtx)
5818 		clear_storage (target, size, BLOCK_OP_NORMAL);
5819 
5820 	      if (label)
5821 		emit_label (label);
5822 	    }
5823 	}
5824       /* Handle calls that return values in multiple non-contiguous locations.
5825 	 The Irix 6 ABI has examples of this.  */
5826       else if (GET_CODE (target) == PARALLEL)
5827 	{
5828 	  if (GET_CODE (temp) == PARALLEL)
5829 	    emit_group_move (target, temp);
5830 	  else
5831 	    emit_group_load (target, temp, TREE_TYPE (exp),
5832 			     int_size_in_bytes (TREE_TYPE (exp)));
5833 	}
5834       else if (GET_CODE (temp) == PARALLEL)
5835 	emit_group_store (target, temp, TREE_TYPE (exp),
5836 			  int_size_in_bytes (TREE_TYPE (exp)));
5837       else if (GET_MODE (temp) == BLKmode)
5838 	emit_block_move (target, temp, expr_size (exp),
5839 			 (call_param_p
5840 			  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5841       /* If we emit a nontemporal store, there is nothing else to do.  */
5842       else if (nontemporal && emit_storent_insn (target, temp))
5843 	;
5844       else
5845 	{
5846 	  if (reverse)
5847 	    temp = flip_storage_order (GET_MODE (target), temp);
5848 	  temp = force_operand (temp, target);
5849 	  if (temp != target)
5850 	    emit_move_insn (target, temp);
5851 	}
5852     }
5853 
5854   return NULL_RTX;
5855 }
5856 
5857 /* Return true if field F of structure TYPE is a flexible array.  */
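/* For example:

     struct msg { int len; char data[]; };

   DATA is a flexible array member: it is the last field, its array type
   has a zero lower bound and no upper bound, and the enclosing struct
   still has a known constant size.  */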
5858 
5859 static bool
5860 flexible_array_member_p (const_tree f, const_tree type)
5861 {
5862   const_tree tf;
5863 
5864   tf = TREE_TYPE (f);
5865   return (DECL_CHAIN (f) == NULL
5866 	  && TREE_CODE (tf) == ARRAY_TYPE
5867 	  && TYPE_DOMAIN (tf)
5868 	  && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
5869 	  && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
5870 	  && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
5871 	  && int_size_in_bytes (type) >= 0);
5872 }
5873 
5874 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
5875    must have in order for it to completely initialize a value of type TYPE.
5876    Return -1 if the number isn't known.
5877 
5878    If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE.  */
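/* Illustration: for

     struct p { int a; int b[3]; };

   the !FOR_CTOR_P estimate is 4 scalars (A plus the three elements of
   B), while FOR_CTOR_P requires 2 top-level constructor elements for an
   initializer to be considered complete.  */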
5879 
5880 static HOST_WIDE_INT
5881 count_type_elements (const_tree type, bool for_ctor_p)
5882 {
5883   switch (TREE_CODE (type))
5884     {
5885     case ARRAY_TYPE:
5886       {
5887 	tree nelts;
5888 
5889 	nelts = array_type_nelts (type);
5890 	if (nelts && tree_fits_uhwi_p (nelts))
5891 	  {
5892 	    unsigned HOST_WIDE_INT n;
5893 
5894 	    n = tree_to_uhwi (nelts) + 1;
5895 	    if (n == 0 || for_ctor_p)
5896 	      return n;
5897 	    else
5898 	      return n * count_type_elements (TREE_TYPE (type), false);
5899 	  }
5900 	return for_ctor_p ? -1 : 1;
5901       }
5902 
5903     case RECORD_TYPE:
5904       {
5905 	unsigned HOST_WIDE_INT n;
5906 	tree f;
5907 
5908 	n = 0;
5909 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5910 	  if (TREE_CODE (f) == FIELD_DECL)
5911 	    {
5912 	      if (!for_ctor_p)
5913 		n += count_type_elements (TREE_TYPE (f), false);
5914 	      else if (!flexible_array_member_p (f, type))
5915 		/* Don't count flexible arrays, which are not supposed
5916 		   to be initialized.  */
5917 		n += 1;
5918 	    }
5919 
5920 	return n;
5921       }
5922 
5923     case UNION_TYPE:
5924     case QUAL_UNION_TYPE:
5925       {
5926 	tree f;
5927 	HOST_WIDE_INT n, m;
5928 
5929 	gcc_assert (!for_ctor_p);
5930 	/* Estimate the number of scalars in each field and pick the
5931 	   maximum.  Other estimates would do instead; the idea is simply
5932 	   to make sure that the estimate is not sensitive to the ordering
5933 	   of the fields.  */
5934 	n = 1;
5935 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5936 	  if (TREE_CODE (f) == FIELD_DECL)
5937 	    {
5938 	      m = count_type_elements (TREE_TYPE (f), false);
5939 	      /* If the field doesn't span the whole union, add an extra
5940 		 scalar for the rest.  */
5941 	      if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
5942 				    TYPE_SIZE (type)) != 1)
5943 		m++;
5944 	      if (n < m)
5945 		n = m;
5946 	    }
5947 	return n;
5948       }
5949 
5950     case COMPLEX_TYPE:
5951       return 2;
5952 
5953     case VECTOR_TYPE:
5954       {
5955 	unsigned HOST_WIDE_INT nelts;
5956 	if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
5957 	  return nelts;
5958 	else
5959 	  return -1;
5960       }
5961 
5962     case INTEGER_TYPE:
5963     case REAL_TYPE:
5964     case FIXED_POINT_TYPE:
5965     case ENUMERAL_TYPE:
5966     case BOOLEAN_TYPE:
5967     case POINTER_TYPE:
5968     case OFFSET_TYPE:
5969     case REFERENCE_TYPE:
5970     case NULLPTR_TYPE:
5971       return 1;
5972 
5973     case ERROR_MARK:
5974       return 0;
5975 
5976     case VOID_TYPE:
5977     case METHOD_TYPE:
5978     case FUNCTION_TYPE:
5979     case LANG_TYPE:
5980     default:
5981       gcc_unreachable ();
5982     }
5983 }
5984 
5985 /* Helper for categorize_ctor_elements.  Identical interface.  */
5986 
5987 static bool
5988 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
5989 			    HOST_WIDE_INT *p_unique_nz_elts,
5990 			    HOST_WIDE_INT *p_init_elts, bool *p_complete)
5991 {
5992   unsigned HOST_WIDE_INT idx;
5993   HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
5994   tree value, purpose, elt_type;
5995 
5996   /* Whether CTOR is a valid constant initializer, in accordance with what
5997      initializer_constant_valid_p does.  If inferred from the constructor
5998      elements, true until proven otherwise.  */
5999   bool const_from_elts_p = constructor_static_from_elts_p (ctor);
6000   bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
6001 
6002   nz_elts = 0;
6003   unique_nz_elts = 0;
6004   init_elts = 0;
6005   num_fields = 0;
6006   elt_type = NULL_TREE;
6007 
6008   FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
6009     {
6010       HOST_WIDE_INT mult = 1;
6011 
6012       if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
6013 	{
6014 	  tree lo_index = TREE_OPERAND (purpose, 0);
6015 	  tree hi_index = TREE_OPERAND (purpose, 1);
6016 
6017 	  if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
6018 	    mult = (tree_to_uhwi (hi_index)
6019 		    - tree_to_uhwi (lo_index) + 1);
6020 	}
6021       num_fields += mult;
6022       elt_type = TREE_TYPE (value);
6023 
6024       switch (TREE_CODE (value))
6025 	{
6026 	case CONSTRUCTOR:
6027 	  {
6028 	    HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
6029 
6030 	    bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
6031 							   &ic, p_complete);
6032 
6033 	    nz_elts += mult * nz;
6034 	    unique_nz_elts += unz;
6035  	    init_elts += mult * ic;
6036 
6037 	    if (const_from_elts_p && const_p)
6038 	      const_p = const_elt_p;
6039 	  }
6040 	  break;
6041 
6042 	case INTEGER_CST:
6043 	case REAL_CST:
6044 	case FIXED_CST:
6045 	  if (!initializer_zerop (value))
6046 	    {
6047 	      nz_elts += mult;
6048 	      unique_nz_elts++;
6049 	    }
6050 	  init_elts += mult;
6051 	  break;
6052 
6053 	case STRING_CST:
6054 	  nz_elts += mult * TREE_STRING_LENGTH (value);
6055 	  unique_nz_elts += TREE_STRING_LENGTH (value);
6056 	  init_elts += mult * TREE_STRING_LENGTH (value);
6057 	  break;
6058 
6059 	case COMPLEX_CST:
6060 	  if (!initializer_zerop (TREE_REALPART (value)))
6061 	    {
6062 	      nz_elts += mult;
6063 	      unique_nz_elts++;
6064 	    }
6065 	  if (!initializer_zerop (TREE_IMAGPART (value)))
6066 	    {
6067 	      nz_elts += mult;
6068 	      unique_nz_elts++;
6069 	    }
6070 	  init_elts += 2 * mult;
6071 	  break;
6072 
6073 	case VECTOR_CST:
6074 	  {
6075 	    /* We can only construct constant-length vectors using
6076 	       CONSTRUCTOR.  */
6077 	    unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6078 	    for (unsigned int i = 0; i < nunits; ++i)
6079 	      {
6080 		tree v = VECTOR_CST_ELT (value, i);
6081 		if (!initializer_zerop (v))
6082 		  {
6083 		    nz_elts += mult;
6084 		    unique_nz_elts++;
6085 		  }
6086 		init_elts += mult;
6087 	      }
6088 	  }
6089 	  break;
6090 
6091 	default:
6092 	  {
6093 	    HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6094 	    nz_elts += mult * tc;
6095 	    unique_nz_elts += tc;
6096 	    init_elts += mult * tc;
6097 
6098 	    if (const_from_elts_p && const_p)
6099 	      const_p
6100 		= initializer_constant_valid_p (value,
6101 						elt_type,
6102 						TYPE_REVERSE_STORAGE_ORDER
6103 						(TREE_TYPE (ctor)))
6104 		  != NULL_TREE;
6105 	  }
6106 	  break;
6107 	}
6108     }
6109 
6110   if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6111 						num_fields, elt_type))
6112     *p_complete = false;
6113 
6114   *p_nz_elts += nz_elts;
6115   *p_unique_nz_elts += unique_nz_elts;
6116   *p_init_elts += init_elts;
6117 
6118   return const_p;
6119 }
6120 
6121 /* Examine CTOR to discover:
6122    * how many scalar fields are set to nonzero values,
6123      and place it in *P_NZ_ELTS;
6124    * the same, but counting RANGE_EXPRs as a multiplier of 1 instead of
6125      high - low + 1 (this can be useful for callers to determine ctors
6126      that could be cheaply initialized with - perhaps nested - loops
6127      rather than copied from huge read-only data),
6128      and place it in *P_UNIQUE_NZ_ELTS;
6129    * how many scalar fields in total are in CTOR,
6130      and place it in *P_INIT_ELTS.
6131    * whether the constructor is complete -- in the sense that every
6132      meaningful byte is explicitly given a value --
6133      and place it in *P_COMPLETE.
6134 
6135    Return whether or not CTOR is a valid static constant initializer, the same
6136    as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0".  */
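
/* As a hedged illustration (an assumed example, not taken from a real call
   site): for a C initializer such as

     int v[8] = { 0, 0, 0, 0, 0, 0, 0, 9 };

   and assuming the front end keeps all eight elements in the CONSTRUCTOR,
   this would be expected to set *P_NZ_ELTS and *P_UNIQUE_NZ_ELTS to 1 (the
   single nonzero scalar), *P_INIT_ELTS to 8 and *P_COMPLETE to true, and to
   return true since every element is a constant.  */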
6137 
6138 bool
6139 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6140 			  HOST_WIDE_INT *p_unique_nz_elts,
6141 			  HOST_WIDE_INT *p_init_elts, bool *p_complete)
6142 {
6143   *p_nz_elts = 0;
6144   *p_unique_nz_elts = 0;
6145   *p_init_elts = 0;
6146   *p_complete = true;
6147 
6148   return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
6149 				     p_init_elts, p_complete);
6150 }
6151 
6152 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6153    of which had type LAST_TYPE.  Each element was itself a complete
6154    initializer, in the sense that every meaningful byte was explicitly
6155    given a value.  Return true if the same is true for the constructor
6156    as a whole.  */
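
/* For instance (an illustrative sketch, assuming a target where int is four
   bytes): given

     union u { int i; char c; };

   a constructor that only initializes C covers one byte of a four-byte
   union, so the TYPE_SIZE comparison below reports it as incomplete,
   whereas a constructor initializing I is considered complete.  */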
6157 
6158 bool
6159 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6160 			  const_tree last_type)
6161 {
6162   if (TREE_CODE (type) == UNION_TYPE
6163       || TREE_CODE (type) == QUAL_UNION_TYPE)
6164     {
6165       if (num_elts == 0)
6166 	return false;
6167 
6168       gcc_assert (num_elts == 1 && last_type);
6169 
6170       /* ??? We could look at each element of the union, and find the
6171 	 largest element.  Which would avoid comparing the size of the
6172 	 initialized element against any tail padding in the union.
6173 	 Doesn't seem worth the effort...  */
6174       return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6175     }
6176 
6177   return count_type_elements (type, true) == num_elts;
6178 }
6179 
6180 /* Return 1 if EXP contains mostly (3/4) zeros.  */
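/* E.g. a CONSTRUCTOR for int v[8] = { 0, 0, 0, 0, 0, 0, 0, 9 } would be
   treated as mostly zero, while int v[4] = { 0, 5, 0, 0 } would not,
   because the test below requires (roughly) more than three quarters of
   the initialized scalars to be zero (illustrative examples).  */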
6181 
6182 static int
6183 mostly_zeros_p (const_tree exp)
6184 {
6185   if (TREE_CODE (exp) == CONSTRUCTOR)
6186     {
6187       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6188       bool complete_p;
6189 
6190       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6191 				&complete_p);
6192       return !complete_p || nz_elts < init_elts / 4;
6193     }
6194 
6195   return initializer_zerop (exp);
6196 }
6197 
6198 /* Return 1 if EXP contains all zeros.  */
6199 
6200 static int
6201 all_zeros_p (const_tree exp)
6202 {
6203   if (TREE_CODE (exp) == CONSTRUCTOR)
6204     {
6205       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6206       bool complete_p;
6207 
6208       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6209 				&complete_p);
6210       return nz_elts == 0;
6211     }
6212 
6213   return initializer_zerop (exp);
6214 }
6215 
6216 /* Helper function for store_constructor.
6217    TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6218    CLEARED is as for store_constructor.
6219    ALIAS_SET is the alias set to use for any stores.
6220    If REVERSE is true, the store is to be done in reverse order.
6221 
6222    This provides a recursive shortcut back to store_constructor when it isn't
6223    necessary to go through store_field.  This is so that we can pass through
6224    the cleared field to let store_constructor know that we may not have to
6225    clear a substructure if the outer structure has already been cleared.  */
6226 
6227 static void
6228 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6229 			 poly_uint64 bitregion_start,
6230 			 poly_uint64 bitregion_end,
6231 			 machine_mode mode,
6232 			 tree exp, int cleared,
6233 			 alias_set_type alias_set, bool reverse)
6234 {
6235   poly_int64 bytepos;
6236   poly_uint64 bytesize;
6237   if (TREE_CODE (exp) == CONSTRUCTOR
6238       /* We can only call store_constructor recursively if the size and
6239 	 bit position are on a byte boundary.  */
6240       && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6241       && maybe_ne (bitsize, 0U)
6242       && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6243       /* If we have a nonzero bitpos for a register target, then we just
6244 	 let store_field do the bitfield handling.  This is unlikely to
6245 	 generate unnecessary clear instructions anyways.  */
6246       && (known_eq (bitpos, 0) || MEM_P (target)))
6247     {
6248       if (MEM_P (target))
6249 	{
6250 	  machine_mode target_mode = GET_MODE (target);
6251 	  if (target_mode != BLKmode
6252 	      && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6253 	    target_mode = BLKmode;
6254 	  target = adjust_address (target, target_mode, bytepos);
6255 	}
6256 
6257 
6258       /* Update the alias set, if required.  */
6259       if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6260 	  && MEM_ALIAS_SET (target) != 0)
6261 	{
6262 	  target = copy_rtx (target);
6263 	  set_mem_alias_set (target, alias_set);
6264 	}
6265 
6266       store_constructor (exp, target, cleared, bytesize, reverse);
6267     }
6268   else
6269     store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6270 		 exp, alias_set, false, reverse);
6271 }
6272 
6273 
6274 /* Returns the number of FIELD_DECLs in TYPE.  */
6275 
6276 static int
6277 fields_length (const_tree type)
6278 {
6279   tree t = TYPE_FIELDS (type);
6280   int count = 0;
6281 
6282   for (; t; t = DECL_CHAIN (t))
6283     if (TREE_CODE (t) == FIELD_DECL)
6284       ++count;
6285 
6286   return count;
6287 }
6288 
6289 
6290 /* Store the value of constructor EXP into the rtx TARGET.
6291    TARGET is either a REG or a MEM; we know it cannot conflict, since
6292    safe_from_p has been called.
6293    CLEARED is true if TARGET is known to have been zero'd.
6294    SIZE is the number of bytes of TARGET we are allowed to modify: this
6295    may not be the same as the size of EXP if we are assigning to a field
6296    which has been packed to exclude padding bits.
6297    If REVERSE is true, the store is to be done in reverse order.  */
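
/* For example (a hedged sketch of a typical path, not a guarantee): when an
   aggregate initialization such as

     struct point { int x, y; } p = { 1, 2 };

   survives gimplification as a single CONSTRUCTOR assignment, it is normally
   expanded through this function, storing each element in turn via
   store_constructor_field above.  */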
6298 
6299 static void
6300 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6301 		   bool reverse)
6302 {
6303   tree type = TREE_TYPE (exp);
6304   HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6305   poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6306 
6307   switch (TREE_CODE (type))
6308     {
6309     case RECORD_TYPE:
6310     case UNION_TYPE:
6311     case QUAL_UNION_TYPE:
6312       {
6313 	unsigned HOST_WIDE_INT idx;
6314 	tree field, value;
6315 
6316 	/* The storage order is specified for every aggregate type.  */
6317 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6318 
6319 	/* If size is zero or the target is already cleared, do nothing.  */
6320 	if (known_eq (size, 0) || cleared)
6321 	  cleared = 1;
6322 	/* We either clear the aggregate or indicate the value is dead.  */
6323 	else if ((TREE_CODE (type) == UNION_TYPE
6324 		  || TREE_CODE (type) == QUAL_UNION_TYPE)
6325 		 && ! CONSTRUCTOR_ELTS (exp))
6326 	  /* If the constructor is empty, clear the union.  */
6327 	  {
6328 	    clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6329 	    cleared = 1;
6330 	  }
6331 
6332 	/* If we are building a static constructor into a register,
6333 	   set the initial value as zero so we can fold the value into
6334 	   a constant.  But if more than one register is involved,
6335 	   this probably loses.  */
6336 	else if (REG_P (target) && TREE_STATIC (exp)
6337 		 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6338 			      REGMODE_NATURAL_SIZE (GET_MODE (target))))
6339 	  {
6340 	    emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6341 	    cleared = 1;
6342 	  }
6343 
6344         /* If the constructor has fewer fields than the structure or
6345 	   if we are initializing the structure to mostly zeros, clear
6346 	   the whole structure first.  Don't do this if TARGET is a
6347 	   register whose mode size isn't equal to SIZE since
6348 	   clear_storage can't handle this case.  */
6349 	else if (known_size_p (size)
6350 		 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6351 		     || mostly_zeros_p (exp))
6352 		 && (!REG_P (target)
6353 		     || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6354 	  {
6355 	    clear_storage (target, gen_int_mode (size, Pmode),
6356 			   BLOCK_OP_NORMAL);
6357 	    cleared = 1;
6358 	  }
6359 
6360 	if (REG_P (target) && !cleared)
6361 	  emit_clobber (target);
6362 
6363 	/* Store each element of the constructor into the
6364 	   corresponding field of TARGET.  */
6365 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6366 	  {
6367 	    machine_mode mode;
6368 	    HOST_WIDE_INT bitsize;
6369 	    HOST_WIDE_INT bitpos = 0;
6370 	    tree offset;
6371 	    rtx to_rtx = target;
6372 
6373 	    /* Just ignore missing fields.  We cleared the whole
6374 	       structure, above, if any fields are missing.  */
6375 	    if (field == 0)
6376 	      continue;
6377 
6378 	    if (cleared && initializer_zerop (value))
6379 	      continue;
6380 
6381 	    if (tree_fits_uhwi_p (DECL_SIZE (field)))
6382 	      bitsize = tree_to_uhwi (DECL_SIZE (field));
6383 	    else
6384 	      gcc_unreachable ();
6385 
6386 	    mode = DECL_MODE (field);
6387 	    if (DECL_BIT_FIELD (field))
6388 	      mode = VOIDmode;
6389 
6390 	    offset = DECL_FIELD_OFFSET (field);
6391 	    if (tree_fits_shwi_p (offset)
6392 		&& tree_fits_shwi_p (bit_position (field)))
6393 	      {
6394 		bitpos = int_bit_position (field);
6395 		offset = NULL_TREE;
6396 	      }
6397 	    else
6398 	      gcc_unreachable ();
6399 
6400 	    /* If this initializes a field that is smaller than a
6401 	       word, at the start of a word, try to widen it to a full
6402 	       word.  This special case allows us to output C++ member
6403 	       function initializations in a form that the optimizers
6404 	       can understand.  */
6405 	    if (WORD_REGISTER_OPERATIONS
6406 		&& REG_P (target)
6407 		&& bitsize < BITS_PER_WORD
6408 		&& bitpos % BITS_PER_WORD == 0
6409 		&& GET_MODE_CLASS (mode) == MODE_INT
6410 		&& TREE_CODE (value) == INTEGER_CST
6411 		&& exp_size >= 0
6412 		&& bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6413 	      {
6414 		tree type = TREE_TYPE (value);
6415 
6416 		if (TYPE_PRECISION (type) < BITS_PER_WORD)
6417 		  {
6418 		    type = lang_hooks.types.type_for_mode
6419 		      (word_mode, TYPE_UNSIGNED (type));
6420 		    value = fold_convert (type, value);
6421 		    /* Make sure the bits beyond the original bitsize are zero
6422 		       so that we can correctly avoid extra zeroing stores in
6423 		       later constructor elements.  */
6424 		    tree bitsize_mask
6425 		      = wide_int_to_tree (type, wi::mask (bitsize, false,
6426 							   BITS_PER_WORD));
6427 		    value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6428 		  }
6429 
6430 		if (BYTES_BIG_ENDIAN)
6431 		  value
6432 		   = fold_build2 (LSHIFT_EXPR, type, value,
6433 				   build_int_cst (type,
6434 						  BITS_PER_WORD - bitsize));
6435 		bitsize = BITS_PER_WORD;
6436 		mode = word_mode;
6437 	      }
6438 
6439 	    if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6440 		&& DECL_NONADDRESSABLE_P (field))
6441 	      {
6442 		to_rtx = copy_rtx (to_rtx);
6443 		MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6444 	      }
6445 
6446 	    store_constructor_field (to_rtx, bitsize, bitpos,
6447 				     0, bitregion_end, mode,
6448 				     value, cleared,
6449 				     get_alias_set (TREE_TYPE (field)),
6450 				     reverse);
6451 	  }
6452 	break;
6453       }
6454     case ARRAY_TYPE:
6455       {
6456 	tree value, index;
6457 	unsigned HOST_WIDE_INT i;
6458 	int need_to_clear;
6459 	tree domain;
6460 	tree elttype = TREE_TYPE (type);
6461 	int const_bounds_p;
6462 	HOST_WIDE_INT minelt = 0;
6463 	HOST_WIDE_INT maxelt = 0;
6464 
6465 	/* The storage order is specified for every aggregate type.  */
6466 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6467 
6468 	domain = TYPE_DOMAIN (type);
6469 	const_bounds_p = (TYPE_MIN_VALUE (domain)
6470 			  && TYPE_MAX_VALUE (domain)
6471 			  && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6472 			  && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6473 
6474 	/* If we have constant bounds for the range of the type, get them.  */
6475 	if (const_bounds_p)
6476 	  {
6477 	    minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6478 	    maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6479 	  }
6480 
6481 	/* If the constructor has fewer elements than the array, clear
6482            the whole array first.  Similarly if this is a static
6483            constructor of a non-BLKmode object.  */
6484 	if (cleared)
6485 	  need_to_clear = 0;
6486 	else if (REG_P (target) && TREE_STATIC (exp))
6487 	  need_to_clear = 1;
6488 	else
6489 	  {
6490 	    unsigned HOST_WIDE_INT idx;
6491 	    tree index, value;
6492 	    HOST_WIDE_INT count = 0, zero_count = 0;
6493 	    need_to_clear = ! const_bounds_p;
6494 
6495 	    /* This loop is a more accurate version of the loop in
6496 	       mostly_zeros_p (it handles RANGE_EXPR in an index).  It
6497 	       is also needed to check for missing elements.  */
6498 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6499 	      {
6500 		HOST_WIDE_INT this_node_count;
6501 
6502 		if (need_to_clear)
6503 		  break;
6504 
6505 		if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6506 		  {
6507 		    tree lo_index = TREE_OPERAND (index, 0);
6508 		    tree hi_index = TREE_OPERAND (index, 1);
6509 
6510 		    if (! tree_fits_uhwi_p (lo_index)
6511 			|| ! tree_fits_uhwi_p (hi_index))
6512 		      {
6513 			need_to_clear = 1;
6514 			break;
6515 		      }
6516 
6517 		    this_node_count = (tree_to_uhwi (hi_index)
6518 				       - tree_to_uhwi (lo_index) + 1);
6519 		  }
6520 		else
6521 		  this_node_count = 1;
6522 
6523 		count += this_node_count;
6524 		if (mostly_zeros_p (value))
6525 		  zero_count += this_node_count;
6526 	      }
6527 
6528 	    /* Clear the entire array first if there are any missing
6529 	       elements, or if the incidence of zero elements is >=
6530 	       75%.  */
6531 	    if (! need_to_clear
6532 		&& (count < maxelt - minelt + 1
6533 		    || 4 * zero_count >= 3 * count))
6534 	      need_to_clear = 1;
6535 	  }
6536 
6537 	if (need_to_clear && maybe_gt (size, 0))
6538 	  {
6539 	    if (REG_P (target))
6540 	      emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6541 	    else
6542 	      clear_storage (target, gen_int_mode (size, Pmode),
6543 			     BLOCK_OP_NORMAL);
6544 	    cleared = 1;
6545 	  }
6546 
6547 	if (!cleared && REG_P (target))
6548 	  /* Inform later passes that the old value is dead.  */
6549 	  emit_clobber (target);
6550 
6551 	/* Store each element of the constructor into the
6552 	   corresponding element of TARGET, determined by counting the
6553 	   elements.  */
6554 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6555 	  {
6556 	    machine_mode mode;
6557 	    poly_int64 bitsize;
6558 	    HOST_WIDE_INT bitpos;
6559 	    rtx xtarget = target;
6560 
6561 	    if (cleared && initializer_zerop (value))
6562 	      continue;
6563 
6564 	    mode = TYPE_MODE (elttype);
6565 	    if (mode != BLKmode)
6566 	      bitsize = GET_MODE_BITSIZE (mode);
6567 	    else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
6568 	      bitsize = -1;
6569 
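	    /* A RANGE_EXPR index comes from a designated range initializer,
	       e.g. the GNU C extension
	         int a[16] = { [2 ... 5] = 7 };
	       (an illustrative example, not taken from the sources).  */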
6570 	    if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6571 	      {
6572 		tree lo_index = TREE_OPERAND (index, 0);
6573 		tree hi_index = TREE_OPERAND (index, 1);
6574 		rtx index_r, pos_rtx;
6575 		HOST_WIDE_INT lo, hi, count;
6576 		tree position;
6577 
6578 		/* If the range is constant and "small", unroll the loop.  */
6579 		if (const_bounds_p
6580 		    && tree_fits_shwi_p (lo_index)
6581 		    && tree_fits_shwi_p (hi_index)
6582 		    && (lo = tree_to_shwi (lo_index),
6583 			hi = tree_to_shwi (hi_index),
6584 			count = hi - lo + 1,
6585 			(!MEM_P (target)
6586 			 || count <= 2
6587 			 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6588 			     && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
6589 				 <= 40 * 8)))))
6590 		  {
6591 		    lo -= minelt;  hi -= minelt;
6592 		    for (; lo <= hi; lo++)
6593 		      {
6594 			bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
6595 
6596 			if (MEM_P (target)
6597 			    && !MEM_KEEP_ALIAS_SET_P (target)
6598 			    && TREE_CODE (type) == ARRAY_TYPE
6599 			    && TYPE_NONALIASED_COMPONENT (type))
6600 			  {
6601 			    target = copy_rtx (target);
6602 			    MEM_KEEP_ALIAS_SET_P (target) = 1;
6603 			  }
6604 
6605 			store_constructor_field
6606 			  (target, bitsize, bitpos, 0, bitregion_end,
6607 			   mode, value, cleared,
6608 			   get_alias_set (elttype), reverse);
6609 		      }
6610 		  }
6611 		else
6612 		  {
6613 		    rtx_code_label *loop_start = gen_label_rtx ();
6614 		    rtx_code_label *loop_end = gen_label_rtx ();
6615 		    tree exit_cond;
6616 
6617 		    expand_normal (hi_index);
6618 
6619 		    index = build_decl (EXPR_LOCATION (exp),
6620 					VAR_DECL, NULL_TREE, domain);
6621 		    index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
6622 		    SET_DECL_RTL (index, index_r);
6623 		    store_expr (lo_index, index_r, 0, false, reverse);
6624 
6625 		    /* Build the head of the loop.  */
6626 		    do_pending_stack_adjust ();
6627 		    emit_label (loop_start);
6628 
6629 		    /* Assign value to element index.  */
6630 		    position =
6631 		      fold_convert (ssizetype,
6632 				    fold_build2 (MINUS_EXPR,
6633 						 TREE_TYPE (index),
6634 						 index,
6635 						 TYPE_MIN_VALUE (domain)));
6636 
6637 		    position =
6638 			size_binop (MULT_EXPR, position,
6639 				    fold_convert (ssizetype,
6640 						  TYPE_SIZE_UNIT (elttype)));
6641 
6642 		    pos_rtx = expand_normal (position);
6643 		    xtarget = offset_address (target, pos_rtx,
6644 					      highest_pow2_factor (position));
6645 		    xtarget = adjust_address (xtarget, mode, 0);
6646 		    if (TREE_CODE (value) == CONSTRUCTOR)
6647 		      store_constructor (value, xtarget, cleared,
6648 					 exact_div (bitsize, BITS_PER_UNIT),
6649 					 reverse);
6650 		    else
6651 		      store_expr (value, xtarget, 0, false, reverse);
6652 
6653 		    /* Generate a conditional jump to exit the loop.  */
6654 		    exit_cond = build2 (LT_EXPR, integer_type_node,
6655 					index, hi_index);
6656 		    jumpif (exit_cond, loop_end,
6657 			    profile_probability::uninitialized ());
6658 
6659 		    /* Update the loop counter, and jump to the head of
6660 		       the loop.  */
6661 		    expand_assignment (index,
6662 				       build2 (PLUS_EXPR, TREE_TYPE (index),
6663 					       index, integer_one_node),
6664 				       false);
6665 
6666 		    emit_jump (loop_start);
6667 
6668 		    /* Build the end of the loop.  */
6669 		    emit_label (loop_end);
6670 		  }
6671 	      }
6672 	    else if ((index != 0 && ! tree_fits_shwi_p (index))
6673 		     || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
6674 	      {
6675 		tree position;
6676 
6677 		if (index == 0)
6678 		  index = ssize_int (1);
6679 
6680 		if (minelt)
6681 		  index = fold_convert (ssizetype,
6682 					fold_build2 (MINUS_EXPR,
6683 						     TREE_TYPE (index),
6684 						     index,
6685 						     TYPE_MIN_VALUE (domain)));
6686 
6687 		position =
6688 		  size_binop (MULT_EXPR, index,
6689 			      fold_convert (ssizetype,
6690 					    TYPE_SIZE_UNIT (elttype)));
6691 		xtarget = offset_address (target,
6692 					  expand_normal (position),
6693 					  highest_pow2_factor (position));
6694 		xtarget = adjust_address (xtarget, mode, 0);
6695 		store_expr (value, xtarget, 0, false, reverse);
6696 	      }
6697 	    else
6698 	      {
6699 		if (index != 0)
6700 		  bitpos = ((tree_to_shwi (index) - minelt)
6701 			    * tree_to_uhwi (TYPE_SIZE (elttype)));
6702 		else
6703 		  bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
6704 
6705 		if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
6706 		    && TREE_CODE (type) == ARRAY_TYPE
6707 		    && TYPE_NONALIASED_COMPONENT (type))
6708 		  {
6709 		    target = copy_rtx (target);
6710 		    MEM_KEEP_ALIAS_SET_P (target) = 1;
6711 		  }
6712 		store_constructor_field (target, bitsize, bitpos, 0,
6713 					 bitregion_end, mode, value,
6714 					 cleared, get_alias_set (elttype),
6715 					 reverse);
6716 	      }
6717 	  }
6718 	break;
6719       }
6720 
6721     case VECTOR_TYPE:
6722       {
6723 	unsigned HOST_WIDE_INT idx;
6724 	constructor_elt *ce;
6725 	int i;
6726 	int need_to_clear;
6727 	insn_code icode = CODE_FOR_nothing;
6728 	tree elt;
6729 	tree elttype = TREE_TYPE (type);
6730 	int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
6731 	machine_mode eltmode = TYPE_MODE (elttype);
6732 	HOST_WIDE_INT bitsize;
6733 	HOST_WIDE_INT bitpos;
6734 	rtvec vector = NULL;
6735 	poly_uint64 n_elts;
6736 	unsigned HOST_WIDE_INT const_n_elts;
6737 	alias_set_type alias;
6738 	bool vec_vec_init_p = false;
6739 	machine_mode mode = GET_MODE (target);
6740 
6741 	gcc_assert (eltmode != BLKmode);
6742 
6743 	/* Try using vec_duplicate_optab for uniform vectors.  */
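	/* A uniform vector is a constructor whose elements are all the same
	   value, e.g. something like the GNU C vector splat
	     v4si v = (v4si) { x, x, x, x };
	   (an assumed, illustrative example); uniform_vector_p returns that
	   common element.  */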
6744 	if (!TREE_SIDE_EFFECTS (exp)
6745 	    && VECTOR_MODE_P (mode)
6746 	    && eltmode == GET_MODE_INNER (mode)
6747 	    && ((icode = optab_handler (vec_duplicate_optab, mode))
6748 		!= CODE_FOR_nothing)
6749 	    && (elt = uniform_vector_p (exp)))
6750 	  {
6751 	    struct expand_operand ops[2];
6752 	    create_output_operand (&ops[0], target, mode);
6753 	    create_input_operand (&ops[1], expand_normal (elt), eltmode);
6754 	    expand_insn (icode, 2, ops);
6755 	    if (!rtx_equal_p (target, ops[0].value))
6756 	      emit_move_insn (target, ops[0].value);
6757 	    break;
6758 	  }
6759 
6760 	n_elts = TYPE_VECTOR_SUBPARTS (type);
6761 	if (REG_P (target)
6762 	    && VECTOR_MODE_P (mode)
6763 	    && n_elts.is_constant (&const_n_elts))
6764 	  {
6765 	    machine_mode emode = eltmode;
6766 
6767 	    if (CONSTRUCTOR_NELTS (exp)
6768 		&& (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
6769 		    == VECTOR_TYPE))
6770 	      {
6771 		tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
6772 		gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
6773 				      * TYPE_VECTOR_SUBPARTS (etype),
6774 				      n_elts));
6775 		emode = TYPE_MODE (etype);
6776 	      }
6777 	    icode = convert_optab_handler (vec_init_optab, mode, emode);
6778 	    if (icode != CODE_FOR_nothing)
6779 	      {
6780 		unsigned int i, n = const_n_elts;
6781 
6782 		if (emode != eltmode)
6783 		  {
6784 		    n = CONSTRUCTOR_NELTS (exp);
6785 		    vec_vec_init_p = true;
6786 		  }
6787 		vector = rtvec_alloc (n);
6788 		for (i = 0; i < n; i++)
6789 		  RTVEC_ELT (vector, i) = CONST0_RTX (emode);
6790 	      }
6791 	  }
6792 
6793 	/* If the constructor has fewer elements than the vector,
6794 	   clear the whole array first.  Similarly if this is a static
6795 	   constructor of a non-BLKmode object.  */
6796 	if (cleared)
6797 	  need_to_clear = 0;
6798 	else if (REG_P (target) && TREE_STATIC (exp))
6799 	  need_to_clear = 1;
6800 	else
6801 	  {
6802 	    unsigned HOST_WIDE_INT count = 0, zero_count = 0;
6803 	    tree value;
6804 
6805 	    FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
6806 	      {
6807 		tree sz = TYPE_SIZE (TREE_TYPE (value));
6808 		int n_elts_here
6809 		  = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, sz,
6810 						   TYPE_SIZE (elttype)));
6811 
6812 		count += n_elts_here;
6813 		if (mostly_zeros_p (value))
6814 		  zero_count += n_elts_here;
6815 	      }
6816 
6817 	    /* Clear the entire vector first if there are any missing elements,
6818 	       or if the incidence of zero elements is >= 75%.  */
6819 	    need_to_clear = (maybe_lt (count, n_elts)
6820 			     || 4 * zero_count >= 3 * count);
6821 	  }
6822 
6823 	if (need_to_clear && maybe_gt (size, 0) && !vector)
6824 	  {
6825 	    if (REG_P (target))
6826 	      emit_move_insn (target, CONST0_RTX (mode));
6827 	    else
6828 	      clear_storage (target, gen_int_mode (size, Pmode),
6829 			     BLOCK_OP_NORMAL);
6830 	    cleared = 1;
6831 	  }
6832 
6833 	/* Inform later passes that the old value is dead.  */
6834 	if (!cleared && !vector && REG_P (target))
6835 	  emit_move_insn (target, CONST0_RTX (mode));
6836 
6837         if (MEM_P (target))
6838 	  alias = MEM_ALIAS_SET (target);
6839 	else
6840 	  alias = get_alias_set (elttype);
6841 
6842         /* Store each element of the constructor into the corresponding
6843 	   element of TARGET, determined by counting the elements.  */
6844 	for (idx = 0, i = 0;
6845 	     vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
6846 	     idx++, i += bitsize / elt_size)
6847 	  {
6848 	    HOST_WIDE_INT eltpos;
6849 	    tree value = ce->value;
6850 
6851 	    bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)));
6852 	    if (cleared && initializer_zerop (value))
6853 	      continue;
6854 
6855 	    if (ce->index)
6856 	      eltpos = tree_to_uhwi (ce->index);
6857 	    else
6858 	      eltpos = i;
6859 
6860 	    if (vector)
6861 	      {
6862 		if (vec_vec_init_p)
6863 		  {
6864 		    gcc_assert (ce->index == NULL_TREE);
6865 		    gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
6866 		    eltpos = idx;
6867 		  }
6868 		else
6869 		  gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
6870 		RTVEC_ELT (vector, eltpos) = expand_normal (value);
6871 	      }
6872 	    else
6873 	      {
6874 		machine_mode value_mode
6875 		  = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
6876 		     ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
6877 		bitpos = eltpos * elt_size;
6878 		store_constructor_field (target, bitsize, bitpos, 0,
6879 					 bitregion_end, value_mode,
6880 					 value, cleared, alias, reverse);
6881 	      }
6882 	  }
6883 
6884 	if (vector)
6885 	  emit_insn (GEN_FCN (icode) (target,
6886 				      gen_rtx_PARALLEL (mode, vector)));
6887 	break;
6888       }
6889 
6890     default:
6891       gcc_unreachable ();
6892     }
6893 }
6894 
6895 /* Store the value of EXP (an expression tree)
6896    into a subfield of TARGET which has mode MODE and occupies
6897    BITSIZE bits, starting BITPOS bits from the start of TARGET.
6898    If MODE is VOIDmode, it means that we are storing into a bit-field.
6899 
6900    BITREGION_START is bitpos of the first bitfield in this region.
6901    BITREGION_END is the bitpos of the ending bitfield in this region.
6902    These two fields are 0, if the C++ memory model does not apply,
6903    or we are not interested in keeping track of bitfield regions.
6904 
6905    Always return const0_rtx unless we have something particular to
6906    return.
6907 
6908    ALIAS_SET is the alias set for the destination.  This value will
6909    (in general) be different from that for TARGET, since TARGET is a
6910    reference to the containing structure.
6911 
6912    If NONTEMPORAL is true, try generating a nontemporal store.
6913 
6914    If REVERSE is true, the store is to be done in reverse order.  */
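
/* As a rough illustration (assuming a little-endian target with the usual
   bit-field layout): for

     struct { unsigned a : 3, b : 5; } s;
     s.b = x;

   the assignment would typically reach this function with BITSIZE 5,
   BITPOS 3 and MODE VOIDmode, so the value ends up being inserted with
   store_bit_field below.  */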
6915 
6916 static rtx
6917 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
6918 	     poly_uint64 bitregion_start, poly_uint64 bitregion_end,
6919 	     machine_mode mode, tree exp,
6920 	     alias_set_type alias_set, bool nontemporal,  bool reverse)
6921 {
6922   if (TREE_CODE (exp) == ERROR_MARK)
6923     return const0_rtx;
6924 
6925   /* If we have nothing to store, do nothing unless the expression has
6926      side-effects.  Don't do that for zero sized addressable lhs of
6927      calls.  */
6928   if (known_eq (bitsize, 0)
6929       && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6930 	  || TREE_CODE (exp) != CALL_EXPR))
6931     return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6932 
6933   if (GET_CODE (target) == CONCAT)
6934     {
6935       /* We're storing into a struct containing a single __complex.  */
6936 
6937       gcc_assert (known_eq (bitpos, 0));
6938       return store_expr (exp, target, 0, nontemporal, reverse);
6939     }
6940 
6941   /* If the structure is in a register or if the component
6942      is a bit field, we cannot use addressing to access it.
6943      Use bit-field techniques or SUBREG to store in it.  */
6944 
6945   poly_int64 decl_bitsize;
6946   if (mode == VOIDmode
6947       || (mode != BLKmode && ! direct_store[(int) mode]
6948 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
6949 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
6950       || REG_P (target)
6951       || GET_CODE (target) == SUBREG
6952       /* If the field isn't aligned enough to store as an ordinary memref,
6953 	 store it as a bit field.  */
6954       || (mode != BLKmode
6955 	  && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
6956 		|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
6957 	       && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
6958 	      || !multiple_p (bitpos, BITS_PER_UNIT)))
6959       || (known_size_p (bitsize)
6960 	  && mode != BLKmode
6961 	  && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
6962       /* If the RHS and field are a constant size and the size of the
6963 	 RHS isn't the same size as the bitfield, we must use bitfield
6964 	 operations.  */
6965       || (known_size_p (bitsize)
6966 	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
6967 	  && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
6968 		       bitsize)
6969 	  /* Except for initialization of full bytes from a CONSTRUCTOR, which
6970 	     we will handle specially below.  */
6971 	  && !(TREE_CODE (exp) == CONSTRUCTOR
6972 	       && multiple_p (bitsize, BITS_PER_UNIT))
6973 	  /* And except for bitwise copying of TREE_ADDRESSABLE types,
6974 	     where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
6975 	     includes some extra padding.  store_expr / expand_expr will in
6976 	     that case call get_inner_reference that will have the bitsize
6977 	     we check here and thus the block move will not clobber the
6978 	     padding that shouldn't be clobbered.  In the future we could
6979 	     replace the TREE_ADDRESSABLE check with a check that
6980 	     get_base_address needs to live in memory.  */
6981 	  && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6982 	      || TREE_CODE (exp) != COMPONENT_REF
6983 	      || !multiple_p (bitsize, BITS_PER_UNIT)
6984 	      || !multiple_p (bitpos, BITS_PER_UNIT)
6985 	      || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
6986 				   &decl_bitsize)
6987 	      || maybe_ne (decl_bitsize, bitsize)))
6988       /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
6989          decl we must use bitfield operations.  */
6990       || (known_size_p (bitsize)
6991 	  && TREE_CODE (exp) == MEM_REF
6992 	  && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
6993 	  && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6994 	  && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6995 	  && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
6996     {
6997       rtx temp;
6998       gimple *nop_def;
6999 
7000       /* If EXP is a NOP_EXPR of precision less than its mode, then that
7001 	 implies a mask operation.  If the precision is the same size as
7002 	 the field we're storing into, that mask is redundant.  This is
7003 	 particularly common with bit field assignments generated by the
7004 	 C front end.  */
7005       nop_def = get_def_for_expr (exp, NOP_EXPR);
7006       if (nop_def)
7007 	{
7008 	  tree type = TREE_TYPE (exp);
7009 	  if (INTEGRAL_TYPE_P (type)
7010 	      && maybe_ne (TYPE_PRECISION (type),
7011 			   GET_MODE_BITSIZE (TYPE_MODE (type)))
7012 	      && known_eq (bitsize, TYPE_PRECISION (type)))
7013 	    {
7014 	      tree op = gimple_assign_rhs1 (nop_def);
7015 	      type = TREE_TYPE (op);
7016 	      if (INTEGRAL_TYPE_P (type)
7017 		  && known_ge (TYPE_PRECISION (type), bitsize))
7018 		exp = op;
7019 	    }
7020 	}
7021 
7022       temp = expand_normal (exp);
7023 
7024       /* We don't support variable-sized BLKmode bitfields, since our
7025 	 handling of BLKmode is bound up with the ability to break
7026 	 things into words.  */
7027       gcc_assert (mode != BLKmode || bitsize.is_constant ());
7028 
7029       /* Handle calls that return values in multiple non-contiguous locations.
7030 	 The Irix 6 ABI has examples of this.  */
7031       if (GET_CODE (temp) == PARALLEL)
7032 	{
7033 	  HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7034 	  machine_mode temp_mode = GET_MODE (temp);
7035 	  if (temp_mode == BLKmode || temp_mode == VOIDmode)
7036 	    temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
7037 	  rtx temp_target = gen_reg_rtx (temp_mode);
7038 	  emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
7039 	  temp = temp_target;
7040 	}
7041 
7042       /* Handle calls that return BLKmode values in registers.  */
7043       else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
7044 	{
7045 	  rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7046 	  copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7047 	  temp = temp_target;
7048 	}
7049 
7050       /* If the value has aggregate type and an integral mode then, if BITSIZE
7051 	 is narrower than this mode and this is for big-endian data, we first
7052 	 need to put the value into the low-order bits for store_bit_field,
7053 	 except when MODE is BLKmode and BITSIZE larger than the word size
7054 	 (see the handling of fields larger than a word in store_bit_field).
7055 	 Moreover, the field may be not aligned on a byte boundary; in this
7056 	 case, if it has reverse storage order, it needs to be accessed as a
7057 	 scalar field with reverse storage order and we must first put the
7058 	 value into target order.  */
7059       scalar_int_mode temp_mode;
7060       if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7061 	  && is_int_mode (GET_MODE (temp), &temp_mode))
7062 	{
7063 	  HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7064 
7065 	  reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7066 
7067 	  if (reverse)
7068 	    temp = flip_storage_order (temp_mode, temp);
7069 
7070 	  gcc_checking_assert (known_le (bitsize, size));
7071 	  if (maybe_lt (bitsize, size)
7072 	      && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7073 	      /* Use of to_constant for BLKmode was checked above.  */
7074 	      && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7075 	    temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7076 				 size - bitsize, NULL_RTX, 1);
7077 	}
7078 
7079       /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE.  */
7080       if (mode != VOIDmode && mode != BLKmode
7081 	  && mode != TYPE_MODE (TREE_TYPE (exp)))
7082 	temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7083 
7084       /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7085 	 and BITPOS must be aligned on a byte boundary.  If so, we simply do
7086 	 a block copy.  Likewise for a BLKmode-like TARGET.  */
7087       if (GET_MODE (temp) == BLKmode
7088 	  && (GET_MODE (target) == BLKmode
7089 	      || (MEM_P (target)
7090 		  && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7091 		  && multiple_p (bitpos, BITS_PER_UNIT)
7092 		  && multiple_p (bitsize, BITS_PER_UNIT))))
7093 	{
7094 	  gcc_assert (MEM_P (target) && MEM_P (temp));
7095 	  poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7096 	  poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7097 
7098 	  target = adjust_address (target, VOIDmode, bytepos);
7099 	  emit_block_move (target, temp,
7100 			   gen_int_mode (bytesize, Pmode),
7101 			   BLOCK_OP_NORMAL);
7102 
7103 	  return const0_rtx;
7104 	}
7105 
7106       /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
7107 	 word size, we need to load the value (see again store_bit_field).  */
7108       if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7109 	{
7110 	  scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
7111 	  temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7112 				    temp_mode, false, NULL);
7113 	}
7114 
7115       /* Store the value in the bitfield.  */
7116       gcc_checking_assert (known_ge (bitpos, 0));
7117       store_bit_field (target, bitsize, bitpos,
7118 		       bitregion_start, bitregion_end,
7119 		       mode, temp, reverse);
7120 
7121       return const0_rtx;
7122     }
7123   else
7124     {
7125       /* Now build a reference to just the desired component.  */
7126       rtx to_rtx = adjust_address (target, mode,
7127 				   exact_div (bitpos, BITS_PER_UNIT));
7128 
7129       if (to_rtx == target)
7130 	to_rtx = copy_rtx (to_rtx);
7131 
7132       if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7133 	set_mem_alias_set (to_rtx, alias_set);
7134 
7135       /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7136 	 into a target smaller than its type; handle that case now.  */
7137       if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7138 	{
7139 	  poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7140 	  store_constructor (exp, to_rtx, 0, bytesize, reverse);
7141 	  return to_rtx;
7142 	}
7143 
7144       return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7145     }
7146 }
7147 
7148 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7149    an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7150    codes and find the ultimate containing object, which we return.
7151 
7152    We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7153    bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7154    storage order of the field.
7155    If the position of the field is variable, we store a tree
7156    giving the variable offset (in units) in *POFFSET.
7157    This offset is in addition to the bit position.
7158    If the position is not variable, we store 0 in *POFFSET.
7159 
7160    If any of the extraction expressions is volatile,
7161    we store 1 in *PVOLATILEP.  Otherwise we don't change that.
7162 
7163    If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7164    Otherwise, it is a mode that can be used to access the field.
7165 
7166    If the field describes a variable-sized object, *PMODE is set to
7167    BLKmode and *PBITSIZE is set to -1.  An access cannot be made in
7168    this case, but the address of the object can be found.  */
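
/* For example (an illustrative sketch): given a reference such as the C
   expression a.b[i].c, this strips the COMPONENT_REFs and the ARRAY_REF and
   returns the innermost object a; because the index i is not constant, the
   scaled index and the byte offsets of the fields end up in *POFFSET, with
   only the remaining bit offset in *PBITPOS.  */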
7169 
7170 tree
7171 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7172 		     poly_int64_pod *pbitpos, tree *poffset,
7173 		     machine_mode *pmode, int *punsignedp,
7174 		     int *preversep, int *pvolatilep)
7175 {
7176   tree size_tree = 0;
7177   machine_mode mode = VOIDmode;
7178   bool blkmode_bitfield = false;
7179   tree offset = size_zero_node;
7180   poly_offset_int bit_offset = 0;
7181 
7182   /* First get the mode, signedness, storage order and size.  We do this from
7183      just the outermost expression.  */
7184   *pbitsize = -1;
7185   if (TREE_CODE (exp) == COMPONENT_REF)
7186     {
7187       tree field = TREE_OPERAND (exp, 1);
7188       size_tree = DECL_SIZE (field);
7189       if (flag_strict_volatile_bitfields > 0
7190 	  && TREE_THIS_VOLATILE (exp)
7191 	  && DECL_BIT_FIELD_TYPE (field)
7192 	  && DECL_MODE (field) != BLKmode)
7193 	/* Volatile bitfields should be accessed in the mode of the
7194 	     field's type, not the mode computed based on the bit
7195 	     size.  */
7196 	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7197       else if (!DECL_BIT_FIELD (field))
7198 	{
7199 	  mode = DECL_MODE (field);
7200 	  /* For vector fields re-check the target flags, as DECL_MODE
7201 	     could have been set with different target flags than
7202 	     the current function has.  */
7203 	  if (mode == BLKmode
7204 	      && VECTOR_TYPE_P (TREE_TYPE (field))
7205 	      && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7206 	    mode = TYPE_MODE (TREE_TYPE (field));
7207 	}
7208       else if (DECL_MODE (field) == BLKmode)
7209 	blkmode_bitfield = true;
7210 
7211       *punsignedp = DECL_UNSIGNED (field);
7212     }
7213   else if (TREE_CODE (exp) == BIT_FIELD_REF)
7214     {
7215       size_tree = TREE_OPERAND (exp, 1);
7216       *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7217 		     || TYPE_UNSIGNED (TREE_TYPE (exp)));
7218 
7219       /* For vector types, with the correct size of access, use the mode of
7220 	 inner type.  */
7221       if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7222 	  && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7223 	  && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7224         mode = TYPE_MODE (TREE_TYPE (exp));
7225     }
7226   else
7227     {
7228       mode = TYPE_MODE (TREE_TYPE (exp));
7229       *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7230 
7231       if (mode == BLKmode)
7232 	size_tree = TYPE_SIZE (TREE_TYPE (exp));
7233       else
7234 	*pbitsize = GET_MODE_BITSIZE (mode);
7235     }
7236 
7237   if (size_tree != 0)
7238     {
7239       if (! tree_fits_uhwi_p (size_tree))
7240 	mode = BLKmode, *pbitsize = -1;
7241       else
7242 	*pbitsize = tree_to_uhwi (size_tree);
7243     }
7244 
7245   *preversep = reverse_storage_order_for_component_p (exp);
7246 
7247   /* Compute cumulative bit-offset for nested component-refs and array-refs,
7248      and find the ultimate containing object.  */
7249   while (1)
7250     {
7251       switch (TREE_CODE (exp))
7252 	{
7253 	case BIT_FIELD_REF:
7254 	  bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7255 	  break;
7256 
7257 	case COMPONENT_REF:
7258 	  {
7259 	    tree field = TREE_OPERAND (exp, 1);
7260 	    tree this_offset = component_ref_field_offset (exp);
7261 
7262 	    /* If this field hasn't been filled in yet, don't go past it.
7263 	       This should only happen when folding expressions made during
7264 	       type construction.  */
7265 	    if (this_offset == 0)
7266 	      break;
7267 
7268 	    offset = size_binop (PLUS_EXPR, offset, this_offset);
7269 	    bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7270 
7271 	    /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN.  */
7272 	  }
7273 	  break;
7274 
7275 	case ARRAY_REF:
7276 	case ARRAY_RANGE_REF:
7277 	  {
7278 	    tree index = TREE_OPERAND (exp, 1);
7279 	    tree low_bound = array_ref_low_bound (exp);
7280 	    tree unit_size = array_ref_element_size (exp);
7281 
7282 	    /* We assume all arrays have sizes that are a multiple of a byte.
7283 	       First subtract the lower bound, if any, in the type of the
7284 	       index, then convert to sizetype and multiply by the size of
7285 	       the array element.  */
7286 	    if (! integer_zerop (low_bound))
7287 	      index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7288 				   index, low_bound);
7289 
7290 	    offset = size_binop (PLUS_EXPR, offset,
7291 			         size_binop (MULT_EXPR,
7292 					     fold_convert (sizetype, index),
7293 					     unit_size));
7294 	  }
7295 	  break;
7296 
7297 	case REALPART_EXPR:
7298 	  break;
7299 
7300 	case IMAGPART_EXPR:
7301 	  bit_offset += *pbitsize;
7302 	  break;
7303 
7304 	case VIEW_CONVERT_EXPR:
7305 	  break;
7306 
7307 	case MEM_REF:
7308 	  /* Hand back the decl for MEM[&decl, off].  */
7309 	  if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7310 	    {
7311 	      tree off = TREE_OPERAND (exp, 1);
7312 	      if (!integer_zerop (off))
7313 		{
7314 		  poly_offset_int boff = mem_ref_offset (exp);
7315 		  boff <<= LOG2_BITS_PER_UNIT;
7316 		  bit_offset += boff;
7317 		}
7318 	      exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7319 	    }
7320 	  goto done;
7321 
7322 	default:
7323 	  goto done;
7324 	}
7325 
7326       /* If any reference in the chain is volatile, the effect is volatile.  */
7327       if (TREE_THIS_VOLATILE (exp))
7328 	*pvolatilep = 1;
7329 
7330       exp = TREE_OPERAND (exp, 0);
7331     }
7332  done:
7333 
7334   /* If OFFSET is constant, see if we can return the whole thing as a
7335      constant bit position.  Make sure to handle overflow during
7336      this conversion.  */
7337   if (poly_int_tree_p (offset))
7338     {
7339       poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7340 				      TYPE_PRECISION (sizetype));
7341       tem <<= LOG2_BITS_PER_UNIT;
7342       tem += bit_offset;
7343       if (tem.to_shwi (pbitpos))
7344 	*poffset = offset = NULL_TREE;
7345     }
7346 
7347   /* Otherwise, split it up.  */
7348   if (offset)
7349     {
7350       /* Avoid returning a negative bitpos as this may wreak havoc later.  */
7351       if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7352         {
7353 	  *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7354 	  poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7355 	  offset = size_binop (PLUS_EXPR, offset,
7356 			       build_int_cst (sizetype, bytes.force_shwi ()));
7357 	}
7358 
7359       *poffset = offset;
7360     }
7361 
7362   /* We can use BLKmode for a byte-aligned BLKmode bitfield.  */
7363   if (mode == VOIDmode
7364       && blkmode_bitfield
7365       && multiple_p (*pbitpos, BITS_PER_UNIT)
7366       && multiple_p (*pbitsize, BITS_PER_UNIT))
7367     *pmode = BLKmode;
7368   else
7369     *pmode = mode;
7370 
7371   return exp;
7372 }
7373 
7374 /* Alignment in bits the TARGET of an assignment may be assumed to have.  */
7375 
7376 static unsigned HOST_WIDE_INT
7377 target_align (const_tree target)
7378 {
7379   /* We might have a chain of nested references with intermediate misaligning
7380      bit-field components, so we need to recurse to find out.  */
7381 
7382   unsigned HOST_WIDE_INT this_align, outer_align;
7383 
7384   switch (TREE_CODE (target))
7385     {
7386     case BIT_FIELD_REF:
7387       return 1;
7388 
7389     case COMPONENT_REF:
7390       this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7391       outer_align = target_align (TREE_OPERAND (target, 0));
7392       return MIN (this_align, outer_align);
7393 
7394     case ARRAY_REF:
7395     case ARRAY_RANGE_REF:
7396       this_align = TYPE_ALIGN (TREE_TYPE (target));
7397       outer_align = target_align (TREE_OPERAND (target, 0));
7398       return MIN (this_align, outer_align);
7399 
7400     CASE_CONVERT:
7401     case NON_LVALUE_EXPR:
7402     case VIEW_CONVERT_EXPR:
7403       this_align = TYPE_ALIGN (TREE_TYPE (target));
7404       outer_align = target_align (TREE_OPERAND (target, 0));
7405       return MAX (this_align, outer_align);
7406 
7407     default:
7408       return TYPE_ALIGN (TREE_TYPE (target));
7409     }
7410 }
7411 
7412 
7413 /* Given an rtx VALUE that may contain additions and multiplications, return
7414    an equivalent value that just refers to a register, memory, or constant.
7415    This is done by generating instructions to perform the arithmetic and
7416    returning a pseudo-register containing the value.
7417 
7418    The returned value may be a REG, SUBREG, MEM or constant.  */
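
/* For instance (a hedged sketch): given an address-like rtx such as

     (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101))

   this emits the multiplication (typically as a shift) followed by the
   addition and returns a pseudo register holding the result.  */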
7419 
7420 rtx
7421 force_operand (rtx value, rtx target)
7422 {
7423   rtx op1, op2;
7424   /* Use subtarget as the target for operand 0 of a binary operation.  */
7425   rtx subtarget = get_subtarget (target);
7426   enum rtx_code code = GET_CODE (value);
7427 
7428   /* Check for subreg applied to an expression produced by loop optimizer.  */
7429   if (code == SUBREG
7430       && !REG_P (SUBREG_REG (value))
7431       && !MEM_P (SUBREG_REG (value)))
7432     {
7433       value
7434 	= simplify_gen_subreg (GET_MODE (value),
7435 			       force_reg (GET_MODE (SUBREG_REG (value)),
7436 					  force_operand (SUBREG_REG (value),
7437 							 NULL_RTX)),
7438 			       GET_MODE (SUBREG_REG (value)),
7439 			       SUBREG_BYTE (value));
7440       code = GET_CODE (value);
7441     }
7442 
7443   /* Check for a PIC address load.  */
7444   if ((code == PLUS || code == MINUS)
7445       && XEXP (value, 0) == pic_offset_table_rtx
7446       && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7447 	  || GET_CODE (XEXP (value, 1)) == LABEL_REF
7448 	  || GET_CODE (XEXP (value, 1)) == CONST))
7449     {
7450       if (!subtarget)
7451 	subtarget = gen_reg_rtx (GET_MODE (value));
7452       emit_move_insn (subtarget, value);
7453       return subtarget;
7454     }
7455 
7456   if (ARITHMETIC_P (value))
7457     {
7458       op2 = XEXP (value, 1);
7459       if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7460 	subtarget = 0;
7461       if (code == MINUS && CONST_INT_P (op2))
7462 	{
7463 	  code = PLUS;
7464 	  op2 = negate_rtx (GET_MODE (value), op2);
7465 	}
7466 
7467       /* Check for an addition with OP2 a constant integer and our first
7468          operand a PLUS of a virtual register and something else.  In that
7469          case, we want to emit the sum of the virtual register and the
7470          constant first and then add the other value.  This allows virtual
7471          register instantiation to simply modify the constant rather than
7472          creating another one around this addition.  */
7473       if (code == PLUS && CONST_INT_P (op2)
7474 	  && GET_CODE (XEXP (value, 0)) == PLUS
7475 	  && REG_P (XEXP (XEXP (value, 0), 0))
7476 	  && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7477 	  && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7478 	{
7479 	  rtx temp = expand_simple_binop (GET_MODE (value), code,
7480 					  XEXP (XEXP (value, 0), 0), op2,
7481 					  subtarget, 0, OPTAB_LIB_WIDEN);
7482 	  return expand_simple_binop (GET_MODE (value), code, temp,
7483 				      force_operand (XEXP (XEXP (value,
7484 								 0), 1), 0),
7485 				      target, 0, OPTAB_LIB_WIDEN);
7486 	}
7487 
7488       op1 = force_operand (XEXP (value, 0), subtarget);
7489       op2 = force_operand (op2, NULL_RTX);
7490       switch (code)
7491 	{
7492 	case MULT:
7493 	  return expand_mult (GET_MODE (value), op1, op2, target, 1);
7494 	case DIV:
7495 	  if (!INTEGRAL_MODE_P (GET_MODE (value)))
7496 	    return expand_simple_binop (GET_MODE (value), code, op1, op2,
7497 					target, 1, OPTAB_LIB_WIDEN);
7498 	  else
7499 	    return expand_divmod (0,
7500 				  FLOAT_MODE_P (GET_MODE (value))
7501 				  ? RDIV_EXPR : TRUNC_DIV_EXPR,
7502 				  GET_MODE (value), op1, op2, target, 0);
7503 	case MOD:
7504 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7505 				target, 0);
7506 	case UDIV:
7507 	  return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7508 				target, 1);
7509 	case UMOD:
7510 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7511 				target, 1);
7512 	case ASHIFTRT:
7513 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7514 				      target, 0, OPTAB_LIB_WIDEN);
7515 	default:
7516 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7517 				      target, 1, OPTAB_LIB_WIDEN);
7518 	}
7519     }
7520   if (UNARY_P (value))
7521     {
7522       if (!target)
7523 	target = gen_reg_rtx (GET_MODE (value));
7524       op1 = force_operand (XEXP (value, 0), NULL_RTX);
7525       switch (code)
7526 	{
7527 	case ZERO_EXTEND:
7528 	case SIGN_EXTEND:
7529 	case TRUNCATE:
7530 	case FLOAT_EXTEND:
7531 	case FLOAT_TRUNCATE:
7532 	  convert_move (target, op1, code == ZERO_EXTEND);
7533 	  return target;
7534 
7535 	case FIX:
7536 	case UNSIGNED_FIX:
7537 	  expand_fix (target, op1, code == UNSIGNED_FIX);
7538 	  return target;
7539 
7540 	case FLOAT:
7541 	case UNSIGNED_FLOAT:
7542 	  expand_float (target, op1, code == UNSIGNED_FLOAT);
7543 	  return target;
7544 
7545 	default:
7546 	  return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7547 	}
7548     }
7549 
7550 #ifdef INSN_SCHEDULING
7551   /* On machines that have insn scheduling, we want all memory references to be
7552      explicit, so we need to deal with such paradoxical SUBREGs.  */
7553   if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7554     value
7555       = simplify_gen_subreg (GET_MODE (value),
7556 			     force_reg (GET_MODE (SUBREG_REG (value)),
7557 					force_operand (SUBREG_REG (value),
7558 						       NULL_RTX)),
7559 			     GET_MODE (SUBREG_REG (value)),
7560 			     SUBREG_BYTE (value));
7561 #endif
7562 
7563   return value;
7564 }
7565 
7566 /* Subroutine of expand_expr: return nonzero iff there is no way that
7567    EXP can reference X, which is being modified.  TOP_P is nonzero if this
7568    call is going to be used to determine whether we need a temporary
7569    for EXP, as opposed to a recursive call to this function.
7570 
7571    It is always safe for this routine to return zero since it merely
7572    searches for optimization opportunities.  */
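
/* E.g. store_constructor above relies on a prior safe_from_p check so that
   it can store directly into TARGET without the constructor's own elements
   reading the partially updated object; when X is a MEM that may alias part
   of EXP, this function returns 0 and callers fall back to a temporary
   (an illustrative summary of how the result is used).  */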
7573 
7574 int
7575 safe_from_p (const_rtx x, tree exp, int top_p)
7576 {
7577   rtx exp_rtl = 0;
7578   int i, nops;
7579 
7580   if (x == 0
7581       /* If EXP has varying size, we MUST use a target since we currently
7582 	 have no way of allocating temporaries of variable size
7583 	 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7584 	 So we assume here that something at a higher level has prevented a
7585 	 clash.  This is somewhat bogus, but the best we can do.  Only
7586 	 do this when X is BLKmode and when we are at the top level.  */
7587       || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
7588 	  && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
7589 	  && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
7590 	      || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
7591 	      || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
7592 	      != INTEGER_CST)
7593 	  && GET_MODE (x) == BLKmode)
7594       /* If X is in the outgoing argument area, it is always safe.  */
7595       || (MEM_P (x)
7596 	  && (XEXP (x, 0) == virtual_outgoing_args_rtx
7597 	      || (GET_CODE (XEXP (x, 0)) == PLUS
7598 		  && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
7599     return 1;
7600 
7601   /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7602      find the underlying pseudo.  */
7603   if (GET_CODE (x) == SUBREG)
7604     {
7605       x = SUBREG_REG (x);
7606       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7607 	return 0;
7608     }
7609 
7610   /* Now look at our tree code and possibly recurse.  */
7611   switch (TREE_CODE_CLASS (TREE_CODE (exp)))
7612     {
7613     case tcc_declaration:
7614       exp_rtl = DECL_RTL_IF_SET (exp);
7615       break;
7616 
7617     case tcc_constant:
7618       return 1;
7619 
7620     case tcc_exceptional:
7621       if (TREE_CODE (exp) == TREE_LIST)
7622 	{
7623 	  while (1)
7624 	    {
7625 	      if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
7626 		return 0;
7627 	      exp = TREE_CHAIN (exp);
7628 	      if (!exp)
7629 		return 1;
7630 	      if (TREE_CODE (exp) != TREE_LIST)
7631 		return safe_from_p (x, exp, 0);
7632 	    }
7633 	}
7634       else if (TREE_CODE (exp) == CONSTRUCTOR)
7635 	{
7636 	  constructor_elt *ce;
7637 	  unsigned HOST_WIDE_INT idx;
7638 
7639 	  FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
7640 	    if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
7641 		|| !safe_from_p (x, ce->value, 0))
7642 	      return 0;
7643 	  return 1;
7644 	}
7645       else if (TREE_CODE (exp) == ERROR_MARK)
7646 	return 1;	/* An already-visited SAVE_EXPR? */
7647       else
7648 	return 0;
7649 
7650     case tcc_statement:
7651       /* The only case we look at here is the DECL_INITIAL inside a
7652 	 DECL_EXPR.  */
7653       return (TREE_CODE (exp) != DECL_EXPR
7654 	      || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
7655 	      || !DECL_INITIAL (DECL_EXPR_DECL (exp))
7656 	      || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
7657 
7658     case tcc_binary:
7659     case tcc_comparison:
7660       if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
7661 	return 0;
7662       /* Fall through.  */
7663 
7664     case tcc_unary:
7665       return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7666 
7667     case tcc_expression:
7668     case tcc_reference:
7669     case tcc_vl_exp:
7670       /* Now do code-specific tests.  EXP_RTL is set to any rtx we find in
7671 	 the expression.  If it is set, we conflict iff we are that rtx or
7672 	 both are in memory.  Otherwise, we check all operands of the
7673 	 expression recursively.  */
7674 
7675       switch (TREE_CODE (exp))
7676 	{
7677 	case ADDR_EXPR:
7678 	  /* If the operand is static or we are static, we can't conflict.
7679 	     Likewise if we don't conflict with the operand at all.  */
7680 	  if (staticp (TREE_OPERAND (exp, 0))
7681 	      || TREE_STATIC (exp)
7682 	      || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
7683 	    return 1;
7684 
7685 	  /* Otherwise, the only way this can conflict is if we are taking
7686 	     the address of a DECL whose address is part of X, which is
7687 	     very rare.  */
7688 	  exp = TREE_OPERAND (exp, 0);
7689 	  if (DECL_P (exp))
7690 	    {
7691 	      if (!DECL_RTL_SET_P (exp)
7692 		  || !MEM_P (DECL_RTL (exp)))
7693 		return 0;
7694 	      else
7695 		exp_rtl = XEXP (DECL_RTL (exp), 0);
7696 	    }
7697 	  break;
7698 
7699 	case MEM_REF:
7700 	  if (MEM_P (x)
7701 	      && alias_sets_conflict_p (MEM_ALIAS_SET (x),
7702 					get_alias_set (exp)))
7703 	    return 0;
7704 	  break;
7705 
7706 	case CALL_EXPR:
7707 	  /* Assume that the call will clobber all hard registers and
7708 	     all of memory.  */
7709 	  if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7710 	      || MEM_P (x))
7711 	    return 0;
7712 	  break;
7713 
7714 	case WITH_CLEANUP_EXPR:
7715 	case CLEANUP_POINT_EXPR:
7716 	  /* Lowered by gimplify.c.  */
7717 	  gcc_unreachable ();
7718 
7719 	case SAVE_EXPR:
7720 	  return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7721 
7722 	default:
7723 	  break;
7724 	}
7725 
7726       /* If we have an rtx, we do not need to scan our operands.  */
7727       if (exp_rtl)
7728 	break;
7729 
7730       nops = TREE_OPERAND_LENGTH (exp);
7731       for (i = 0; i < nops; i++)
7732 	if (TREE_OPERAND (exp, i) != 0
7733 	    && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
7734 	  return 0;
7735 
7736       break;
7737 
7738     case tcc_type:
7739       /* Should never get a type here.  */
7740       gcc_unreachable ();
7741     }
7742 
7743   /* If we have an rtl, find any enclosed object.  Then see if we conflict
7744      with it.  */
7745   if (exp_rtl)
7746     {
7747       if (GET_CODE (exp_rtl) == SUBREG)
7748 	{
7749 	  exp_rtl = SUBREG_REG (exp_rtl);
7750 	  if (REG_P (exp_rtl)
7751 	      && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
7752 	    return 0;
7753 	}
7754 
7755       /* If the rtl is X, then it is not safe.  Otherwise, it is unless both
7756 	 are memory and they conflict.  */
7757       return ! (rtx_equal_p (x, exp_rtl)
7758 		|| (MEM_P (x) && MEM_P (exp_rtl)
7759 		    && true_dependence (exp_rtl, VOIDmode, x)));
7760     }
7761 
7762   /* If we reach here, it is safe.  */
7763   return 1;
7764 }
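
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): the usual caller pattern is to drop a proposed TARGET when
   safe_from_p cannot prove that EXP never references it.  Returning 0
   above is always the conservative, correct answer.  */
#if 0
static rtx
expand_into_safe_target (tree exp, rtx target)
{
  if (target != 0 && ! safe_from_p (target, exp, 1))
    target = 0;		/* Let expansion pick a fresh temporary.  */
  return expand_expr (exp, target, VOIDmode, EXPAND_NORMAL);
}
#endif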
7765 
7766 
7767 /* Return the highest power of two that EXP is known to be a multiple of.
7768    This is used in updating alignment of MEMs in array references.  */
7769 
7770 unsigned HOST_WIDE_INT
7771 highest_pow2_factor (const_tree exp)
7772 {
7773   unsigned HOST_WIDE_INT ret;
7774   int trailing_zeros = tree_ctz (exp);
7775   if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
7776     return BIGGEST_ALIGNMENT;
7777   ret = HOST_WIDE_INT_1U << trailing_zeros;
7778   if (ret > BIGGEST_ALIGNMENT)
7779     return BIGGEST_ALIGNMENT;
7780   return ret;
7781 }
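
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): for a constant such as 48 (binary 110000) tree_ctz reports
   4 trailing zero bits, so the factor is 16; the result is always
   clamped to BIGGEST_ALIGNMENT.  */
#if 0
static void
highest_pow2_factor_example (void)
{
  tree cst = build_int_cst (sizetype, 48);
  unsigned HOST_WIDE_INT factor = highest_pow2_factor (cst);
  gcc_checking_assert (factor == MIN ((unsigned HOST_WIDE_INT) 16,
				      (unsigned HOST_WIDE_INT) BIGGEST_ALIGNMENT));
}
#endif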
7782 
7783 /* Similar, except that the alignment requirements of TARGET are
7784    taken into account.  Assume it is at least as aligned as its
7785    type, unless it is a COMPONENT_REF in which case the layout of
7786    the structure gives the alignment.  */
7787 
7788 static unsigned HOST_WIDE_INT
7789 highest_pow2_factor_for_target (const_tree target, const_tree exp)
7790 {
7791   unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
7792   unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
7793 
7794   return MAX (factor, talign);
7795 }
7796 
7797 /* Convert the tree comparison code TCODE to the rtl one where the
7798    signedness is UNSIGNEDP.  */
7799 
7800 static enum rtx_code
7801 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
7802 {
7803   enum rtx_code code;
7804   switch (tcode)
7805     {
7806     case EQ_EXPR:
7807       code = EQ;
7808       break;
7809     case NE_EXPR:
7810       code = NE;
7811       break;
7812     case LT_EXPR:
7813       code = unsignedp ? LTU : LT;
7814       break;
7815     case LE_EXPR:
7816       code = unsignedp ? LEU : LE;
7817       break;
7818     case GT_EXPR:
7819       code = unsignedp ? GTU : GT;
7820       break;
7821     case GE_EXPR:
7822       code = unsignedp ? GEU : GE;
7823       break;
7824     case UNORDERED_EXPR:
7825       code = UNORDERED;
7826       break;
7827     case ORDERED_EXPR:
7828       code = ORDERED;
7829       break;
7830     case UNLT_EXPR:
7831       code = UNLT;
7832       break;
7833     case UNLE_EXPR:
7834       code = UNLE;
7835       break;
7836     case UNGT_EXPR:
7837       code = UNGT;
7838       break;
7839     case UNGE_EXPR:
7840       code = UNGE;
7841       break;
7842     case UNEQ_EXPR:
7843       code = UNEQ;
7844       break;
7845     case LTGT_EXPR:
7846       code = LTGT;
7847       break;
7848 
7849     default:
7850       gcc_unreachable ();
7851     }
7852   return code;
7853 }
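
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): the signedness flag matters only for the ordering
   comparisons, e.g. LT_EXPR becomes LTU for unsigned operands and LT
   otherwise, while EQ_EXPR always maps to EQ.  */
#if 0
static void
convert_tree_comp_example (void)
{
  gcc_checking_assert (convert_tree_comp_to_rtx (LT_EXPR, 1) == LTU);
  gcc_checking_assert (convert_tree_comp_to_rtx (LT_EXPR, 0) == LT);
  gcc_checking_assert (convert_tree_comp_to_rtx (EQ_EXPR, 1) == EQ);
}
#endif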
7854 
7855 /* Subroutine of expand_expr.  Expand the two operands of a binary
7856    expression EXP0 and EXP1 placing the results in OP0 and OP1.
7857    The value may be stored in TARGET if TARGET is nonzero.  The
7858    MODIFIER argument is as documented by expand_expr.  */
7859 
7860 void
7861 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
7862 		 enum expand_modifier modifier)
7863 {
7864   if (! safe_from_p (target, exp1, 1))
7865     target = 0;
7866   if (operand_equal_p (exp0, exp1, 0))
7867     {
7868       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7869       *op1 = copy_rtx (*op0);
7870     }
7871   else
7872     {
7873       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7874       *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
7875     }
7876 }
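
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): a typical binary-operation caller expands both operands in
   one call, letting operand 0 reuse TARGET only when safe_from_p
   allows it, and then hands the pair to an optab.  */
#if 0
static rtx
expand_binary_example (tree exp0, tree exp1, machine_mode mode, rtx target)
{
  rtx op0, op1;
  expand_operands (exp0, exp1, target, &op0, &op1, EXPAND_NORMAL);
  return expand_binop (mode, add_optab, op0, op1, target,
		       /*unsignedp=*/0, OPTAB_LIB_WIDEN);
}
#endif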
7877 
7878 
7879 /* Return a MEM that contains constant EXP.  DEFER is as for
7880    output_constant_def and MODIFIER is as for expand_expr.  */
7881 
7882 static rtx
7883 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
7884 {
7885   rtx mem;
7886 
7887   mem = output_constant_def (exp, defer);
7888   if (modifier != EXPAND_INITIALIZER)
7889     mem = use_anchored_address (mem);
7890   return mem;
7891 }
7892 
7893 /* A subroutine of expand_expr_addr_expr.  Evaluate the address of EXP.
7894    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
7895 
7896 static rtx
7897 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
7898 		         enum expand_modifier modifier, addr_space_t as)
7899 {
7900   rtx result, subtarget;
7901   tree inner, offset;
7902   poly_int64 bitsize, bitpos;
7903   int unsignedp, reversep, volatilep = 0;
7904   machine_mode mode1;
7905 
7906   /* If we are taking the address of a constant and are at the top level,
7907      we have to use output_constant_def since we can't call force_const_mem
7908      at top level.  */
7909   /* ??? This should be considered a front-end bug.  We should not be
7910      generating ADDR_EXPR of something that isn't an LVALUE.  The only
7911      exception here is STRING_CST.  */
7912   if (CONSTANT_CLASS_P (exp))
7913     {
7914       result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
7915       if (modifier < EXPAND_SUM)
7916 	result = force_operand (result, target);
7917       return result;
7918     }
7919 
7920   /* Everything must be something allowed by is_gimple_addressable.  */
7921   switch (TREE_CODE (exp))
7922     {
7923     case INDIRECT_REF:
7924       /* This case will happen via recursion for &a->b.  */
7925       return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
7926 
7927     case MEM_REF:
7928       {
7929 	tree tem = TREE_OPERAND (exp, 0);
7930 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
7931 	  tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
7932 	return expand_expr (tem, target, tmode, modifier);
7933       }
7934 
7935     case TARGET_MEM_REF:
7936       return addr_for_mem_ref (exp, as, true);
7937 
7938     case CONST_DECL:
7939       /* Expand the initializer like constants above.  */
7940       result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
7941 					   0, modifier), 0);
7942       if (modifier < EXPAND_SUM)
7943 	result = force_operand (result, target);
7944       return result;
7945 
7946     case REALPART_EXPR:
7947       /* The real part of the complex number is always first, therefore
7948 	 the address is the same as the address of the parent object.  */
7949       offset = 0;
7950       bitpos = 0;
7951       inner = TREE_OPERAND (exp, 0);
7952       break;
7953 
7954     case IMAGPART_EXPR:
7955       /* The imaginary part of the complex number is always second.
7956 	 The expression is therefore always offset by the size of the
7957 	 scalar type.  */
7958       offset = 0;
7959       bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
7960       inner = TREE_OPERAND (exp, 0);
7961       break;
7962 
7963     case COMPOUND_LITERAL_EXPR:
7964       /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
7965 	 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
7966 	 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
7967 	 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
7968 	 the initializers aren't gimplified.  */
7969       if (COMPOUND_LITERAL_EXPR_DECL (exp)
7970 	  && is_global_var (COMPOUND_LITERAL_EXPR_DECL (exp)))
7971 	return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
7972 					target, tmode, modifier, as);
7973       /* FALLTHRU */
7974     default:
7975       /* If the object is a DECL, then expand it for its rtl.  Don't bypass
7976 	 expand_expr, as that can have various side effects; LABEL_DECLs for
7977 	 example, may not have their DECL_RTL set yet.  Expand the rtl of
7978 	 CONSTRUCTORs too, which should yield a memory reference for the
7979 	 constructor's contents.  Assume language specific tree nodes can
7980 	 be expanded in some interesting way.  */
7981       gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
7982       if (DECL_P (exp)
7983 	  || TREE_CODE (exp) == CONSTRUCTOR
7984 	  || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
7985 	{
7986 	  result = expand_expr (exp, target, tmode,
7987 				modifier == EXPAND_INITIALIZER
7988 				? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
7989 
7990 	  /* If the DECL isn't in memory, then the DECL wasn't properly
7991 	     marked TREE_ADDRESSABLE, which will be either a front-end
7992 	     or a tree optimizer bug.  */
7993 
7994 	  gcc_assert (MEM_P (result));
7995 	  result = XEXP (result, 0);
7996 
7997 	  /* ??? Is this needed anymore?  */
7998 	  if (DECL_P (exp))
7999 	    TREE_USED (exp) = 1;
8000 
8001 	  if (modifier != EXPAND_INITIALIZER
8002 	      && modifier != EXPAND_CONST_ADDRESS
8003 	      && modifier != EXPAND_SUM)
8004 	    result = force_operand (result, target);
8005 	  return result;
8006 	}
8007 
8008       /* Pass FALSE as the last argument to get_inner_reference although
8009 	 we are expanding to RTL.  The rationale is that we know how to
8010 	 handle "aligning nodes" here: we can just bypass them because
8011 	 they won't change the final object whose address will be returned
8012 	 (they actually exist only for that purpose).  */
8013       inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
8014 				   &unsignedp, &reversep, &volatilep);
8015       break;
8016     }
8017 
8018   /* We must have made progress.  */
8019   gcc_assert (inner != exp);
8020 
8021   subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
8022   /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8023      inner alignment, force the inner to be sufficiently aligned.  */
8024   if (CONSTANT_CLASS_P (inner)
8025       && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
8026     {
8027       inner = copy_node (inner);
8028       TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
8029       SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
8030       TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
8031     }
8032   result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
8033 
8034   if (offset)
8035     {
8036       rtx tmp;
8037 
8038       if (modifier != EXPAND_NORMAL)
8039 	result = force_operand (result, NULL);
8040       tmp = expand_expr (offset, NULL_RTX, tmode,
8041 			 modifier == EXPAND_INITIALIZER
8042 			  ? EXPAND_INITIALIZER : EXPAND_NORMAL);
8043 
8044       /* expand_expr is allowed to return an object in a mode other
8045 	 than TMODE.  If it did, we need to convert.  */
8046       if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
8047 	tmp = convert_modes (tmode, GET_MODE (tmp),
8048 			     tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
8049       result = convert_memory_address_addr_space (tmode, result, as);
8050       tmp = convert_memory_address_addr_space (tmode, tmp, as);
8051 
8052       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8053 	result = simplify_gen_binary (PLUS, tmode, result, tmp);
8054       else
8055 	{
8056 	  subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8057 	  result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8058 					1, OPTAB_LIB_WIDEN);
8059 	}
8060     }
8061 
8062   if (maybe_ne (bitpos, 0))
8063     {
8064       /* Someone beforehand should have rejected taking the address
8065 	 of an object that isn't byte-aligned.  */
8066       poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8067       result = convert_memory_address_addr_space (tmode, result, as);
8068       result = plus_constant (tmode, result, bytepos);
8069       if (modifier < EXPAND_SUM)
8070 	result = force_operand (result, target);
8071     }
8072 
8073   return result;
8074 }
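
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): for an address like &s.f[i] the code above recovers INNER
   (s), a variable OFFSET (for i) and a constant BITPOS (for the
   field), and the tail rebuilds
   addr(INNER) + OFFSET + BITPOS / BITS_PER_UNIT roughly as follows.  */
#if 0
static rtx
recombine_address_example (rtx inner_addr, rtx offset_rtx, poly_int64 bitpos,
			   scalar_int_mode tmode)
{
  rtx result = simplify_gen_binary (PLUS, tmode, inner_addr, offset_rtx);
  return plus_constant (tmode, result, exact_div (bitpos, BITS_PER_UNIT));
}
#endif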
8075 
8076 /* A subroutine of expand_expr.  Evaluate EXP, which is an ADDR_EXPR.
8077    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
8078 
8079 static rtx
8080 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8081 		       enum expand_modifier modifier)
8082 {
8083   addr_space_t as = ADDR_SPACE_GENERIC;
8084   scalar_int_mode address_mode = Pmode;
8085   scalar_int_mode pointer_mode = ptr_mode;
8086   machine_mode rmode;
8087   rtx result;
8088 
8089   /* Target mode of VOIDmode says "whatever's natural".  */
8090   if (tmode == VOIDmode)
8091     tmode = TYPE_MODE (TREE_TYPE (exp));
8092 
8093   if (POINTER_TYPE_P (TREE_TYPE (exp)))
8094     {
8095       as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8096       address_mode = targetm.addr_space.address_mode (as);
8097       pointer_mode = targetm.addr_space.pointer_mode (as);
8098     }
8099 
8100   /* We can get called with some Weird Things if the user does silliness
8101      like "(short) &a".  In that case, convert_memory_address won't do
8102      the right thing, so ignore the given target mode.  */
8103   scalar_int_mode new_tmode = (tmode == pointer_mode
8104 			       ? pointer_mode
8105 			       : address_mode);
8106 
8107   result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8108 				    new_tmode, modifier, as);
8109 
8110   /* Despite expand_expr's claims about ignoring TMODE when not
8111      strictly convenient, things break if we don't honor it.  Note
8112      that combined with the above, we only do this for pointer modes.  */
8113   rmode = GET_MODE (result);
8114   if (rmode == VOIDmode)
8115     rmode = new_tmode;
8116   if (rmode != new_tmode)
8117     result = convert_memory_address_addr_space (new_tmode, result, as);
8118 
8119   return result;
8120 }
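
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): on targets where the address space's address_mode is wider
   than its pointer_mode, the code above expands in the wider mode and
   converts at the end, so a caller asking for ptr_mode still gets
   ptr_mode back.  */
#if 0
static rtx
addr_in_pointer_mode_example (tree addr_expr, rtx target)
{
  /* TMODE is honored only when it names one of the two pointer-ish
     modes; something like "(short) &a" falls back to address_mode.  */
  return expand_expr_addr_expr (addr_expr, target, ptr_mode, EXPAND_NORMAL);
}
#endif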
8121 
8122 /* Generate code for computing CONSTRUCTOR EXP.
8123    An rtx for the computed value is returned.  If AVOID_TEMP_MEM
8124    is TRUE, then instead of creating a temporary variable in memory,
8125    NULL is returned and the caller needs to handle it differently.  */
8126 
8127 static rtx
8128 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8129 		    bool avoid_temp_mem)
8130 {
8131   tree type = TREE_TYPE (exp);
8132   machine_mode mode = TYPE_MODE (type);
8133 
8134   /* Try to avoid creating a temporary at all.  This is possible
8135      if all of the initializer is zero.
8136      FIXME: try to handle all [0..255] initializers we can handle
8137      with memset.  */
8138   if (TREE_STATIC (exp)
8139       && !TREE_ADDRESSABLE (exp)
8140       && target != 0 && mode == BLKmode
8141       && all_zeros_p (exp))
8142     {
8143       clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8144       return target;
8145     }
8146 
8147   /* All elts simple constants => refer to a constant in memory.  But
8148      if this is a non-BLKmode mode, let it store a field at a time
8149      since that should make a CONST_INT, CONST_WIDE_INT or
8150      CONST_DOUBLE when we fold.  Likewise, if we have a target we can
8151      use, it is best to store directly into the target unless the type
8152      is large enough that memcpy will be used.  If we are making an
8153      initializer and all operands are constant, put it in memory as
8154      well.
8155 
8156      FIXME: Avoid trying to fill vector constructors piece-meal.
8157      Output them with output_constant_def below unless we're sure
8158      they're zeros.  This should go away when vector initializers
8159      are treated like VECTOR_CST instead of arrays.  */
8160   if ((TREE_STATIC (exp)
8161        && ((mode == BLKmode
8162 	    && ! (target != 0 && safe_from_p (target, exp, 1)))
8163 	   || TREE_ADDRESSABLE (exp)
8164 	   || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8165 	       && (! can_move_by_pieces
8166 		   (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8167 		    TYPE_ALIGN (type)))
8168 	       && ! mostly_zeros_p (exp))))
8169       || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8170 	  && TREE_CONSTANT (exp)))
8171     {
8172       rtx constructor;
8173 
8174       if (avoid_temp_mem)
8175 	return NULL_RTX;
8176 
8177       constructor = expand_expr_constant (exp, 1, modifier);
8178 
8179       if (modifier != EXPAND_CONST_ADDRESS
8180 	  && modifier != EXPAND_INITIALIZER
8181 	  && modifier != EXPAND_SUM)
8182 	constructor = validize_mem (constructor);
8183 
8184       return constructor;
8185     }
8186 
8187   /* Handle calls that pass values in multiple non-contiguous
8188      locations.  The Irix 6 ABI has examples of this.  */
8189   if (target == 0 || ! safe_from_p (target, exp, 1)
8190       || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
8191     {
8192       if (avoid_temp_mem)
8193 	return NULL_RTX;
8194 
8195       target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8196     }
8197 
8198   store_constructor (exp, target, 0, int_expr_size (exp), false);
8199   return target;
8200 }
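
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): the paths above boil down to "clear the block if the
   initializer is all zeros, otherwise store it element by element",
   which a caller would otherwise have to open-code like this.  */
#if 0
static void
store_or_clear_example (tree exp, rtx target)
{
  if (all_zeros_p (exp))
    clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
  else
    store_constructor (exp, target, 0, int_expr_size (exp), false);
}
#endif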
8201 
8202 
8203 /* expand_expr: generate code for computing expression EXP.
8204    An rtx for the computed value is returned.  The value is never null.
8205    In the case of a void EXP, const0_rtx is returned.
8206 
8207    The value may be stored in TARGET if TARGET is nonzero.
8208    TARGET is just a suggestion; callers must assume that
8209    the rtx returned may not be the same as TARGET.
8210 
8211    If TARGET is CONST0_RTX, it means that the value will be ignored.
8212 
8213    If TMODE is not VOIDmode, it suggests generating the
8214    result in mode TMODE.  But this is done only when convenient.
8215    Otherwise, TMODE is ignored and the value is generated in its natural mode.
8216    TMODE is just a suggestion; callers must assume that
8217    the rtx returned may not have mode TMODE.
8218 
8219    Note that TARGET may have neither TMODE nor MODE.  In that case, it
8220    probably will not be used.
8221 
8222    If MODIFIER is EXPAND_SUM then when EXP is an addition
8223    we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8224    or a nest of (PLUS ...) and (MINUS ...) where the terms are
8225    products as above, or REG or MEM, or constant.
8226    Ordinarily in such cases we would output mul or add instructions
8227    and then return a pseudo reg containing the sum.
8228 
8229    EXPAND_INITIALIZER is much like EXPAND_SUM except that
8230    it also marks a label as absolutely required (it can't be dead).
8231    It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8232    This is used for outputting expressions used in initializers.
8233 
8234    EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8235    with a constant address even if that address is not normally legitimate.
8236    EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8237 
8238    EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8239    a call parameter.  Such targets require special care as we haven't yet
8240    marked TARGET so that it's safe from being trashed by libcalls.  We
8241    don't want to use TARGET for anything but the final result;
8242    intermediate values must go elsewhere.  Additionally, calls to
8243    emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8244 
8245    If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8246    address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8247    DECL_RTL of the VAR_DECL.  *ALT_RTL is also set if EXP is a
8248    COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8249    recursively.
8250 
8251    If INNER_REFERENCE_P is true, we are expanding an inner reference.
8252    In this case, we don't adjust a returned MEM rtx that wouldn't be
8253    sufficiently aligned for its mode; instead, it's up to the caller
8254    to deal with it afterwards.  This is used to make sure that unaligned
8255    base objects for which out-of-bounds accesses are supported, for
8256    example record types with trailing arrays, aren't realigned behind
8257    the back of the caller.
8258    The normal operating mode is to pass FALSE for this parameter.  */
8259 
8260 rtx
8261 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8262 		  enum expand_modifier modifier, rtx *alt_rtl,
8263 		  bool inner_reference_p)
8264 {
8265   rtx ret;
8266 
8267   /* Handle ERROR_MARK before anybody tries to access its type.  */
8268   if (TREE_CODE (exp) == ERROR_MARK
8269       || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8270     {
8271       ret = CONST0_RTX (tmode);
8272       return ret ? ret : const0_rtx;
8273     }
8274 
8275   ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8276 			    inner_reference_p);
8277   return ret;
8278 }
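
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): most clients reach this through the inline wrappers in
   expr.h; expand_normal, for instance, amounts to a call with
   NULL_RTX, VOIDmode and EXPAND_NORMAL, and every caller must treat
   both TARGET and TMODE as hints that may be ignored.  */
#if 0
static rtx
expand_hint_only_example (tree exp, rtx suggested_target)
{
  rtx val = expand_expr_real (exp, suggested_target, VOIDmode,
			      EXPAND_NORMAL, NULL, false);
  /* VAL need not be SUGGESTED_TARGET, nor have its mode.  */
  return val;
}
#endif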
8279 
8280 /* Try to expand the conditional expression which is represented by
8281    TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds,
8282    return the rtl reg which represents the result.  Otherwise return
8283    NULL_RTX.  */
8284 
8285 static rtx
8286 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8287 			      tree treeop1 ATTRIBUTE_UNUSED,
8288 			      tree treeop2 ATTRIBUTE_UNUSED)
8289 {
8290   rtx insn;
8291   rtx op00, op01, op1, op2;
8292   enum rtx_code comparison_code;
8293   machine_mode comparison_mode;
8294   gimple *srcstmt;
8295   rtx temp;
8296   tree type = TREE_TYPE (treeop1);
8297   int unsignedp = TYPE_UNSIGNED (type);
8298   machine_mode mode = TYPE_MODE (type);
8299   machine_mode orig_mode = mode;
8300   static bool expanding_cond_expr_using_cmove = false;
8301 
8302   /* Conditional move expansion can end up TERing two operands which,
8303      when recursively hitting conditional expressions, can result in
8304      exponential behavior if the cmove expansion ultimately fails.
8305      It's hardly profitable to TER a cmove into a cmove so avoid doing
8306      that by failing early if we end up recursing.  */
8307   if (expanding_cond_expr_using_cmove)
8308     return NULL_RTX;
8309 
8310   /* If we cannot do a conditional move on the mode, try doing it
8311      with the promoted mode. */
8312   if (!can_conditionally_move_p (mode))
8313     {
8314       mode = promote_mode (type, mode, &unsignedp);
8315       if (!can_conditionally_move_p (mode))
8316 	return NULL_RTX;
8317       temp = assign_temp (type, 0, 0); /* Use promoted mode for temp.  */
8318     }
8319   else
8320     temp = assign_temp (type, 0, 1);
8321 
8322   expanding_cond_expr_using_cmove = true;
8323   start_sequence ();
8324   expand_operands (treeop1, treeop2,
8325 		   temp, &op1, &op2, EXPAND_NORMAL);
8326 
8327   if (TREE_CODE (treeop0) == SSA_NAME
8328       && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8329     {
8330       tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8331       enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8332       op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8333       op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8334       comparison_mode = TYPE_MODE (type);
8335       unsignedp = TYPE_UNSIGNED (type);
8336       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8337     }
8338   else if (COMPARISON_CLASS_P (treeop0))
8339     {
8340       tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8341       enum tree_code cmpcode = TREE_CODE (treeop0);
8342       op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8343       op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8344       unsignedp = TYPE_UNSIGNED (type);
8345       comparison_mode = TYPE_MODE (type);
8346       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8347     }
8348   else
8349     {
8350       op00 = expand_normal (treeop0);
8351       op01 = const0_rtx;
8352       comparison_code = NE;
8353       comparison_mode = GET_MODE (op00);
8354       if (comparison_mode == VOIDmode)
8355 	comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8356     }
8357   expanding_cond_expr_using_cmove = false;
8358 
8359   if (GET_MODE (op1) != mode)
8360     op1 = gen_lowpart (mode, op1);
8361 
8362   if (GET_MODE (op2) != mode)
8363     op2 = gen_lowpart (mode, op2);
8364 
8365   /* Try to emit the conditional move.  */
8366   insn = emit_conditional_move (temp, comparison_code,
8367 				op00, op01, comparison_mode,
8368 				op1, op2, mode,
8369 				unsignedp);
8370 
8371   /* If we could do the conditional move, emit the sequence,
8372      and return.  */
8373   if (insn)
8374     {
8375       rtx_insn *seq = get_insns ();
8376       end_sequence ();
8377       emit_insn (seq);
8378       return convert_modes (orig_mode, mode, temp, 0);
8379     }
8380 
8381   /* Otherwise discard the sequence and fall back to code with
8382      branches.  */
8383   end_sequence ();
8384   return NULL_RTX;
8385 }
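
/* Illustrative sketch only (kept out of the build; the helper name is
   made up): the function above is the TER path for GIMPLE like
   x_1 = a_2 < b_3 ? c_4 : d_5;  once the comparison and both arms are
   in registers it reduces to a single emit_conditional_move.  */
#if 0
static rtx
cmove_example (rtx dest, rtx a, rtx b, rtx c, rtx d, machine_mode mode)
{
  return emit_conditional_move (dest, LT, a, b, mode, c, d, mode,
				/*unsignedp=*/0);
}
#endif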
8386 
8387 rtx
8388 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8389 		    enum expand_modifier modifier)
8390 {
8391   rtx op0, op1, op2, temp;
8392   rtx_code_label *lab;
8393   tree type;
8394   int unsignedp;
8395   machine_mode mode;
8396   scalar_int_mode int_mode;
8397   enum tree_code code = ops->code;
8398   optab this_optab;
8399   rtx subtarget, original_target;
8400   int ignore;
8401   bool reduce_bit_field;
8402   location_t loc = ops->location;
8403   tree treeop0, treeop1, treeop2;
8404 #define REDUCE_BIT_FIELD(expr)	(reduce_bit_field			  \
8405 				 ? reduce_to_bit_field_precision ((expr), \
8406 								  target, \
8407 								  type)	  \
8408 				 : (expr))
8409 
8410   type = ops->type;
8411   mode = TYPE_MODE (type);
8412   unsignedp = TYPE_UNSIGNED (type);
8413 
8414   treeop0 = ops->op0;
8415   treeop1 = ops->op1;
8416   treeop2 = ops->op2;
8417 
8418   /* We should be called only on simple (binary or unary) expressions,
8419      exactly those that are valid in gimple expressions that aren't
8420      GIMPLE_SINGLE_RHS (or invalid).  */
8421   gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8422 	      || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8423 	      || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8424 
8425   ignore = (target == const0_rtx
8426 	    || ((CONVERT_EXPR_CODE_P (code)
8427 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8428 		&& TREE_CODE (type) == VOID_TYPE));
8429 
8430   /* We should be called only if we need the result.  */
8431   gcc_assert (!ignore);
8432 
8433   /* An operation in what may be a bit-field type needs the
8434      result to be reduced to the precision of the bit-field type,
8435      which is narrower than that of the type's mode.  */
8436   reduce_bit_field = (INTEGRAL_TYPE_P (type)
8437 		      && !type_has_mode_precision_p (type));
8438 
8439   if (reduce_bit_field
8440       && (modifier == EXPAND_STACK_PARM
8441 	  || (target && GET_MODE (target) != mode)))
8442     target = 0;
8443 
8444   /* Use subtarget as the target for operand 0 of a binary operation.  */
8445   subtarget = get_subtarget (target);
8446   original_target = target;
8447 
8448   switch (code)
8449     {
8450     case NON_LVALUE_EXPR:
8451     case PAREN_EXPR:
8452     CASE_CONVERT:
8453       if (treeop0 == error_mark_node)
8454 	return const0_rtx;
8455 
8456       if (TREE_CODE (type) == UNION_TYPE)
8457 	{
8458 	  tree valtype = TREE_TYPE (treeop0);
8459 
8460 	  /* If both input and output are BLKmode, this conversion isn't doing
8461 	     anything except possibly changing memory attribute.  */
8462 	  if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
8463 	    {
8464 	      rtx result = expand_expr (treeop0, target, tmode,
8465 					modifier);
8466 
8467 	      result = copy_rtx (result);
8468 	      set_mem_attributes (result, type, 0);
8469 	      return result;
8470 	    }
8471 
8472 	  if (target == 0)
8473 	    {
8474 	      if (TYPE_MODE (type) != BLKmode)
8475 		target = gen_reg_rtx (TYPE_MODE (type));
8476 	      else
8477 		target = assign_temp (type, 1, 1);
8478 	    }
8479 
8480 	  if (MEM_P (target))
8481 	    /* Store data into beginning of memory target.  */
8482 	    store_expr (treeop0,
8483 			adjust_address (target, TYPE_MODE (valtype), 0),
8484 			modifier == EXPAND_STACK_PARM,
8485 			false, TYPE_REVERSE_STORAGE_ORDER (type));
8486 
8487 	  else
8488 	    {
8489 	      gcc_assert (REG_P (target)
8490 			  && !TYPE_REVERSE_STORAGE_ORDER (type));
8491 
8492 	      /* Store this field into a union of the proper type.  */
8493 	      poly_uint64 op0_size
8494 		= tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
8495 	      poly_uint64 union_size = GET_MODE_BITSIZE (mode);
8496 	      store_field (target,
8497 			   /* The conversion must be constructed so that
8498 			      we know at compile time how many bits
8499 			      to preserve.  */
8500 			   ordered_min (op0_size, union_size),
8501 			   0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
8502 			   false, false);
8503 	    }
8504 
8505 	  /* Return the entire union.  */
8506 	  return target;
8507 	}
8508 
8509       if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
8510 	{
8511 	  op0 = expand_expr (treeop0, target, VOIDmode,
8512 			     modifier);
8513 
8514 	  /* If the signedness of the conversion differs and OP0 is
8515 	     a promoted SUBREG, clear that indication since we now
8516 	     have to do the proper extension.  */
8517 	  if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
8518 	      && GET_CODE (op0) == SUBREG)
8519 	    SUBREG_PROMOTED_VAR_P (op0) = 0;
8520 
8521 	  return REDUCE_BIT_FIELD (op0);
8522 	}
8523 
8524       op0 = expand_expr (treeop0, NULL_RTX, mode,
8525 			 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
8526       if (GET_MODE (op0) == mode)
8527 	;
8528 
8529       /* If OP0 is a constant, just convert it into the proper mode.  */
8530       else if (CONSTANT_P (op0))
8531 	{
8532 	  tree inner_type = TREE_TYPE (treeop0);
8533 	  machine_mode inner_mode = GET_MODE (op0);
8534 
8535 	  if (inner_mode == VOIDmode)
8536 	    inner_mode = TYPE_MODE (inner_type);
8537 
8538 	  if (modifier == EXPAND_INITIALIZER)
8539 	    op0 = lowpart_subreg (mode, op0, inner_mode);
8540 	  else
8541 	    op0 = convert_modes (mode, inner_mode, op0,
8542 				 TYPE_UNSIGNED (inner_type));
8543 	}
8544 
8545       else if (modifier == EXPAND_INITIALIZER)
8546 	op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8547 			     ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
8548 
8549       else if (target == 0)
8550 	op0 = convert_to_mode (mode, op0,
8551 			       TYPE_UNSIGNED (TREE_TYPE
8552 					      (treeop0)));
8553       else
8554 	{
8555 	  convert_move (target, op0,
8556 			TYPE_UNSIGNED (TREE_TYPE (treeop0)));
8557 	  op0 = target;
8558 	}
8559 
8560       return REDUCE_BIT_FIELD (op0);
8561 
8562     case ADDR_SPACE_CONVERT_EXPR:
8563       {
8564 	tree treeop0_type = TREE_TYPE (treeop0);
8565 
8566 	gcc_assert (POINTER_TYPE_P (type));
8567 	gcc_assert (POINTER_TYPE_P (treeop0_type));
8568 
8569 	addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
8570 	addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
8571 
8572         /* Conversions between pointers to the same address space should
8573 	   have been implemented via CONVERT_EXPR / NOP_EXPR.  */
8574 	gcc_assert (as_to != as_from);
8575 
8576 	op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
8577 
8578         /* Ask target code to handle conversion between pointers
8579 	   to overlapping address spaces.  */
8580 	if (targetm.addr_space.subset_p (as_to, as_from)
8581 	    || targetm.addr_space.subset_p (as_from, as_to))
8582 	  {
8583 	    op0 = targetm.addr_space.convert (op0, treeop0_type, type);
8584 	  }
8585         else
8586           {
8587 	    /* For disjoint address spaces, converting anything but a null
8588 	       pointer invokes undefined behavior.  We truncate or extend the
8589 	       value as if we'd converted via integers, which handles 0 as
8590 	       required, and all others as the programmer likely expects.  */
8591 #ifndef POINTERS_EXTEND_UNSIGNED
8592 	    const int POINTERS_EXTEND_UNSIGNED = 1;
8593 #endif
8594 	    op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
8595 				 op0, POINTERS_EXTEND_UNSIGNED);
8596 	  }
8597 	gcc_assert (op0);
8598 	return op0;
8599       }
8600 
8601     case POINTER_PLUS_EXPR:
8602       /* Even though the sizetype mode and the pointer's mode can be different,
8603          expand is able to handle this correctly and get the correct result out
8604          of the PLUS_EXPR code.  */
8605       /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
8606          if sizetype precision is smaller than pointer precision.  */
8607       if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
8608 	treeop1 = fold_convert_loc (loc, type,
8609 				    fold_convert_loc (loc, ssizetype,
8610 						      treeop1));
8611       /* If sizetype precision is larger than pointer precision, truncate the
8612 	 offset to have matching modes.  */
8613       else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
8614 	treeop1 = fold_convert_loc (loc, type, treeop1);
8615       /* FALLTHRU */
8616 
8617     case PLUS_EXPR:
8618       /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8619 	 something else, make sure we add the register to the constant and
8620 	 then to the other thing.  This case can occur during strength
8621 	 reduction and doing it this way will produce better code if the
8622 	 frame pointer or argument pointer is eliminated.
8623 
8624 	 fold-const.c will ensure that the constant is always in the inner
8625 	 PLUS_EXPR, so the only case we need to do anything about is if
8626 	 sp, ap, or fp is our second argument, in which case we must swap
8627 	 the innermost first argument and our second argument.  */
8628 
8629       if (TREE_CODE (treeop0) == PLUS_EXPR
8630 	  && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
8631 	  && VAR_P (treeop1)
8632 	  && (DECL_RTL (treeop1) == frame_pointer_rtx
8633 	      || DECL_RTL (treeop1) == stack_pointer_rtx
8634 	      || DECL_RTL (treeop1) == arg_pointer_rtx))
8635 	{
8636 	  gcc_unreachable ();
8637 	}
8638 
8639       /* If the result is to be ptr_mode and we are adding an integer to
8640 	 something, we might be forming a constant.  So try to use
8641 	 plus_constant.  If it produces a sum and we can't accept it,
8642 	 use force_operand.  This allows P = &ARR[const] to generate
8643 	 efficient code on machines where a SYMBOL_REF is not a valid
8644 	 address.
8645 
8646 	 If this is an EXPAND_SUM call, always return the sum.  */
8647       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
8648 	  || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
8649 	{
8650 	  if (modifier == EXPAND_STACK_PARM)
8651 	    target = 0;
8652 	  if (TREE_CODE (treeop0) == INTEGER_CST
8653 	      && HWI_COMPUTABLE_MODE_P (mode)
8654 	      && TREE_CONSTANT (treeop1))
8655 	    {
8656 	      rtx constant_part;
8657 	      HOST_WIDE_INT wc;
8658 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
8659 
8660 	      op1 = expand_expr (treeop1, subtarget, VOIDmode,
8661 				 EXPAND_SUM);
8662 	      /* Use wi::shwi to ensure that the constant is
8663 		 truncated according to the mode of OP1, then sign extended
8664 		 to a HOST_WIDE_INT.  Using the constant directly can result
8665 		 in non-canonical RTL in a 64x32 cross compile.  */
8666 	      wc = TREE_INT_CST_LOW (treeop0);
8667 	      constant_part =
8668 		immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8669 	      op1 = plus_constant (mode, op1, INTVAL (constant_part));
8670 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8671 		op1 = force_operand (op1, target);
8672 	      return REDUCE_BIT_FIELD (op1);
8673 	    }
8674 
8675 	  else if (TREE_CODE (treeop1) == INTEGER_CST
8676 		   && HWI_COMPUTABLE_MODE_P (mode)
8677 		   && TREE_CONSTANT (treeop0))
8678 	    {
8679 	      rtx constant_part;
8680 	      HOST_WIDE_INT wc;
8681 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
8682 
8683 	      op0 = expand_expr (treeop0, subtarget, VOIDmode,
8684 				 (modifier == EXPAND_INITIALIZER
8685 				 ? EXPAND_INITIALIZER : EXPAND_SUM));
8686 	      if (! CONSTANT_P (op0))
8687 		{
8688 		  op1 = expand_expr (treeop1, NULL_RTX,
8689 				     VOIDmode, modifier);
8690 		  /* Return a PLUS if modifier says it's OK.  */
8691 		  if (modifier == EXPAND_SUM
8692 		      || modifier == EXPAND_INITIALIZER)
8693 		    return simplify_gen_binary (PLUS, mode, op0, op1);
8694 		  goto binop2;
8695 		}
8696 	      /* Use wi::shwi to ensure that the constant is
8697 		 truncated according to the mode of OP1, then sign extended
8698 		 to a HOST_WIDE_INT.  Using the constant directly can result
8699 		 in non-canonical RTL in a 64x32 cross compile.  */
8700 	      wc = TREE_INT_CST_LOW (treeop1);
8701 	      constant_part
8702 		= immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8703 	      op0 = plus_constant (mode, op0, INTVAL (constant_part));
8704 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8705 		op0 = force_operand (op0, target);
8706 	      return REDUCE_BIT_FIELD (op0);
8707 	    }
8708 	}
8709 
8710       /* Use TER to expand pointer addition of a negated value
8711 	 as pointer subtraction.  */
8712       if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
8713 	   || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
8714 	       && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
8715 	  && TREE_CODE (treeop1) == SSA_NAME
8716 	  && TYPE_MODE (TREE_TYPE (treeop0))
8717 	     == TYPE_MODE (TREE_TYPE (treeop1)))
8718 	{
8719 	  gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
8720 	  if (def)
8721 	    {
8722 	      treeop1 = gimple_assign_rhs1 (def);
8723 	      code = MINUS_EXPR;
8724 	      goto do_minus;
8725 	    }
8726 	}
8727 
8728       /* No sense saving up arithmetic to be done
8729 	 if it's all in the wrong mode to form part of an address.
8730 	 And force_operand won't know whether to sign-extend or
8731 	 zero-extend.  */
8732       if (modifier != EXPAND_INITIALIZER
8733 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
8734 	{
8735 	  expand_operands (treeop0, treeop1,
8736 			   subtarget, &op0, &op1, modifier);
8737 	  if (op0 == const0_rtx)
8738 	    return op1;
8739 	  if (op1 == const0_rtx)
8740 	    return op0;
8741 	  goto binop2;
8742 	}
8743 
8744       expand_operands (treeop0, treeop1,
8745 		       subtarget, &op0, &op1, modifier);
8746       return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8747 
8748     case MINUS_EXPR:
8749     case POINTER_DIFF_EXPR:
8750     do_minus:
8751       /* For initializers, we are allowed to return a MINUS of two
8752 	 symbolic constants.  Here we handle all cases when both operands
8753 	 are constant.  */
8754       /* Handle difference of two symbolic constants,
8755 	 for the sake of an initializer.  */
8756       if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8757 	  && really_constant_p (treeop0)
8758 	  && really_constant_p (treeop1))
8759 	{
8760 	  expand_operands (treeop0, treeop1,
8761 			   NULL_RTX, &op0, &op1, modifier);
8762 	  return simplify_gen_binary (MINUS, mode, op0, op1);
8763 	}
8764 
8765       /* No sense saving up arithmetic to be done
8766 	 if it's all in the wrong mode to form part of an address.
8767 	 And force_operand won't know whether to sign-extend or
8768 	 zero-extend.  */
8769       if (modifier != EXPAND_INITIALIZER
8770 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
8771 	goto binop;
8772 
8773       expand_operands (treeop0, treeop1,
8774 		       subtarget, &op0, &op1, modifier);
8775 
8776       /* Convert A - const to A + (-const).  */
8777       if (CONST_INT_P (op1))
8778 	{
8779 	  op1 = negate_rtx (mode, op1);
8780 	  return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8781 	}
8782 
8783       goto binop2;
8784 
8785     case WIDEN_MULT_PLUS_EXPR:
8786     case WIDEN_MULT_MINUS_EXPR:
8787       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
8788       op2 = expand_normal (treeop2);
8789       target = expand_widen_pattern_expr (ops, op0, op1, op2,
8790 					  target, unsignedp);
8791       return target;
8792 
8793     case WIDEN_MULT_EXPR:
8794       /* If first operand is constant, swap them.
8795 	 Thus the following special case checks need only
8796 	 check the second operand.  */
8797       if (TREE_CODE (treeop0) == INTEGER_CST)
8798 	std::swap (treeop0, treeop1);
8799 
8800       /* First, check if we have a multiplication of one signed and one
8801 	 unsigned operand.  */
8802       if (TREE_CODE (treeop1) != INTEGER_CST
8803 	  && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8804 	      != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
8805 	{
8806 	  machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
8807 	  this_optab = usmul_widen_optab;
8808 	  if (find_widening_optab_handler (this_optab, mode, innermode)
8809 		!= CODE_FOR_nothing)
8810 	    {
8811 	      if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
8812 		expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8813 				 EXPAND_NORMAL);
8814 	      else
8815 		expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
8816 				 EXPAND_NORMAL);
8817 	      /* op0 and op1 might still be constant, despite the above
8818 		 != INTEGER_CST check.  Handle it.  */
8819 	      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8820 		{
8821 		  op0 = convert_modes (mode, innermode, op0, true);
8822 		  op1 = convert_modes (mode, innermode, op1, false);
8823 		  return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8824 							target, unsignedp));
8825 		}
8826 	      goto binop3;
8827 	    }
8828 	}
8829       /* Check for a multiplication with matching signedness.  */
8830       else if ((TREE_CODE (treeop1) == INTEGER_CST
8831 		&& int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
8832 	       || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
8833 		   == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
8834 	{
8835 	  tree op0type = TREE_TYPE (treeop0);
8836 	  machine_mode innermode = TYPE_MODE (op0type);
8837 	  bool zextend_p = TYPE_UNSIGNED (op0type);
8838 	  optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
8839 	  this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
8840 
8841 	  if (TREE_CODE (treeop0) != INTEGER_CST)
8842 	    {
8843 	      if (find_widening_optab_handler (this_optab, mode, innermode)
8844 		  != CODE_FOR_nothing)
8845 		{
8846 		  expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8847 				   EXPAND_NORMAL);
8848 		  /* op0 and op1 might still be constant, despite the above
8849 		     != INTEGER_CST check.  Handle it.  */
8850 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8851 		    {
8852 		     widen_mult_const:
8853 		      op0 = convert_modes (mode, innermode, op0, zextend_p);
8854 		      op1
8855 			= convert_modes (mode, innermode, op1,
8856 					 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8857 		      return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8858 							    target,
8859 							    unsignedp));
8860 		    }
8861 		  temp = expand_widening_mult (mode, op0, op1, target,
8862 					       unsignedp, this_optab);
8863 		  return REDUCE_BIT_FIELD (temp);
8864 		}
8865 	      if (find_widening_optab_handler (other_optab, mode, innermode)
8866 		  != CODE_FOR_nothing
8867 		  && innermode == word_mode)
8868 		{
8869 		  rtx htem, hipart;
8870 		  op0 = expand_normal (treeop0);
8871 		  op1 = expand_normal (treeop1);
8872 		  /* op0 and op1 might be constants, despite the above
8873 		     != INTEGER_CST check.  Handle it.  */
8874 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8875 		    goto widen_mult_const;
8876 		  if (TREE_CODE (treeop1) == INTEGER_CST)
8877 		    op1 = convert_modes (mode, word_mode, op1,
8878 					 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8879 		  temp = expand_binop (mode, other_optab, op0, op1, target,
8880 				       unsignedp, OPTAB_LIB_WIDEN);
8881 		  hipart = gen_highpart (word_mode, temp);
8882 		  htem = expand_mult_highpart_adjust (word_mode, hipart,
8883 						      op0, op1, hipart,
8884 						      zextend_p);
8885 		  if (htem != hipart)
8886 		    emit_move_insn (hipart, htem);
8887 		  return REDUCE_BIT_FIELD (temp);
8888 		}
8889 	    }
8890 	}
8891       treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
8892       treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
8893       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8894       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8895 
8896     case MULT_EXPR:
8897       /* If this is a fixed-point operation, then we cannot use the code
8898 	 below because "expand_mult" doesn't support sat/no-sat fixed-point
8899          multiplications.   */
8900       if (ALL_FIXED_POINT_MODE_P (mode))
8901 	goto binop;
8902 
8903       /* If first operand is constant, swap them.
8904 	 Thus the following special case checks need only
8905 	 check the second operand.  */
8906       if (TREE_CODE (treeop0) == INTEGER_CST)
8907 	std::swap (treeop0, treeop1);
8908 
8909       /* Attempt to return something suitable for generating an
8910 	 indexed address, for machines that support that.  */
8911 
8912       if (modifier == EXPAND_SUM && mode == ptr_mode
8913 	  && tree_fits_shwi_p (treeop1))
8914 	{
8915 	  tree exp1 = treeop1;
8916 
8917 	  op0 = expand_expr (treeop0, subtarget, VOIDmode,
8918 			     EXPAND_SUM);
8919 
8920 	  if (!REG_P (op0))
8921 	    op0 = force_operand (op0, NULL_RTX);
8922 	  if (!REG_P (op0))
8923 	    op0 = copy_to_mode_reg (mode, op0);
8924 
8925 	  return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
8926 			       gen_int_mode (tree_to_shwi (exp1),
8927 					     TYPE_MODE (TREE_TYPE (exp1)))));
8928 	}
8929 
8930       if (modifier == EXPAND_STACK_PARM)
8931 	target = 0;
8932 
8933       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8934       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8935 
8936     case TRUNC_MOD_EXPR:
8937     case FLOOR_MOD_EXPR:
8938     case CEIL_MOD_EXPR:
8939     case ROUND_MOD_EXPR:
8940 
8941     case TRUNC_DIV_EXPR:
8942     case FLOOR_DIV_EXPR:
8943     case CEIL_DIV_EXPR:
8944     case ROUND_DIV_EXPR:
8945     case EXACT_DIV_EXPR:
8946      {
8947        /* If this is a fixed-point operation, then we cannot use the code
8948 	  below because "expand_divmod" doesn't support sat/no-sat fixed-point
8949 	  divisions.   */
8950        if (ALL_FIXED_POINT_MODE_P (mode))
8951 	 goto binop;
8952 
8953        if (modifier == EXPAND_STACK_PARM)
8954 	 target = 0;
8955        /* Possible optimization: compute the dividend with EXPAND_SUM
8956 	  then if the divisor is constant can optimize the case
8957 	  where some terms of the dividend have coeffs divisible by it.  */
8958        expand_operands (treeop0, treeop1,
8959 			subtarget, &op0, &op1, EXPAND_NORMAL);
8960        bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
8961 		    || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
8962        if (SCALAR_INT_MODE_P (mode)
8963 	   && optimize >= 2
8964 	   && get_range_pos_neg (treeop0) == 1
8965 	   && get_range_pos_neg (treeop1) == 1)
8966 	 {
8967 	   /* If both arguments are known to be positive when interpreted
8968 	      as signed, we can expand it as both signed and unsigned
8969 	      division or modulo.  Choose the cheaper sequence in that case.  */
8970 	   bool speed_p = optimize_insn_for_speed_p ();
8971 	   do_pending_stack_adjust ();
8972 	   start_sequence ();
8973 	   rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
8974 	   rtx_insn *uns_insns = get_insns ();
8975 	   end_sequence ();
8976 	   start_sequence ();
8977 	   rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
8978 	   rtx_insn *sgn_insns = get_insns ();
8979 	   end_sequence ();
8980 	   unsigned uns_cost = seq_cost (uns_insns, speed_p);
8981 	   unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
8982 
8983 	   /* If costs are the same then use the other cost metric as
8984 	      the tie breaker.  */
8985 	   if (uns_cost == sgn_cost)
8986 	     {
8987 		uns_cost = seq_cost (uns_insns, !speed_p);
8988 		sgn_cost = seq_cost (sgn_insns, !speed_p);
8989 	     }
8990 
8991 	   if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
8992 	     {
8993 	       emit_insn (uns_insns);
8994 	       return uns_ret;
8995 	     }
8996 	   emit_insn (sgn_insns);
8997 	   return sgn_ret;
8998 	 }
8999        return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
9000      }
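
    /* Illustrative sketch only (kept out of the build; the free-standing
       helper name is made up): the pattern used above - expand both
       candidate sequences into scratch insn lists, compare seq_cost, and
       emit only the cheaper one - works for any pair of equivalent
       expansions.  */
#if 0
static rtx
pick_cheaper_sequence (rtx a_ret, rtx_insn *a_insns,
		       rtx b_ret, rtx_insn *b_insns, bool speed_p)
{
  if (seq_cost (a_insns, speed_p) <= seq_cost (b_insns, speed_p))
    {
      emit_insn (a_insns);
      return a_ret;
    }
  emit_insn (b_insns);
  return b_ret;
}
#endif
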
9001     case RDIV_EXPR:
9002       goto binop;
9003 
9004     case MULT_HIGHPART_EXPR:
9005       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9006       temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9007       gcc_assert (temp);
9008       return temp;
9009 
9010     case FIXED_CONVERT_EXPR:
9011       op0 = expand_normal (treeop0);
9012       if (target == 0 || modifier == EXPAND_STACK_PARM)
9013 	target = gen_reg_rtx (mode);
9014 
9015       if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9016 	   && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9017           || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9018 	expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9019       else
9020 	expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9021       return target;
9022 
9023     case FIX_TRUNC_EXPR:
9024       op0 = expand_normal (treeop0);
9025       if (target == 0 || modifier == EXPAND_STACK_PARM)
9026 	target = gen_reg_rtx (mode);
9027       expand_fix (target, op0, unsignedp);
9028       return target;
9029 
9030     case FLOAT_EXPR:
9031       op0 = expand_normal (treeop0);
9032       if (target == 0 || modifier == EXPAND_STACK_PARM)
9033 	target = gen_reg_rtx (mode);
9034       /* expand_float can't figure out what to do if FROM has VOIDmode.
9035 	 So give it the correct mode.  With -O, cse will optimize this.  */
9036       if (GET_MODE (op0) == VOIDmode)
9037 	op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9038 				op0);
9039       expand_float (target, op0,
9040 		    TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9041       return target;
9042 
9043     case NEGATE_EXPR:
9044       op0 = expand_expr (treeop0, subtarget,
9045 			 VOIDmode, EXPAND_NORMAL);
9046       if (modifier == EXPAND_STACK_PARM)
9047 	target = 0;
9048       temp = expand_unop (mode,
9049       			  optab_for_tree_code (NEGATE_EXPR, type,
9050 					       optab_default),
9051 			  op0, target, 0);
9052       gcc_assert (temp);
9053       return REDUCE_BIT_FIELD (temp);
9054 
9055     case ABS_EXPR:
9056     case ABSU_EXPR:
9057       op0 = expand_expr (treeop0, subtarget,
9058 			 VOIDmode, EXPAND_NORMAL);
9059       if (modifier == EXPAND_STACK_PARM)
9060 	target = 0;
9061 
9062       /* ABS_EXPR is not valid for complex arguments.  */
9063       gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9064 		  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9065 
9066       /* Unsigned abs is simply the operand.  Testing here means we don't
9067 	 risk generating incorrect code below.  */
9068       if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9069 	return op0;
9070 
9071       return expand_abs (mode, op0, target, unsignedp,
9072 			 safe_from_p (target, treeop0, 1));
9073 
9074     case MAX_EXPR:
9075     case MIN_EXPR:
9076       target = original_target;
9077       if (target == 0
9078 	  || modifier == EXPAND_STACK_PARM
9079 	  || (MEM_P (target) && MEM_VOLATILE_P (target))
9080 	  || GET_MODE (target) != mode
9081 	  || (REG_P (target)
9082 	      && REGNO (target) < FIRST_PSEUDO_REGISTER))
9083 	target = gen_reg_rtx (mode);
9084       expand_operands (treeop0, treeop1,
9085 		       target, &op0, &op1, EXPAND_NORMAL);
9086 
9087       /* First try to do it with a special MIN or MAX instruction.
9088 	 If that does not win, use a conditional jump to select the proper
9089 	 value.  */
9090       this_optab = optab_for_tree_code (code, type, optab_default);
9091       temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9092 			   OPTAB_WIDEN);
9093       if (temp != 0)
9094 	return temp;
9095 
9096       /* For vector MIN <x, y>, expand it as a VEC_COND_EXPR <x <= y, x, y>
9097 	 and similarly for MAX <x, y>.  */
9098       if (VECTOR_TYPE_P (type))
9099 	{
9100 	  tree t0 = make_tree (type, op0);
9101 	  tree t1 = make_tree (type, op1);
9102 	  tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
9103 				    type, t0, t1);
9104 	  return expand_vec_cond_expr (type, comparison, t0, t1,
9105 				       original_target);
9106 	}
9107 
9108       /* At this point, a MEM target is no longer useful; we will get better
9109 	 code without it.  */
9110 
9111       if (! REG_P (target))
9112 	target = gen_reg_rtx (mode);
9113 
9114       /* If op1 was placed in target, swap op0 and op1.  */
9115       if (target != op0 && target == op1)
9116 	std::swap (op0, op1);
9117 
9118       /* We generate better code and avoid problems with op1 mentioning
9119 	 target by forcing op1 into a pseudo if it isn't a constant.  */
9120       if (! CONSTANT_P (op1))
9121 	op1 = force_reg (mode, op1);
9122 
9123       {
9124 	enum rtx_code comparison_code;
9125 	rtx cmpop1 = op1;
9126 
9127 	if (code == MAX_EXPR)
9128 	  comparison_code = unsignedp ? GEU : GE;
9129 	else
9130 	  comparison_code = unsignedp ? LEU : LE;
9131 
9132 	/* Canonicalize to comparisons against 0.  */
9133 	if (op1 == const1_rtx)
9134 	  {
9135 	    /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9136 	       or (a != 0 ? a : 1) for unsigned.
9137 	       For MIN we are safe converting (a <= 1 ? a : 1)
9138 	       into (a <= 0 ? a : 1)  */
9139 	    cmpop1 = const0_rtx;
9140 	    if (code == MAX_EXPR)
9141 	      comparison_code = unsignedp ? NE : GT;
9142 	  }
9143 	if (op1 == constm1_rtx && !unsignedp)
9144 	  {
9145 	    /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9146 	       and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9147 	    cmpop1 = const0_rtx;
9148 	    if (code == MIN_EXPR)
9149 	      comparison_code = LT;
9150 	  }
9151 
9152 	/* Use a conditional move if possible.  */
9153 	if (can_conditionally_move_p (mode))
9154 	  {
9155 	    rtx insn;
9156 
9157 	    start_sequence ();
9158 
9159 	    /* Try to emit the conditional move.  */
9160 	    insn = emit_conditional_move (target, comparison_code,
9161 					  op0, cmpop1, mode,
9162 					  op0, op1, mode,
9163 					  unsignedp);
9164 
9165 	    /* If we could do the conditional move, emit the sequence,
9166 	       and return.  */
9167 	    if (insn)
9168 	      {
9169 		rtx_insn *seq = get_insns ();
9170 		end_sequence ();
9171 		emit_insn (seq);
9172 		return target;
9173 	      }
9174 
9175 	    /* Otherwise discard the sequence and fall back to code with
9176 	       branches.  */
9177 	    end_sequence ();
9178 	  }
9179 
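	/* A sketch of what the branch fallback below emits (illustrative,
	   for a signed MAX_EXPR before any canonicalization above):

	     target = op0;
	     if (target >= cmpop1) goto lab;
	     target = op1;
	   lab:

	   i.e. op0 is kept when the comparison holds, otherwise op1 is
	   used.  */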
9180 	if (target != op0)
9181 	  emit_move_insn (target, op0);
9182 
9183 	lab = gen_label_rtx ();
9184 	do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9185 				 unsignedp, mode, NULL_RTX, NULL, lab,
9186 				 profile_probability::uninitialized ());
9187       }
9188       emit_move_insn (target, op1);
9189       emit_label (lab);
9190       return target;
9191 
9192     case BIT_NOT_EXPR:
9193       op0 = expand_expr (treeop0, subtarget,
9194 			 VOIDmode, EXPAND_NORMAL);
9195       if (modifier == EXPAND_STACK_PARM)
9196 	target = 0;
9197       /* In case we have to reduce the result to bit-field precision
9198 	 for an unsigned bit-field type, expand this as an XOR with the
9199 	 proper mask constant instead.  */
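      /* For instance (an illustrative sketch): for an unsigned type of
	 precision 3 held in SImode, the mask below is 0x7 and ~x is
	 expanded as x ^ 0x7, which is already truncated to 3 bits and so
	 needs no further reduction.  */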
9200       if (reduce_bit_field && TYPE_UNSIGNED (type))
9201 	{
9202 	  int_mode = SCALAR_INT_TYPE_MODE (type);
9203 	  wide_int mask = wi::mask (TYPE_PRECISION (type),
9204 				    false, GET_MODE_PRECISION (int_mode));
9205 
9206 	  temp = expand_binop (int_mode, xor_optab, op0,
9207 			       immed_wide_int_const (mask, int_mode),
9208 			       target, 1, OPTAB_LIB_WIDEN);
9209 	}
9210       else
9211 	temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9212       gcc_assert (temp);
9213       return temp;
9214 
9215       /* ??? Can optimize bitwise operations with one arg constant.
9216 	 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9217 	 and (a bitwise1 b) bitwise2 b (etc)
9218 	 but that is probably not worthwhile.  */
9219 
9220     case BIT_AND_EXPR:
9221     case BIT_IOR_EXPR:
9222     case BIT_XOR_EXPR:
9223       goto binop;
9224 
9225     case LROTATE_EXPR:
9226     case RROTATE_EXPR:
9227       gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9228 		  || type_has_mode_precision_p (type));
9229       /* fall through */
9230 
9231     case LSHIFT_EXPR:
9232     case RSHIFT_EXPR:
9233       {
9234 	/* If this is a fixed-point operation, then we cannot use the code
9235 	   below because "expand_shift" doesn't support sat/no-sat fixed-point
9236 	   shifts.  */
9237 	if (ALL_FIXED_POINT_MODE_P (mode))
9238 	  goto binop;
9239 
9240 	if (! safe_from_p (subtarget, treeop1, 1))
9241 	  subtarget = 0;
9242 	if (modifier == EXPAND_STACK_PARM)
9243 	  target = 0;
9244 	op0 = expand_expr (treeop0, subtarget,
9245 			   VOIDmode, EXPAND_NORMAL);
9246 
9247 	/* Left shift optimization when shifting across word_size boundary.
9248 
9249 	   If mode == GET_MODE_2XWIDER_MODE (word_mode), then normally
9250 	   there is no native instruction to support this wide-mode
9251 	   left shift.  Consider the scenario below:
9252 
9253 	    Type A = (Type) B  << C
9254 
9255 	    |<		 T	    >|
9256 	    | dest_high  |  dest_low |
9257 
9258 			 | word_size |
9259 
9260 	   If the shift amount C causes B to be shifted across the word
9261 	   size boundary, i.e. part of B is shifted into the high half of
9262 	   the destination register and part of B remains in the low
9263 	   half, then GCC uses the following left-shift expansion
9264 	   logic:
9265 
9266 	   1. Initialize dest_low to B.
9267 	   2. Initialize every bit of dest_high to the sign bit of B.
9268 	   3. Logically left-shift dest_low by C bits to finalize dest_low.
9269 	      The value of dest_low before this shift is kept in a temp D.
9270 	   4. Logically left-shift dest_high by C.
9271 	   5. Logically right-shift D by (word_size - C).
9272 	   6. Or the result of 4 and 5 to finalize dest_high.
9273 
9274 	   However, by checking the gimple statements, if operand B
9275 	   comes from a sign extension, then we can simplify the above
9276 	   expansion logic to:
9277 
9278 	      1. dest_high = src_low >> (word_size - C).
9279 	      2. dest_low = src_low << C.
9280 
9281 	   A single arithmetic right shift accomplishes steps 2, 4, 5
9282 	   and 6, so the number of steps needed is reduced from 6
9283 	   to 2.
9284 
9285 	   The case is similar for zero extension, except that we
9286 	   initialize dest_high to zero rather than copies of the sign
9287 	   bit from B.  Furthermore, we need to use a logical right shift
9288 	   in this case.
9289 
9290 	   The choice of sign-extension versus zero-extension is
9291 	   determined entirely by whether or not B is signed and is
9292 	   independent of the current setting of unsignedp.  */
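	/* Illustrative sketch (assuming a 32-bit target where word_mode is
	   SImode and the wide mode is DImode): for

	     long long a = (long long) b << 20;

	   with B a signed 32-bit int, the 20-bit shift plus the 32 source
	   bits crosses the 32-bit word boundary, so the simplified form is

	     dest_high = b >> (32 - 20);	(arithmetic right shift)
	     dest_low  = b << 20;

	   The code below emits this two-shift sequence and, when a native
	   wide shift exists, compares its cost against the generic
	   expansion before committing to it.  */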
9293 
9294 	temp = NULL_RTX;
9295 	if (code == LSHIFT_EXPR
9296 	    && target
9297 	    && REG_P (target)
9298 	    && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9299 	    && mode == int_mode
9300 	    && TREE_CONSTANT (treeop1)
9301 	    && TREE_CODE (treeop0) == SSA_NAME)
9302 	  {
9303 	    gimple *def = SSA_NAME_DEF_STMT (treeop0);
9304 	    if (is_gimple_assign (def)
9305 		&& gimple_assign_rhs_code (def) == NOP_EXPR)
9306 	      {
9307 		scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9308 		  (TREE_TYPE (gimple_assign_rhs1 (def)));
9309 
9310 		if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9311 		    && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9312 		    && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9313 			>= GET_MODE_BITSIZE (word_mode)))
9314 		  {
9315 		    rtx_insn *seq, *seq_old;
9316 		    poly_uint64 high_off = subreg_highpart_offset (word_mode,
9317 								   int_mode);
9318 		    bool extend_unsigned
9319 		      = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9320 		    rtx low = lowpart_subreg (word_mode, op0, int_mode);
9321 		    rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9322 		    rtx dest_high = simplify_gen_subreg (word_mode, target,
9323 							 int_mode, high_off);
9324 		    HOST_WIDE_INT ramount = (BITS_PER_WORD
9325 					     - TREE_INT_CST_LOW (treeop1));
9326 		    tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9327 
9328 		    start_sequence ();
9329 		    /* dest_high = src_low >> (word_size - C).  */
9330 		    temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9331 						  rshift, dest_high,
9332 						  extend_unsigned);
9333 		    if (temp != dest_high)
9334 		      emit_move_insn (dest_high, temp);
9335 
9336 		    /* dest_low = src_low << C.  */
9337 		    temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9338 						  treeop1, dest_low, unsignedp);
9339 		    if (temp != dest_low)
9340 		      emit_move_insn (dest_low, temp);
9341 
9342 		    seq = get_insns ();
9343 		    end_sequence ();
9344 		    temp = target;
9345 
9346 		    if (have_insn_for (ASHIFT, int_mode))
9347 		      {
9348 			bool speed_p = optimize_insn_for_speed_p ();
9349 			start_sequence ();
9350 			rtx ret_old = expand_variable_shift (code, int_mode,
9351 							     op0, treeop1,
9352 							     target,
9353 							     unsignedp);
9354 
9355 			seq_old = get_insns ();
9356 			end_sequence ();
9357 			if (seq_cost (seq, speed_p)
9358 			    >= seq_cost (seq_old, speed_p))
9359 			  {
9360 			    seq = seq_old;
9361 			    temp = ret_old;
9362 			  }
9363 		      }
9364 		    emit_insn (seq);
9365 		  }
9366 	      }
9367 	  }
9368 
9369 	if (temp == NULL_RTX)
9370 	  temp = expand_variable_shift (code, mode, op0, treeop1, target,
9371 					unsignedp);
9372 	if (code == LSHIFT_EXPR)
9373 	  temp = REDUCE_BIT_FIELD (temp);
9374 	return temp;
9375       }
9376 
9377       /* Could determine the answer when only additive constants differ.  Also,
9378 	 the addition of one can be handled by changing the condition.  */
9379     case LT_EXPR:
9380     case LE_EXPR:
9381     case GT_EXPR:
9382     case GE_EXPR:
9383     case EQ_EXPR:
9384     case NE_EXPR:
9385     case UNORDERED_EXPR:
9386     case ORDERED_EXPR:
9387     case UNLT_EXPR:
9388     case UNLE_EXPR:
9389     case UNGT_EXPR:
9390     case UNGE_EXPR:
9391     case UNEQ_EXPR:
9392     case LTGT_EXPR:
9393       {
9394 	temp = do_store_flag (ops,
9395 			      modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9396 			      tmode != VOIDmode ? tmode : mode);
9397 	if (temp)
9398 	  return temp;
9399 
9400 	/* Use a compare and a jump for BLKmode comparisons, or for function
9401 	   type comparisons if have_canonicalize_funcptr_for_compare.  */
9402 
9403 	if ((target == 0
9404 	     || modifier == EXPAND_STACK_PARM
9405 	     || ! safe_from_p (target, treeop0, 1)
9406 	     || ! safe_from_p (target, treeop1, 1)
9407 	     /* Make sure we don't have a hard reg (such as function's return
9408 		value) live across basic blocks, if not optimizing.  */
9409 	     || (!optimize && REG_P (target)
9410 		 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9411 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9412 
9413 	emit_move_insn (target, const0_rtx);
9414 
9415 	rtx_code_label *lab1 = gen_label_rtx ();
9416 	jumpifnot_1 (code, treeop0, treeop1, lab1,
9417 		     profile_probability::uninitialized ());
9418 
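	/* For a signed 1-bit type the only nonzero value is -1, so store
	   all-ones rather than 1 for a true result.  */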
9419 	if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9420 	  emit_move_insn (target, constm1_rtx);
9421 	else
9422 	  emit_move_insn (target, const1_rtx);
9423 
9424 	emit_label (lab1);
9425 	return target;
9426       }
9427     case COMPLEX_EXPR:
9428       /* Get the rtx code of the operands.  */
9429       op0 = expand_normal (treeop0);
9430       op1 = expand_normal (treeop1);
9431 
9432       if (!target)
9433 	target = gen_reg_rtx (TYPE_MODE (type));
9434       else
9435 	/* If target overlaps with op1, then either we need to force
9436 	   op1 into a pseudo (if target also overlaps with op0),
9437 	   or write the complex parts in reverse order.  */
9438 	switch (GET_CODE (target))
9439 	  {
9440 	  case CONCAT:
9441 	    if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
9442 	      {
9443 		if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
9444 		  {
9445 		  complex_expr_force_op1:
9446 		    temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
9447 		    emit_move_insn (temp, op1);
9448 		    op1 = temp;
9449 		    break;
9450 		  }
9451 	      complex_expr_swap_order:
9452 		/* Move the imaginary (op1) and real (op0) parts to their
9453 		   location.  */
9454 		write_complex_part (target, op1, true);
9455 		write_complex_part (target, op0, false);
9456 
9457 		return target;
9458 	      }
9459 	    break;
9460 	  case MEM:
9461 	    temp = adjust_address_nv (target,
9462 				      GET_MODE_INNER (GET_MODE (target)), 0);
9463 	    if (reg_overlap_mentioned_p (temp, op1))
9464 	      {
9465 		scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
9466 		temp = adjust_address_nv (target, imode,
9467 					  GET_MODE_SIZE (imode));
9468 		if (reg_overlap_mentioned_p (temp, op0))
9469 		  goto complex_expr_force_op1;
9470 		goto complex_expr_swap_order;
9471 	      }
9472 	    break;
9473 	  default:
9474 	    if (reg_overlap_mentioned_p (target, op1))
9475 	      {
9476 		if (reg_overlap_mentioned_p (target, op0))
9477 		  goto complex_expr_force_op1;
9478 		goto complex_expr_swap_order;
9479 	      }
9480 	    break;
9481 	  }
9482 
9483       /* Move the real (op0) and imaginary (op1) parts to their location.  */
9484       write_complex_part (target, op0, false);
9485       write_complex_part (target, op1, true);
9486 
9487       return target;
9488 
9489     case WIDEN_SUM_EXPR:
9490       {
9491         tree oprnd0 = treeop0;
9492         tree oprnd1 = treeop1;
9493 
9494         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9495         target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
9496                                             target, unsignedp);
9497         return target;
9498       }
9499 
9500     case VEC_UNPACK_HI_EXPR:
9501     case VEC_UNPACK_LO_EXPR:
9502     case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
9503     case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
9504       {
9505 	op0 = expand_normal (treeop0);
9506 	temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
9507 					  target, unsignedp);
9508 	gcc_assert (temp);
9509 	return temp;
9510       }
9511 
9512     case VEC_UNPACK_FLOAT_HI_EXPR:
9513     case VEC_UNPACK_FLOAT_LO_EXPR:
9514       {
9515 	op0 = expand_normal (treeop0);
9516 	/* The signedness is determined from the input operand.  */
9517 	temp = expand_widen_pattern_expr
9518 	  (ops, op0, NULL_RTX, NULL_RTX,
9519 	   target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9520 
9521 	gcc_assert (temp);
9522 	return temp;
9523       }
9524 
9525     case VEC_WIDEN_MULT_HI_EXPR:
9526     case VEC_WIDEN_MULT_LO_EXPR:
9527     case VEC_WIDEN_MULT_EVEN_EXPR:
9528     case VEC_WIDEN_MULT_ODD_EXPR:
9529     case VEC_WIDEN_LSHIFT_HI_EXPR:
9530     case VEC_WIDEN_LSHIFT_LO_EXPR:
9531       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9532       target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
9533 					  target, unsignedp);
9534       gcc_assert (target);
9535       return target;
9536 
9537     case VEC_PACK_SAT_EXPR:
9538     case VEC_PACK_FIX_TRUNC_EXPR:
9539       mode = TYPE_MODE (TREE_TYPE (treeop0));
9540       goto binop;
9541 
9542     case VEC_PACK_TRUNC_EXPR:
9543       if (VECTOR_BOOLEAN_TYPE_P (type)
9544 	  && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0))
9545 	  && mode == TYPE_MODE (TREE_TYPE (treeop0))
9546 	  && SCALAR_INT_MODE_P (mode))
9547 	{
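	  /* Both operands and the result are vector booleans whose vector
	     mode is in fact a scalar integer mode (an integer bit-mask),
	     so use the vec_pack_sbool_trunc optab, which also takes the
	     number of elements of the result vector as an operand.  */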
9548 	  struct expand_operand eops[4];
9549 	  machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
9550 	  expand_operands (treeop0, treeop1,
9551 			   subtarget, &op0, &op1, EXPAND_NORMAL);
9552 	  this_optab = vec_pack_sbool_trunc_optab;
9553 	  enum insn_code icode = optab_handler (this_optab, imode);
9554 	  create_output_operand (&eops[0], target, mode);
9555 	  create_convert_operand_from (&eops[1], op0, imode, false);
9556 	  create_convert_operand_from (&eops[2], op1, imode, false);
9557 	  temp = GEN_INT (TYPE_VECTOR_SUBPARTS (type).to_constant ());
9558 	  create_input_operand (&eops[3], temp, imode);
9559 	  expand_insn (icode, 4, eops);
9560 	  return eops[0].value;
9561 	}
9562       mode = TYPE_MODE (TREE_TYPE (treeop0));
9563       goto binop;
9564 
9565     case VEC_PACK_FLOAT_EXPR:
9566       mode = TYPE_MODE (TREE_TYPE (treeop0));
9567       expand_operands (treeop0, treeop1,
9568 		       subtarget, &op0, &op1, EXPAND_NORMAL);
9569       this_optab = optab_for_tree_code (code, TREE_TYPE (treeop0),
9570 					optab_default);
9571       target = expand_binop (mode, this_optab, op0, op1, target,
9572 			     TYPE_UNSIGNED (TREE_TYPE (treeop0)),
9573 			     OPTAB_LIB_WIDEN);
9574       gcc_assert (target);
9575       return target;
9576 
9577     case VEC_PERM_EXPR:
9578       {
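	/* If the permutation selector is a constant we can try the
	   constant-selector expansion; otherwise expand the selector and
	   fall back to a variable permutation.  */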
9579 	expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
9580 	vec_perm_builder sel;
9581 	if (TREE_CODE (treeop2) == VECTOR_CST
9582 	    && tree_to_vec_perm_builder (&sel, treeop2))
9583 	  {
9584 	    machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
9585 	    temp = expand_vec_perm_const (mode, op0, op1, sel,
9586 					  sel_mode, target);
9587 	  }
9588 	else
9589 	  {
9590 	    op2 = expand_normal (treeop2);
9591 	    temp = expand_vec_perm_var (mode, op0, op1, op2, target);
9592 	  }
9593 	gcc_assert (temp);
9594 	return temp;
9595       }
9596 
9597     case DOT_PROD_EXPR:
9598       {
9599 	tree oprnd0 = treeop0;
9600 	tree oprnd1 = treeop1;
9601 	tree oprnd2 = treeop2;
9602 	rtx op2;
9603 
9604 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9605 	op2 = expand_normal (oprnd2);
9606 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
9607 					    target, unsignedp);
9608 	return target;
9609       }
9610 
9611     case SAD_EXPR:
9612       {
9613 	tree oprnd0 = treeop0;
9614 	tree oprnd1 = treeop1;
9615 	tree oprnd2 = treeop2;
9616 	rtx op2;
9617 
9618 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9619 	op2 = expand_normal (oprnd2);
9620 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
9621 					    target, unsignedp);
9622 	return target;
9623       }
9624 
9625     case REALIGN_LOAD_EXPR:
9626       {
9627         tree oprnd0 = treeop0;
9628         tree oprnd1 = treeop1;
9629         tree oprnd2 = treeop2;
9630         rtx op2;
9631 
9632         this_optab = optab_for_tree_code (code, type, optab_default);
9633         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9634         op2 = expand_normal (oprnd2);
9635         temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
9636 				  target, unsignedp);
9637         gcc_assert (temp);
9638         return temp;
9639       }
9640 
9641     case COND_EXPR:
9642       {
9643 	/* A COND_EXPR with its type being VOID_TYPE represents a
9644 	   conditional jump and is handled in
9645 	   expand_gimple_cond_expr.  */
9646 	gcc_assert (!VOID_TYPE_P (type));
9647 
9648 	/* Note that COND_EXPRs whose type is a structure or union
9649 	   are required to be constructed to contain assignments of
9650 	   a temporary variable, so that we can evaluate them here
9651 	   for side effect only.  If type is void, we must do likewise.  */
9652 
9653 	gcc_assert (!TREE_ADDRESSABLE (type)
9654 		    && !ignore
9655 		    && TREE_TYPE (treeop1) != void_type_node
9656 		    && TREE_TYPE (treeop2) != void_type_node);
9657 
9658 	temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
9659 	if (temp)
9660 	  return temp;
9661 
9662 	/* If we are not to produce a result, we have no target.  Otherwise,
9663 	   if a target was specified use it; it will not be used as an
9664 	   intermediate target unless it is safe.  If no target, use a
9665 	   temporary.  */
9666 
9667 	if (modifier != EXPAND_STACK_PARM
9668 	    && original_target
9669 	    && safe_from_p (original_target, treeop0, 1)
9670 	    && GET_MODE (original_target) == mode
9671 	    && !MEM_P (original_target))
9672 	  temp = original_target;
9673 	else
9674 	  temp = assign_temp (type, 0, 1);
9675 
9676 	do_pending_stack_adjust ();
9677 	NO_DEFER_POP;
9678 	rtx_code_label *lab0 = gen_label_rtx ();
9679 	rtx_code_label *lab1 = gen_label_rtx ();
9680 	jumpifnot (treeop0, lab0,
9681 		   profile_probability::uninitialized ());
9682 	store_expr (treeop1, temp,
9683 		    modifier == EXPAND_STACK_PARM,
9684 		    false, false);
9685 
9686 	emit_jump_insn (targetm.gen_jump (lab1));
9687 	emit_barrier ();
9688 	emit_label (lab0);
9689 	store_expr (treeop2, temp,
9690 		    modifier == EXPAND_STACK_PARM,
9691 		    false, false);
9692 
9693 	emit_label (lab1);
9694 	OK_DEFER_POP;
9695 	return temp;
9696       }
9697 
9698     case VEC_COND_EXPR:
9699       target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
9700       return target;
9701 
9702     case VEC_DUPLICATE_EXPR:
9703       op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9704       target = expand_vector_broadcast (mode, op0);
9705       gcc_assert (target);
9706       return target;
9707 
9708     case VEC_SERIES_EXPR:
9709       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
9710       return expand_vec_series_expr (mode, op0, op1, target);
9711 
9712     case BIT_INSERT_EXPR:
9713       {
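	/* BIT_INSERT_EXPR <op0, op1, pos> yields a copy of op0 with op1
	   inserted at bit position pos; the width of the inserted field
	   is taken from op1's type (its precision for integral types,
	   otherwise its size).  */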
9714 	unsigned bitpos = tree_to_uhwi (treeop2);
9715 	unsigned bitsize;
9716 	if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
9717 	  bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
9718 	else
9719 	  bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
9720 	rtx op0 = expand_normal (treeop0);
9721 	rtx op1 = expand_normal (treeop1);
9722 	rtx dst = gen_reg_rtx (mode);
9723 	emit_move_insn (dst, op0);
9724 	store_bit_field (dst, bitsize, bitpos, 0, 0,
9725 			 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
9726 	return dst;
9727       }
9728 
9729     default:
9730       gcc_unreachable ();
9731     }
9732 
9733   /* Here to do an ordinary binary operator.  */
9734  binop:
9735   expand_operands (treeop0, treeop1,
9736 		   subtarget, &op0, &op1, EXPAND_NORMAL);
9737  binop2:
9738   this_optab = optab_for_tree_code (code, type, optab_default);
9739  binop3:
9740   if (modifier == EXPAND_STACK_PARM)
9741     target = 0;
9742   temp = expand_binop (mode, this_optab, op0, op1, target,
9743 		       unsignedp, OPTAB_LIB_WIDEN);
9744   gcc_assert (temp);
9745   /* Bitwise operations do not need bit-field reduction as we expect their
9746      operands to be properly truncated.  */
9747   if (code == BIT_XOR_EXPR
9748       || code == BIT_AND_EXPR
9749       || code == BIT_IOR_EXPR)
9750     return temp;
9751   return REDUCE_BIT_FIELD (temp);
9752 }
9753 #undef REDUCE_BIT_FIELD
9754 
9755 
9756 /* Return TRUE if expression STMT is suitable for replacement.
9757    Never consider memory loads as replaceable, because those don't ever lead
9758    into constant expressions.  */
9759 
9760 static bool
9761 stmt_is_replaceable_p (gimple *stmt)
9762 {
9763   if (ssa_is_replaceable_p (stmt))
9764     {
9765       /* Don't move around loads.  */
9766       if (!gimple_assign_single_p (stmt)
9767 	  || is_gimple_val (gimple_assign_rhs1 (stmt)))
9768 	return true;
9769     }
9770   return false;
9771 }
9772 
9773 rtx
9774 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
9775 		    enum expand_modifier modifier, rtx *alt_rtl,
9776 		    bool inner_reference_p)
9777 {
9778   rtx op0, op1, temp, decl_rtl;
9779   tree type;
9780   int unsignedp;
9781   machine_mode mode, dmode;
9782   enum tree_code code = TREE_CODE (exp);
9783   rtx subtarget, original_target;
9784   int ignore;
9785   tree context;
9786   bool reduce_bit_field;
9787   location_t loc = EXPR_LOCATION (exp);
9788   struct separate_ops ops;
9789   tree treeop0, treeop1, treeop2;
9790   tree ssa_name = NULL_TREE;
9791   gimple *g;
9792 
9793   type = TREE_TYPE (exp);
9794   mode = TYPE_MODE (type);
9795   unsignedp = TYPE_UNSIGNED (type);
9796 
9797   treeop0 = treeop1 = treeop2 = NULL_TREE;
9798   if (!VL_EXP_CLASS_P (exp))
9799     switch (TREE_CODE_LENGTH (code))
9800       {
9801 	default:
9802 	case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
9803 	case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
9804 	case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
9805 	case 0: break;
9806       }
9807   ops.code = code;
9808   ops.type = type;
9809   ops.op0 = treeop0;
9810   ops.op1 = treeop1;
9811   ops.op2 = treeop2;
9812   ops.location = loc;
9813 
9814   ignore = (target == const0_rtx
9815 	    || ((CONVERT_EXPR_CODE_P (code)
9816 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
9817 		&& TREE_CODE (type) == VOID_TYPE));
9818 
9819   /* An operation in what may be a bit-field type needs the
9820      result to be reduced to the precision of the bit-field type,
9821      which is narrower than that of the type's mode.  */
9822   reduce_bit_field = (!ignore
9823 		      && INTEGRAL_TYPE_P (type)
9824 		      && !type_has_mode_precision_p (type));
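  /* For instance (illustrative): a type with TYPE_PRECISION 3 whose mode
     is QImode has only 3 meaningful bits, so results computed in the full
     8-bit mode must be truncated back to 3 bits.  */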
9825 
9826   /* If we are going to ignore this result, we need only do something
9827      if there is a side-effect somewhere in the expression.  If there
9828      is, short-circuit the most common cases here.  Note that we must
9829      not call expand_expr with anything but const0_rtx in case this
9830      is an initial expansion of a size that contains a PLACEHOLDER_EXPR.  */
9831 
9832   if (ignore)
9833     {
9834       if (! TREE_SIDE_EFFECTS (exp))
9835 	return const0_rtx;
9836 
9837       /* Ensure we reference a volatile object even if value is ignored, but
9838 	 don't do this if all we are doing is taking its address.  */
9839       if (TREE_THIS_VOLATILE (exp)
9840 	  && TREE_CODE (exp) != FUNCTION_DECL
9841 	  && mode != VOIDmode && mode != BLKmode
9842 	  && modifier != EXPAND_CONST_ADDRESS)
9843 	{
9844 	  temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
9845 	  if (MEM_P (temp))
9846 	    copy_to_reg (temp);
9847 	  return const0_rtx;
9848 	}
9849 
9850       if (TREE_CODE_CLASS (code) == tcc_unary
9851 	  || code == BIT_FIELD_REF
9852 	  || code == COMPONENT_REF
9853 	  || code == INDIRECT_REF)
9854 	return expand_expr (treeop0, const0_rtx, VOIDmode,
9855 			    modifier);
9856 
9857       else if (TREE_CODE_CLASS (code) == tcc_binary
9858 	       || TREE_CODE_CLASS (code) == tcc_comparison
9859 	       || code == ARRAY_REF || code == ARRAY_RANGE_REF)
9860 	{
9861 	  expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
9862 	  expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
9863 	  return const0_rtx;
9864 	}
9865 
9866       target = 0;
9867     }
9868 
9869   if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
9870     target = 0;
9871 
9872   /* Use subtarget as the target for operand 0 of a binary operation.  */
9873   subtarget = get_subtarget (target);
9874   original_target = target;
9875 
9876   switch (code)
9877     {
9878     case LABEL_DECL:
9879       {
9880 	tree function = decl_function_context (exp);
9881 
9882 	temp = label_rtx (exp);
9883 	temp = gen_rtx_LABEL_REF (Pmode, temp);
9884 
9885 	if (function != current_function_decl
9886 	    && function != 0)
9887 	  LABEL_REF_NONLOCAL_P (temp) = 1;
9888 
9889 	temp = gen_rtx_MEM (FUNCTION_MODE, temp);
9890 	return temp;
9891       }
9892 
9893     case SSA_NAME:
9894       /* ??? ivopts calls expander, without any preparation from
9895          out-of-ssa.  So fake instructions as if this was an access to the
9896 	 base variable.  This unnecessarily allocates a pseudo, see how we can
9897 	 reuse it, if partition base vars have it set already.  */
9898       if (!currently_expanding_to_rtl)
9899 	{
9900 	  tree var = SSA_NAME_VAR (exp);
9901 	  if (var && DECL_RTL_SET_P (var))
9902 	    return DECL_RTL (var);
9903 	  return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
9904 			      LAST_VIRTUAL_REGISTER + 1);
9905 	}
9906 
9907       g = get_gimple_for_ssa_name (exp);
9908       /* For EXPAND_INITIALIZER try harder to get something simpler.  */
9909       if (g == NULL
9910 	  && modifier == EXPAND_INITIALIZER
9911 	  && !SSA_NAME_IS_DEFAULT_DEF (exp)
9912 	  && (optimize || !SSA_NAME_VAR (exp)
9913 	      || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
9914 	  && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
9915 	g = SSA_NAME_DEF_STMT (exp);
9916       if (g)
9917 	{
9918 	  rtx r;
9919 	  location_t saved_loc = curr_insn_location ();
9920 	  location_t loc = gimple_location (g);
9921 	  if (loc != UNKNOWN_LOCATION)
9922 	    set_curr_insn_location (loc);
9923 	  ops.code = gimple_assign_rhs_code (g);
9924           switch (get_gimple_rhs_class (ops.code))
9925 	    {
9926 	    case GIMPLE_TERNARY_RHS:
9927 	      ops.op2 = gimple_assign_rhs3 (g);
9928 	      /* Fallthru */
9929 	    case GIMPLE_BINARY_RHS:
9930 	      ops.op1 = gimple_assign_rhs2 (g);
9931 
9932 	      /* Try to expand a conditional compare.  */
9933 	      if (targetm.gen_ccmp_first)
9934 		{
9935 		  gcc_checking_assert (targetm.gen_ccmp_next != NULL);
9936 		  r = expand_ccmp_expr (g, mode);
9937 		  if (r)
9938 		    break;
9939 		}
9940 	      /* Fallthru */
9941 	    case GIMPLE_UNARY_RHS:
9942 	      ops.op0 = gimple_assign_rhs1 (g);
9943 	      ops.type = TREE_TYPE (gimple_assign_lhs (g));
9944 	      ops.location = loc;
9945 	      r = expand_expr_real_2 (&ops, target, tmode, modifier);
9946 	      break;
9947 	    case GIMPLE_SINGLE_RHS:
9948 	      {
9949 		r = expand_expr_real (gimple_assign_rhs1 (g), target,
9950 				      tmode, modifier, alt_rtl,
9951 				      inner_reference_p);
9952 		break;
9953 	      }
9954 	    default:
9955 	      gcc_unreachable ();
9956 	    }
9957 	  set_curr_insn_location (saved_loc);
9958 	  if (REG_P (r) && !REG_EXPR (r))
9959 	    set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
9960 	  return r;
9961 	}
9962 
9963       ssa_name = exp;
9964       decl_rtl = get_rtx_for_ssa_name (ssa_name);
9965       exp = SSA_NAME_VAR (ssa_name);
9966       goto expand_decl_rtl;
9967 
9968     case PARM_DECL:
9969     case VAR_DECL:
9970       /* If a static var's type was incomplete when the decl was written,
9971 	 but the type is complete now, lay out the decl now.  */
9972       if (DECL_SIZE (exp) == 0
9973 	  && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
9974 	  && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
9975 	layout_decl (exp, 0);
9976 
9977       /* fall through */
9978 
9979     case FUNCTION_DECL:
9980     case RESULT_DECL:
9981       decl_rtl = DECL_RTL (exp);
9982     expand_decl_rtl:
9983       gcc_assert (decl_rtl);
9984 
9985       /* DECL_MODE might change when TYPE_MODE depends on attribute target
9986 	 settings for VECTOR_TYPE_P that might switch for the function.  */
9987       if (currently_expanding_to_rtl
9988 	  && code == VAR_DECL && MEM_P (decl_rtl)
9989 	  && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
9990 	decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
9991       else
9992 	decl_rtl = copy_rtx (decl_rtl);
9993 
9994       /* Record writes to register variables.  */
9995       if (modifier == EXPAND_WRITE
9996 	  && REG_P (decl_rtl)
9997 	  && HARD_REGISTER_P (decl_rtl))
9998         add_to_hard_reg_set (&crtl->asm_clobbers,
9999 			     GET_MODE (decl_rtl), REGNO (decl_rtl));
10000 
10001       /* Ensure the variable is marked as used even if it doesn't go
10002 	 through a parser.  If it hasn't been used yet, write out an
10003 	 external definition.  */
10004       if (exp)
10005 	TREE_USED (exp) = 1;
10006 
10007       /* Show we haven't gotten RTL for this yet.  */
10008       temp = 0;
10009 
10010       /* Variables inherited from containing functions should have
10011 	 been lowered by this point.  */
10012       if (exp)
10013 	context = decl_function_context (exp);
10014       gcc_assert (!exp
10015 		  || SCOPE_FILE_SCOPE_P (context)
10016 		  || context == current_function_decl
10017 		  || TREE_STATIC (exp)
10018 		  || DECL_EXTERNAL (exp)
10019 		  /* ??? C++ creates functions that are not TREE_STATIC.  */
10020 		  || TREE_CODE (exp) == FUNCTION_DECL);
10021 
10022       /* This is the case of an array whose size is to be determined
10023 	 from its initializer, while the initializer is still being parsed.
10024 	 ??? We aren't parsing while expanding anymore.  */
10025 
10026       if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10027 	temp = validize_mem (decl_rtl);
10028 
10029       /* If DECL_RTL is memory, we are in the normal case and the
10030 	 address is not valid, get the address into a register.  */
10031 
10032       else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10033 	{
10034 	  if (alt_rtl)
10035 	    *alt_rtl = decl_rtl;
10036 	  decl_rtl = use_anchored_address (decl_rtl);
10037 	  if (modifier != EXPAND_CONST_ADDRESS
10038 	      && modifier != EXPAND_SUM
10039 	      && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10040 					       : GET_MODE (decl_rtl),
10041 					       XEXP (decl_rtl, 0),
10042 					       MEM_ADDR_SPACE (decl_rtl)))
10043 	    temp = replace_equiv_address (decl_rtl,
10044 					  copy_rtx (XEXP (decl_rtl, 0)));
10045 	}
10046 
10047       /* If we got something, return it.  But first, set the alignment
10048 	 if the address is a register.  */
10049       if (temp != 0)
10050 	{
10051 	  if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10052 	    mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10053 
10054 	  return temp;
10055 	}
10056 
10057       if (exp)
10058 	dmode = DECL_MODE (exp);
10059       else
10060 	dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10061 
10062       /* If the mode of DECL_RTL does not match that of the decl,
10063 	 there are two cases: we are dealing with a BLKmode value
10064 	 that is returned in a register, or we are dealing with
10065 	 a promoted value.  In the latter case, return a SUBREG
10066 	 of the wanted mode, but mark it so that we know that it
10067 	 was already extended.  */
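      /* For instance (illustrative), on targets that promote subword
	 arguments an SImode PARM_DECL may live in a word_mode register;
	 we then return (subreg:SI (reg:word_mode ...) 0) with
	 SUBREG_PROMOTED_VAR_P set so later code knows the value is
	 already extended.  */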
10068       if (REG_P (decl_rtl)
10069 	  && dmode != BLKmode
10070 	  && GET_MODE (decl_rtl) != dmode)
10071 	{
10072 	  machine_mode pmode;
10073 
10074 	  /* Get the signedness to be used for this variable.  Ensure we get
10075 	     the same mode we got when the variable was declared.  */
10076 	  if (code != SSA_NAME)
10077 	    pmode = promote_decl_mode (exp, &unsignedp);
10078 	  else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10079 		   && gimple_code (g) == GIMPLE_CALL
10080 		   && !gimple_call_internal_p (g))
10081 	    pmode = promote_function_mode (type, mode, &unsignedp,
10082 					   gimple_call_fntype (g),
10083 					   2);
10084 	  else
10085 	    pmode = promote_ssa_mode (ssa_name, &unsignedp);
10086 	  gcc_assert (GET_MODE (decl_rtl) == pmode);
10087 
10088 	  temp = gen_lowpart_SUBREG (mode, decl_rtl);
10089 	  SUBREG_PROMOTED_VAR_P (temp) = 1;
10090 	  SUBREG_PROMOTED_SET (temp, unsignedp);
10091 	  return temp;
10092 	}
10093 
10094       return decl_rtl;
10095 
10096     case INTEGER_CST:
10097       {
10098 	/* Given that TYPE_PRECISION (type) is not always equal to
10099 	   GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10100 	   the former to the latter according to the signedness of the
10101 	   type.  */
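	/* For instance (illustrative): a signed type of precision 1
	   holding the value -1 is sign-extended to all-ones of the mode,
	   while the value 1 in an unsigned precision-1 type zero-extends
	   to 1.  */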
10102 	scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
10103 	temp = immed_wide_int_const
10104 	  (wi::to_wide (exp, GET_MODE_PRECISION (mode)), mode);
10105 	return temp;
10106       }
10107 
10108     case VECTOR_CST:
10109       {
10110 	tree tmp = NULL_TREE;
10111 	if (VECTOR_MODE_P (mode))
10112 	  return const_vector_from_tree (exp);
10113 	scalar_int_mode int_mode;
10114 	if (is_int_mode (mode, &int_mode))
10115 	  {
10116 	    if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
10117 	      return const_scalar_mask_from_tree (int_mode, exp);
10118 	    else
10119 	      {
10120 		tree type_for_mode
10121 		  = lang_hooks.types.type_for_mode (int_mode, 1);
10122 		if (type_for_mode)
10123 		  tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10124 					type_for_mode, exp);
10125 	      }
10126 	  }
10127 	if (!tmp)
10128 	  {
10129 	    vec<constructor_elt, va_gc> *v;
10130 	    /* Constructors need to be fixed-length.  FIXME.  */
10131 	    unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10132 	    vec_alloc (v, nunits);
10133 	    for (unsigned int i = 0; i < nunits; ++i)
10134 	      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10135 	    tmp = build_constructor (type, v);
10136 	  }
10137 	return expand_expr (tmp, ignore ? const0_rtx : target,
10138 			    tmode, modifier);
10139       }
10140 
10141     case CONST_DECL:
10142       if (modifier == EXPAND_WRITE)
10143 	{
10144 	  /* Writing into CONST_DECL is always invalid, but handle it
10145 	     gracefully.  */
10146 	  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10147 	  scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10148 	  op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10149 					 EXPAND_NORMAL, as);
10150 	  op0 = memory_address_addr_space (mode, op0, as);
10151 	  temp = gen_rtx_MEM (mode, op0);
10152 	  set_mem_addr_space (temp, as);
10153 	  return temp;
10154 	}
10155       return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10156 
10157     case REAL_CST:
10158       /* If optimized, generate immediate CONST_DOUBLE
10159 	 which will be turned into memory by reload if necessary.
10160 
10161 	 We used to force a register so that loop.c could see it.  But
10162 	 this does not allow gen_* patterns to perform optimizations with
10163 	 the constants.  It also produces two insns in cases like "x = 1.0;".
10164 	 On most machines, floating-point constants are not permitted in
10165 	 many insns, so we'd end up copying it to a register in any case.
10166 
10167 	 Now, we do the copying in expand_binop, if appropriate.  */
10168       return const_double_from_real_value (TREE_REAL_CST (exp),
10169 					   TYPE_MODE (TREE_TYPE (exp)));
10170 
10171     case FIXED_CST:
10172       return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10173 					   TYPE_MODE (TREE_TYPE (exp)));
10174 
10175     case COMPLEX_CST:
10176       /* Handle evaluating a complex constant in a CONCAT target.  */
10177       if (original_target && GET_CODE (original_target) == CONCAT)
10178 	{
10179 	  machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10180 	  rtx rtarg, itarg;
10181 
10182 	  rtarg = XEXP (original_target, 0);
10183 	  itarg = XEXP (original_target, 1);
10184 
10185 	  /* Move the real and imaginary parts separately.  */
10186 	  op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10187 	  op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10188 
10189 	  if (op0 != rtarg)
10190 	    emit_move_insn (rtarg, op0);
10191 	  if (op1 != itarg)
10192 	    emit_move_insn (itarg, op1);
10193 
10194 	  return original_target;
10195 	}
10196 
10197       /* fall through */
10198 
10199     case STRING_CST:
10200       temp = expand_expr_constant (exp, 1, modifier);
10201 
10202       /* temp contains a constant address.
10203 	 On RISC machines where a constant address isn't valid,
10204 	 make some insns to get that address into a register.  */
10205       if (modifier != EXPAND_CONST_ADDRESS
10206 	  && modifier != EXPAND_INITIALIZER
10207 	  && modifier != EXPAND_SUM
10208 	  && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10209 					    MEM_ADDR_SPACE (temp)))
10210 	return replace_equiv_address (temp,
10211 				      copy_rtx (XEXP (temp, 0)));
10212       return temp;
10213 
10214     case POLY_INT_CST:
10215       return immed_wide_int_const (poly_int_cst_value (exp), mode);
10216 
10217     case SAVE_EXPR:
10218       {
10219 	tree val = treeop0;
10220 	rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10221 				      inner_reference_p);
10222 
10223 	if (!SAVE_EXPR_RESOLVED_P (exp))
10224 	  {
10225 	    /* We can indeed still hit this case, typically via builtin
10226 	       expanders calling save_expr immediately before expanding
10227 	       something.  Assume this means that we only have to deal
10228 	       with non-BLKmode values.  */
10229 	    gcc_assert (GET_MODE (ret) != BLKmode);
10230 
10231 	    val = build_decl (curr_insn_location (),
10232 			      VAR_DECL, NULL, TREE_TYPE (exp));
10233 	    DECL_ARTIFICIAL (val) = 1;
10234 	    DECL_IGNORED_P (val) = 1;
10235 	    treeop0 = val;
10236 	    TREE_OPERAND (exp, 0) = treeop0;
10237 	    SAVE_EXPR_RESOLVED_P (exp) = 1;
10238 
10239 	    if (!CONSTANT_P (ret))
10240 	      ret = copy_to_reg (ret);
10241 	    SET_DECL_RTL (val, ret);
10242 	  }
10243 
10244         return ret;
10245       }
10246 
10247 
10248     case CONSTRUCTOR:
10249       /* If we don't need the result, just ensure we evaluate any
10250 	 subexpressions.  */
10251       if (ignore)
10252 	{
10253 	  unsigned HOST_WIDE_INT idx;
10254 	  tree value;
10255 
10256 	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10257 	    expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10258 
10259 	  return const0_rtx;
10260 	}
10261 
10262       return expand_constructor (exp, target, modifier, false);
10263 
10264     case TARGET_MEM_REF:
10265       {
10266 	addr_space_t as
10267 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10268 	enum insn_code icode;
10269 	unsigned int align;
10270 
10271 	op0 = addr_for_mem_ref (exp, as, true);
10272 	op0 = memory_address_addr_space (mode, op0, as);
10273 	temp = gen_rtx_MEM (mode, op0);
10274 	set_mem_attributes (temp, exp, 0);
10275 	set_mem_addr_space (temp, as);
10276 	align = get_object_alignment (exp);
10277 	if (modifier != EXPAND_WRITE
10278 	    && modifier != EXPAND_MEMORY
10279 	    && mode != BLKmode
10280 	    && align < GET_MODE_ALIGNMENT (mode)
10281 	    /* If the target does not have special handling for unaligned
10282 	       loads of this mode then it can use regular moves for them.  */
10283 	    && ((icode = optab_handler (movmisalign_optab, mode))
10284 		!= CODE_FOR_nothing))
10285 	  {
10286 	    struct expand_operand ops[2];
10287 
10288 	    /* We've already validated the memory, and we're creating a
10289 	       new pseudo destination.  The predicates really can't fail,
10290 	       nor can the generator.  */
10291 	    create_output_operand (&ops[0], NULL_RTX, mode);
10292 	    create_fixed_operand (&ops[1], temp);
10293 	    expand_insn (icode, 2, ops);
10294 	    temp = ops[0].value;
10295 	  }
10296 	return temp;
10297       }
10298 
10299     case MEM_REF:
10300       {
10301 	const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10302 	addr_space_t as
10303 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10304 	machine_mode address_mode;
10305 	tree base = TREE_OPERAND (exp, 0);
10306 	gimple *def_stmt;
10307 	enum insn_code icode;
10308 	unsigned align;
10309 	/* Handle expansion of non-aliased memory with non-BLKmode.  That
10310 	   might end up in a register.  */
10311 	if (mem_ref_refers_to_non_mem_p (exp))
10312 	  {
10313 	    poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10314 	    base = TREE_OPERAND (base, 0);
10315 	    poly_uint64 type_size;
10316 	    if (known_eq (offset, 0)
10317 	        && !reverse
10318 		&& poly_int_tree_p (TYPE_SIZE (type), &type_size)
10319 		&& known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
10320 	      return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10321 				  target, tmode, modifier);
10322 	    if (TYPE_MODE (type) == BLKmode)
10323 	      {
10324 		temp = assign_stack_temp (DECL_MODE (base),
10325 					  GET_MODE_SIZE (DECL_MODE (base)));
10326 		store_expr (base, temp, 0, false, false);
10327 		temp = adjust_address (temp, BLKmode, offset);
10328 		set_mem_size (temp, int_size_in_bytes (type));
10329 		return temp;
10330 	      }
10331 	    exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10332 			  bitsize_int (offset * BITS_PER_UNIT));
10333 	    REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10334 	    return expand_expr (exp, target, tmode, modifier);
10335 	  }
10336 	address_mode = targetm.addr_space.address_mode (as);
10337 	base = TREE_OPERAND (exp, 0);
10338 	if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10339 	  {
10340 	    tree mask = gimple_assign_rhs2 (def_stmt);
10341 	    base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10342 			   gimple_assign_rhs1 (def_stmt), mask);
10343 	    TREE_OPERAND (exp, 0) = base;
10344 	  }
10345 	align = get_object_alignment (exp);
10346 	op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10347 	op0 = memory_address_addr_space (mode, op0, as);
10348 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
10349 	  {
10350 	    rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10351 	    op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10352 	    op0 = memory_address_addr_space (mode, op0, as);
10353 	  }
10354 	temp = gen_rtx_MEM (mode, op0);
10355 	set_mem_attributes (temp, exp, 0);
10356 	set_mem_addr_space (temp, as);
10357 	if (TREE_THIS_VOLATILE (exp))
10358 	  MEM_VOLATILE_P (temp) = 1;
10359 	if (modifier != EXPAND_WRITE
10360 	    && modifier != EXPAND_MEMORY
10361 	    && !inner_reference_p
10362 	    && mode != BLKmode
10363 	    && align < GET_MODE_ALIGNMENT (mode))
10364 	  {
10365 	    if ((icode = optab_handler (movmisalign_optab, mode))
10366 		!= CODE_FOR_nothing)
10367 	      {
10368 		struct expand_operand ops[2];
10369 
10370 		/* We've already validated the memory, and we're creating a
10371 		   new pseudo destination.  The predicates really can't fail,
10372 		   nor can the generator.  */
10373 		create_output_operand (&ops[0], NULL_RTX, mode);
10374 		create_fixed_operand (&ops[1], temp);
10375 		expand_insn (icode, 2, ops);
10376 		temp = ops[0].value;
10377 	      }
10378 	    else if (targetm.slow_unaligned_access (mode, align))
10379 	      temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
10380 					0, TYPE_UNSIGNED (TREE_TYPE (exp)),
10381 					(modifier == EXPAND_STACK_PARM
10382 					 ? NULL_RTX : target),
10383 					mode, mode, false, alt_rtl);
10384 	  }
10385 	if (reverse
10386 	    && modifier != EXPAND_MEMORY
10387 	    && modifier != EXPAND_WRITE)
10388 	  temp = flip_storage_order (mode, temp);
10389 	return temp;
10390       }
10391 
10392     case ARRAY_REF:
10393 
10394       {
10395 	tree array = treeop0;
10396 	tree index = treeop1;
10397 	tree init;
10398 
10399 	/* Fold an expression like: "foo"[2].
10400 	   This is not done in fold so it won't happen inside &.
10401 	   Don't fold if this is for wide characters since it's too
10402 	   difficult to do correctly and this is a very rare case.  */
10403 
10404 	if (modifier != EXPAND_CONST_ADDRESS
10405 	    && modifier != EXPAND_INITIALIZER
10406 	    && modifier != EXPAND_MEMORY)
10407 	  {
10408 	    tree t = fold_read_from_constant_string (exp);
10409 
10410 	    if (t)
10411 	      return expand_expr (t, target, tmode, modifier);
10412 	  }
10413 
10414 	/* If this is a constant index into a constant array,
10415 	   just get the value from the array.  Handle both the cases when
10416 	   we have an explicit constructor and when our operand is a variable
10417 	   that was declared const.  */
10418 
10419 	if (modifier != EXPAND_CONST_ADDRESS
10420 	    && modifier != EXPAND_INITIALIZER
10421 	    && modifier != EXPAND_MEMORY
10422 	    && TREE_CODE (array) == CONSTRUCTOR
10423 	    && ! TREE_SIDE_EFFECTS (array)
10424 	    && TREE_CODE (index) == INTEGER_CST)
10425 	  {
10426 	    unsigned HOST_WIDE_INT ix;
10427 	    tree field, value;
10428 
10429 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10430 				      field, value)
10431 	      if (tree_int_cst_equal (field, index))
10432 		{
10433 		  if (!TREE_SIDE_EFFECTS (value))
10434 		    return expand_expr (fold (value), target, tmode, modifier);
10435 		  break;
10436 		}
10437 	  }
10438 
10439 	else if (optimize >= 1
10440 		 && modifier != EXPAND_CONST_ADDRESS
10441 		 && modifier != EXPAND_INITIALIZER
10442 		 && modifier != EXPAND_MEMORY
10443 		 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10444 		 && TREE_CODE (index) == INTEGER_CST
10445 		 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10446 		 && (init = ctor_for_folding (array)) != error_mark_node)
10447 	  {
10448 	    if (init == NULL_TREE)
10449 	      {
10450 		tree value = build_zero_cst (type);
10451 		if (TREE_CODE (value) == CONSTRUCTOR)
10452 		  {
10453 		    /* If VALUE is a CONSTRUCTOR, this optimization is only
10454 		       useful if this doesn't store the CONSTRUCTOR into
10455 		       memory.  If it does, it is more efficient to just
10456 		       load the data from the array directly.  */
10457 		    rtx ret = expand_constructor (value, target,
10458 						  modifier, true);
10459 		    if (ret == NULL_RTX)
10460 		      value = NULL_TREE;
10461 		  }
10462 
10463 		if (value)
10464 		  return expand_expr (value, target, tmode, modifier);
10465 	      }
10466 	    else if (TREE_CODE (init) == CONSTRUCTOR)
10467 	      {
10468 		unsigned HOST_WIDE_INT ix;
10469 		tree field, value;
10470 
10471 		FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
10472 					  field, value)
10473 		  if (tree_int_cst_equal (field, index))
10474 		    {
10475 		      if (TREE_SIDE_EFFECTS (value))
10476 			break;
10477 
10478 		      if (TREE_CODE (value) == CONSTRUCTOR)
10479 			{
10480 			  /* If VALUE is a CONSTRUCTOR, this
10481 			     optimization is only useful if
10482 			     this doesn't store the CONSTRUCTOR
10483 			     into memory.  If it does, it is more
10484 			     efficient to just load the data from
10485 			     the array directly.  */
10486 			  rtx ret = expand_constructor (value, target,
10487 							modifier, true);
10488 			  if (ret == NULL_RTX)
10489 			    break;
10490 			}
10491 
10492 		      return
10493 		        expand_expr (fold (value), target, tmode, modifier);
10494 		    }
10495 	      }
10496 	    else if (TREE_CODE (init) == STRING_CST)
10497 	      {
10498 		tree low_bound = array_ref_low_bound (exp);
10499 		tree index1 = fold_convert_loc (loc, sizetype, treeop1);
10500 
10501 		/* Optimize the special case of a zero lower bound.
10502 
10503 		   We convert the lower bound to sizetype to avoid problems
10504 		   with constant folding.  E.g. suppose the lower bound is
10505 		   1 and its mode is QI.  Without the conversion
10506 		      (ARRAY + (INDEX - (unsigned char)1))
10507 		   becomes
10508 		      (ARRAY + (-(unsigned char)1) + INDEX)
10509 		   which becomes
10510 		      (ARRAY + 255 + INDEX).  Oops!  */
10511 		if (!integer_zerop (low_bound))
10512 		  index1 = size_diffop_loc (loc, index1,
10513 					    fold_convert_loc (loc, sizetype,
10514 							      low_bound));
10515 
10516 		if (tree_fits_uhwi_p (index1)
10517 		    && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
10518 		  {
10519 		    tree type = TREE_TYPE (TREE_TYPE (init));
10520 		    scalar_int_mode mode;
10521 
10522 		    if (is_int_mode (TYPE_MODE (type), &mode)
10523 			&& GET_MODE_SIZE (mode) == 1)
10524 		      return gen_int_mode (TREE_STRING_POINTER (init)
10525 					   [TREE_INT_CST_LOW (index1)],
10526 					   mode);
10527 		  }
10528 	      }
10529 	  }
10530       }
10531       goto normal_inner_ref;
10532 
10533     case COMPONENT_REF:
10534       /* If the operand is a CONSTRUCTOR, we can just extract the
10535 	 appropriate field if it is present.  */
10536       if (TREE_CODE (treeop0) == CONSTRUCTOR)
10537 	{
10538 	  unsigned HOST_WIDE_INT idx;
10539 	  tree field, value;
10540 	  scalar_int_mode field_mode;
10541 
10542 	  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
10543 				    idx, field, value)
10544 	    if (field == treeop1
10545 		/* We can normally use the value of the field in the
10546 		   CONSTRUCTOR.  However, if this is a bitfield in
10547 		   an integral mode that we can fit in a HOST_WIDE_INT,
10548 		   we must mask only the number of bits in the bitfield,
10549 		   since this is done implicitly by the constructor.  If
10550 		   the bitfield does not meet either of those conditions,
10551 		   we can't do this optimization.  */
10552 		&& (! DECL_BIT_FIELD (field)
10553 		    || (is_int_mode (DECL_MODE (field), &field_mode)
10554 			&& (GET_MODE_PRECISION (field_mode)
10555 			    <= HOST_BITS_PER_WIDE_INT))))
10556 	      {
10557 		if (DECL_BIT_FIELD (field)
10558 		    && modifier == EXPAND_STACK_PARM)
10559 		  target = 0;
10560 		op0 = expand_expr (value, target, tmode, modifier);
10561 		if (DECL_BIT_FIELD (field))
10562 		  {
10563 		    HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
10564 		    scalar_int_mode imode
10565 		      = SCALAR_INT_TYPE_MODE (TREE_TYPE (field));
10566 
10567 		    if (TYPE_UNSIGNED (TREE_TYPE (field)))
10568 		      {
10569 			op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1,
10570 					    imode);
10571 			op0 = expand_and (imode, op0, op1, target);
10572 		      }
10573 		    else
10574 		      {
10575 			int count = GET_MODE_PRECISION (imode) - bitsize;
10576 
10577 			op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
10578 					    target, 0);
10579 			op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
10580 					    target, 0);
10581 		      }
10582 		  }
10583 
10584 		return op0;
10585 	      }
10586 	}
10587       goto normal_inner_ref;
10588 
10589     case BIT_FIELD_REF:
10590     case ARRAY_RANGE_REF:
10591     normal_inner_ref:
10592       {
10593 	machine_mode mode1, mode2;
10594 	poly_int64 bitsize, bitpos, bytepos;
10595 	tree offset;
10596 	int reversep, volatilep = 0, must_force_mem;
10597 	tree tem
10598 	  = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
10599 				 &unsignedp, &reversep, &volatilep);
10600 	rtx orig_op0, memloc;
10601 	bool clear_mem_expr = false;
10602 
10603 	/* If we got back the original object, something is wrong.  Perhaps
10604 	   we are evaluating an expression too early.  In any event, don't
10605 	   infinitely recurse.  */
10606 	gcc_assert (tem != exp);
10607 
10608 	/* If TEM's type is a union of variable size, pass TARGET to the inner
10609 	   computation, since it will need a temporary and TARGET is known
10610 	   to have to do.  This occurs in unchecked conversion in Ada.  */
10611 	orig_op0 = op0
10612 	  = expand_expr_real (tem,
10613 			      (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
10614 			       && COMPLETE_TYPE_P (TREE_TYPE (tem))
10615 			       && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
10616 				   != INTEGER_CST)
10617 			       && modifier != EXPAND_STACK_PARM
10618 			       ? target : NULL_RTX),
10619 			      VOIDmode,
10620 			      modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
10621 			      NULL, true);
10622 
10623 	/* If the field has a mode, we want to access it in the
10624 	   field's mode, not the computed mode.
10625 	   If a MEM has VOIDmode (external with incomplete type),
10626 	   use BLKmode for it instead.  */
10627 	if (MEM_P (op0))
10628 	  {
10629 	    if (mode1 != VOIDmode)
10630 	      op0 = adjust_address (op0, mode1, 0);
10631 	    else if (GET_MODE (op0) == VOIDmode)
10632 	      op0 = adjust_address (op0, BLKmode, 0);
10633 	  }
10634 
10635 	mode2
10636 	  = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
10637 
10638 	/* Make sure bitpos is not negative, it can wreak havoc later.  */
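	/* For instance (illustrative), a bitpos of -17 becomes an offset
	   of -3 bytes and a bitpos of 7 bits.  */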
10639 	if (maybe_lt (bitpos, 0))
10640 	  {
10641 	    gcc_checking_assert (offset == NULL_TREE);
10642 	    offset = size_int (bits_to_bytes_round_down (bitpos));
10643 	    bitpos = num_trailing_bits (bitpos);
10644 	  }
10645 
10646 	/* If we have either an offset, a BLKmode result, or a reference
10647 	   outside the underlying object, we must force it to memory.
10648 	   Such a case can occur in Ada if we have unchecked conversion
10649 	   of an expression from a scalar type to an aggregate type or
10650 	   for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10651 	   passed a partially uninitialized object or a view-conversion
10652 	   to a larger size.  */
10653 	must_force_mem = (offset
10654 			  || mode1 == BLKmode
10655 			  || (mode == BLKmode
10656 			      && !int_mode_for_size (bitsize, 1).exists ())
10657 			  || maybe_gt (bitpos + bitsize,
10658 				       GET_MODE_BITSIZE (mode2)));
10659 
10660 	/* Handle CONCAT first.  */
10661 	if (GET_CODE (op0) == CONCAT && !must_force_mem)
10662 	  {
10663 	    if (known_eq (bitpos, 0)
10664 		&& known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
10665 		&& COMPLEX_MODE_P (mode1)
10666 		&& COMPLEX_MODE_P (GET_MODE (op0))
10667 		&& (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
10668 		    == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
10669 	      {
10670 		if (reversep)
10671 		  op0 = flip_storage_order (GET_MODE (op0), op0);
10672 		if (mode1 != GET_MODE (op0))
10673 		  {
10674 		    rtx parts[2];
10675 		    for (int i = 0; i < 2; i++)
10676 		      {
10677 			rtx op = read_complex_part (op0, i != 0);
10678 			if (GET_CODE (op) == SUBREG)
10679 			  op = force_reg (GET_MODE (op), op);
10680 			rtx temp = gen_lowpart_common (GET_MODE_INNER (mode1),
10681 						       op);
10682 			if (temp)
10683 			  op = temp;
10684 			else
10685 			  {
10686 			    if (!REG_P (op) && !MEM_P (op))
10687 			      op = force_reg (GET_MODE (op), op);
10688 			    op = gen_lowpart (GET_MODE_INNER (mode1), op);
10689 			  }
10690 			parts[i] = op;
10691 		      }
10692 		    op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
10693 		  }
10694 		return op0;
10695 	      }
10696 	    if (known_eq (bitpos, 0)
10697 		&& known_eq (bitsize,
10698 			     GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10699 		&& maybe_ne (bitsize, 0))
10700 	      {
10701 		op0 = XEXP (op0, 0);
10702 		mode2 = GET_MODE (op0);
10703 	      }
10704 	    else if (known_eq (bitpos,
10705 			       GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10706 		     && known_eq (bitsize,
10707 				  GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
10708 		     && maybe_ne (bitpos, 0)
10709 		     && maybe_ne (bitsize, 0))
10710 	      {
10711 		op0 = XEXP (op0, 1);
10712 		bitpos = 0;
10713 		mode2 = GET_MODE (op0);
10714 	      }
10715 	    else
10716 	      /* Otherwise force into memory.  */
10717 	      must_force_mem = 1;
10718 	  }
10719 
10720 	/* If this is a constant, put it in a register if it is a legitimate
10721 	   constant and we don't need a memory reference.  */
10722 	if (CONSTANT_P (op0)
10723 	    && mode2 != BLKmode
10724 	    && targetm.legitimate_constant_p (mode2, op0)
10725 	    && !must_force_mem)
10726 	  op0 = force_reg (mode2, op0);
10727 
10728 	/* Otherwise, if this is a constant, try to force it to the constant
10729 	   pool.  Note that back-ends, e.g. MIPS, may refuse to do so if it
10730 	   is a legitimate constant.  */
10731 	else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
10732 	  op0 = validize_mem (memloc);
10733 
10734 	/* Otherwise, if this is a constant, or if the object is not in memory
10735 	   but needs to be, put it there.  */
10736 	else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
10737 	  {
10738 	    memloc = assign_temp (TREE_TYPE (tem), 1, 1);
10739 	    emit_move_insn (memloc, op0);
10740 	    op0 = memloc;
10741 	    clear_mem_expr = true;
10742 	  }
10743 
10744 	if (offset)
10745 	  {
10746 	    machine_mode address_mode;
10747 	    rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
10748 					  EXPAND_SUM);
10749 
10750 	    gcc_assert (MEM_P (op0));
10751 
10752 	    address_mode = get_address_mode (op0);
10753 	    if (GET_MODE (offset_rtx) != address_mode)
10754 	      {
10755 		/* We cannot be sure that the RTL in offset_rtx is valid outside
10756 		   of a memory address context, so force it into a register
10757 		   before attempting to convert it to the desired mode.  */
10758 		offset_rtx = force_operand (offset_rtx, NULL_RTX);
10759 		offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
10760 	      }
10761 
10762 	    /* See the comment in expand_assignment for the rationale.  */
10763 	    if (mode1 != VOIDmode
10764 		&& maybe_ne (bitpos, 0)
10765 		&& maybe_gt (bitsize, 0)
10766 		&& multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
10767 		&& multiple_p (bitpos, bitsize)
10768 		&& multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
10769 		&& MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
10770 	      {
10771 		op0 = adjust_address (op0, mode1, bytepos);
10772 		bitpos = 0;
10773 	      }
10774 
10775 	    op0 = offset_address (op0, offset_rtx,
10776 				  highest_pow2_factor (offset));
10777 	  }
10778 
10779 	/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10780 	   record its alignment as BIGGEST_ALIGNMENT.  */
10781 	if (MEM_P (op0)
10782 	    && known_eq (bitpos, 0)
10783 	    && offset != 0
10784 	    && is_aligning_offset (offset, tem))
10785 	  set_mem_align (op0, BIGGEST_ALIGNMENT);
10786 
10787 	/* Don't forget about volatility even if this is a bitfield.  */
10788 	if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
10789 	  {
10790 	    if (op0 == orig_op0)
10791 	      op0 = copy_rtx (op0);
10792 
10793 	    MEM_VOLATILE_P (op0) = 1;
10794 	  }
10795 
10796 	/* In cases where an aligned union has an unaligned object
10797 	   as a field, we might be extracting a BLKmode value from
10798 	   an integer-mode (e.g., SImode) object.  Handle this case
10799 	   by doing the extract into an object as wide as the field
10800 	   (which we know to be the width of a basic mode), then
10801 	   storing into memory, and changing the mode to BLKmode.  */
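	/* Purely as an illustration: a BLKmode record field that happens to
	   live inside an SImode union member can take this path; the SImode
	   value is read out as a bit-field below and, when the result type
	   is BLKmode, spilled to a stack temporary whose mode is then
	   changed to BLKmode.  */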
10802 	if (mode1 == VOIDmode
10803 	    || REG_P (op0) || GET_CODE (op0) == SUBREG
10804 	    || (mode1 != BLKmode && ! direct_load[(int) mode1]
10805 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
10806 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
10807 		&& modifier != EXPAND_CONST_ADDRESS
10808 		&& modifier != EXPAND_INITIALIZER
10809 		&& modifier != EXPAND_MEMORY)
10810 	    /* If the bitfield is volatile and the bitsize
10811 	       is narrower than the access size of the bitfield,
10812 	       we need to extract bitfields from the access.  */
10813 	    || (volatilep && TREE_CODE (exp) == COMPONENT_REF
10814 		&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
10815 		&& mode1 != BLKmode
10816 		&& maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
10817 	    /* If the field isn't aligned enough to fetch as a memref,
10818 	       fetch it as a bit field.  */
10819 	    || (mode1 != BLKmode
10820 		&& (((MEM_P (op0)
10821 		      ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
10822 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
10823 		      : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
10824 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
10825 		     && modifier != EXPAND_MEMORY
10826 		     && ((modifier == EXPAND_CONST_ADDRESS
10827 			  || modifier == EXPAND_INITIALIZER)
10828 			 ? STRICT_ALIGNMENT
10829 			 : targetm.slow_unaligned_access (mode1,
10830 							  MEM_ALIGN (op0))))
10831 		    || !multiple_p (bitpos, BITS_PER_UNIT)))
10832 	    /* If the type and the field are a constant size and the
10833 	       size of the type isn't the same size as the bitfield,
10834 	       we must use bitfield operations.  */
10835 	    || (known_size_p (bitsize)
10836 		&& TYPE_SIZE (TREE_TYPE (exp))
10837 		&& poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
10838 		&& maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
10839 			     bitsize)))
10840 	  {
10841 	    machine_mode ext_mode = mode;
10842 
10843 	    if (ext_mode == BLKmode
10844 		&& ! (target != 0 && MEM_P (op0)
10845 		      && MEM_P (target)
10846 		      && multiple_p (bitpos, BITS_PER_UNIT)))
10847 	      ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
10848 
10849 	    if (ext_mode == BLKmode)
10850 	      {
10851 		if (target == 0)
10852 		  target = assign_temp (type, 1, 1);
10853 
10854 		/* ??? Unlike the similar test a few lines below, this one is
10855 		   very likely obsolete.  */
10856 		if (known_eq (bitsize, 0))
10857 		  return target;
10858 
10859 		/* In this case, BITPOS must start at a byte boundary and
10860 		   TARGET, if specified, must be a MEM.  */
10861 		gcc_assert (MEM_P (op0)
10862 			    && (!target || MEM_P (target)));
10863 
10864 		bytepos = exact_div (bitpos, BITS_PER_UNIT);
10865 		poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
10866 		emit_block_move (target,
10867 				 adjust_address (op0, VOIDmode, bytepos),
10868 				 gen_int_mode (bytesize, Pmode),
10869 				 (modifier == EXPAND_STACK_PARM
10870 				  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
10871 
10872 		return target;
10873 	      }
10874 
10875 	    /* If we have nothing to extract, the result will be 0 for targets
10876 	       with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise.  Always
10877 	       return 0 for the sake of consistency, as reading a zero-sized
10878 	       bitfield is valid in Ada and the value is fully specified.  */
10879 	    if (known_eq (bitsize, 0))
10880 	      return const0_rtx;
10881 
10882 	    op0 = validize_mem (op0);
10883 
10884 	    if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
10885 	      mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10886 
10887 	    /* If the result has a record type and the extraction is done in
10888 	       an integral mode, then the field may be not aligned on a byte
10889 	       boundary; in this case, if it has reverse storage order, it
10890 	       needs to be extracted as a scalar field with reverse storage
10891 	       order and put back into memory order afterwards.  */
10892 	    if (TREE_CODE (type) == RECORD_TYPE
10893 		&& GET_MODE_CLASS (ext_mode) == MODE_INT)
10894 	      reversep = TYPE_REVERSE_STORAGE_ORDER (type);
10895 
10896 	    gcc_checking_assert (known_ge (bitpos, 0));
10897 	    op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
10898 				     (modifier == EXPAND_STACK_PARM
10899 				      ? NULL_RTX : target),
10900 				     ext_mode, ext_mode, reversep, alt_rtl);
10901 
10902 	    /* If the result has a record type and the mode of OP0 is an
10903 	       integral mode then, if BITSIZE is narrower than this mode
10904 	       and this is for big-endian data, we must put the field
10905 	       into the high-order bits.  And we must also put it back
10906 	       into memory order if it has been previously reversed.  */
10907 	    scalar_int_mode op0_mode;
10908 	    if (TREE_CODE (type) == RECORD_TYPE
10909 		&& is_int_mode (GET_MODE (op0), &op0_mode))
10910 	      {
10911 		HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
10912 
10913 		gcc_checking_assert (known_le (bitsize, size));
10914 		if (maybe_lt (bitsize, size)
10915 		    && (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN))
10916 		  op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
10917 				      size - bitsize, op0, 1);
10918 
10919 		if (reversep)
10920 		  op0 = flip_storage_order (op0_mode, op0);
10921 	      }
10922 
10923 	    /* If the result type is BLKmode, store the data into a temporary
10924 	       of the appropriate type, but with the mode corresponding to the
10925 	       mode for the data we have (op0's mode).  */
10926 	    if (mode == BLKmode)
10927 	      {
10928 		rtx new_rtx
10929 		  = assign_stack_temp_for_type (ext_mode,
10930 						GET_MODE_BITSIZE (ext_mode),
10931 						type);
10932 		emit_move_insn (new_rtx, op0);
10933 		op0 = copy_rtx (new_rtx);
10934 		PUT_MODE (op0, BLKmode);
10935 	      }
10936 
10937 	    return op0;
10938 	  }
10939 
10940 	/* If the result is BLKmode, use that to access the object
10941 	   now as well.  */
10942 	if (mode == BLKmode)
10943 	  mode1 = BLKmode;
10944 
10945 	/* Get a reference to just this component.  */
10946 	bytepos = bits_to_bytes_round_down (bitpos);
10947 	if (modifier == EXPAND_CONST_ADDRESS
10948 	    || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
10949 	  op0 = adjust_address_nv (op0, mode1, bytepos);
10950 	else
10951 	  op0 = adjust_address (op0, mode1, bytepos);
10952 
10953 	if (op0 == orig_op0)
10954 	  op0 = copy_rtx (op0);
10955 
10956 	/* Don't set memory attributes if the base expression is
10957 	   SSA_NAME that got expanded as a MEM.  In that case, we should
10958 	   just honor its original memory attributes.  */
10959 	if (TREE_CODE (tem) != SSA_NAME || !MEM_P (orig_op0))
10960 	  set_mem_attributes (op0, exp, 0);
10961 
10962 	if (REG_P (XEXP (op0, 0)))
10963 	  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10964 
10965 	/* If op0 is a temporary because the original expression was forced
10966 	   to memory, clear MEM_EXPR so that the original expression cannot
10967 	   be marked as addressable through MEM_EXPR of the temporary.  */
10968 	if (clear_mem_expr)
10969 	  set_mem_expr (op0, NULL_TREE);
10970 
10971 	MEM_VOLATILE_P (op0) |= volatilep;
10972 
10973         if (reversep
10974 	    && modifier != EXPAND_MEMORY
10975 	    && modifier != EXPAND_WRITE)
10976 	  op0 = flip_storage_order (mode1, op0);
10977 
10978 	if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
10979 	    || modifier == EXPAND_CONST_ADDRESS
10980 	    || modifier == EXPAND_INITIALIZER)
10981 	  return op0;
10982 
10983 	if (target == 0)
10984 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
10985 
10986 	convert_move (target, op0, unsignedp);
10987 	return target;
10988       }
10989 
10990     case OBJ_TYPE_REF:
10991       return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
10992 
10993     case CALL_EXPR:
10994       /* All valid uses of __builtin_va_arg_pack () are removed during
10995 	 inlining.  */
10996       if (CALL_EXPR_VA_ARG_PACK (exp))
10997 	error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
10998       {
10999 	tree fndecl = get_callee_fndecl (exp), attr;
11000 
11001 	if (fndecl
11002 	    /* Don't diagnose the error attribute in thunks, those are
11003 	       artificially created.  */
11004 	    && !CALL_FROM_THUNK_P (exp)
11005 	    && (attr = lookup_attribute ("error",
11006 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11007 	  {
11008 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11009 	    error ("%Kcall to %qs declared with attribute error: %s", exp,
11010 		   identifier_to_locale (ident),
11011 		   TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11012 	  }
11013 	if (fndecl
11014 	    /* Don't diagnose the warning attribute in thunks, those are
11015 	       artificially created.  */
11016 	    && !CALL_FROM_THUNK_P (exp)
11017 	    && (attr = lookup_attribute ("warning",
11018 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11019 	  {
11020 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11021 	    warning_at (tree_nonartificial_location (exp),
11022 			OPT_Wattribute_warning,
11023 			"%Kcall to %qs declared with attribute warning: %s",
11024 			exp, identifier_to_locale (ident),
11025 			TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11026 	  }
11027 
11028 	/* Check for a built-in function.  */
11029 	if (fndecl && fndecl_built_in_p (fndecl))
11030 	  {
11031 	    gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
11032 	    return expand_builtin (exp, target, subtarget, tmode, ignore);
11033 	  }
11034       }
11035       return expand_call (exp, target, ignore);
11036 
11037     case VIEW_CONVERT_EXPR:
11038       op0 = NULL_RTX;
11039 
11040       /* If we are converting to BLKmode, try to avoid an intermediate
11041 	 temporary by fetching an inner memory reference.  */
11042       if (mode == BLKmode
11043 	  && poly_int_tree_p (TYPE_SIZE (type))
11044 	  && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11045 	  && handled_component_p (treeop0))
11046       {
11047 	machine_mode mode1;
11048 	poly_int64 bitsize, bitpos, bytepos;
11049 	tree offset;
11050 	int unsignedp, reversep, volatilep = 0;
11051 	tree tem
11052 	  = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11053 				 &unsignedp, &reversep, &volatilep);
11054 	rtx orig_op0;
11055 
11056 	/* ??? We should work harder and deal with non-zero offsets.  */
11057 	if (!offset
11058 	    && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11059 	    && !reversep
11060 	    && known_size_p (bitsize)
11061 	    && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11062 	  {
11063 	    /* See the normal_inner_ref case for the rationale.  */
11064 	    orig_op0
11065 	      = expand_expr_real (tem,
11066 				  (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11067 				   && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11068 				       != INTEGER_CST)
11069 				   && modifier != EXPAND_STACK_PARM
11070 				   ? target : NULL_RTX),
11071 				  VOIDmode,
11072 				  modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11073 				  NULL, true);
11074 
11075 	    if (MEM_P (orig_op0))
11076 	      {
11077 		op0 = orig_op0;
11078 
11079 		/* Get a reference to just this component.  */
11080 		if (modifier == EXPAND_CONST_ADDRESS
11081 		    || modifier == EXPAND_SUM
11082 		    || modifier == EXPAND_INITIALIZER)
11083 		  op0 = adjust_address_nv (op0, mode, bytepos);
11084 		else
11085 		  op0 = adjust_address (op0, mode, bytepos);
11086 
11087 		if (op0 == orig_op0)
11088 		  op0 = copy_rtx (op0);
11089 
11090 		set_mem_attributes (op0, treeop0, 0);
11091 		if (REG_P (XEXP (op0, 0)))
11092 		  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11093 
11094 		MEM_VOLATILE_P (op0) |= volatilep;
11095 	      }
11096 	  }
11097       }
11098 
11099       if (!op0)
11100 	op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11101 				NULL, inner_reference_p);
11102 
11103       /* If the input and output modes are both the same, we are done.  */
11104       if (mode == GET_MODE (op0))
11105 	;
11106       /* If neither mode is BLKmode, and both modes are the same size
11107 	 then we can use gen_lowpart.  */
11108       else if (mode != BLKmode
11109 	       && GET_MODE (op0) != BLKmode
11110 	       && known_eq (GET_MODE_PRECISION (mode),
11111 			    GET_MODE_PRECISION (GET_MODE (op0)))
11112 	       && !COMPLEX_MODE_P (GET_MODE (op0)))
11113 	{
11114 	  if (GET_CODE (op0) == SUBREG)
11115 	    op0 = force_reg (GET_MODE (op0), op0);
11116 	  temp = gen_lowpart_common (mode, op0);
11117 	  if (temp)
11118 	    op0 = temp;
11119 	  else
11120 	    {
11121 	      if (!REG_P (op0) && !MEM_P (op0))
11122 		op0 = force_reg (GET_MODE (op0), op0);
11123 	      op0 = gen_lowpart (mode, op0);
11124 	    }
11125 	}
11126       /* If both types are integral, convert from one mode to the other.  */
11127       else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11128 	op0 = convert_modes (mode, GET_MODE (op0), op0,
11129 			     TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11130       /* If the output type is a bit-field type, do an extraction.  */
11131       else if (reduce_bit_field)
11132 	return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11133 				  TYPE_UNSIGNED (type), NULL_RTX,
11134 				  mode, mode, false, NULL);
11135       /* As a last resort, spill op0 to memory, and reload it in a
11136 	 different mode.  */
11137       else if (!MEM_P (op0))
11138 	{
11139 	  /* If the operand is not a MEM, force it into memory.  Since we
11140 	     are going to be changing the mode of the MEM, don't call
11141 	     force_const_mem for constants because we don't allow pool
11142 	     constants to change mode.  */
11143 	  tree inner_type = TREE_TYPE (treeop0);
11144 
11145 	  gcc_assert (!TREE_ADDRESSABLE (exp));
11146 
11147 	  if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11148 	    target
11149 	      = assign_stack_temp_for_type
11150 		(TYPE_MODE (inner_type),
11151 		 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11152 
11153 	  emit_move_insn (target, op0);
11154 	  op0 = target;
11155 	}
11156 
11157       /* If OP0 is (now) a MEM, we need to deal with alignment issues.  If the
11158 	 output type is such that the operand is known to be aligned, indicate
11159 	 that it is.  Otherwise, we need only be concerned about alignment for
11160 	 non-BLKmode results.  */
11161       if (MEM_P (op0))
11162 	{
11163 	  enum insn_code icode;
11164 
11165 	  if (modifier != EXPAND_WRITE
11166 	      && modifier != EXPAND_MEMORY
11167 	      && !inner_reference_p
11168 	      && mode != BLKmode
11169 	      && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11170 	    {
11171 	      /* If the target has special handling for unaligned
11172 		 loads of this mode, use it.  */
11173 	      if ((icode = optab_handler (movmisalign_optab, mode))
11174 		  != CODE_FOR_nothing)
11175 		{
11176 		  rtx reg;
11177 
11178 		  op0 = adjust_address (op0, mode, 0);
11179 		  /* We've already validated the memory, and we're creating a
11180 		     new pseudo destination.  The predicates really can't
11181 		     fail.  */
11182 		  reg = gen_reg_rtx (mode);
11183 
11184 		  /* Nor can the insn generator.  */
11185 		  rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11186 		  emit_insn (insn);
11187 		  return reg;
11188 		}
11189 	      else if (STRICT_ALIGNMENT)
11190 		{
11191 		  poly_uint64 mode_size = GET_MODE_SIZE (mode);
11192 		  poly_uint64 temp_size = mode_size;
11193 		  if (GET_MODE (op0) != BLKmode)
11194 		    temp_size = upper_bound (temp_size,
11195 					     GET_MODE_SIZE (GET_MODE (op0)));
11196 		  rtx new_rtx
11197 		    = assign_stack_temp_for_type (mode, temp_size, type);
11198 		  rtx new_with_op0_mode
11199 		    = adjust_address (new_rtx, GET_MODE (op0), 0);
11200 
11201 		  gcc_assert (!TREE_ADDRESSABLE (exp));
11202 
11203 		  if (GET_MODE (op0) == BLKmode)
11204 		    {
11205 		      rtx size_rtx = gen_int_mode (mode_size, Pmode);
11206 		      emit_block_move (new_with_op0_mode, op0, size_rtx,
11207 				       (modifier == EXPAND_STACK_PARM
11208 					? BLOCK_OP_CALL_PARM
11209 					: BLOCK_OP_NORMAL));
11210 		    }
11211 		  else
11212 		    emit_move_insn (new_with_op0_mode, op0);
11213 
11214 		  op0 = new_rtx;
11215 		}
11216 	    }
11217 
11218 	  op0 = adjust_address (op0, mode, 0);
11219 	}
11220 
11221       return op0;
11222 
11223     case MODIFY_EXPR:
11224       {
11225 	tree lhs = treeop0;
11226 	tree rhs = treeop1;
11227 	gcc_assert (ignore);
11228 
11229 	/* Check for |= or &= of a bitfield of size one into another bitfield
11230 	   of size one.  In this case, (unless we need the result of the
11231 	   assignment) we can do this more efficiently with a
11232 	   test followed by an assignment, if necessary.
11233 
11234 	   ??? At this point, we can't get a BIT_FIELD_REF here.  But if
11235 	   things change so we do, this code should be enhanced to
11236 	   support it.  */
11237 	if (TREE_CODE (lhs) == COMPONENT_REF
11238 	    && (TREE_CODE (rhs) == BIT_IOR_EXPR
11239 		|| TREE_CODE (rhs) == BIT_AND_EXPR)
11240 	    && TREE_OPERAND (rhs, 0) == lhs
11241 	    && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11242 	    && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11243 	    && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11244 	  {
11245 	    rtx_code_label *label = gen_label_rtx ();
11246 	    int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11247 	    profile_probability prob = profile_probability::uninitialized ();
11248  	    if (value)
11249  	      jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
11250  	    else
11251  	      jumpif (TREE_OPERAND (rhs, 1), label, prob);
11252 	    expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11253 			       false);
11254 	    do_pending_stack_adjust ();
11255 	    emit_label (label);
11256 	    return const0_rtx;
11257 	  }
11258 
11259 	expand_assignment (lhs, rhs, false);
11260 	return const0_rtx;
11261       }
11262 
11263     case ADDR_EXPR:
11264       return expand_expr_addr_expr (exp, target, tmode, modifier);
11265 
11266     case REALPART_EXPR:
11267       op0 = expand_normal (treeop0);
11268       return read_complex_part (op0, false);
11269 
11270     case IMAGPART_EXPR:
11271       op0 = expand_normal (treeop0);
11272       return read_complex_part (op0, true);
11273 
11274     case RETURN_EXPR:
11275     case LABEL_EXPR:
11276     case GOTO_EXPR:
11277     case SWITCH_EXPR:
11278     case ASM_EXPR:
11279       /* Expanded in cfgexpand.c.  */
11280       gcc_unreachable ();
11281 
11282     case TRY_CATCH_EXPR:
11283     case CATCH_EXPR:
11284     case EH_FILTER_EXPR:
11285     case TRY_FINALLY_EXPR:
11286       /* Lowered by tree-eh.c.  */
11287       gcc_unreachable ();
11288 
11289     case WITH_CLEANUP_EXPR:
11290     case CLEANUP_POINT_EXPR:
11291     case TARGET_EXPR:
11292     case CASE_LABEL_EXPR:
11293     case VA_ARG_EXPR:
11294     case BIND_EXPR:
11295     case INIT_EXPR:
11296     case CONJ_EXPR:
11297     case COMPOUND_EXPR:
11298     case PREINCREMENT_EXPR:
11299     case PREDECREMENT_EXPR:
11300     case POSTINCREMENT_EXPR:
11301     case POSTDECREMENT_EXPR:
11302     case LOOP_EXPR:
11303     case EXIT_EXPR:
11304     case COMPOUND_LITERAL_EXPR:
11305       /* Lowered by gimplify.c.  */
11306       gcc_unreachable ();
11307 
11308     case FDESC_EXPR:
11309       /* Function descriptors are not valid except as
11310 	 initialization constants, and should not be expanded.  */
11311       gcc_unreachable ();
11312 
11313     case WITH_SIZE_EXPR:
11314       /* WITH_SIZE_EXPR expands to its first argument.  The caller should
11315 	 have pulled out the size to use in whatever context it needed.  */
11316       return expand_expr_real (treeop0, original_target, tmode,
11317 			       modifier, alt_rtl, inner_reference_p);
11318 
11319     default:
11320       return expand_expr_real_2 (&ops, target, tmode, modifier);
11321     }
11322 }
11323 
11324 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11325    signedness of TYPE), possibly returning the result in TARGET.
11326    TYPE is known to be a partial integer type.  */
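/* A worked example, for exposition only and assuming a 32-bit SImode:
   for a 3-bit unsigned partial type, PREC is 3 and the value is reduced
   with EXP & 0x7; for the signed variant the value is shifted left by
   32 - 3 = 29 bits and then arithmetically shifted right by 29 bits,
   which sign-extends bit 2 through the rest of the register.  */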
11327 static rtx
11328 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11329 {
11330   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
11331   HOST_WIDE_INT prec = TYPE_PRECISION (type);
11332   gcc_assert ((GET_MODE (exp) == VOIDmode || GET_MODE (exp) == mode)
11333 	      && (!target || GET_MODE (target) == mode));
11334 
11335   /* For constant values, reduce using wide_int_to_tree. */
11336   if (poly_int_rtx_p (exp))
11337     {
11338       tree t = wide_int_to_tree (type, wi::to_poly_wide (exp, mode));
11339       return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11340     }
11341   else if (TYPE_UNSIGNED (type))
11342     {
11343       rtx mask = immed_wide_int_const
11344 	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11345       return expand_and (mode, exp, mask, target);
11346     }
11347   else
11348     {
11349       int count = GET_MODE_PRECISION (mode) - prec;
11350       exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11351       return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11352     }
11353 }
11354 
11355 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11356    when applied to the address of EXP produces an address known to be
11357    aligned more than BIGGEST_ALIGNMENT.  */
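/* Illustratively, the shape matched below is the offset produced by
   aligning an address by hand, i.e. something like
   (-(sizetype) &EXP) & (ALIGN - 1), with ALIGN standing for an assumed
   power of two larger than BIGGEST_ALIGNMENT, so that &EXP + OFFSET is
   ALIGN-aligned.  */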
11358 
11359 static int
11360 is_aligning_offset (const_tree offset, const_tree exp)
11361 {
11362   /* Strip off any conversions.  */
11363   while (CONVERT_EXPR_P (offset))
11364     offset = TREE_OPERAND (offset, 0);
11365 
11366   /* We must now have a BIT_AND_EXPR with a constant that is one less than
11367      a power of 2 and which is larger than BIGGEST_ALIGNMENT.  */
11368   if (TREE_CODE (offset) != BIT_AND_EXPR
11369       || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11370       || compare_tree_int (TREE_OPERAND (offset, 1),
11371 			   BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11372       || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11373     return 0;
11374 
11375   /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11376      It must be NEGATE_EXPR.  Then strip any more conversions.  */
11377   offset = TREE_OPERAND (offset, 0);
11378   while (CONVERT_EXPR_P (offset))
11379     offset = TREE_OPERAND (offset, 0);
11380 
11381   if (TREE_CODE (offset) != NEGATE_EXPR)
11382     return 0;
11383 
11384   offset = TREE_OPERAND (offset, 0);
11385   while (CONVERT_EXPR_P (offset))
11386     offset = TREE_OPERAND (offset, 0);
11387 
11388   /* This must now be the address of EXP.  */
11389   return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11390 }
11391 
11392 /* Return the tree node if ARG corresponds to a string constant, or zero
11393    if it doesn't.  If we return nonzero, set *PTR_OFFSET to the (possibly
11394    non-constant) offset in bytes within the string that ARG is accessing.
11395    If MEM_SIZE is non-zero the storage size of the memory is returned.
11396    If DECL is non-zero the constant declaration is returned if available.  */
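/* For example (illustrative only): for ARG of the form &"hello"[2] or
   "hello" + 2, the STRING_CST "hello" is returned and *PTR_OFFSET is set
   to 2; for the address of a declaration initialized from a string
   literal, the literal taken from its initializer is returned instead.  */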
11397 
11398 tree
11399 string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
11400 {
11401   tree array;
11402   STRIP_NOPS (arg);
11403 
11404   /* Non-constant index into the character array in an ARRAY_REF
11405      expression or null.  */
11406   tree varidx = NULL_TREE;
11407 
11408   poly_int64 base_off = 0;
11409 
11410   if (TREE_CODE (arg) == ADDR_EXPR)
11411     {
11412       arg = TREE_OPERAND (arg, 0);
11413       tree ref = arg;
11414       if (TREE_CODE (arg) == ARRAY_REF)
11415 	{
11416 	  tree idx = TREE_OPERAND (arg, 1);
11417 	  if (TREE_CODE (idx) != INTEGER_CST)
11418 	    {
11419 	      /* From a pointer (but not array) argument extract the variable
11420 		 index to prevent get_addr_base_and_unit_offset() from failing
11421 		 due to it.  Use it later to compute the non-constant offset
11422 		 into the string and return it to the caller.  */
11423 	      varidx = idx;
11424 	      ref = TREE_OPERAND (arg, 0);
11425 
11426 	      if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE)
11427 		return NULL_TREE;
11428 
11429 	      if (!integer_zerop (array_ref_low_bound (arg)))
11430 		return NULL_TREE;
11431 
11432 	      if (!integer_onep (array_ref_element_size (arg)))
11433 		return NULL_TREE;
11434 	    }
11435 	}
11436       array = get_addr_base_and_unit_offset (ref, &base_off);
11437       if (!array
11438 	  || (TREE_CODE (array) != VAR_DECL
11439 	      && TREE_CODE (array) != CONST_DECL
11440 	      && TREE_CODE (array) != STRING_CST))
11441 	return NULL_TREE;
11442     }
11443   else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11444     {
11445       tree arg0 = TREE_OPERAND (arg, 0);
11446       tree arg1 = TREE_OPERAND (arg, 1);
11447 
11448       tree offset;
11449       tree str = string_constant (arg0, &offset, mem_size, decl);
11450       if (!str)
11451 	{
11452 	   str = string_constant (arg1, &offset, mem_size, decl);
11453 	   arg1 = arg0;
11454 	}
11455 
11456       if (str)
11457 	{
11458 	  /* Avoid pointers to arrays (see bug 86622).  */
11459 	  if (POINTER_TYPE_P (TREE_TYPE (arg))
11460 	      && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == ARRAY_TYPE
11461 	      && !(decl && !*decl)
11462 	      && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11463 		   && mem_size && tree_fits_uhwi_p (*mem_size)
11464 		   && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11465 	    return NULL_TREE;
11466 
11467 	  tree type = TREE_TYPE (offset);
11468 	  arg1 = fold_convert (type, arg1);
11469 	  *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, arg1);
11470 	  return str;
11471 	}
11472       return NULL_TREE;
11473     }
11474   else if (TREE_CODE (arg) == SSA_NAME)
11475     {
11476       gimple *stmt = SSA_NAME_DEF_STMT (arg);
11477       if (!is_gimple_assign (stmt))
11478 	return NULL_TREE;
11479 
11480       tree rhs1 = gimple_assign_rhs1 (stmt);
11481       tree_code code = gimple_assign_rhs_code (stmt);
11482       if (code == ADDR_EXPR)
11483 	return string_constant (rhs1, ptr_offset, mem_size, decl);
11484       else if (code != POINTER_PLUS_EXPR)
11485 	return NULL_TREE;
11486 
11487       tree offset;
11488       if (tree str = string_constant (rhs1, &offset, mem_size, decl))
11489 	{
11490 	  /* Avoid pointers to arrays (see bug 86622).  */
11491 	  if (POINTER_TYPE_P (TREE_TYPE (rhs1))
11492 	      && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1))) == ARRAY_TYPE
11493 	      && !(decl && !*decl)
11494 	      && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11495 		   && mem_size && tree_fits_uhwi_p (*mem_size)
11496 		   && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11497 	    return NULL_TREE;
11498 
11499 	  tree rhs2 = gimple_assign_rhs2 (stmt);
11500 	  tree type = TREE_TYPE (offset);
11501 	  rhs2 = fold_convert (type, rhs2);
11502 	  *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, rhs2);
11503 	  return str;
11504 	}
11505       return NULL_TREE;
11506     }
11507   else if (DECL_P (arg))
11508     array = arg;
11509   else
11510     return NULL_TREE;
11511 
11512   tree offset = wide_int_to_tree (sizetype, base_off);
11513   if (varidx)
11514     {
11515       if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE)
11516 	return NULL_TREE;
11517 
11518       gcc_assert (TREE_CODE (arg) == ARRAY_REF);
11519       tree chartype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 0)));
11520       if (TREE_CODE (chartype) != INTEGER_TYPE)
11521 	return NULL_TREE;
11522 
11523       offset = fold_convert (sizetype, varidx);
11524     }
11525 
11526   if (TREE_CODE (array) == STRING_CST)
11527     {
11528       *ptr_offset = fold_convert (sizetype, offset);
11529       if (mem_size)
11530 	*mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
11531       if (decl)
11532 	*decl = NULL_TREE;
11533       gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array)))
11534 			   >= TREE_STRING_LENGTH (array));
11535       return array;
11536     }
11537 
11538   if (!VAR_P (array) && TREE_CODE (array) != CONST_DECL)
11539     return NULL_TREE;
11540 
11541   tree init = ctor_for_folding (array);
11542 
11543   /* Handle variables initialized with string literals.  */
11544   if (!init || init == error_mark_node)
11545     return NULL_TREE;
11546   if (TREE_CODE (init) == CONSTRUCTOR)
11547     {
11548       /* Convert the 64-bit constant offset to a wider type to avoid
11549 	 overflow.  */
11550       offset_int wioff;
11551       if (!base_off.is_constant (&wioff))
11552 	return NULL_TREE;
11553 
11554       wioff *= BITS_PER_UNIT;
11555       if (!wi::fits_uhwi_p (wioff))
11556 	return NULL_TREE;
11557 
11558       base_off = wioff.to_uhwi ();
11559       unsigned HOST_WIDE_INT fieldoff = 0;
11560       init = fold_ctor_reference (TREE_TYPE (arg), init, base_off, 0, array,
11561 				  &fieldoff);
11562       HOST_WIDE_INT cstoff;
11563       if (!base_off.is_constant (&cstoff))
11564 	return NULL_TREE;
11565 
11566       cstoff = (cstoff - fieldoff) / BITS_PER_UNIT;
11567       tree off = build_int_cst (sizetype, cstoff);
11568       if (varidx)
11569 	offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, off);
11570       else
11571 	offset = off;
11572     }
11573 
11574   if (!init)
11575     return NULL_TREE;
11576 
11577   *ptr_offset = offset;
11578 
11579   tree eltype = TREE_TYPE (init);
11580   tree initsize = TYPE_SIZE_UNIT (eltype);
11581   if (mem_size)
11582     *mem_size = initsize;
11583 
11584   if (decl)
11585     *decl = array;
11586 
11587   if (TREE_CODE (init) == INTEGER_CST
11588       && (TREE_CODE (TREE_TYPE (array)) == INTEGER_TYPE
11589 	  || TYPE_MAIN_VARIANT (eltype) == char_type_node))
11590     {
11591       /* For a reference to (address of) a single constant character,
11592 	 store the native representation of the character in CHARBUF.
11593 	 If the reference is to an element of an array or a member
11594 	 of a struct, only consider narrow characters until ctors
11595 	 for wide character arrays are transformed to STRING_CSTs
11596 	 like those for narrow arrays.  */
11597       unsigned char charbuf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
11598       int len = native_encode_expr (init, charbuf, sizeof charbuf, 0);
11599       if (len > 0)
11600 	{
11601 	  /* Construct a string literal with elements of ELTYPE and
11602 	     the representation above.  Then strip
11603 	     the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST.  */
11604 	  init = build_string_literal (len, (char *)charbuf, eltype);
11605 	  init = TREE_OPERAND (TREE_OPERAND (init, 0), 0);
11606 	}
11607     }
11608 
11609   if (TREE_CODE (init) != STRING_CST)
11610     return NULL_TREE;
11611 
11612   gcc_checking_assert (tree_to_shwi (initsize) >= TREE_STRING_LENGTH (init));
11613 
11614   return init;
11615 }
11616 
11617 /* Compute the modular multiplicative inverse of A modulo B
11618    using the extended Euclidean algorithm.  Assumes A and B are coprime.  */
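/* A small numeric illustration (values chosen for exposition): with
   A = 3 and B = 8, the result is 3, since 3 * 3 = 9 == 1 (mod 8).
   The caller below passes B equal to 1 << prec.  */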
11619 static wide_int
11620 mod_inv (const wide_int &a, const wide_int &b)
11621 {
11622   /* Verify the assumption.  */
11623   gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
11624 
11625   unsigned int p = a.get_precision () + 1;
11626   gcc_checking_assert (b.get_precision () + 1 == p);
11627   wide_int c = wide_int::from (a, p, UNSIGNED);
11628   wide_int d = wide_int::from (b, p, UNSIGNED);
11629   wide_int x0 = wide_int::from (0, p, UNSIGNED);
11630   wide_int x1 = wide_int::from (1, p, UNSIGNED);
11631 
11632   if (wi::eq_p (b, 1))
11633     return wide_int::from (1, p, UNSIGNED);
11634 
11635   while (wi::gt_p (c, 1, UNSIGNED))
11636     {
11637       wide_int t = d;
11638       wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
11639       c = t;
11640       wide_int s = x0;
11641       x0 = wi::sub (x1, wi::mul (q, x0));
11642       x1 = s;
11643     }
11644   if (wi::lt_p (x1, 0, SIGNED))
11645     x1 += d;
11646   return x1;
11647 }
11648 
11649 /* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
11650    is non-zero.  With C3 = (1 << (prec - 1)) | (C1 - 1), transform it:
11651    for C2 > 0, into x & C3 == C2;
11652    for C2 < 0, into x & C3 == (C2 & C3).  */
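/* A concrete instance, assuming a 32-bit precision: for signed
   x % 8 == 3, C3 is 0x80000007 and the test becomes
   (x & 0x80000007) == 3; for x % 8 == -3 it becomes
   (x & 0x80000007) == 0x80000005, since -3 & 0x80000007 is 0x80000005.  */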
11653 enum tree_code
11654 maybe_optimize_pow2p_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11655 {
11656   gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11657   tree treeop0 = gimple_assign_rhs1 (stmt);
11658   tree treeop1 = gimple_assign_rhs2 (stmt);
11659   tree type = TREE_TYPE (*arg0);
11660   scalar_int_mode mode;
11661   if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11662     return code;
11663   if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11664       || TYPE_PRECISION (type) <= 1
11665       || TYPE_UNSIGNED (type)
11666       /* Signed x % c == 0 should have been optimized into unsigned modulo
11667 	 earlier.  */
11668       || integer_zerop (*arg1)
11669       /* If c is known to be non-negative, modulo will be expanded as unsigned
11670 	 modulo.  */
11671       || get_range_pos_neg (treeop0) == 1)
11672     return code;
11673 
11674   /* x % c == d where d < 0 && d <= -c should be always false.  */
11675   if (tree_int_cst_sgn (*arg1) == -1
11676       && -wi::to_widest (treeop1) >= wi::to_widest (*arg1))
11677     return code;
11678 
11679   int prec = TYPE_PRECISION (type);
11680   wide_int w = wi::to_wide (treeop1) - 1;
11681   w |= wi::shifted_mask (0, prec - 1, true, prec);
11682   tree c3 = wide_int_to_tree (type, w);
11683   tree c4 = *arg1;
11684   if (tree_int_cst_sgn (*arg1) == -1)
11685     c4 = wide_int_to_tree (type, w & wi::to_wide (*arg1));
11686 
11687   rtx op0 = expand_normal (treeop0);
11688   treeop0 = make_tree (TREE_TYPE (treeop0), op0);
11689 
11690   bool speed_p = optimize_insn_for_speed_p ();
11691 
11692   do_pending_stack_adjust ();
11693 
11694   location_t loc = gimple_location (stmt);
11695   struct separate_ops ops;
11696   ops.code = TRUNC_MOD_EXPR;
11697   ops.location = loc;
11698   ops.type = TREE_TYPE (treeop0);
11699   ops.op0 = treeop0;
11700   ops.op1 = treeop1;
11701   ops.op2 = NULL_TREE;
11702   start_sequence ();
11703   rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11704 				EXPAND_NORMAL);
11705   rtx_insn *moinsns = get_insns ();
11706   end_sequence ();
11707 
11708   unsigned mocost = seq_cost (moinsns, speed_p);
11709   mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
11710   mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
11711 
11712   ops.code = BIT_AND_EXPR;
11713   ops.location = loc;
11714   ops.type = TREE_TYPE (treeop0);
11715   ops.op0 = treeop0;
11716   ops.op1 = c3;
11717   ops.op2 = NULL_TREE;
11718   start_sequence ();
11719   rtx mur = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11720 				EXPAND_NORMAL);
11721   rtx_insn *muinsns = get_insns ();
11722   end_sequence ();
11723 
11724   unsigned mucost = seq_cost (muinsns, speed_p);
11725   mucost += rtx_cost (mur, mode, EQ, 0, speed_p);
11726   mucost += rtx_cost (expand_normal (c4), mode, EQ, 1, speed_p);
11727 
11728   if (mocost <= mucost)
11729     {
11730       emit_insn (moinsns);
11731       *arg0 = make_tree (TREE_TYPE (*arg0), mor);
11732       return code;
11733     }
11734 
11735   emit_insn (muinsns);
11736   *arg0 = make_tree (TREE_TYPE (*arg0), mur);
11737   *arg1 = c4;
11738   return code;
11739 }
11740 
11741 /* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
11742    If C1 is odd, transform it to:
11743    (X - C2) * C3 <= C4 (or >), where
11744    C3 is the modular multiplicative inverse of C1 modulo 1<<prec and
11745    C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
11746    if C2 > ((1<<prec) - 1) % C1).
11747    If C1 is even, with S = ctz (C1) and C2 equal to 0, use
11748    ((X * C3) r>> S) <= C4, where C3 is the modular multiplicative
11749    inverse of C1>>S modulo 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
11750 
11751    For signed (X % C1) == 0, if C1 is odd, transform it to (all operations
11752    in it unsigned):
11753    (X * C3) + C4 <= 2 * C4, where
11754    C3 is the modular multiplicative inverse of (unsigned) C1 modulo 1<<prec
11755    and C4 is (((1<<(prec - 1)) - 1) / C1).
11756    If C1 is even, with S = ctz (C1), use
11757    ((X * C3) + C4) r>> S <= (C4 >> (S - 1)),
11758    where C3 is the modular multiplicative inverse of (unsigned) (C1>>S)
11759    modulo 1<<prec and C4 is ((((1<<(prec - 1)) - 1) / (C1>>S)) & (-1<<S)).
11760 
11761    See the Hacker's Delight book, section 10-17.  */
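/* A worked instance of the unsigned even case, with numbers chosen for
   exposition and a 32-bit precision assumed: for x % 10 == 0, S is 1,
   C1>>S is 5, C3 is 0xcccccccd (the inverse of 5 modulo 1<<32) and
   C4 is (0xffffffff / 5) >> 1 = 0x19999999, so the test becomes
   ((x * 0xcccccccd) r>> 1) <= 0x19999999.  */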
11762 enum tree_code
11763 maybe_optimize_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11764 {
11765   gcc_checking_assert (code == EQ_EXPR || code == NE_EXPR);
11766   gcc_checking_assert (TREE_CODE (*arg1) == INTEGER_CST);
11767 
11768   if (optimize < 2)
11769     return code;
11770 
11771   gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11772   if (stmt == NULL)
11773     return code;
11774 
11775   tree treeop0 = gimple_assign_rhs1 (stmt);
11776   tree treeop1 = gimple_assign_rhs2 (stmt);
11777   if (TREE_CODE (treeop0) != SSA_NAME
11778       || TREE_CODE (treeop1) != INTEGER_CST
11779       /* Don't optimize the undefined behavior case x % 0;
11780 	 x % 1 should have been optimized into zero, punt if
11781 	 it makes it here for whatever reason;
11782 	 x % -c should have been optimized into x % c.  */
11783       || compare_tree_int (treeop1, 2) <= 0
11784       /* Likewise x % c == d where d >= c should be always false.  */
11785       || tree_int_cst_le (treeop1, *arg1))
11786     return code;
11787 
11788   /* Unsigned x % pow2 is handled right already, for signed
11789      modulo handle it in maybe_optimize_pow2p_mod_cmp.  */
11790   if (integer_pow2p (treeop1))
11791     return maybe_optimize_pow2p_mod_cmp (code, arg0, arg1);
11792 
11793   tree type = TREE_TYPE (*arg0);
11794   scalar_int_mode mode;
11795   if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11796     return code;
11797   if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11798       || TYPE_PRECISION (type) <= 1)
11799     return code;
11800 
11801   signop sgn = UNSIGNED;
11802   /* If both operands are known to have the sign bit clear, handle
11803      even the signed modulo case as unsigned.  treeop1 is always
11804      positive >= 2, checked above.  */
11805   if (!TYPE_UNSIGNED (type) && get_range_pos_neg (treeop0) != 1)
11806     sgn = SIGNED;
11807 
11808   if (!TYPE_UNSIGNED (type))
11809     {
11810       if (tree_int_cst_sgn (*arg1) == -1)
11811 	return code;
11812       type = unsigned_type_for (type);
11813       if (!type || TYPE_MODE (type) != TYPE_MODE (TREE_TYPE (*arg0)))
11814 	return code;
11815     }
11816 
11817   int prec = TYPE_PRECISION (type);
11818   wide_int w = wi::to_wide (treeop1);
11819   int shift = wi::ctz (w);
11820   /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
11821      C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
11822      If C1 is odd, we can handle all cases by subtracting
11823      C4 below.  We could handle even the even C1 and C2 > -1U % C1 cases
11824      e.g. by testing for overflow on the subtraction, punt on that for now
11825      though.  */
11826   if ((sgn == SIGNED || shift) && !integer_zerop (*arg1))
11827     {
11828       if (sgn == SIGNED)
11829 	return code;
11830       wide_int x = wi::umod_trunc (wi::mask (prec, false, prec), w);
11831       if (wi::gtu_p (wi::to_wide (*arg1), x))
11832 	return code;
11833     }
11834 
11835   imm_use_iterator imm_iter;
11836   use_operand_p use_p;
11837   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, treeop0)
11838     {
11839       gimple *use_stmt = USE_STMT (use_p);
11840       /* Punt if treeop0 is used in the same bb in a division
11841 	 or another modulo with the same divisor.  We should expect
11842 	 the division and modulo combined together.  */
11843       if (use_stmt == stmt
11844 	  || gimple_bb (use_stmt) != gimple_bb (stmt))
11845 	continue;
11846       if (!is_gimple_assign (use_stmt)
11847 	  || (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR
11848 	      && gimple_assign_rhs_code (use_stmt) != TRUNC_MOD_EXPR))
11849 	continue;
11850       if (gimple_assign_rhs1 (use_stmt) != treeop0
11851 	  || !operand_equal_p (gimple_assign_rhs2 (use_stmt), treeop1, 0))
11852 	continue;
11853       return code;
11854     }
11855 
11856   w = wi::lrshift (w, shift);
11857   wide_int a = wide_int::from (w, prec + 1, UNSIGNED);
11858   wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
11859   wide_int m = wide_int::from (mod_inv (a, b), prec, UNSIGNED);
11860   tree c3 = wide_int_to_tree (type, m);
11861   tree c5 = NULL_TREE;
11862   wide_int d, e;
11863   if (sgn == UNSIGNED)
11864     {
11865       d = wi::divmod_trunc (wi::mask (prec, false, prec), w, UNSIGNED, &e);
11866       /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
11867 	 otherwise use < or subtract one from C4.  E.g. for
11868 	 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
11869 	 x % 3U == 1 already needs to be
11870 	 (x - 1) * 0xaaaaaaabU <= 0x55555554.  */
11871       if (!shift && wi::gtu_p (wi::to_wide (*arg1), e))
11872 	d -= 1;
11873       if (shift)
11874 	d = wi::lrshift (d, shift);
11875     }
11876   else
11877     {
11878       e = wi::udiv_trunc (wi::mask (prec - 1, false, prec), w);
11879       if (!shift)
11880 	d = wi::lshift (e, 1);
11881       else
11882 	{
11883 	  e = wi::bit_and (e, wi::mask (shift, true, prec));
11884 	  d = wi::lrshift (e, shift - 1);
11885 	}
11886       c5 = wide_int_to_tree (type, e);
11887     }
11888   tree c4 = wide_int_to_tree (type, d);
11889 
11890   rtx op0 = expand_normal (treeop0);
11891   treeop0 = make_tree (TREE_TYPE (treeop0), op0);
11892 
11893   bool speed_p = optimize_insn_for_speed_p ();
11894 
11895   do_pending_stack_adjust ();
11896 
11897   location_t loc = gimple_location (stmt);
11898   struct separate_ops ops;
11899   ops.code = TRUNC_MOD_EXPR;
11900   ops.location = loc;
11901   ops.type = TREE_TYPE (treeop0);
11902   ops.op0 = treeop0;
11903   ops.op1 = treeop1;
11904   ops.op2 = NULL_TREE;
11905   start_sequence ();
11906   rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11907 				EXPAND_NORMAL);
11908   rtx_insn *moinsns = get_insns ();
11909   end_sequence ();
11910 
11911   unsigned mocost = seq_cost (moinsns, speed_p);
11912   mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
11913   mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
11914 
11915   tree t = fold_convert_loc (loc, type, treeop0);
11916   if (!integer_zerop (*arg1))
11917     t = fold_build2_loc (loc, MINUS_EXPR, type, t, fold_convert (type, *arg1));
11918   t = fold_build2_loc (loc, MULT_EXPR, type, t, c3);
11919   if (sgn == SIGNED)
11920     t = fold_build2_loc (loc, PLUS_EXPR, type, t, c5);
11921   if (shift)
11922     {
11923       tree s = build_int_cst (NULL_TREE, shift);
11924       t = fold_build2_loc (loc, RROTATE_EXPR, type, t, s);
11925     }
11926 
11927   start_sequence ();
11928   rtx mur = expand_normal (t);
11929   rtx_insn *muinsns = get_insns ();
11930   end_sequence ();
11931 
11932   unsigned mucost = seq_cost (muinsns, speed_p);
11933   mucost += rtx_cost (mur, mode, LE, 0, speed_p);
11934   mucost += rtx_cost (expand_normal (c4), mode, LE, 1, speed_p);
11935 
11936   if (mocost <= mucost)
11937     {
11938       emit_insn (moinsns);
11939       *arg0 = make_tree (TREE_TYPE (*arg0), mor);
11940       return code;
11941     }
11942 
11943   emit_insn (muinsns);
11944   *arg0 = make_tree (type, mur);
11945   *arg1 = c4;
11946   return code == EQ_EXPR ? LE_EXPR : GT_EXPR;
11947 }
11948 
11949 /* Generate code to calculate OPS, an exploded expression describing
11950    a comparison, using a store-flag instruction, and return an rtx
11951    for the result.
11952 
11953    If TARGET is nonzero, store the result there if convenient.
11954 
11955    Return zero if there is no suitable set-flag instruction
11956    available on this machine.
11957 
11958    Once expand_expr has been called on the arguments of the comparison,
11959    we are committed to doing the store flag, since it is not safe to
11960    re-evaluate the expression.  We emit the store-flag insn by calling
11961    emit_store_flag, but only expand the arguments if we have a reason
11962    to believe that emit_store_flag will be successful.  If we think that
11963    it will, but it isn't, we have to simulate the store-flag with a
11964    set/jump/set sequence.  */
11965 
11966 static rtx
11967 do_store_flag (sepops ops, rtx target, machine_mode mode)
11968 {
11969   enum rtx_code code;
11970   tree arg0, arg1, type;
11971   machine_mode operand_mode;
11972   int unsignedp;
11973   rtx op0, op1;
11974   rtx subtarget = target;
11975   location_t loc = ops->location;
11976 
11977   arg0 = ops->op0;
11978   arg1 = ops->op1;
11979 
11980   /* Don't crash if the comparison was erroneous.  */
11981   if (arg0 == error_mark_node || arg1 == error_mark_node)
11982     return const0_rtx;
11983 
11984   type = TREE_TYPE (arg0);
11985   operand_mode = TYPE_MODE (type);
11986   unsignedp = TYPE_UNSIGNED (type);
11987 
11988   /* We won't bother with BLKmode store-flag operations because it would mean
11989      passing a lot of information to emit_store_flag.  */
11990   if (operand_mode == BLKmode)
11991     return 0;
11992 
11993   /* We won't bother with store-flag operations involving function pointers
11994      when function pointers must be canonicalized before comparisons.  */
11995   if (targetm.have_canonicalize_funcptr_for_compare ()
11996       && ((POINTER_TYPE_P (TREE_TYPE (arg0))
11997 	   && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
11998 	  || (POINTER_TYPE_P (TREE_TYPE (arg1))
11999 	      && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
12000     return 0;
12001 
12002   STRIP_NOPS (arg0);
12003   STRIP_NOPS (arg1);
12004 
12005   /* For vector typed comparisons emit code to generate the desired
12006      all-ones or all-zeros mask.  Conveniently use the VEC_COND_EXPR
12007      expander for this.  */
12008   if (TREE_CODE (ops->type) == VECTOR_TYPE)
12009     {
12010       tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
12011       if (VECTOR_BOOLEAN_TYPE_P (ops->type)
12012 	  && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
12013 	return expand_vec_cmp_expr (ops->type, ifexp, target);
12014       else
12015 	{
12016 	  tree if_true = constant_boolean_node (true, ops->type);
12017 	  tree if_false = constant_boolean_node (false, ops->type);
12018 	  return expand_vec_cond_expr (ops->type, ifexp, if_true,
12019 				       if_false, target);
12020 	}
12021     }
12022 
12023   /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
12024      into (x - C2) * C3 < C4.  */
12025   if ((ops->code == EQ_EXPR || ops->code == NE_EXPR)
12026       && TREE_CODE (arg0) == SSA_NAME
12027       && TREE_CODE (arg1) == INTEGER_CST)
12028     {
12029       enum tree_code code = maybe_optimize_mod_cmp (ops->code, &arg0, &arg1);
12030       if (code != ops->code)
12031 	{
12032 	  struct separate_ops nops = *ops;
12033 	  nops.code = ops->code = code;
12034 	  nops.op0 = arg0;
12035 	  nops.op1 = arg1;
12036 	  nops.type = TREE_TYPE (arg0);
12037 	  return do_store_flag (&nops, target, mode);
12038 	}
12039     }
12040 
12041   /* Get the rtx comparison code to use.  We know that EXP is a comparison
12042      operation of some type.  Some comparisons against 1 and -1 can be
12043      converted to comparisons with zero.  Do so here so that the tests
12044      below will be aware that we have a comparison with zero.   These
12045      tests will not catch constants in the first operand, but constants
12046      are rarely passed as the first operand.  */
12047 
12048   switch (ops->code)
12049     {
12050     case EQ_EXPR:
12051       code = EQ;
12052       break;
12053     case NE_EXPR:
12054       code = NE;
12055       break;
12056     case LT_EXPR:
12057       if (integer_onep (arg1))
12058 	arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
12059       else
12060 	code = unsignedp ? LTU : LT;
12061       break;
12062     case LE_EXPR:
12063       if (! unsignedp && integer_all_onesp (arg1))
12064 	arg1 = integer_zero_node, code = LT;
12065       else
12066 	code = unsignedp ? LEU : LE;
12067       break;
12068     case GT_EXPR:
12069       if (! unsignedp && integer_all_onesp (arg1))
12070 	arg1 = integer_zero_node, code = GE;
12071       else
12072 	code = unsignedp ? GTU : GT;
12073       break;
12074     case GE_EXPR:
12075       if (integer_onep (arg1))
12076 	arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
12077       else
12078 	code = unsignedp ? GEU : GE;
12079       break;
12080 
12081     case UNORDERED_EXPR:
12082       code = UNORDERED;
12083       break;
12084     case ORDERED_EXPR:
12085       code = ORDERED;
12086       break;
12087     case UNLT_EXPR:
12088       code = UNLT;
12089       break;
12090     case UNLE_EXPR:
12091       code = UNLE;
12092       break;
12093     case UNGT_EXPR:
12094       code = UNGT;
12095       break;
12096     case UNGE_EXPR:
12097       code = UNGE;
12098       break;
12099     case UNEQ_EXPR:
12100       code = UNEQ;
12101       break;
12102     case LTGT_EXPR:
12103       code = LTGT;
12104       break;
12105 
12106     default:
12107       gcc_unreachable ();
12108     }
12109 
12110   /* Put a constant second.  */
12111   if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
12112       || TREE_CODE (arg0) == FIXED_CST)
12113     {
12114       std::swap (arg0, arg1);
12115       code = swap_condition (code);
12116     }
12117 
12118   /* If this is an equality or inequality test of a single bit, we can
12119      do this by shifting the bit being tested to the low-order bit and
12120      masking the result with the constant 1.  If the condition was EQ,
12121      we xor it with 1.  This does not require an scc insn and is faster
12122      than an scc insn even if we have it.
12123 
12124      The code to make this transformation was moved into fold_single_bit_test,
12125      so we just call into the folder and expand its result.  */
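  /* For instance, an illustrative (x & 8) != 0 is folded into roughly
     (x >> 3) & 1, and the EQ form additionally xors the result with 1,
     as described above.  */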
12126 
12127   if ((code == NE || code == EQ)
12128       && integer_zerop (arg1)
12129       && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
12130     {
12131       gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
12132       if (srcstmt
12133 	  && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
12134 	{
12135 	  enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
12136 	  tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
12137 	  tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
12138 				       gimple_assign_rhs1 (srcstmt),
12139 				       gimple_assign_rhs2 (srcstmt));
12140 	  temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
12141 	  if (temp)
12142 	    return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
12143 	}
12144     }
12145 
12146   if (! get_subtarget (target)
12147       || GET_MODE (subtarget) != operand_mode)
12148     subtarget = 0;
12149 
12150   expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
12151 
12152   if (target == 0)
12153     target = gen_reg_rtx (mode);
12154 
12155   /* Try a cstore if possible.  */
12156   return emit_store_flag_force (target, code, op0, op1,
12157 				operand_mode, unsignedp,
12158 				(TYPE_PRECISION (ops->type) == 1
12159 				 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
12160 }
12161 
12162 /* Attempt to generate a casesi instruction.  Returns 1 if successful,
12163    0 otherwise (i.e. if there is no casesi instruction).
12164 
12165    DEFAULT_PROBABILITY is the probability of jumping to the default
12166    label.  */
12167 int
12168 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
12169 	    rtx table_label, rtx default_label, rtx fallback_label,
12170             profile_probability default_probability)
12171 {
12172   struct expand_operand ops[5];
12173   scalar_int_mode index_mode = SImode;
12174   rtx op1, op2, index;
12175 
12176   if (! targetm.have_casesi ())
12177     return 0;
12178 
12179   /* The index must be some form of integer.  Convert it to SImode.  */
12180   scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
12181   if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
12182     {
12183       rtx rangertx = expand_normal (range);
12184 
12185       /* We must handle the endpoints in the original mode.  */
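      /* Editor's note: both the subtraction and the LTU range check below
	 are performed in the wider original mode, so any index for which
	 index_expr - minval exceeds RANGE takes the default edge before
	 the value is truncated to SImode.  */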
12186       index_expr = build2 (MINUS_EXPR, index_type,
12187 			   index_expr, minval);
12188       minval = integer_zero_node;
12189       index = expand_normal (index_expr);
12190       if (default_label)
12191         emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
12192 				 omode, 1, default_label,
12193                                  default_probability);
12194       /* Now we can safely truncate.  */
12195       index = convert_to_mode (index_mode, index, 0);
12196     }
12197   else
12198     {
12199       if (omode != index_mode)
12200 	{
12201 	  index_type = lang_hooks.types.type_for_mode (index_mode, 0);
12202 	  index_expr = fold_convert (index_type, index_expr);
12203 	}
12204 
12205       index = expand_normal (index_expr);
12206     }
12207 
12208   do_pending_stack_adjust ();
12209 
12210   op1 = expand_normal (minval);
12211   op2 = expand_normal (range);
12212 
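  /* Editor's note: the operands below follow the casesi pattern's expected
     order: index, lower bound, range (upper bound minus lower bound),
     jump-table label, and default label.  */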
12213   create_input_operand (&ops[0], index, index_mode);
12214   create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
12215   create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
12216   create_fixed_operand (&ops[3], table_label);
12217   create_fixed_operand (&ops[4], (default_label
12218 				  ? default_label
12219 				  : fallback_label));
12220   expand_jump_insn (targetm.code_for_casesi, 5, ops);
12221   return 1;
12222 }
12223 
12224 /* Attempt to generate a tablejump instruction; same concept as try_casesi above.  */
12225 /* Subroutine of the next function.
12226 
12227    INDEX is the value being switched on, with the lowest value
12228    in the table already subtracted.
12229    MODE is its expected mode (needed if INDEX is constant).
12230    RANGE is the length of the jump table.
12231    TABLE_LABEL is a CODE_LABEL rtx for the table itself.
12232 
12233    DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
12234    index value is out of range.
12235    DEFAULT_PROBABILITY is the probability of jumping to
12236    the default label.  */
12237 
12238 static void
12239 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
12240 	      rtx default_label, profile_probability default_probability)
12241 {
12242   rtx temp, vector;
12243 
12244   if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
12245     cfun->cfg->max_jumptable_ents = INTVAL (range);
12246 
12247   /* Do an unsigned comparison (in the proper mode) between the index
12248      expression and the value which represents the length of the range.
12249      Since we just finished subtracting the lower bound of the range
12250      from the index expression, this comparison allows us to simultaneously
12251      check that the original index expression value is both greater than
12252      or equal to the minimum value of the range and less than or equal to
12253      the maximum value of the range.  */
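  /* Editor's example: for a case range [5, 10], INDEX already holds i - 5
     and RANGE is 5; if i < 5 the subtraction wrapped around to a large
     unsigned value, so the single GTU test below rejects both i < 5 and
     i > 10.  */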
12254 
12255   if (default_label)
12256     emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
12257 			     default_label, default_probability);
12258 
12259   /* If index is in range, it must fit in Pmode.
12260      Convert to Pmode so we can index with it.  */
12261   if (mode != Pmode)
12262     {
12263       unsigned int width;
12264 
12265       /* We know the value of INDEX is between 0 and RANGE.  If we have a
12266 	 sign-extended subreg, and RANGE does not have the sign bit set, then
12267 	 we have a value that is valid for both sign and zero extension.  In
12268 	 this case, we get better code if we sign extend.  */
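      /* Editor's note: e.g. a promoted HImode index with RANGE below 0x8000
	 has the same bit pattern under sign and zero extension for every
	 in-range value, so reusing the already sign-extended register saves
	 a zero-extension.  */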
12269       if (GET_CODE (index) == SUBREG
12270 	  && SUBREG_PROMOTED_VAR_P (index)
12271 	  && SUBREG_PROMOTED_SIGNED_P (index)
12272 	  && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
12273 	      <= HOST_BITS_PER_WIDE_INT)
12274 	  && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
12275 	index = convert_to_mode (Pmode, index, 0);
12276       else
12277 	index = convert_to_mode (Pmode, index, 1);
12278     }
12279 
12280   /* Don't let a MEM slip through, because then INDEX that comes
12281      out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
12282      and break_out_memory_refs will go to work on it and mess it up.  */
12283 #ifdef PIC_CASE_VECTOR_ADDRESS
12284   if (flag_pic && !REG_P (index))
12285     index = copy_to_mode_reg (Pmode, index);
12286 #endif
12287 
12288   /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
12289      GET_MODE_SIZE, because this indicates how large insns are.  The other
12290      uses should all be Pmode, because they are addresses.  This code
12291      could fail if addresses and insns are not the same size.  */
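  /* Editor's sketch: with 4-byte table entries (e.g. a CASE_VECTOR_MODE of
     SImode), the two statements below compute
	 addr = table_label + index * 4
     and the table entry at that address is then loaded into TEMP.  */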
12292   index = simplify_gen_binary (MULT, Pmode, index,
12293 			       gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
12294 					     Pmode));
12295   index = simplify_gen_binary (PLUS, Pmode, index,
12296 			       gen_rtx_LABEL_REF (Pmode, table_label));
12297 
12298 #ifdef PIC_CASE_VECTOR_ADDRESS
12299   if (flag_pic)
12300     index = PIC_CASE_VECTOR_ADDRESS (index);
12301   else
12302 #endif
12303     index = memory_address (CASE_VECTOR_MODE, index);
12304   temp = gen_reg_rtx (CASE_VECTOR_MODE);
12305   vector = gen_const_mem (CASE_VECTOR_MODE, index);
12306   convert_move (temp, vector, 0);
12307 
12308   emit_jump_insn (targetm.gen_tablejump (temp, table_label));
12309 
12310   /* If we are generating PIC code or if the table is PC-relative, the
12311      table and JUMP_INSN must be adjacent, so don't output a BARRIER.  */
12312   if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
12313     emit_barrier ();
12314 }
12315 
12316 int
12317 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
12318 	       rtx table_label, rtx default_label,
12319 	       profile_probability default_probability)
12320 {
12321   rtx index;
12322 
12323   if (! targetm.have_tablejump ())
12324     return 0;
12325 
12326   index_expr = fold_build2 (MINUS_EXPR, index_type,
12327 			    fold_convert (index_type, index_expr),
12328 			    fold_convert (index_type, minval));
12329   index = expand_normal (index_expr);
12330   do_pending_stack_adjust ();
12331 
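  /* Editor's note: RANGE is converted to the index's mode below so that
     do_tablejump's unsigned comparison sees operands of the same mode.  */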
12332   do_tablejump (index, TYPE_MODE (index_type),
12333 		convert_modes (TYPE_MODE (index_type),
12334 			       TYPE_MODE (TREE_TYPE (range)),
12335 			       expand_normal (range),
12336 			       TYPE_UNSIGNED (TREE_TYPE (range))),
12337 		table_label, default_label, default_probability);
12338   return 1;
12339 }
12340 
12341 /* Return a CONST_VECTOR rtx representing a vector mask for
12342    a VECTOR_CST of booleans.  */
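/* Editor's example: a four-element boolean VECTOR_CST {0, 1, 1, 0} becomes
   the mask {0, -1, -1, 0} in the vector's inner mode; every true element
   is encoded as all-ones.  */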
12343 static rtx
12344 const_vector_mask_from_tree (tree exp)
12345 {
12346   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12347   machine_mode inner = GET_MODE_INNER (mode);
12348 
12349   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12350 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
12351   unsigned int count = builder.encoded_nelts ();
12352   for (unsigned int i = 0; i < count; ++i)
12353     {
12354       tree elt = VECTOR_CST_ELT (exp, i);
12355       gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12356       if (integer_zerop (elt))
12357 	builder.quick_push (CONST0_RTX (inner));
12358       else if (integer_onep (elt)
12359 	       || integer_minus_onep (elt))
12360 	builder.quick_push (CONSTM1_RTX (inner));
12361       else
12362 	gcc_unreachable ();
12363     }
12364   return builder.build ();
12365 }
12366 
12367 /* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
12368    Return a constant scalar rtx of mode MODE in which bit X is set if element
12369    X of EXP is nonzero.  */
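/* Editor's example: the boolean vector {0, -1, -1, 0} yields the scalar
   value 0b0110, i.e. bit X is set exactly when element X is all-ones, as
   used for integer-mode vector masks.  */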
12370 static rtx
12371 const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
12372 {
12373   wide_int res = wi::zero (GET_MODE_PRECISION (mode));
12374   tree elt;
12375 
12376   /* The result has a fixed number of bits so the input must too.  */
12377   unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
12378   for (unsigned int i = 0; i < nunits; ++i)
12379     {
12380       elt = VECTOR_CST_ELT (exp, i);
12381       gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12382       if (integer_all_onesp (elt))
12383 	res = wi::set_bit (res, i);
12384       else
12385 	gcc_assert (integer_zerop (elt));
12386     }
12387 
12388   return immed_wide_int_const (res, mode);
12389 }
12390 
12391 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree.  */
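/* Editor's note: as in const_vector_mask_from_tree above, only the encoded
   elements (NPATTERNS * NELTS_PER_PATTERN of them) are pushed; the rest of
   the, possibly variable-length, vector is implied by that pattern encoding
   when build () is called.  */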
12392 static rtx
12393 const_vector_from_tree (tree exp)
12394 {
12395   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12396 
12397   if (initializer_zerop (exp))
12398     return CONST0_RTX (mode);
12399 
12400   if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
12401     return const_vector_mask_from_tree (exp);
12402 
12403   machine_mode inner = GET_MODE_INNER (mode);
12404 
12405   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12406 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
12407   unsigned int count = builder.encoded_nelts ();
12408   for (unsigned int i = 0; i < count; ++i)
12409     {
12410       tree elt = VECTOR_CST_ELT (exp, i);
12411       if (TREE_CODE (elt) == REAL_CST)
12412 	builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
12413 							  inner));
12414       else if (TREE_CODE (elt) == FIXED_CST)
12415 	builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
12416 							  inner));
12417       else
12418 	builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
12419 						  inner));
12420     }
12421   return builder.build ();
12422 }
12423 
12424 /* Build a decl for a personality function given a language prefix.  */
12425 
12426 tree
12427 build_personality_function (const char *lang)
12428 {
12429   const char *unwind_and_version;
12430   tree decl, type;
12431   char *name;
12432 
12433   switch (targetm_common.except_unwind_info (&global_options))
12434     {
12435     case UI_NONE:
12436       return NULL;
12437     case UI_SJLJ:
12438       unwind_and_version = "_sj0";
12439       break;
12440     case UI_DWARF2:
12441     case UI_TARGET:
12442       unwind_and_version = "_v0";
12443       break;
12444     case UI_SEH:
12445       unwind_and_version = "_seh0";
12446       break;
12447     default:
12448       gcc_unreachable ();
12449     }
12450 
12451   name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
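  /* Editor's note: e.g. for LANG "gxx" this yields "__gxx_personality_v0"
     with DWARF unwinding and "__gxx_personality_sj0" with setjmp/longjmp
     exceptions.  */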
12452 
12453   type = build_function_type_list (integer_type_node, integer_type_node,
12454 				   long_long_unsigned_type_node,
12455 				   ptr_type_node, ptr_type_node, NULL_TREE);
12456   decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
12457 		     get_identifier (name), type);
12458   DECL_ARTIFICIAL (decl) = 1;
12459   DECL_EXTERNAL (decl) = 1;
12460   TREE_PUBLIC (decl) = 1;
12461 
12462   /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
12463      are the flags assigned by targetm.encode_section_info.  */
12464   SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
12465 
12466   return decl;
12467 }
12468 
12469 /* Extracts the personality function of DECL and returns the corresponding
12470    libfunc.  */
12471 
12472 rtx
12473 get_personality_function (tree decl)
12474 {
12475   tree personality = DECL_FUNCTION_PERSONALITY (decl);
12476   enum eh_personality_kind pk;
12477 
12478   pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
12479   if (pk == eh_personality_none)
12480     return NULL;
12481 
12482   if (!personality
12483       && pk == eh_personality_any)
12484     personality = lang_hooks.eh_personality ();
12485 
12486   if (pk == eh_personality_lang)
12487     gcc_assert (personality != NULL_TREE);
12488 
12489   return XEXP (DECL_RTL (personality), 0);
12490 }
12491 
12492 /* Returns a tree for the size of EXP in bytes.  */
12493 
12494 static tree
12495 tree_expr_size (const_tree exp)
12496 {
12497   if (DECL_P (exp)
12498       && DECL_SIZE_UNIT (exp) != 0)
12499     return DECL_SIZE_UNIT (exp);
12500   else
12501     return size_in_bytes (TREE_TYPE (exp));
12502 }
12503 
12504 /* Return an rtx for the size in bytes of the value of EXP.  */
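/* Editor's note: for a WITH_SIZE_EXPR, operand 1 carries the (possibly
   non-constant) size of the value in operand 0, so it is used directly
   instead of recomputing the size from the type.  */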
12505 
12506 rtx
12507 expr_size (tree exp)
12508 {
12509   tree size;
12510 
12511   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12512     size = TREE_OPERAND (exp, 1);
12513   else
12514     {
12515       size = tree_expr_size (exp);
12516       gcc_assert (size);
12517       gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
12518     }
12519 
12520   return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
12521 }
12522 
12523 /* Return a wide integer for the size in bytes of the value of EXP, or -1
12524    if the size can vary or is larger than an integer.  */
12525 
12526 static HOST_WIDE_INT
12527 int_expr_size (tree exp)
12528 {
12529   tree size;
12530 
12531   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12532     size = TREE_OPERAND (exp, 1);
12533   else
12534     {
12535       size = tree_expr_size (exp);
12536       gcc_assert (size);
12537     }
12538 
12539   if (size == 0 || !tree_fits_shwi_p (size))
12540     return -1;
12541 
12542   return tree_to_shwi (size);
12543 }
12544