/* xref: /dragonfly/contrib/gcc-4.7/gcc/stor-layout.c (revision d4ef6694) */
/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"
#include "cgraph.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "gimple.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types, which is called only by a front end.  */
static int reference_types_internal = 0;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by a front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
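
/* Editor's illustration (not part of the original source): a sketch of how
   a front end might use variable_size when laying out a variable-length
   array type.  build_vla_size_example and its N_DECL parameter are
   hypothetical; the tree-building calls are the ones used elsewhere in
   this file.  Guarded by #if 0 so it does not affect the build.  */
#if 0
static tree
build_vla_size_example (tree n_decl)
{
  /* Compute N * sizeof (int) in bits; variable_size then wraps the
     non-constant result in a SAVE_EXPR so it is evaluated only once
     (unless we are at global scope or the size is self-referential).  */
  tree bits = size_binop (MULT_EXPR,
			  fold_convert (bitsizetype, n_decl),
			  TYPE_SIZE (integer_type_node));
  return variable_size (bits);
}
#endif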

/* An array of functions used for self-referential size computation.  */
static GTY(()) VEC (tree, gc) *size_functions;

/* Look inside EXPR for simple arithmetic operations involving constants.
   Return the outermost non-arithmetic or non-constant node.  */

static tree
skip_simple_constant_arithmetic (tree expr)
{
  while (true)
    {
      if (UNARY_CLASS_P (expr))
	expr = TREE_OPERAND (expr, 0);
      else if (BINARY_CLASS_P (expr))
	{
	  if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
	    expr = TREE_OPERAND (expr, 0);
	  else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
	    expr = TREE_OPERAND (expr, 1);
	  else
	    break;
	}
      else
	break;
    }

  return expr;
}
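
/* Editor's illustration (not part of the original source): for a tree of
   shape (n + 4) * 2 with n non-constant, the loop above peels the
   MULT_EXPR (its second operand is constant) and then the PLUS_EXPR,
   returning the node for n itself.  The variable below is a stand-in;
   guarded by #if 0 so it does not affect the build.  */
#if 0
{
  tree n = some_non_constant_node;	/* hypothetical */
  tree t = build2 (MULT_EXPR, sizetype,
		   build2 (PLUS_EXPR, sizetype, n, size_int (4)),
		   size_int (2));
  gcc_assert (skip_simple_constant_arithmetic (t) == n);
}
#endif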

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls and constants, as copy_tree_r does.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
	   && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      for (inner = TREE_OPERAND (*tp, 0);
	   REFERENCE_CLASS_P (inner);
	   inner = TREE_OPERAND (inner, 0))
	;

      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
	{
	  *walk_subtrees = 0;
	  return NULL_TREE;
	}
    }

  /* SAVE_EXPRs are not supposed to appear in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  VEC (tree, heap) *self_refs = NULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  VEC (tree, gc) *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR)
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (VEC_length (tree, self_refs) > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
  FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
	{
	  /* We shouldn't have true variables here.  */
	  gcc_assert (TREE_READONLY (ref));
	  subst = ref;
	}
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
	subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%u", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
	= build_decl (input_location, PARM_DECL, param_name, param_type);
      if (targetm.calls.promote_prototypes (NULL_TREE)
	  && INTEGRAL_TYPE_P (param_type)
	  && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
	DECL_ARG_TYPE (param_decl) = integer_type_node;
      else
	DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      VEC_quick_push (tree, args, ref);
    }

  VEC_free (tree, heap, self_refs);

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The two parameter lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  VEC_safe_push (tree, gc, size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
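
/* Editor's note (not part of the original source): conceptually, for a
   self-referential size such as <PLACEHOLDER_EXPR>.n * 8, the function
   above emits something like

       static bitsizetype SZ0 (bitsizetype p0) { return p0 * 8; }

   and hands back the call SZ0 (<PLACEHOLDER_EXPR>.n), so the placeholder
   is isolated behind a read-only parameter.  The name follows the "SZ%u"
   scheme used above; the exact shape of the emitted function is
   illustrative only.  */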

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
    {
      dump_function (TDI_original, fndecl);
      gimplify_function_tree (fndecl);
      dump_function (TDI_generic, fndecl);
      cgraph_finalize_function (fndecl, false);
    }

  VEC_free (tree, gc, size_functions);
}

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class MCLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider than
   MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class mclass, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
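
/* Editor's illustration (not part of the original source): typical results
   on common targets with 8-bit units; the exact mode set is
   target-dependent.  Guarded by #if 0 so it does not affect the build.  */
#if 0
  gcc_assert (mode_for_size (32, MODE_INT, 0) == SImode);
  /* No integer mode has exactly 24 value bits on most targets.  */
  gcc_assert (mode_for_size (24, MODE_INT, 0) == BLKmode);
#endif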

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!host_integerp (size, 1))
    return BLKmode;
  uhwi = tree_low_cst (size, 1);
  ui = uhwi;
  if (uhwi != ui)
    return BLKmode;
  return mode_for_size (ui, mclass, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class mclass)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
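
/* Editor's illustration (not part of the original source): the contrast
   with mode_for_size on a typical target with 16-bit HImode and 32-bit
   SImode.  Target-dependent; guarded by #if 0.  */
#if 0
  gcc_assert (mode_for_size (17, MODE_INT, 1) == BLKmode);	/* no exact match */
  gcc_assert (smallest_mode_for_size (17, MODE_INT) == SImode);	/* rounds up */
#endif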

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
	break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}

/* Find a mode that is suitable for representing a vector with
   NUNITS elements of mode INNERMODE.  Returns BLKmode if there
   is no suitable mode.  */

enum machine_mode
mode_for_vector (enum machine_mode innermode, unsigned nunits)
{
  enum machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_NUNITS (mode) == nunits
	&& GET_MODE_INNER (mode) == innermode)
      break;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (mode == VOIDmode
      && GET_MODE_CLASS (innermode) == MODE_INT)
    mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
			  MODE_INT, 0);

  if (mode == VOIDmode
      || (GET_MODE_CLASS (mode) == MODE_INT
	  && !have_regs_of_mode[mode]))
    return BLKmode;

  return mode;
}
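
/* Editor's illustration (not part of the original source): on a target
   that defines V4SImode (e.g. x86 with SSE), four SImode units map to
   V4SImode; without such a mode the function falls back to a 128-bit
   integer mode or BLKmode.  Guarded by #if 0.  */
#if 0
  enum machine_mode vmode = mode_for_vector (SImode, 4);
#endif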

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bits in
   total and has elements of type ELEM_TYPE.  */

static enum machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  unsigned HOST_WIDE_INT int_size, int_elem_size;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (host_integerp (size, 1) && host_integerp (elem_size, 1))
    {
      int_size = tree_low_cst (size, 1);
      int_elem_size = tree_low_cst (elem_size, 1);
      if (int_elem_size > 0
	  && int_size % int_elem_size == 0
	  && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
					     int_size / int_elem_size))
	limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p);
}
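
/* Editor's illustration (not part of the original source): for int a1[1],
   mode_for_array returns TYPE_MODE (integer_type_node); for int a4[4]
   (128 bits) it returns an integer mode only if one exists within
   MAX_FIXED_MODE_SIZE, or without that limit when the target reports the
   shape as supported via targetm.array_mode_supported_p; otherwise
   BLKmode.  */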

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
	DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BIT_OFFSET correctly for the field.  Set
     the size in bytes from the size in bits.  If we have already set the
     mode, don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
					  bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
#endif
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode,
	     occupying a complete byte or bytes on proper boundary,
	     and not -fstrict-volatile-bitfields.  If the latter is set,
	     we unfortunately can't check TREE_THIS_VOLATILE, as a cast
	     may make a volatile object later.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	      && flag_strict_volatile_bitfields <= 0)
	    {
	      enum machine_mode xmode
		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
	      unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

	      if (xmode != BLKmode
		  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
		  && (known_align == 0 || known_align >= xalign))
		{
		  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
		  DECL_MODE (decl) = xmode;
		  DECL_BIT_FIELD (decl) = 0;
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
	 minimum alignment.  Note that do_type_align may set
	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
	  && !old_user_align)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (e.g. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  DECL_ALIGN (decl)
	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
	}

      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
	  && compare_tree_int (size, larger_than_size) > 0)
	{
	  int size_as_int = TREE_INT_CST_LOW (size);

	  if (compare_tree_int (size, size_as_int) == 0)
	    warning (OPT_Wlarger_than_, "size of %q+D is %d bytes",
		     decl, size_as_int);
	  else
	    warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
		     decl, larger_than_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
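
/* Editor's worked example (not part of the original source): given

       struct s { int a : 8; int b : 24; };

   on a typical 32-bit target, field a (8 bits, byte-aligned) is matched
   by mode_for_size_tree to QImode, so layout_decl gives it QImode and
   clears DECL_BIT_FIELD; field b (24 bits) has no exact integer mode, so
   DECL_BIT_FIELD stays set and b continues to be extracted as a
   bit-field from its 32-bit container.  Target-dependent, for
   illustration only.  */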

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
	tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = NULL;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
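
/* Editor's sketch (not part of the original source) of the usual calling
   sequence, as driven by layout_type for record types; finish_record_layout
   is defined later in this file, beyond this excerpt.  Guarded by #if 0.  */
#if 0
{
  record_layout_info rli = start_record_layout (t);
  tree field;

  for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
    place_field (rli, field);
  finish_record_layout (rli, /*free_p=*/true);
}
#endif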

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
		     size_binop (MULT_EXPR,
				 fold_convert (bitsizetype, offset),
				 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
		     fold_convert (sizetype,
				   size_binop (TRUNC_DIV_EXPR, bitpos,
					       bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
	      tree pos)
{
  *poffset = size_binop (MULT_EXPR,
			 fold_convert (sizetype,
				       size_binop (FLOOR_DIV_EXPR, pos,
						   bitsize_int (off_align))),
			 size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
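
/* Editor's worked example (not part of the original source): with
   BITS_PER_UNIT == 8, OFFSET == 3 and BITPOS == 5 denote bit
   3 * 8 + 5 == 29, so bit_from_pos returns 29 and byte_from_pos returns 3.
   Re-splitting POS == 29 with pos_from_bit and OFF_ALIGN == 16 yields
   OFFSET == 2 bytes and BITPOS == 13, since 29 == 2 * 8 + 13 and 13 < 16.  */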

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
				      bitsize_int (off_align));

      *poffset
	= size_binop (PLUS_EXPR, *poffset,
		      size_binop (MULT_EXPR,
				  fold_convert (sizetype, extra_aligns),
				  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
	= size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}

/* Print debugging information about RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!VEC_empty (tree, rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug_vec_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
	  || ((DECL_SIZE (field) == NULL_TREE
	       || !integer_zerop (DECL_SIZE (field)))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  type_align = MAX (type_align, desired_align);
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

	  /* Targets might choose to handle unnamed and hence possibly
	     zero-width bitfields.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
			       DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
		  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
	  > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
	     / align));
}
#endif
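
/* Editor's worked example (not part of the original source): a 7-bit
   field of type int (TYPE_SIZE == ALIGN == 32) placed at bit offset 30
   gives OFFSET % ALIGN == 30, and (30 + 7 + 31) / 32 == 2 exceeds
   32 / 32 == 1, so excess_unit_span returns nonzero and the caller
   advances the field to the next 32-bit boundary.  */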

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      VEC_safe_push (tree, gc, rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
		   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
		   * (tree_low_cst (rli->offset, 1)
		      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      /* Don't warn if DECL_PACKED was set by the type.  */
	      else if (!TYPE_PACKED (rli->t))
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
	  && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
	warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;
      if (targetm.ms_bitfield_layout_p (rli->t))
	rli->prev_field = NULL;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
	  /* Enter for these packed fields only to issue a warning.  */
	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	{
	  if (DECL_PACKED (field))
	    {
	      if (warn_packed_bitfield_compat == 1)
		inform
		  (input_location,
		   "offset of packed bit-field %qD has changed in GCC 4.4",
		   field);
	    }
	  else
	    rli->bitpos = round_up (rli->bitpos, type_align);
	}

      if (! DECL_PACKED (field))
	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if it's long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields
	are packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type
     alignment, to determine alignment, since that matches the
     documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
	{
	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run".  (Note: it's nonzero declared
	     size, but equal type sizes!)  (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.)  */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && host_integerp (DECL_SIZE (rli->prev_field), 0)
	      && host_integerp (TYPE_SIZE (type), 0)
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!)  */
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

		  /* Out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		rli->remaining_in_alignment -= bitsize;
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE (field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  normalize_rli (rli);
	}

      /* If we're starting a new run of same size type bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield and the type sizes differ, or if there
	 wasn't a prior bitfield and the size of the current field is
	 nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field))))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
	      && host_integerp (DECL_SIZE (field), 1))
	    {
	      unsigned HOST_WIDE_INT bitsize
		= tree_low_cst (DECL_SIZE (field), 1);
	      unsigned HOST_WIDE_INT typesize
		= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
		    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
		       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store/extract bit-field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
	 pad the struct out to the full length of the last type.  */
      if ((DECL_CHAIN (field) == NULL
	   || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
	  && DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
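
/* Editor's worked example (not part of the original source): for

       struct s { char c; int i; int bf : 4; };

   on a typical ILP32 target without ms_bitfield layout, c lands at bit 0;
   i needs 32-bit alignment, so place_field skips 24 bits of padding
   (issuing the -Wpadded warning) and puts i at bit 32; bf follows at bit
   64.  finalize_record_size then rounds the unpadded 68 bits up to the
   32-bit record alignment, giving TYPE_SIZE == 96 bits.  */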
1480 
1481 /* Assuming that all the fields have been laid out, this function uses
1482    RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1483    indicated by RLI.  */
1484 
1485 static void
1486 finalize_record_size (record_layout_info rli)
1487 {
1488   tree unpadded_size, unpadded_size_unit;
1489 
1490   /* Now we want just byte and bit offsets, so set the offset alignment
1491      to be a byte and then normalize.  */
1492   rli->offset_align = BITS_PER_UNIT;
1493   normalize_rli (rli);
1494 
1495   /* Determine the desired alignment.  */
1496 #ifdef ROUND_TYPE_ALIGN
1497   TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1498 					  rli->record_align);
1499 #else
1500   TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
1501 #endif
1502 
1503   /* Compute the size so far.  Be sure to allow for extra bits in the
1504      size in bytes.  We have guaranteed above that it will be no more
1505      than a single byte.  */
1506   unpadded_size = rli_size_so_far (rli);
1507   unpadded_size_unit = rli_size_unit_so_far (rli);
1508   if (! integer_zerop (rli->bitpos))
1509     unpadded_size_unit
1510       = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1511 
1512   /* Round the size up to be a multiple of the required alignment.  */
1513   TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1514   TYPE_SIZE_UNIT (rli->t)
1515     = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1516 
1517   if (TREE_CONSTANT (unpadded_size)
1518       && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1519       && input_location != BUILTINS_LOCATION)
1520     warning (OPT_Wpadded, "padding struct size to alignment boundary");
1521 
1522   if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1523       && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1524       && TREE_CONSTANT (unpadded_size))
1525     {
1526       tree unpacked_size;
1527 
1528 #ifdef ROUND_TYPE_ALIGN
1529       rli->unpacked_align
1530 	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1531 #else
1532       rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1533 #endif
1534 
1535       unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1536       if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1537 	{
1538 	  if (TYPE_NAME (rli->t))
1539 	    {
1540 	      tree name;
1541 
1542 	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1543 		name = TYPE_NAME (rli->t);
1544 	      else
1545 		name = DECL_NAME (TYPE_NAME (rli->t));
1546 
1547 	      if (STRICT_ALIGNMENT)
1548 		warning (OPT_Wpacked, "packed attribute causes inefficient "
1549 			 "alignment for %qE", name);
1550 	      else
1551 		warning (OPT_Wpacked,
1552 			 "packed attribute is unnecessary for %qE", name);
1553 	    }
1554 	  else
1555 	    {
1556 	      if (STRICT_ALIGNMENT)
1557 		warning (OPT_Wpacked,
1558 			 "packed attribute causes inefficient alignment");
1559 	      else
1560 		warning (OPT_Wpacked, "packed attribute is unnecessary");
1561 	    }
1562 	}
1563     }
1564 }
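
/* Worked example (editorial note, assuming a target with 4-byte int
   alignment): for

     struct S { int i; char c; };

   the unpadded size is 5 bytes while TYPE_ALIGN is 32 bits, so round_up
   yields TYPE_SIZE_UNIT == 8; the padded and unpadded sizes differ, and
   -Wpadded reports "padding struct size to alignment boundary".  */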
1565 
1566 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */
1567 
1568 void
1569 compute_record_mode (tree type)
1570 {
1571   tree field;
1572   enum machine_mode mode = VOIDmode;
1573 
1574   /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1575      However, if possible, we use a mode that fits in a register
1576      instead, in order to allow for better optimization down the
1577      line.  */
1578   SET_TYPE_MODE (type, BLKmode);
1579 
1580   if (! host_integerp (TYPE_SIZE (type), 1))
1581     return;
1582 
1583   /* A record which has any BLKmode members must itself be
1584      BLKmode; it can't go in a register.  Unless the member is
1585      BLKmode only because it isn't aligned.  */
1586   for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1587     {
1588       if (TREE_CODE (field) != FIELD_DECL)
1589 	continue;
1590 
1591       if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1592 	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1593 	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1594 	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1595 		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1596 	  || ! host_integerp (bit_position (field), 1)
1597 	  || DECL_SIZE (field) == 0
1598 	  || ! host_integerp (DECL_SIZE (field), 1))
1599 	return;
1600 
1601       /* If this field is the whole struct, remember its mode so
1602 	 that, say, we can put a double in a class into a DF
1603 	 register instead of forcing it to live in the stack.  */
1604       if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
1605 	mode = DECL_MODE (field);
1606 
1607 #ifdef MEMBER_TYPE_FORCES_BLK
1608       /* With some targets, e.g. c4x, it is sub-optimal
1609 	 to access an aligned BLKmode structure as a scalar.  */
1610 
1611       if (MEMBER_TYPE_FORCES_BLK (field, mode))
1612 	return;
1613 #endif /* MEMBER_TYPE_FORCES_BLK  */
1614     }
1615 
1616   /* If we only have one real field, use its mode if that mode's size
1617      matches the type's size.  This only applies to RECORD_TYPE; it
1618      does not apply to unions.  */
1619   if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
1620       && host_integerp (TYPE_SIZE (type), 1)
1621       && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
1622     SET_TYPE_MODE (type, mode);
1623   else
1624     SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
1625 
1626   /* If structure's known alignment is less than what the scalar
1627      mode would need, and it matters, then stick with BLKmode.  */
1628   if (TYPE_MODE (type) != BLKmode
1629       && STRICT_ALIGNMENT
1630       && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1631 	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
1632     {
1633       /* If this is the only reason this type is BLKmode, then
1634 	 don't force containing types to be BLKmode.  */
1635       TYPE_NO_FORCE_BLK (type) = 1;
1636       SET_TYPE_MODE (type, BLKmode);
1637     }
1638 }
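
/* Illustrative sketch (editorial note, not part of GCC): for

     struct D { double d; };

   the single field spans the whole record, so MODE becomes the field's
   DFmode; GET_MODE_BITSIZE (DFmode) matches the 64-bit TYPE_SIZE, so the
   record itself gets DFmode and can live in a floating-point register
   instead of being forced into memory (assuming a target with 64-bit
   doubles and no strict-alignment objection).  */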
1639 
1640 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1641    out.  */
1642 
1643 static void
1644 finalize_type_size (tree type)
1645 {
1646   /* Normally, use the alignment corresponding to the mode chosen.
1647      However, where strict alignment is not required, avoid
1648      over-aligning structures, since most compilers do not do this
1649      alignment.  */
1650 
1651   if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1652       && (STRICT_ALIGNMENT
1653 	  || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1654 	      && TREE_CODE (type) != QUAL_UNION_TYPE
1655 	      && TREE_CODE (type) != ARRAY_TYPE)))
1656     {
1657       unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1658 
1659       /* Don't override a larger alignment requirement coming from a user
1660 	 alignment of one of the fields.  */
1661       if (mode_align >= TYPE_ALIGN (type))
1662 	{
1663 	  TYPE_ALIGN (type) = mode_align;
1664 	  TYPE_USER_ALIGN (type) = 0;
1665 	}
1666     }
1667 
1668   /* Do machine-dependent extra alignment.  */
1669 #ifdef ROUND_TYPE_ALIGN
1670   TYPE_ALIGN (type)
1671     = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1672 #endif
1673 
1674   /* If we failed to find a simple way to calculate the unit size
1675      of the type, find it by division.  */
1676   if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1677     /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
1678        result will fit in sizetype.  We will get more efficient code using
1679        sizetype, so we force a conversion.  */
1680     TYPE_SIZE_UNIT (type)
1681       = fold_convert (sizetype,
1682 		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1683 				  bitsize_unit_node));
1684 
1685   if (TYPE_SIZE (type) != 0)
1686     {
1687       TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1688       TYPE_SIZE_UNIT (type)
1689 	= round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1690     }
1691 
1692   /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
1693   if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1694     TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1695   if (TYPE_SIZE_UNIT (type) != 0
1696       && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1697     TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1698 
1699   /* Also layout any other variants of the type.  */
1700   if (TYPE_NEXT_VARIANT (type)
1701       || type != TYPE_MAIN_VARIANT (type))
1702     {
1703       tree variant;
1704       /* Record layout info of this variant.  */
1705       tree size = TYPE_SIZE (type);
1706       tree size_unit = TYPE_SIZE_UNIT (type);
1707       unsigned int align = TYPE_ALIGN (type);
1708       unsigned int user_align = TYPE_USER_ALIGN (type);
1709       enum machine_mode mode = TYPE_MODE (type);
1710 
1711       /* Copy it into all variants.  */
1712       for (variant = TYPE_MAIN_VARIANT (type);
1713 	   variant != 0;
1714 	   variant = TYPE_NEXT_VARIANT (variant))
1715 	{
1716 	  TYPE_SIZE (variant) = size;
1717 	  TYPE_SIZE_UNIT (variant) = size_unit;
1718 	  TYPE_ALIGN (variant) = align;
1719 	  TYPE_USER_ALIGN (variant) = user_align;
1720 	  SET_TYPE_MODE (variant, mode);
1721 	}
1722     }
1723 }
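
/* Worked example (editorial note, not part of GCC): if a front end
   leaves TYPE_SIZE_UNIT unset for a type whose TYPE_SIZE is 40 bits,
   the FLOOR_DIV_EXPR by bitsize_unit_node above yields 40 / 8 == 5
   bytes in sizetype; with a 32-bit TYPE_ALIGN the rounding step then
   produces TYPE_SIZE == 64 bits and TYPE_SIZE_UNIT == 8 bytes.  */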
1724 
1725 /* Return a new underlying object for a bitfield started with FIELD.  */
1726 
1727 static tree
1728 start_bitfield_representative (tree field)
1729 {
1730   tree repr = make_node (FIELD_DECL);
1731   DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1732   /* Force the representative to begin at a BITS_PER_UNIT aligned
1733      boundary - C++ may use tail-padding of a base object to
1734      continue packing bits so the bitfield region does not start
1735      at bit zero (see g++.dg/abi/bitfield5.C for example).
1736      Unallocated bits may happen for other reasons as well,
1737      for example Ada which allows explicit bit-granular structure layout.  */
1738   DECL_FIELD_BIT_OFFSET (repr)
1739     = size_binop (BIT_AND_EXPR,
1740 		  DECL_FIELD_BIT_OFFSET (field),
1741 		  bitsize_int (~(BITS_PER_UNIT - 1)));
1742   SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
1743   DECL_SIZE (repr) = DECL_SIZE (field);
1744   DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
1745   DECL_PACKED (repr) = DECL_PACKED (field);
1746   DECL_CONTEXT (repr) = DECL_CONTEXT (field);
1747   return repr;
1748 }
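
/* Worked example (editorial note, not part of GCC): with
   BITS_PER_UNIT == 8 the BIT_AND_EXPR above masks with ~7, so a field
   whose DECL_FIELD_BIT_OFFSET is 11 (e.g. because C++ packed bits into
   a base class's tail padding) yields 11 & ~7 == 8, i.e. the
   representative starts at the enclosing byte boundary.  */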
1749 
1750 /* Finish up a bitfield group whose underlying object REPR was started
1751    by start_bitfield_representative; FIELD is the group's last field.  */
1752 
1753 static void
1754 finish_bitfield_representative (tree repr, tree field)
1755 {
1756   unsigned HOST_WIDE_INT bitsize, maxbitsize;
1757   enum machine_mode mode;
1758   tree nextf, size;
1759 
1760   size = size_diffop (DECL_FIELD_OFFSET (field),
1761 		      DECL_FIELD_OFFSET (repr));
1762   gcc_assert (host_integerp (size, 1));
1763   bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT
1764 	     + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
1765 	     - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)
1766 	     + tree_low_cst (DECL_SIZE (field), 1));
1767 
1768   /* Round up bitsize to multiples of BITS_PER_UNIT.  */
1769   bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1770 
1771   /* Now nothing tells us how to pad out bitsize ...  */
1772   nextf = DECL_CHAIN (field);
1773   while (nextf && TREE_CODE (nextf) != FIELD_DECL)
1774     nextf = DECL_CHAIN (nextf);
1775   if (nextf)
1776     {
1777       tree maxsize;
1778       /* If there was an error, the field may not be laid out
1779          correctly.  Don't bother to do anything.  */
1780       if (TREE_TYPE (nextf) == error_mark_node)
1781 	return;
1782       maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
1783 			     DECL_FIELD_OFFSET (repr));
1784       if (host_integerp (maxsize, 1))
1785 	{
1786 	  maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
1787 			+ tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1)
1788 			- tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
1789 	  /* If the group ends within a bitfield, nextf does not need to be
1790 	     aligned to BITS_PER_UNIT.  Thus round up.  */
1791 	  maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
1792 	}
1793       else
1794 	maxbitsize = bitsize;
1795     }
1796   else
1797     {
1798       /* ???  If you consider that tail-padding of this struct might be
1799          re-used when deriving from it, we cannot really do the following
1800 	 and thus need to set maxsize to bitsize?  Also we cannot
1801 	 generally rely on maxsize to fold to an integer constant, so
1802 	 use bitsize as a fallback for this case.  */
1803       tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)),
1804 				  DECL_FIELD_OFFSET (repr));
1805       if (host_integerp (maxsize, 1))
1806 	maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT
1807 		      - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1));
1808       else
1809 	maxbitsize = bitsize;
1810     }
1811 
1812   /* The assertion below holds because we never artificially break up
1813      the representative in the middle of a large bitfield with
1814      different, possibly overlapping representatives, and because
1815      all representatives start at a byte offset.  */
1816   gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
1817 
1818   /* Find the smallest nice mode to use.  */
1819   for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1820        mode = GET_MODE_WIDER_MODE (mode))
1821     if (GET_MODE_BITSIZE (mode) >= bitsize)
1822       break;
1823   if (mode != VOIDmode
1824       && (GET_MODE_BITSIZE (mode) > maxbitsize
1825 	  || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE))
1826     mode = VOIDmode;
1827 
1828   if (mode == VOIDmode)
1829     {
1830       /* We really want a BLKmode representative only as a last resort,
1831          considering the member b in
1832 	   struct { int a : 7; int b : 17; int c; } __attribute__((packed));
1833 	 Otherwise we simply want to split the representative up
1834 	 allowing for overlaps within the bitfield region as required for
1835 	   struct { int a : 7; int b : 7;
1836 		    int c : 10; int d; } __attribute__((packed));
1837 	 [0, 15] HImode for a and b, [8, 23] HImode for c.  */
1838       DECL_SIZE (repr) = bitsize_int (bitsize);
1839       DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
1840       DECL_MODE (repr) = BLKmode;
1841       TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
1842 						 bitsize / BITS_PER_UNIT);
1843     }
1844   else
1845     {
1846       unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
1847       DECL_SIZE (repr) = bitsize_int (modesize);
1848       DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
1849       DECL_MODE (repr) = mode;
1850       TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
1851     }
1852 
1853   /* Remember whether the bitfield group is at the end of the
1854      structure or not.  */
1855   DECL_CHAIN (repr) = nextf;
1856 }
1857 
1858 /* Compute and set FIELD_DECLs for the underlying objects we should
1859    use for bitfield access for the structure laid out with RLI.  */
1860 
1861 static void
1862 finish_bitfield_layout (record_layout_info rli)
1863 {
1864   tree field, prev;
1865   tree repr = NULL_TREE;
1866 
1867   /* Unions would be special: for the ease of type-punning optimizations
1868      we could use the underlying type as a hint for the representative
1869      if the bitfield would fit and the representative would not exceed
1870      the union in size.  */
1871   if (TREE_CODE (rli->t) != RECORD_TYPE)
1872     return;
1873 
1874   for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t);
1875        field; field = DECL_CHAIN (field))
1876     {
1877       if (TREE_CODE (field) != FIELD_DECL)
1878 	continue;
1879 
1880       /* In the C++ memory model, consecutive bit fields in a structure are
1881 	 considered one memory location and updating a memory location
1882 	 may not store into adjacent memory locations.  */
1883       if (!repr
1884 	  && DECL_BIT_FIELD_TYPE (field))
1885 	{
1886 	  /* Start new representative.  */
1887 	  repr = start_bitfield_representative (field);
1888 	}
1889       else if (repr
1890 	       && ! DECL_BIT_FIELD_TYPE (field))
1891 	{
1892 	  /* Finish off new representative.  */
1893 	  finish_bitfield_representative (repr, prev);
1894 	  repr = NULL_TREE;
1895 	}
1896       else if (DECL_BIT_FIELD_TYPE (field))
1897 	{
1898 	  gcc_assert (repr != NULL_TREE);
1899 
1900 	  /* Zero-size bitfields finish off a representative and
1901 	     do not have a representative themselves.  This is
1902 	     required by the C++ memory model.  */
1903 	  if (integer_zerop (DECL_SIZE (field)))
1904 	    {
1905 	      finish_bitfield_representative (repr, prev);
1906 	      repr = NULL_TREE;
1907 	    }
1908 
1909 	  /* We assume that the DECL_FIELD_OFFSETs of the representative
1910 	     and of each bitfield member are either both constants or equal.
1911 	     This is because we need to be able to compute the bit-offset
1912 	     of each field relative to the representative in get_bit_range
1913 	     during RTL expansion.
1914 	     If these constraints are not met, simply force a new
1915 	     representative to be generated.  That will at most
1916 	     generate worse code but still maintain correctness with
1917 	     respect to the C++ memory model.  */
1918 	  else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1)
1919 		      && host_integerp (DECL_FIELD_OFFSET (field), 1))
1920 		     || operand_equal_p (DECL_FIELD_OFFSET (repr),
1921 					 DECL_FIELD_OFFSET (field), 0)))
1922 	    {
1923 	      finish_bitfield_representative (repr, prev);
1924 	      repr = start_bitfield_representative (field);
1925 	    }
1926 	}
1927       else
1928 	continue;
1929 
1930       if (repr)
1931 	DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
1932 
1933       prev = field;
1934     }
1935 
1936   if (repr)
1937     finish_bitfield_representative (repr, prev);
1938 }
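
/* Illustrative sketch (editorial note, not part of GCC): for

     struct S { int a : 7; int b : 9; char c; };

   the loop starts a representative at 'a', keeps it open across the
   bitfield 'b', and finishes it on reaching the non-bitfield 'c'; 'a'
   and 'b' then share one 16-bit representative (HImode on common
   targets), so a store to 'b' can never touch the distinct memory
   location 'c', as the C++ memory model requires.  */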
1939 
1940 /* Do all of the work required to layout the type indicated by RLI,
1941    once the fields have been laid out.  This function will call `free'
1942    for RLI, unless FREE_P is false.  Passing false for FREE_P is
1943    bad practice; that option only exists to support the
1944    G++ 3.2 ABI.  */
1945 
1946 void
1947 finish_record_layout (record_layout_info rli, int free_p)
1948 {
1949   tree variant;
1950 
1951   /* Compute the final size.  */
1952   finalize_record_size (rli);
1953 
1954   /* Compute the TYPE_MODE for the record.  */
1955   compute_record_mode (rli->t);
1956 
1957   /* Perform any last tweaks to the TYPE_SIZE, etc.  */
1958   finalize_type_size (rli->t);
1959 
1960   /* Compute bitfield representatives.  */
1961   finish_bitfield_layout (rli);
1962 
1963   /* Propagate TYPE_PACKED to variants.  With C++ templates,
1964      handle_packed_attribute is too early to do this.  */
1965   for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1966        variant = TYPE_NEXT_VARIANT (variant))
1967     TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1968 
1969   /* Lay out any static members.  This is done now because their type
1970      may use the record's type.  */
1971   while (!VEC_empty (tree, rli->pending_statics))
1972     layout_decl (VEC_pop (tree, rli->pending_statics), 0);
1973 
1974   /* Clean up.  */
1975   if (free_p)
1976     {
1977       VEC_free (tree, gc, rli->pending_statics);
1978       free (rli);
1979     }
1980 }
1981 
1982 
1983 /* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
1984    NAME, its fields are chained in reverse on FIELDS.
1985 
1986    If ALIGN_TYPE is non-null, it is given the same alignment as
1987    ALIGN_TYPE.  */
1988 
1989 void
1990 finish_builtin_struct (tree type, const char *name, tree fields,
1991 		       tree align_type)
1992 {
1993   tree tail, next;
1994 
1995   for (tail = NULL_TREE; fields; tail = fields, fields = next)
1996     {
1997       DECL_FIELD_CONTEXT (fields) = type;
1998       next = DECL_CHAIN (fields);
1999       DECL_CHAIN (fields) = tail;
2000     }
2001   TYPE_FIELDS (type) = tail;
2002 
2003   if (align_type)
2004     {
2005       TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
2006       TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2007     }
2008 
2009   layout_type (type);
2010 #if 0 /* not yet, should get fixed properly later */
2011   TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2012 #else
2013   TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2014 				 TYPE_DECL, get_identifier (name), type);
2015 #endif
2016   TYPE_STUB_DECL (type) = TYPE_NAME (type);
2017   layout_decl (TYPE_NAME (type), 0);
2018 }
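
/* Usage sketch (editorial note; hypothetical type and field names, not
   part of GCC).  Because FIELDS is chained in reverse, laying out
   struct __my_pair { void *ptr; unsigned int len; } means passing the
   'len' field first:  */
#if 0 /* illustrative only */
{
  tree type = lang_hooks.types.make_type (RECORD_TYPE);
  tree f_ptr = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("ptr"), ptr_type_node);
  tree f_len = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			   get_identifier ("len"), unsigned_type_node);

  /* Chain in reverse of the desired layout: len, then ptr.  */
  DECL_CHAIN (f_len) = f_ptr;
  finish_builtin_struct (type, "__my_pair", f_len, NULL_TREE);
}
#endif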
2019 
2020 /* Calculate the mode, size, and alignment for TYPE.
2021    For an array type, calculate the element separation as well.
2022    Record TYPE on the chain of permanent or temporary types
2023    so that dbxout will find out about it.
2024 
2025    TYPE_SIZE of a type is nonzero if the type has been laid out already.
2026    layout_type does nothing on such a type.
2027 
2028    If the type is incomplete, its TYPE_SIZE remains zero.  */
2029 
2030 void
2031 layout_type (tree type)
2032 {
2033   gcc_assert (type);
2034 
2035   if (type == error_mark_node)
2036     return;
2037 
2038   /* Do nothing if type has been laid out before.  */
2039   if (TYPE_SIZE (type))
2040     return;
2041 
2042   switch (TREE_CODE (type))
2043     {
2044     case LANG_TYPE:
2045       /* This kind of type is the responsibility
2046 	 of the language-specific code.  */
2047       gcc_unreachable ();
2048 
2049     case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
2050       if (TYPE_PRECISION (type) == 0)
2051 	TYPE_PRECISION (type) = 1; /* default to a precision of one bit.  */
2052 
2053       /* ... fall through ...  */
2054 
2055     case INTEGER_TYPE:
2056     case ENUMERAL_TYPE:
2057       if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
2058 	  && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
2059 	TYPE_UNSIGNED (type) = 1;
2060 
2061       SET_TYPE_MODE (type,
2062 		     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
2063       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2064       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2065       break;
2066 
2067     case REAL_TYPE:
2068       SET_TYPE_MODE (type,
2069 		     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
2070       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2071       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2072       break;
2073 
2074     case FIXED_POINT_TYPE:
2075       /* TYPE_MODE (type) has been set already.  */
2076       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2077       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2078       break;
2079 
2080     case COMPLEX_TYPE:
2081       TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2082       SET_TYPE_MODE (type,
2083 		     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
2084 				    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2085 				     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
2086 				     0));
2087       TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2088       TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2089       break;
2090 
2091     case VECTOR_TYPE:
2092       {
2093 	int nunits = TYPE_VECTOR_SUBPARTS (type);
2094 	tree innertype = TREE_TYPE (type);
2095 
2096 	gcc_assert (!(nunits & (nunits - 1)));
2097 
2098 	/* Find an appropriate mode for the vector type.  */
2099 	if (TYPE_MODE (type) == VOIDmode)
2100 	  SET_TYPE_MODE (type,
2101 			 mode_for_vector (TYPE_MODE (innertype), nunits));
2102 
2103 	TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2104         TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2105 	TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2106 					         TYPE_SIZE_UNIT (innertype),
2107 					         size_int (nunits));
2108 	TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
2109 					    bitsize_int (nunits));
2110 
2111 	/* For vector types, we do not default to the mode's alignment.
2112 	   Instead, query a target hook, defaulting to natural alignment.
2113 	   This prevents ABI changes depending on whether or not native
2114 	   vector modes are supported.  */
2115 	TYPE_ALIGN (type) = targetm.vector_alignment (type);
2116 
2117 	/* However, if the underlying mode requires a bigger alignment than
2118 	   what the target hook provides, we cannot use the mode.  For now,
2119 	   simply reject that case.  */
2120 	gcc_assert (TYPE_ALIGN (type)
2121 		    >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2122         break;
2123       }
2124 
2125     case VOID_TYPE:
2126       /* This is an incomplete type and so doesn't have a size.  */
2127       TYPE_ALIGN (type) = 1;
2128       TYPE_USER_ALIGN (type) = 0;
2129       SET_TYPE_MODE (type, VOIDmode);
2130       break;
2131 
2132     case OFFSET_TYPE:
2133       TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2134       TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
2135       /* A pointer might be MODE_PARTIAL_INT,
2136 	 but ptrdiff_t must be integral.  */
2137       SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
2138       TYPE_PRECISION (type) = POINTER_SIZE;
2139       break;
2140 
2141     case FUNCTION_TYPE:
2142     case METHOD_TYPE:
2143       /* It's hard to see what the mode and size of a function ought to
2144 	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2145 	 make it consistent with that.  */
2146       SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
2147       TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2148       TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2149       break;
2150 
2151     case POINTER_TYPE:
2152     case REFERENCE_TYPE:
2153       {
2154 	enum machine_mode mode = TYPE_MODE (type);
2155 	if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
2156 	  {
2157 	    addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
2158 	    mode = targetm.addr_space.address_mode (as);
2159 	  }
2160 
2161 	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2162 	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2163 	TYPE_UNSIGNED (type) = 1;
2164 	TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
2165       }
2166       break;
2167 
2168     case ARRAY_TYPE:
2169       {
2170 	tree index = TYPE_DOMAIN (type);
2171 	tree element = TREE_TYPE (type);
2172 
2173 	build_pointer_type (element);
2174 
2175 	/* We need to know both bounds in order to compute the size.  */
2176 	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2177 	    && TYPE_SIZE (element))
2178 	  {
2179 	    tree ub = TYPE_MAX_VALUE (index);
2180 	    tree lb = TYPE_MIN_VALUE (index);
2181 	    tree element_size = TYPE_SIZE (element);
2182 	    tree length;
2183 
2184 	    /* Make sure that an array of zero-sized elements is zero-sized
2185 	       regardless of its extent.  */
2186 	    if (integer_zerop (element_size))
2187 	      length = size_zero_node;
2188 
2189 	    /* The computation should happen in the original signedness so
2190 	       that (possible) negative values are handled appropriately
2191 	       when determining overflow.  */
2192 	    else
2193 	      length
2194 		= fold_convert (sizetype,
2195 				size_binop (PLUS_EXPR,
2196 					    build_int_cst (TREE_TYPE (lb), 1),
2197 					    size_binop (MINUS_EXPR, ub, lb)));
2198 
2199 	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2200 					   fold_convert (bitsizetype,
2201 							 length));
2202 
2203 	    /* If we know the size of the element, calculate the total size
2204 	       directly, rather than doing the division below.  This
2205 	       optimization helps Fortran assumed-size arrays (where the
2206 	       size of the array is determined at runtime) substantially.  */
2207 	    if (TYPE_SIZE_UNIT (element))
2208 	      TYPE_SIZE_UNIT (type)
2209 		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2210 	  }
2211 
2212 	/* Now round the alignment and size,
2213 	   using machine-dependent criteria if any.  */
2214 
2215 #ifdef ROUND_TYPE_ALIGN
2216 	TYPE_ALIGN (type)
2217 	  = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
2218 #else
2219 	TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
2220 #endif
2221 	TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2222 	SET_TYPE_MODE (type, BLKmode);
2223 	if (TYPE_SIZE (type) != 0
2224 #ifdef MEMBER_TYPE_FORCES_BLK
2225 	    && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
2226 #endif
2227 	    /* BLKmode elements force BLKmode aggregate;
2228 	       else extract/store fields may lose.  */
2229 	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2230 		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2231 	  {
2232 	    SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2233 						 TYPE_SIZE (type)));
2234 	    if (TYPE_MODE (type) != BLKmode
2235 		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2236 		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2237 	      {
2238 		TYPE_NO_FORCE_BLK (type) = 1;
2239 		SET_TYPE_MODE (type, BLKmode);
2240 	      }
2241 	  }
2242 	/* When the element size is constant, check that it is at least as
2243 	   large as the element alignment.  */
2244 	if (TYPE_SIZE_UNIT (element)
2245 	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2246 	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2247 	       TYPE_ALIGN_UNIT.  */
2248 	    && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2249 	    && !integer_zerop (TYPE_SIZE_UNIT (element))
2250 	    && compare_tree_int (TYPE_SIZE_UNIT (element),
2251 			  	 TYPE_ALIGN_UNIT (element)) < 0)
2252 	  error ("alignment of array elements is greater than element size");
2253 	break;
2254       }
2255 
2256     case RECORD_TYPE:
2257     case UNION_TYPE:
2258     case QUAL_UNION_TYPE:
2259       {
2260 	tree field;
2261 	record_layout_info rli;
2262 
2263 	/* Initialize the layout information.  */
2264 	rli = start_record_layout (type);
2265 
2266 	/* If this is a QUAL_UNION_TYPE, we want to process the fields
2267 	   in the reverse order in building the COND_EXPR that denotes
2268 	   its size.  We reverse them again later.  */
2269 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
2270 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2271 
2272 	/* Place all the fields.  */
2273 	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2274 	  place_field (rli, field);
2275 
2276 	if (TREE_CODE (type) == QUAL_UNION_TYPE)
2277 	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2278 
2279 	/* Finish laying out the record.  */
2280 	finish_record_layout (rli, /*free_p=*/true);
2281       }
2282       break;
2283 
2284     default:
2285       gcc_unreachable ();
2286     }
2287 
2288   /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
2289      records and unions, finish_record_layout already called this
2290      function.  */
2291   if (TREE_CODE (type) != RECORD_TYPE
2292       && TREE_CODE (type) != UNION_TYPE
2293       && TREE_CODE (type) != QUAL_UNION_TYPE)
2294     finalize_type_size (type);
2295 
2296   /* We should never see alias sets on incomplete aggregates.  And we
2297      should never call layout_type on already complete aggregates.  */
2298   if (AGGREGATE_TYPE_P (type))
2299     gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2300 }
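
/* Worked example (editorial note, not part of GCC): for the type of

     int a[10];

   the ARRAY_TYPE case computes length == 10 from the domain bounds and
   multiplies it by the 32-bit element size, giving TYPE_SIZE == 320
   bits and TYPE_SIZE_UNIT == 40 bytes, with TYPE_ALIGN taken from the
   element type; a second call on the same type returns immediately
   because TYPE_SIZE is already set.  */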
2301 
2302 /* Vector types need to re-check the target flags each time we report
2303    the machine mode.  We need to do this because attribute target can
2304    change the result of vector_mode_supported_p and have_regs_of_mode
2305    on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
2306    change on a per-function basis.  */
2307 /* ??? Possibly a better solution is to run through all the types
2308    referenced by a function and re-compute the TYPE_MODE once, rather
2309    than make the TYPE_MODE macro call a function.  */
2310 
2311 enum machine_mode
2312 vector_type_mode (const_tree t)
2313 {
2314   enum machine_mode mode;
2315 
2316   gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
2317 
2318   mode = t->type_common.mode;
2319   if (VECTOR_MODE_P (mode)
2320       && (!targetm.vector_mode_supported_p (mode)
2321 	  || !have_regs_of_mode[mode]))
2322     {
2323       enum machine_mode innermode = TREE_TYPE (t)->type_common.mode;
2324 
2325       /* For integers, try mapping it to a same-sized scalar mode.  */
2326       if (GET_MODE_CLASS (innermode) == MODE_INT)
2327 	{
2328 	  mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
2329 				* GET_MODE_BITSIZE (innermode), MODE_INT, 0);
2330 
2331 	  if (mode != VOIDmode && have_regs_of_mode[mode])
2332 	    return mode;
2333 	}
2334 
2335       return BLKmode;
2336     }
2337 
2338   return mode;
2339 }
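
/* Illustrative sketch (editorial note, not part of GCC): if the current
   function is compiled without support for the target's vector unit, a
   vector of 4 x 32-bit integers whose recorded mode is a vector mode
   falls back here to the equally sized scalar integer mode (a 128-bit
   mode, if the target has registers for it), or else to BLKmode.  */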
2340 
2341 /* Create and return a type for signed integers of PRECISION bits.  */
2342 
2343 tree
2344 make_signed_type (int precision)
2345 {
2346   tree type = make_node (INTEGER_TYPE);
2347 
2348   TYPE_PRECISION (type) = precision;
2349 
2350   fixup_signed_type (type);
2351   return type;
2352 }
2353 
2354 /* Create and return a type for unsigned integers of PRECISION bits.  */
2355 
2356 tree
2357 make_unsigned_type (int precision)
2358 {
2359   tree type = make_node (INTEGER_TYPE);
2360 
2361   TYPE_PRECISION (type) = precision;
2362 
2363   fixup_unsigned_type (type);
2364   return type;
2365 }
2366 
2367 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2368    and SATP.  */
2369 
2370 tree
2371 make_fract_type (int precision, int unsignedp, int satp)
2372 {
2373   tree type = make_node (FIXED_POINT_TYPE);
2374 
2375   TYPE_PRECISION (type) = precision;
2376 
2377   if (satp)
2378     TYPE_SATURATING (type) = 1;
2379 
2380   /* Lay out the type: set its alignment, size, etc.  */
2381   if (unsignedp)
2382     {
2383       TYPE_UNSIGNED (type) = 1;
2384       SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2385     }
2386   else
2387     SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2388   layout_type (type);
2389 
2390   return type;
2391 }
2392 
2393 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2394    and SATP.  */
2395 
2396 tree
2397 make_accum_type (int precision, int unsignedp, int satp)
2398 {
2399   tree type = make_node (FIXED_POINT_TYPE);
2400 
2401   TYPE_PRECISION (type) = precision;
2402 
2403   if (satp)
2404     TYPE_SATURATING (type) = 1;
2405 
2406   /* Lay out the type: set its alignment, size, etc.  */
2407   if (unsignedp)
2408     {
2409       TYPE_UNSIGNED (type) = 1;
2410       SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2411     }
2412   else
2413     SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2414   layout_type (type);
2415 
2416   return type;
2417 }
2418 
2419 /* Initialize sizetypes so layout_type can use them.  */
2420 
2421 void
2422 initialize_sizetypes (void)
2423 {
2424   int precision, bprecision;
2425 
2426   /* Get sizetype's precision from the SIZE_TYPE target macro.  */
2427   if (strcmp (SIZE_TYPE, "unsigned int") == 0)
2428     precision = INT_TYPE_SIZE;
2429   else if (strcmp (SIZE_TYPE, "long unsigned int") == 0)
2430     precision = LONG_TYPE_SIZE;
2431   else if (strcmp (SIZE_TYPE, "long long unsigned int") == 0)
2432     precision = LONG_LONG_TYPE_SIZE;
2433   else if (strcmp (SIZE_TYPE, "short unsigned int") == 0)
2434     precision = SHORT_TYPE_SIZE;
2435   else
2436     gcc_unreachable ();
2437 
2438   bprecision
2439     = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
2440   bprecision
2441     = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT));
2442   if (bprecision > HOST_BITS_PER_WIDE_INT * 2)
2443     bprecision = HOST_BITS_PER_WIDE_INT * 2;
2444 
2445   /* Create stubs for sizetype and bitsizetype so we can create constants.  */
2446   sizetype = make_node (INTEGER_TYPE);
2447   TYPE_NAME (sizetype) = get_identifier ("sizetype");
2448   TYPE_PRECISION (sizetype) = precision;
2449   TYPE_UNSIGNED (sizetype) = 1;
2450   TYPE_IS_SIZETYPE (sizetype) = 1;
2451   bitsizetype = make_node (INTEGER_TYPE);
2452   TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2453   TYPE_PRECISION (bitsizetype) = bprecision;
2454   TYPE_UNSIGNED (bitsizetype) = 1;
2455   TYPE_IS_SIZETYPE (bitsizetype) = 1;
2456 
2457   /* Now layout both types manually.  */
2458   SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT));
2459   TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
2460   TYPE_SIZE (sizetype) = bitsize_int (precision);
2461   TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
2462   set_min_and_max_values_for_integral_type (sizetype, precision,
2463 					    /*is_unsigned=*/true);
2464   /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
2465      sign-extended in a way consistent with force_fit_type.  */
2466   TYPE_MAX_VALUE (sizetype)
2467     = double_int_to_tree (sizetype,
2468 			  tree_to_double_int (TYPE_MAX_VALUE (sizetype)));
2469 
2470   SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
2471   TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
2472   TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2473   TYPE_SIZE_UNIT (bitsizetype)
2474     = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
2475   set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
2476 					    /*is_unsigned=*/true);
2477   /* bitsizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
2478      sign-extended in a way consistent with force_fit_type.  */
2479   TYPE_MAX_VALUE (bitsizetype)
2480     = double_int_to_tree (bitsizetype,
2481 			  tree_to_double_int (TYPE_MAX_VALUE (bitsizetype)));
2482 
2483   /* Create the signed variants of *sizetype.  */
2484   ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2485   TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2486   TYPE_IS_SIZETYPE (ssizetype) = 1;
2487   sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2488   TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2489   TYPE_IS_SIZETYPE (sbitsizetype) = 1;
2490 }
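
/* Worked example (editorial note, assuming a typical LP64 target where
   SIZE_TYPE is "long unsigned int"): precision == LONG_TYPE_SIZE == 64
   and bprecision == MIN (64 + BITS_PER_UNIT_LOG + 1,
   MAX_FIXED_MODE_SIZE) == 68, which smallest_mode_for_size widens to a
   128-bit mode's precision; the HOST_BITS_PER_WIDE_INT * 2 cap (also
   128 with a 64-bit HOST_WIDE_INT) leaves that unchanged, so sizetype
   counts bytes in 64 bits while bitsizetype can hold any byte count
   scaled by BITS_PER_UNIT.  */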
2491 
2492 /* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
2493    or BOOLEAN_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2494    for TYPE, based on the PRECISION and whether or not the TYPE
2495    IS_UNSIGNED.  PRECISION need not correspond to a width supported
2496    natively by the hardware; for example, on a machine with 8-bit,
2497    16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2498    61.  */
2499 
2500 void
2501 set_min_and_max_values_for_integral_type (tree type,
2502 					  int precision,
2503 					  bool is_unsigned)
2504 {
2505   tree min_value;
2506   tree max_value;
2507 
2508   if (is_unsigned)
2509     {
2510       min_value = build_int_cst (type, 0);
2511       max_value
2512 	= build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
2513 			      ? -1
2514 			      : ((HOST_WIDE_INT) 1 << precision) - 1,
2515 			      precision - HOST_BITS_PER_WIDE_INT > 0
2516 			      ? ((unsigned HOST_WIDE_INT) ~0
2517 				 >> (HOST_BITS_PER_WIDE_INT
2518 				     - (precision - HOST_BITS_PER_WIDE_INT)))
2519 			      : 0);
2520     }
2521   else
2522     {
2523       min_value
2524 	= build_int_cst_wide (type,
2525 			      (precision - HOST_BITS_PER_WIDE_INT > 0
2526 			       ? 0
2527 			       : (HOST_WIDE_INT) (-1) << (precision - 1)),
2528 			      (((HOST_WIDE_INT) (-1)
2529 				<< (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2530 				    ? precision - HOST_BITS_PER_WIDE_INT - 1
2531 				    : 0))));
2532       max_value
2533 	= build_int_cst_wide (type,
2534 			      (precision - HOST_BITS_PER_WIDE_INT > 0
2535 			       ? -1
2536 			       : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
2537 			      (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2538 			       ? (((HOST_WIDE_INT) 1
2539 				   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
2540 			       : 0));
2541     }
2542 
2543   TYPE_MIN_VALUE (type) = min_value;
2544   TYPE_MAX_VALUE (type) = max_value;
2545 }
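
/* Worked example (editorial note, not part of GCC): for
   PRECISION == 7, fitting in one HOST_WIDE_INT, the signed case yields
   min == (HOST_WIDE_INT) -1 << 6 == -64 and
   max == ((HOST_WIDE_INT) 1 << 6) - 1 == 63, while the unsigned case
   yields min == 0 and max == ((HOST_WIDE_INT) 1 << 7) - 1 == 127.  */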
2546 
2547 /* Set the extreme values of TYPE based on its precision in bits,
2548    then lay it out.  Used when make_signed_type won't do
2549    because the tree code is not INTEGER_TYPE.
2550    E.g. for Pascal, when the -fsigned-char option is given.  */
2551 
2552 void
2553 fixup_signed_type (tree type)
2554 {
2555   int precision = TYPE_PRECISION (type);
2556 
2557   /* We cannot properly represent constants wider than
2558      2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types
2559      as they are used by the i386 vector extensions and friends.  */
2560   if (precision > HOST_BITS_PER_WIDE_INT * 2)
2561     precision = HOST_BITS_PER_WIDE_INT * 2;
2562 
2563   set_min_and_max_values_for_integral_type (type, precision,
2564 					    /*is_unsigned=*/false);
2565 
2566   /* Lay out the type: set its alignment, size, etc.  */
2567   layout_type (type);
2568 }
2569 
2570 /* Set the extreme values of TYPE based on its precision in bits,
2571    then lay it out.  This is used both in `make_unsigned_type'
2572    and for enumeral types.  */
2573 
2574 void
2575 fixup_unsigned_type (tree type)
2576 {
2577   int precision = TYPE_PRECISION (type);
2578 
2579   /* We cannot properly represent constants wider than
2580      2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types
2581      as they are used by the i386 vector extensions and friends.  */
2582   if (precision > HOST_BITS_PER_WIDE_INT * 2)
2583     precision = HOST_BITS_PER_WIDE_INT * 2;
2584 
2585   TYPE_UNSIGNED (type) = 1;
2586 
2587   set_min_and_max_values_for_integral_type (type, precision,
2588 					    /*is_unsigned=*/true);
2589 
2590   /* Lay out the type: set its alignment, size, etc.  */
2591   layout_type (type);
2592 }
2593 
2594 /* Find the best machine mode to use when referencing a bit field of length
2595    BITSIZE bits starting at BITPOS.
2596 
2597    BITREGION_START is the bit position of the first bit in this
2598    sequence of bit fields.  BITREGION_END is the last bit in this
2599    sequence.  If these two values are nonzero, we should restrict the
2600    memory access to a maximum-sized chunk of
2601    BITREGION_END - BITREGION_START + 1 bits.  Otherwise, we are allowed
2602    to touch any adjacent non bit-fields.
2603 
2604    The underlying object is known to be aligned to a boundary of ALIGN bits.
2605    If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2606    larger than LARGEST_MODE (usually SImode).
2607 
2608    If no mode meets all these conditions, we return VOIDmode.
2609 
2610    If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2611    smallest mode meeting these conditions.
2612 
2613    If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2614    largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2615    all the conditions.
2616 
2617    If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
2618    decide which of the above modes should be used.  */
2619 
2620 enum machine_mode
2621 get_best_mode (int bitsize, int bitpos,
2622 	       unsigned HOST_WIDE_INT bitregion_start,
2623 	       unsigned HOST_WIDE_INT bitregion_end,
2624 	       unsigned int align,
2625 	       enum machine_mode largest_mode, int volatilep)
2626 {
2627   enum machine_mode mode;
2628   unsigned int unit = 0;
2629   unsigned HOST_WIDE_INT maxbits;
2630 
2631   /* If unset, no restriction.  */
2632   if (!bitregion_end)
2633     maxbits = MAX_FIXED_MODE_SIZE;
2634   else
2635     maxbits = bitregion_end - bitregion_start + 1;
2636 
2637   /* Find the narrowest integer mode that contains the bit field.  */
2638   for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
2639        mode = GET_MODE_WIDER_MODE (mode))
2640     {
2641       unit = GET_MODE_BITSIZE (mode);
2642       if (unit == GET_MODE_PRECISION (mode)
2643 	  && (bitpos % unit) + bitsize <= unit)
2644 	break;
2645     }
2646 
2647   if (mode == VOIDmode
2648       /* It is tempting to omit the following line
2649 	 if STRICT_ALIGNMENT is true.
2650 	 But that is incorrect, since if the bitfield uses part of 3 bytes
2651 	 and we use a 4-byte mode, we could get a spurious segv
2652 	 if the extra 4th byte is past the end of memory.
2653 	 (Though at least one Unix compiler ignores this problem:
2654 	 the one on the Sequent 386 machine.)  */
2655       || MIN (unit, BIGGEST_ALIGNMENT) > align
2656       || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))
2657       || unit > maxbits
2658       || (bitregion_end
2659 	  && bitpos - (bitpos % unit) + unit > bitregion_end + 1))
2660     return VOIDmode;
2661 
2662   if ((SLOW_BYTE_ACCESS && ! volatilep)
2663       || (volatilep && !targetm.narrow_volatile_bitfield ()))
2664     {
2665       enum machine_mode wide_mode = VOIDmode, tmode;
2666 
2667       for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
2668 	   tmode = GET_MODE_WIDER_MODE (tmode))
2669 	{
2670 	  unit = GET_MODE_BITSIZE (tmode);
2671 	  if (unit == GET_MODE_PRECISION (tmode)
2672 	      && bitpos / unit == (bitpos + bitsize - 1) / unit
2673 	      && unit <= BITS_PER_WORD
2674 	      && unit <= MIN (align, BIGGEST_ALIGNMENT)
2675 	      && unit <= maxbits
2676 	      && (largest_mode == VOIDmode
2677 		  || unit <= GET_MODE_BITSIZE (largest_mode))
2678 	      && (bitregion_end == 0
2679 		  || bitpos - (bitpos % unit) + unit <= bitregion_end + 1))
2680 	    wide_mode = tmode;
2681 	}
2682 
2683       if (wide_mode != VOIDmode)
2684 	return wide_mode;
2685     }
2686 
2687   return mode;
2688 }
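
/* Worked example (editorial note, assuming an 8-bit QImode and a 16-bit
   HImode): for BITSIZE == 10 at BITPOS == 3, QImode fails the
   narrowest-fit test ((3 % 8) + 10 > 8) and HImode passes
   ((3 % 16) + 10 <= 16); with ALIGN >= 16, no bitregion restriction,
   !SLOW_BYTE_ACCESS and VOLATILEP == 0 the function returns HImode,
   whereas ALIGN == 8 makes it return VOIDmode.  */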
2689 
2690 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
2691    SIGN).  The returned constants are made to be usable in TARGET_MODE.  */
2692 
2693 void
2694 get_mode_bounds (enum machine_mode mode, int sign,
2695 		 enum machine_mode target_mode,
2696 		 rtx *mmin, rtx *mmax)
2697 {
2698   unsigned size = GET_MODE_BITSIZE (mode);
2699   unsigned HOST_WIDE_INT min_val, max_val;
2700 
2701   gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2702 
2703   if (sign)
2704     {
2705       min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2706       max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2707     }
2708   else
2709     {
2710       min_val = 0;
2711       max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2712     }
2713 
2714   *mmin = gen_int_mode (min_val, target_mode);
2715   *mmax = gen_int_mode (max_val, target_mode);
2716 }
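
/* Worked example (editorial note, assuming an 8-bit QImode): with SIGN
   set, SIZE == 8 gives min_val == -(1 << 7) == -128 and
   max_val == (1 << 7) - 1 == 127; unsigned gives 0 and
   ((1 << 7) << 1) - 1 == 255.  The double shift in the unsigned case
   avoids shifting by the mode's full width, which would be undefined
   behavior.  */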
2717 
2718 #include "gt-stor-layout.h"
2719