/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
#include "regs.h"
#include "params.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on the alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
/* ... and its original value in bytes, specified via -fpack-struct=<value>.  */
unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
   allocated in Pmode, not ptr_mode.  Set only by internal_reference_types,
   which is called only by a front end.  */
static int reference_types_internal = 0;

static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) tree pending_sizes;

/* Show that REFERENCE_TYPEs are internal and should use Pmode.  Called only
   by a front end.  */

void
internal_reference_types (void)
{
  reference_types_internal = 1;
}

/* Get a list of all the objects put on the pending sizes list.  */

tree
get_pending_sizes (void)
{
  tree chain = pending_sizes;

  pending_sizes = 0;
  return chain;
}

/* Add EXPR to the pending sizes list.  */

void
put_pending_size (tree expr)
{
  /* Strip any simple arithmetic from EXPR to see if it has an underlying
     SAVE_EXPR.  */
  expr = skip_simple_arithmetic (expr);

  if (TREE_CODE (expr) == SAVE_EXPR)
    pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
}

/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (tree chain)
{
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}
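
/* Illustrative sketch, not part of GCC proper: since put_pending_sizes
   insists that the list be empty, a front end that must lay out types in
   a nested context without disturbing the outer list can bracket the
   work by saving and restoring the chain.  */
#if 0 /* example only */
{
  tree saved = get_pending_sizes ();  /* Take and clear the current list.  */
  tree inner;

  /* ... lay out types belonging to the inner context ...  */

  inner = get_pending_sizes ();       /* Sizes queued by the inner work.  */
  put_pending_sizes (saved);          /* Restore the outer list.  */
  /* INNER can now be expanded wherever the inner context requires.  */
}
#endif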

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  tree save;

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  Likewise for self-referential sizes and
     constant sizes.  */
  if (TREE_CONSTANT (size)
      || lang_hooks.decls.global_bindings_p () < 0
      || CONTAINS_PLACEHOLDER_P (size))
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->x_dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      if (TREE_CONSTANT (size))
        error ("type size can%'t be explicitly evaluated");
      else
        error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
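
/* For example (an illustration, not code from any front end): for a C99
   VLA such as "int a[n]", the front end builds a DECL_SIZE expression of
   roughly n * 32 bits.  Passing it through variable_size wraps the
   computation in a SAVE_EXPR, so the bound is evaluated exactly once,
   and queues that SAVE_EXPR on pending_sizes via put_pending_size.  */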

#ifndef MAX_FIXED_MODE_SIZE
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
#endif

/* Return the machine mode to use for a nonscalar of SIZE bits.  The
   mode must be in class CLASS, and have exactly that many value bits;
   it may have padding as well.  If LIMIT is nonzero, modes wider
   than MAX_FIXED_MODE_SIZE will not be used.  */

enum machine_mode
mode_for_size (unsigned int size, enum mode_class class, int limit)
{
  enum machine_mode mode;

  if (limit && size > MAX_FIXED_MODE_SIZE)
    return BLKmode;

  /* Get the first mode which has this size, in the specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) == size)
      return mode;

  return BLKmode;
}
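
/* Example (mode names are target-dependent): on a target whose SImode
   carries 32 value bits, mode_for_size (32, MODE_INT, 0) returns SImode,
   while mode_for_size (24, MODE_INT, 0) returns BLKmode because no
   integer mode there has exactly 24 value bits.  */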

/* Similar, except passed a tree node.  */

enum machine_mode
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
  if (TREE_CODE (size) != INTEGER_CST
      || TREE_OVERFLOW (size)
      /* What we really want to say here is that the size can fit in a
         host integer, but we know there's no way we'd find a mode for
         this many bits, so there's no point in doing the precise test.  */
      || compare_tree_int (size, 1000) > 0)
    return BLKmode;
  else
    return mode_for_size (tree_low_cst (size, 1), class, limit);
}

/* Similar, but never return BLKmode; return the narrowest mode that
   contains at least the requested number of value bits.  */

enum machine_mode
smallest_mode_for_size (unsigned int size, enum mode_class class)
{
  enum machine_mode mode;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_PRECISION (mode) >= size)
      return mode;

  gcc_unreachable ();
}
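
/* Example (target-dependent): with the usual 8/16/32/64-bit integer
   modes, smallest_mode_for_size (17, MODE_INT) returns the 32-bit mode,
   the narrowest one providing at least 17 value bits; unlike
   mode_for_size, it aborts rather than return BLKmode.  */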

/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      if (mode == BLKmode)
        break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
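
/* Example (target-dependent): on a target where SFmode is 32 bits wide,
   int_mode_for_mode (SFmode) returns the 32-bit integer mode, which is
   what a caller needs to bit-copy a float through integer registers.  */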

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT,
              MAX (1, mode_base_align[mode] * BITS_PER_UNIT));
}
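
/* For instance, a mode whose mode_base_align[] entry is 4 gets an
   alignment of 32 bits here, unless BIGGEST_ALIGNMENT caps it lower.  */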


/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      DECL_ALIGN (decl) = TYPE_ALIGN (type);
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change;
     however, the front end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
                                            bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
#ifdef PCC_BITFIELD_TYPE_MATTERS
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
#endif
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode
             and occupying a complete byte or bytes on a proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              enum machine_mode xmode
                = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);

              if (xmode != BLKmode
                  && (known_align == 0
                      || known_align >= GET_MODE_ALIGNMENT (xmode)))
                {
                  DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode),
                                           DECL_ALIGN (decl));
                  DECL_MODE (decl) = xmode;
                  DECL_BIT_FIELD (decl) = 0;
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is of variable size, we can't misalign it since we
         have no way to make a temporary to align the result.  But this
         isn't an issue if the decl is not addressable.  Likewise if it
         is of unknown size.

         Note that do_type_align may set DECL_USER_ALIGN, so we need to
         check old_user_align instead.  */
      if (DECL_PACKED (decl)
          && !old_user_align
          && !zero_bitfield
          && (DECL_NONADDRESSABLE_P (decl)
              || DECL_SIZE_UNIT (decl) == 0
              || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! DECL_USER_ALIGN (decl) && (! DECL_PACKED (decl) || zero_bitfield))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          DECL_ALIGN (decl)
            = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
          DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
          && compare_tree_int (size, larger_than_size) > 0)
        {
          int size_as_int = TREE_INT_CST_LOW (size);

          if (compare_tree_int (size, size_as_int) == 0)
            warning (0, "size of %q+D is %d bytes", decl, size_as_int);
          else
            warning (0, "size of %q+D is larger than %wd bytes",
                     decl, larger_than_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
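
/* Example (sketch): for "struct s { int i : 2; }", the C front end sets
   DECL_SIZE (i) to 2 bits before calling layout_decl; the FIELD_DECL
   branch above then records int in DECL_BIT_FIELD_TYPE and derives
   DECL_SIZE_UNIT as CEIL (2, BITS_PER_UNIT) = 1 byte.  A field such as
   "int j : 8" that happens to land on a byte boundary can instead be
   demoted to an ordinary QImode field with DECL_BIT_FIELD cleared,
   assuming the usual 8-bit QImode.  */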

/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Hook for a front-end function that can modify the record layout as needed
   immediately before it is finalized.  */

static void (*lang_adjust_rli) (record_layout_info) = 0;

void
set_lang_adjust_rli (void (*f) (record_layout_info))
{
  lang_adjust_rli = f;
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s));

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have a minimum size.  */
  if (! TYPE_PACKED (t))
    rli->record_align = MAX (rli->record_align,
                             (unsigned) STRUCTURE_SIZE_BOUNDARY);
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;

  return rli;
}
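
/* The intended calling protocol, in outline (an illustration, not a
   quotation from any front end): given a record type T whose fields are
   already chained on TYPE_FIELDS, place each field in turn and then
   finalize.  */
#if 0 /* example only */
{
  record_layout_info rli = start_record_layout (t);
  tree field;

  for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
    place_field (rli, field);
  finish_record_layout (rli, /*free_p=*/1);
}
#endif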

/* These four routines perform computations that convert between
   the offset/bitpos forms and byte and bit offsets.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR,
                                 fold_convert (bitsizetype, offset),
                                 bitsize_unit_node));
}

tree
byte_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, offset,
                     fold_convert (sizetype,
                                   size_binop (TRUNC_DIV_EXPR, bitpos,
                                               bitsize_unit_node)));
}

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  *poffset = size_binop (MULT_EXPR,
                         fold_convert (sizetype,
                                       size_binop (FLOOR_DIV_EXPR, pos,
                                                   bitsize_int (off_align))),
                         size_int (off_align / BITS_PER_UNIT));
  *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
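
/* Worked example: with OFF_ALIGN == 32 and POS == 70 bits, pos_from_bit
   produces *POFFSET = (70 / 32) * (32 / BITS_PER_UNIT) = 2 * 4 = 8 bytes
   and *PBITPOS = 70 % 32 = 6 bits; bit_from_pos then recovers
   8 * 8 + 6 = 70 (assuming BITS_PER_UNIT == 8).  */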

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
                                      bitsize_int (off_align));

      *poffset
        = size_binop (PLUS_EXPR, *poffset,
                      size_binop (MULT_EXPR,
                                  fold_convert (sizetype, extra_aligns),
                                  size_int (off_align / BITS_PER_UNIT)));

      *pbitpos
        = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
    }
}
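
/* Worked example: with OFF_ALIGN == 32, *POFFSET == 4 and *PBITPOS == 40,
   one whole alignment unit (40 / 32 == 1) migrates into the byte offset:
   *POFFSET becomes 4 + 1 * 4 = 8 and *PBITPOS becomes 40 % 32 = 8
   (assuming BITS_PER_UNIT == 8).  */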

/* Print debugging information about the information in RLI.  */

void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);
  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (rli->pending_statics)
    {
      fprintf (stderr, "pending statics:\n");
      debug_tree (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if (! integer_zerop (DECL_SIZE (field))
          ? ! DECL_PACKED (field)
          : (rli->prev_field
             && DECL_BIT_FIELD_TYPE (rli->prev_field)
             && ! integer_zerop (DECL_SIZE (rli->prev_field))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          /* If we start a new run, make sure we start it properly aligned.  */
          if ((!rli->prev_field
               || integer_zerop (DECL_SIZE (field))
               || integer_zerop (DECL_SIZE (rli->prev_field))
               || !host_integerp (DECL_SIZE (rli->prev_field), 0)
               || !host_integerp (TYPE_SIZE (type), 0)
               || !simple_cst_equal (TYPE_SIZE (type),
                                     TYPE_SIZE (TREE_TYPE (rli->prev_field)))
               || (rli->remaining_in_alignment
                   < tree_low_cst (DECL_SIZE (field), 0)))
              && desired_align < type_align)
            desired_align = type_align;
        }
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
#endif
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype,
                               DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}

#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
             / align));
}
#endif
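
/* Worked example: consider a 16-bit field to be placed at byte 3 of a
   record when both SIZE and ALIGN of the underlying 32-bit type are 32.
   OFFSET is (3 * 8) % 32 == 24, and (24 + 16 + 32 - 1) / 32 == 2, which
   exceeds 32 / 32 == 1: the field would straddle two alignment units of
   its type, so the caller advances to the next boundary.  */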

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      rli->pending_statics = tree_cons (NULL_TREE, field,
                                        rli->pending_statics);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
         maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
                      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
              else
                warning (OPT_Wattributes, "packed attribute is "
                         "unnecessary for %q+D", field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align)
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to multiple of field alignment.  */

      warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          fold_convert (sizetype,
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                    bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;

    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && ! DECL_PACKED (field)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
        When a bit field is inserted into a packed record, the whole
        size of the underlying type is used by one or more same-size
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits.  However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment,
     to determine alignment, since that matches the documentation.  */

  if (targetm.ms_bitfield_layout_p (rli->t)
      && ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
          || (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
    {
      /* At this point, either the prior or current are bitfields,
         (possibly both), and we're dealing with MS packing.  */
      tree prev_saved = rli->prev_field;

      /* Is the prior field a bitfield?  If so, handle "runs" of same
         type size fields.  */
      if (rli->prev_field /* necessarily a bitfield if it exists.  */)
        {
          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run".  (Note: it's nonzero declared
             size, but equal type sizes!)  (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.)  */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && host_integerp (DECL_SIZE (rli->prev_field), 0)
              && host_integerp (TYPE_SIZE (type), 0)
              && simple_cst_equal (TYPE_SIZE (type),
                                   TYPE_SIZE (TREE_TYPE (rli->prev_field))))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!)  */
              HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);

              if (rli->remaining_in_alignment < bitsize)
                {
                  /* If PREV_FIELD is packed, and we haven't lumped
                     non-packed bitfields with it, treat this as if PREV_FIELD
                     was not a bitfield.  This avoids anomalies where a packed
                     bitfield with long long base type can take up more
                     space than a same-size bitfield with base type short.  */
                  if (rli->prev_packed)
                    rli->prev_field = prev_saved = NULL;
                  else
                    {
                      /* Out of bits; bump up to the next 'word'.  */
                      rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
                      rli->bitpos
                        = size_binop (PLUS_EXPR, TYPE_SIZE (type),
                                      DECL_FIELD_BIT_OFFSET (rli->prev_field));
                      rli->prev_field = field;
                      rli->remaining_in_alignment
                        = tree_low_cst (TYPE_SIZE (type), 0) - bitsize;
                    }
                }
              else
                rli->remaining_in_alignment -= bitsize;
            }
          else if (rli->prev_packed)
            rli->prev_field = prev_saved = NULL;
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));

                  /* If the desired alignment is greater or equal to TYPE_SIZE,
                     we have already adjusted rli->bitpos / rli->offset above.  */
                  if ((unsigned HOST_WIDE_INT) tree_low_cst (type_size, 0)
                      > desired_align)
                    rli->bitpos
                      = size_binop (PLUS_EXPR, type_size,
                                    DECL_FIELD_BIT_OFFSET (rli->prev_field));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          rli->prev_packed = 0;
          normalize_rli (rli);
        }

      /* If we're starting a new run of same size type bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield and the type sizes differ, or if there wasn't
         a prior bitfield and the size of the current field is nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type),
                                   TYPE_SIZE (TREE_TYPE (prev_saved)))
              : !integer_zerop (DECL_SIZE (field))))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
              && host_integerp (DECL_SIZE (field), 0))
            rli->remaining_in_alignment
              = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
                - tree_low_cst (DECL_SIZE (field), 0);

          /* Now align (conventionally) for the new type.  */
          if (!DECL_PACKED (field))
            type_align = MAX (TYPE_ALIGN (type), type_align);

          if (prev_saved
              && DECL_BIT_FIELD_TYPE (prev_saved)
              /* If the previous bit-field is zero-sized, we've already
                 accounted for its alignment needs (or ignored it, if
                 appropriate) while placing it.  */
              && ! integer_zerop (DECL_SIZE (prev_saved)))
            type_align = MAX (type_align,
                              TYPE_ALIGN (TREE_TYPE (prev_saved)));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
                    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
     Store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  if (DECL_BIT_FIELD_TYPE (field))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      unsigned int mfa = maximum_field_alignment;

      if (integer_zerop (DECL_SIZE (field)))
        mfa = initial_max_fld_align * BITS_PER_UNIT;

      /* Only the MS bitfields use this.  We used to also put any kind of
         packed bit fields into prev_field, but that makes no sense, because
         an 8 bit packed bit field shouldn't impose more restriction on
         following fields than a char field, and the alignment requirements
         are also not fulfilled.
         There is no sane value to set rli->remaining_in_alignment to when
         a packed bitfield in prev_field is unaligned.  */
      if (mfa != 0)
        type_align = MIN (type_align, mfa);
      gcc_assert (rli->prev_field
                  || actual_align >= type_align || DECL_PACKED (field)
                  || integer_zerop (DECL_SIZE (field))
                  || !targetm.ms_bitfield_layout_p (rli->t));
      if (rli->prev_field == NULL && actual_align >= type_align
          && !integer_zerop (DECL_SIZE (field)))
        {
          rli->prev_field = field;
          /* rli->remaining_in_alignment has not been set if the bitfield
             has size zero, or if it is a packed bitfield.  */
          rli->remaining_in_alignment
            = (tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
               - tree_low_cst (DECL_SIZE (field), 0));
          rli->prev_packed = DECL_PACKED (field);
        }
      else if (rli->prev_field && DECL_PACKED (field))
        {
          HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);

          if (rli->remaining_in_alignment < bitsize)
            rli->prev_field = NULL;
          else
            rli->remaining_in_alignment -= bitsize;
        }
    }

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST
           || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field)))
    {
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset,
                      fold_convert (sizetype,
                                    size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                bitsize_unit_node)));
      rli->offset
        = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}

/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
                                          rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
        = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
        {
          TYPE_PACKED (rli->t) = 0;

          if (TYPE_NAME (rli->t))
            {
              const char *name;

              if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
                name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
              else
                name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));

              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked, "packed attribute causes inefficient "
                         "alignment for %qs", name);
              else
                warning (OPT_Wpacked,
                         "packed attribute is unnecessary for %qs", name);
            }
          else
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wpacked,
                         "packed attribute causes inefficient alignment");
              else
                warning (OPT_Wpacked, "packed attribute is unnecessary");
            }
        }
    }
}

/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  TYPE_MODE (type) = BLKmode;

  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
          || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
              && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
              && !(TYPE_SIZE (TREE_TYPE (field)) != 0
                   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
          || ! host_integerp (bit_position (field), 1)
          || DECL_SIZE (field) == 0
          || ! host_integerp (DECL_SIZE (field), 1))
        return;

      /* If this field is the whole struct, remember its mode so
         that, say, we can put a double in a class into a DF
         register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
        mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, e.g. c4x, it is sub-optimal
         to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
        return;
#endif /* MEMBER_TYPE_FORCES_BLK */
    }

  /* If we only have one real field, use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    TYPE_MODE (type) = mode;
  else
    TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
            || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
         don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      TYPE_MODE (type) = BLKmode;
    }
}
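
/* Example (target-dependent): for "struct { double d; }" the loop above
   finds that the single field covers the whole struct and remembers
   DFmode; the sizes match, so the struct itself gets TYPE_MODE (type)
   == DFmode and can live in a floating-point register rather than on
   the stack.  */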
1432
1433 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1434 out. */
1435
1436 static void
finalize_type_size(tree type)1437 finalize_type_size (tree type)
1438 {
1439 /* Normally, use the alignment corresponding to the mode chosen.
1440 However, where strict alignment is not required, avoid
1441 over-aligning structures, since most compilers do not do this
1442 alignment. */
1443
1444 if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1445 && (STRICT_ALIGNMENT
1446 || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1447 && TREE_CODE (type) != QUAL_UNION_TYPE
1448 && TREE_CODE (type) != ARRAY_TYPE)))
1449 {
1450 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1451
1452 /* Don't override a larger alignment requirement coming from a user
1453 alignment of one of the fields. */
1454 if (mode_align >= TYPE_ALIGN (type))
1455 {
1456 TYPE_ALIGN (type) = mode_align;
1457 TYPE_USER_ALIGN (type) = 0;
1458 }
1459 }
1460
1461 /* Do machine-dependent extra alignment. */
1462 #ifdef ROUND_TYPE_ALIGN
1463 TYPE_ALIGN (type)
1464 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1465 #endif
1466
1467 /* If we failed to find a simple way to calculate the unit size
1468 of the type, find it by division. */
1469 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1470 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1471 result will fit in sizetype. We will get more efficient code using
1472 sizetype, so we force a conversion. */
1473 TYPE_SIZE_UNIT (type)
1474 = fold_convert (sizetype,
1475 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1476 bitsize_unit_node));
1477
1478 if (TYPE_SIZE (type) != 0)
1479 {
1480 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1481 TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
1482 TYPE_ALIGN_UNIT (type));
1483 }
1484
1485 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1486 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1487 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1488 if (TYPE_SIZE_UNIT (type) != 0
1489 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1490 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1491
1492 /* Also layout any other variants of the type. */
1493 if (TYPE_NEXT_VARIANT (type)
1494 || type != TYPE_MAIN_VARIANT (type))
1495 {
1496 tree variant;
1497 /* Record layout info of this variant. */
1498 tree size = TYPE_SIZE (type);
1499 tree size_unit = TYPE_SIZE_UNIT (type);
1500 unsigned int align = TYPE_ALIGN (type);
1501 unsigned int user_align = TYPE_USER_ALIGN (type);
1502 enum machine_mode mode = TYPE_MODE (type);
1503
1504 /* Copy it into all variants. */
1505 for (variant = TYPE_MAIN_VARIANT (type);
1506 variant != 0;
1507 variant = TYPE_NEXT_VARIANT (variant))
1508 {
1509 TYPE_SIZE (variant) = size;
1510 TYPE_SIZE_UNIT (variant) = size_unit;
1511 TYPE_ALIGN (variant) = align;
1512 TYPE_USER_ALIGN (variant) = user_align;
1513 TYPE_MODE (variant) = mode;
1514 }
1515 }
1516 }
1517
1518 /* Do all of the work required to layout the type indicated by RLI,
1519 once the fields have been laid out. This function will call `free'
1520 for RLI, unless FREE_P is false. Passing a value other than false
1521 for FREE_P is bad practice; this option only exists to support the
1522 G++ 3.2 ABI. */
1523
1524 void
finish_record_layout(record_layout_info rli,int free_p)1525 finish_record_layout (record_layout_info rli, int free_p)
1526 {
1527 tree variant;
1528
1529 /* Compute the final size. */
1530 finalize_record_size (rli);
1531
1532 /* Compute the TYPE_MODE for the record. */
1533 compute_record_mode (rli->t);
1534
1535 /* Perform any last tweaks to the TYPE_SIZE, etc. */
1536 finalize_type_size (rli->t);
1537
1538 /* Propagate TYPE_PACKED to variants. With C++ templates,
1539 handle_packed_attribute is too early to do this. */
1540 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1541 variant = TYPE_NEXT_VARIANT (variant))
1542 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1543
1544 /* Lay out any static members. This is done now because their type
1545 may use the record's type. */
1546 while (rli->pending_statics)
1547 {
1548 layout_decl (TREE_VALUE (rli->pending_statics), 0);
1549 rli->pending_statics = TREE_CHAIN (rli->pending_statics);
1550 }
1551
1552 /* Clean up. */
1553 if (free_p)
1554 free (rli);
1555 }

/* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
   NAME, its fields are chained in reverse on FIELDS.

   If ALIGN_TYPE is non-null, TYPE is given the same alignment as
   ALIGN_TYPE.  */

void
finish_builtin_struct (tree type, const char *name, tree fields,
                       tree align_type)
{
  tree tail, next;

  for (tail = NULL_TREE; fields; tail = fields, fields = next)
    {
      DECL_FIELD_CONTEXT (fields) = type;
      next = TREE_CHAIN (fields);
      TREE_CHAIN (fields) = tail;
    }
  TYPE_FIELDS (type) = tail;

  if (align_type)
    {
      TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
      TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
    }

  layout_type (type);
#if 0 /* not yet, should get fixed properly later */
  TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
#else
  TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
#endif
  TYPE_STUB_DECL (type) = TYPE_NAME (type);
  layout_decl (TYPE_NAME (type), 0);
}
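
/* Editorial sketch (not in the original sources): a hypothetical use
   of finish_builtin_struct to build `struct { void *ptr; long len; }'.
   Note that the field chain is handed over in reverse order; the
   function re-reverses it.  */
#if 0
static tree
example_build_builtin_struct (void)
{
  tree t = make_node (RECORD_TYPE);
  tree f_ptr = build_decl (FIELD_DECL, get_identifier ("ptr"),
                           ptr_type_node);
  tree f_len = build_decl (FIELD_DECL, get_identifier ("len"),
                           long_integer_type_node);

  /* Chain in reverse: LEN first, then PTR.  */
  TREE_CHAIN (f_len) = f_ptr;
  finish_builtin_struct (t, "__example_pair", f_len, NULL_TREE);
  return t;
}
#endif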

/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
         of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
        TYPE_PRECISION (type) = 1; /* default precision is one bit.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case CHAR_TYPE:
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
          && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
        TYPE_UNSIGNED (type) = 1;

      TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
                                                 MODE_INT);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;
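
      /* Editorial example (not in the original sources): an
         ENUMERAL_TYPE with TYPE_PRECISION 7 gets the smallest integer
         mode of at least 7 bits, QImode on a target with 8-bit units,
         so TYPE_SIZE becomes 8 bits and TYPE_SIZE_UNIT 1.  */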

    case REAL_TYPE:
      TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      TYPE_MODE (type)
        = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
                         (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                          ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
                         0);
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
        int nunits = TYPE_VECTOR_SUBPARTS (type);
        tree nunits_tree = build_int_cst (NULL_TREE, nunits);
        tree innertype = TREE_TYPE (type);

        gcc_assert (!(nunits & (nunits - 1)));

        /* Find an appropriate mode for the vector type.  */
        if (TYPE_MODE (type) == VOIDmode)
          {
            enum machine_mode innermode = TYPE_MODE (innertype);
            enum machine_mode mode;

            /* First, look for a supported vector type.  */
            if (GET_MODE_CLASS (innermode) == MODE_FLOAT)
              mode = MIN_MODE_VECTOR_FLOAT;
            else
              mode = MIN_MODE_VECTOR_INT;

            for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
              if (GET_MODE_NUNITS (mode) == nunits
                  && GET_MODE_INNER (mode) == innermode
                  && targetm.vector_mode_supported_p (mode))
                break;

            /* For integers, try mapping it to a same-sized scalar mode.  */
            if (mode == VOIDmode
                && GET_MODE_CLASS (innermode) == MODE_INT)
              mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
                                    MODE_INT, 0);

            if (mode == VOIDmode || !have_regs_of_mode[mode])
              TYPE_MODE (type) = BLKmode;
            else
              TYPE_MODE (type) = mode;
          }

        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
        TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
                                                 TYPE_SIZE_UNIT (innertype),
                                                 nunits_tree, 0);
        TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
                                            nunits_tree, 0);

        /* Always naturally align vectors.  This prevents ABI changes
           depending on whether or not native vector modes are
           supported.  */
        TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
        break;
      }
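
      /* Editorial example (not in the original sources): for a vector
         of 4 SFmode floats, the loop above finds V4SFmode when the
         target supports it; either way TYPE_SIZE is 4 * 32 = 128 bits
         and, by the natural-alignment rule, TYPE_ALIGN is 128.  */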

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      TYPE_MODE (type) = VOIDmode;
      break;

    case OFFSET_TYPE:
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
         but ptrdiff_t must be integral.  */
      TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0);
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
         be, but we do know the alignment is FUNCTION_BOUNDARY, so
         make it consistent with that.  */
      TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
                                   && reference_types_internal)
                                  ? Pmode : TYPE_MODE (type));

        int nbits = GET_MODE_BITSIZE (mode);

        TYPE_SIZE (type) = bitsize_int (nbits);
        TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
        TYPE_UNSIGNED (type) = 1;
        TYPE_PRECISION (type) = nbits;
      }
      break;
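
      /* Editorial example (not in the original sources): on a target
         whose pointers live in 64-bit registers, NBITS is 64, so a
         POINTER_TYPE ends up 64 bits wide and unsigned, with an
         8-byte TYPE_SIZE_UNIT.  */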

    case ARRAY_TYPE:
      {
        tree index = TYPE_DOMAIN (type);
        tree element = TREE_TYPE (type);

        build_pointer_type (element);

        /* We need to know both bounds in order to compute the size.  */
        if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
            && TYPE_SIZE (element))
          {
            tree ub = TYPE_MAX_VALUE (index);
            tree lb = TYPE_MIN_VALUE (index);
            tree length;
            tree element_size;

            /* The initial subtraction should happen in the original type so
               that (possible) negative values are handled appropriately.  */
            length = size_binop (PLUS_EXPR, size_one_node,
                                 fold_convert (sizetype,
                                               fold_build2 (MINUS_EXPR,
                                                            TREE_TYPE (lb),
                                                            ub, lb)));

            /* Special handling for arrays of bits (for Chill).  */
            element_size = TYPE_SIZE (element);
            if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
                && (integer_zerop (TYPE_MAX_VALUE (element))
                    || integer_onep (TYPE_MAX_VALUE (element)))
                && host_integerp (TYPE_MIN_VALUE (element), 1))
              {
                HOST_WIDE_INT maxvalue
                  = tree_low_cst (TYPE_MAX_VALUE (element), 1);
                HOST_WIDE_INT minvalue
                  = tree_low_cst (TYPE_MIN_VALUE (element), 1);

                if (maxvalue - minvalue == 1
                    && (maxvalue == 1 || maxvalue == 0))
                  element_size = integer_one_node;
              }

            /* If neither bound is a constant and sizetype is signed, make
               sure the size is never negative.  We should really do this
               if *either* bound is non-constant, but this is the best
               compromise between C and Ada.  */
            if (!TYPE_UNSIGNED (sizetype)
                && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
                && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
              length = size_binop (MAX_EXPR, length, size_zero_node);

            TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
                                           fold_convert (bitsizetype,
                                                         length));

            /* If we know the size of the element, calculate the total
               size directly, rather than do some division thing below.
               This optimization helps Fortran assumed-size arrays
               (where the size of the array is determined at runtime)
               substantially.
               Note that we can't do this in the case where the size of
               the elements is one bit since TYPE_SIZE_UNIT cannot be
               set correctly in that case.  */
            if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
              TYPE_SIZE_UNIT (type)
                = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
          }

        /* Now round the alignment and size,
           using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
        TYPE_ALIGN (type)
          = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
        TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
        TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
        TYPE_MODE (type) = BLKmode;
        if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
            && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
            /* BLKmode elements force BLKmode aggregate;
               else extract/store fields may lose.  */
            && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
                || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
          {
            /* One-element arrays get the component type's mode.  */
            if (simple_cst_equal (TYPE_SIZE (type),
                                  TYPE_SIZE (TREE_TYPE (type))))
              TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type));
            else
              TYPE_MODE (type)
                = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);

            if (TYPE_MODE (type) != BLKmode
                && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
                && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
              {
                TYPE_NO_FORCE_BLK (type) = 1;
                TYPE_MODE (type) = BLKmode;
              }
          }
        /* When the element size is constant, check that it is at least as
           large as the element alignment.  */
        if (TYPE_SIZE_UNIT (element)
            && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
            /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
               TYPE_ALIGN_UNIT.  */
            && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element))
            && !integer_zerop (TYPE_SIZE_UNIT (element))
            && compare_tree_int (TYPE_SIZE_UNIT (element),
                                 TYPE_ALIGN_UNIT (element)) < 0)
          error ("alignment of array elements is greater than element size");
        break;
      }
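
      /* Editorial example (not in the original sources): for
         `int a[10]' with 32-bit int, the domain is [0, 9], so LENGTH
         is (9 - 0) + 1 = 10, TYPE_SIZE is 10 * 32 = 320 bits and
         TYPE_SIZE_UNIT is 40 bytes.  An `int a[1]' instead inherits
         SImode from its element, per the one-element-array rule.  */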

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        tree field;
        record_layout_info rli;

        /* Initialize the layout information.  */
        rli = start_record_layout (type);

        /* If this is a QUAL_UNION_TYPE, we want to process the fields
           in the reverse order in building the COND_EXPR that denotes
           its size.  We reverse them again later.  */
        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        /* Place all the fields.  */
        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          place_field (rli, field);

        if (TREE_CODE (type) == QUAL_UNION_TYPE)
          TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

        if (lang_adjust_rli)
          (*lang_adjust_rli) (rli);

        /* Finish laying out the record.  */
        finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* If an alias set has been set for this aggregate when it was incomplete,
     force it into alias set 0.
     This is too conservative, but we cannot call record_component_aliases
     here because some frontends still change the aggregates after
     layout_type.  */
  if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type))
    TYPE_ALIAS_SET (type) = 0;
}

/* Create and return a type for signed integers of PRECISION bits.  */

tree
make_signed_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_signed_type (type);
  return type;
}

/* Create and return a type for unsigned integers of PRECISION bits.  */

tree
make_unsigned_type (int precision)
{
  tree type = make_node (INTEGER_TYPE);

  TYPE_PRECISION (type) = precision;

  fixup_unsigned_type (type);
  return type;
}
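
/* Editorial example (not in the original sources): make_signed_type (8)
   yields a QImode INTEGER_TYPE ranging over [-128, 127], while
   make_unsigned_type (8) ranges over [0, 255]; see
   set_min_and_max_values_for_integral_type below for the bounds.  */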

/* Initialize sizetype and bitsizetype to a reasonable and temporary
   value to enable integer types to be created.  */

void
initialize_sizetypes (bool signed_p)
{
  tree t = make_node (INTEGER_TYPE);

  TYPE_MODE (t) = SImode;
  TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
  TYPE_USER_ALIGN (t) = 0;
  TYPE_IS_SIZETYPE (t) = 1;
  TYPE_UNSIGNED (t) = !signed_p;
  TYPE_SIZE (t) = build_int_cst (t, GET_MODE_BITSIZE (SImode));
  TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
  TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
  TYPE_MIN_VALUE (t) = build_int_cst (t, 0);

  /* 1000 avoids problems with possible overflow and is certainly
     larger than any size value we'd want to be storing.  */
  TYPE_MAX_VALUE (t) = build_int_cst (t, 1000);

  sizetype = t;
  bitsizetype = build_distinct_type_copy (t);
}

/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
   We do this by overwriting the stub sizetype and bitsizetype nodes
   created by initialize_sizetypes.  This makes sure that (a) anything
   stubby about them no longer exists and (b) any INTEGER_CSTs created
   with such a type remain valid.  */

void
set_sizetype (tree type)
{
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
     precision.  */
  int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
                       2 * HOST_BITS_PER_WIDE_INT);
  tree t;

  gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));

  t = build_distinct_type_copy (type);
  /* We do want to use sizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We do want to use bitsizetype's cache, as we will be replacing that
     type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;

  if (TYPE_UNSIGNED (type))
    {
      fixup_unsigned_type (bitsizetype);
      ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
      TYPE_IS_SIZETYPE (ssizetype) = 1;
      sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
      TYPE_IS_SIZETYPE (sbitsizetype) = 1;
    }
  else
    {
      fixup_signed_type (bitsizetype);
      ssizetype = sizetype;
      sbitsizetype = bitsizetype;
    }
}
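
/* Editorial example (not in the original sources): with a 32-bit
   sizetype and BITS_PER_UNIT_LOG == 3 (8-bit units), bitsizetype gets
   32 + 3 + 1 = 36 bits of precision: enough to count in bits any
   object whose byte size fits in sizetype, plus a sign bit.  */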

/* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE,
   BOOLEAN_TYPE, or CHAR_TYPE.  Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
   for TYPE, based on the PRECISION and whether or not the TYPE
   IS_UNSIGNED.  PRECISION need not correspond to a width supported
   natively by the hardware; for example, on a machine with 8-bit,
   16-bit, and 64-bit register modes, PRECISION might be 7, 23, or
   61.  */

void
set_min_and_max_values_for_integral_type (tree type,
                                          int precision,
                                          bool is_unsigned)
{
  tree min_value;
  tree max_value;

  if (is_unsigned)
    {
      min_value = build_int_cst (type, 0);
      max_value
        = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
                              ? -1
                              : ((HOST_WIDE_INT) 1 << precision) - 1,
                              precision - HOST_BITS_PER_WIDE_INT > 0
                              ? ((unsigned HOST_WIDE_INT) ~0
                                 >> (HOST_BITS_PER_WIDE_INT
                                     - (precision - HOST_BITS_PER_WIDE_INT)))
                              : 0);
    }
  else
    {
      min_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? 0
                               : (HOST_WIDE_INT) (-1) << (precision - 1)),
                              (((HOST_WIDE_INT) (-1)
                                << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                                    ? precision - HOST_BITS_PER_WIDE_INT - 1
                                    : 0))));
      max_value
        = build_int_cst_wide (type,
                              (precision - HOST_BITS_PER_WIDE_INT > 0
                               ? -1
                               : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
                              (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
                               ? (((HOST_WIDE_INT) 1
                                   << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
                               : 0));
    }

  TYPE_MIN_VALUE (type) = min_value;
  TYPE_MAX_VALUE (type) = max_value;
}
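
/* Editorial example (not in the original sources): for PRECISION 7,
   the signed case produces [-1 << 6, (1 << 6) - 1] = [-64, 63] and the
   unsigned case [0, (1 << 7) - 1] = [0, 127].  The second
   HOST_WIDE_INT half passed to build_int_cst_wide only comes into play
   once PRECISION exceeds HOST_BITS_PER_WIDE_INT.  */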

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  Used when make_signed_type won't do
   because the tree code is not INTEGER_TYPE.
   E.g. for Pascal, when the -fsigned-char option is given.  */

void
fixup_signed_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT, but we still need such types,
     as they are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/false);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Set the extreme values of TYPE based on its precision in bits,
   then lay it out.  This is used both in `make_unsigned_type'
   and for enumeral types.  */

void
fixup_unsigned_type (tree type)
{
  int precision = TYPE_PRECISION (type);

  /* We cannot properly represent constants wider than
     2 * HOST_BITS_PER_WIDE_INT, but we still need such types,
     as they are used by the i386 vector extensions and friends.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  TYPE_UNSIGNED (type) = 1;

  set_min_and_max_values_for_integral_type (type, precision,
                                            /*is_unsigned=*/true);

  /* Lay out the type: set its alignment, size, etc.  */
  layout_type (type);
}

/* Find the best machine mode to use when referencing a bit field of length
   BITSIZE bits starting at BITPOS.

   The underlying object is known to be aligned to a boundary of ALIGN bits.
   If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
   larger than LARGEST_MODE (usually SImode).

   If no mode meets all these conditions, we return VOIDmode.  Otherwise, if
   VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
   mode meeting these conditions.

   Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
   the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
   all the conditions.  */

enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
               enum machine_mode largest_mode, int volatilep)
{
  enum machine_mode mode;
  unsigned int unit = 0;

  /* Find the narrowest integer mode that contains the bit field.  */
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      unit = GET_MODE_BITSIZE (mode);
      if ((bitpos % unit) + bitsize <= unit)
        break;
    }

  if (mode == VOIDmode
      /* It is tempting to omit the following line
         if STRICT_ALIGNMENT is true.
         But that is incorrect, since if the bitfield uses part of 3 bytes
         and we use a 4-byte mode, we could get a spurious segv
         if the extra 4th byte is past the end of memory.
         (Though at least one Unix compiler ignores this problem:
         that on the Sequent 386 machine.)  */
      || MIN (unit, BIGGEST_ALIGNMENT) > align
      || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
    return VOIDmode;

  if (SLOW_BYTE_ACCESS && ! volatilep)
    {
      enum machine_mode wide_mode = VOIDmode, tmode;

      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
           tmode = GET_MODE_WIDER_MODE (tmode))
        {
          unit = GET_MODE_BITSIZE (tmode);
          if (bitpos / unit == (bitpos + bitsize - 1) / unit
              && unit <= BITS_PER_WORD
              && unit <= MIN (align, BIGGEST_ALIGNMENT)
              && (largest_mode == VOIDmode
                  || unit <= GET_MODE_BITSIZE (largest_mode)))
            wide_mode = tmode;
        }

      if (wide_mode != VOIDmode)
        return wide_mode;
    }

  return mode;
}
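
/* Editorial example (not in the original sources): for a field of
   BITSIZE 10 at BITPOS 3 in a 32-bit-aligned object, QImode fails
   ((3 % 8) + 10 > 8) but HImode works ((3 % 16) + 10 <= 16), so the
   first loop stops at HImode.  If SLOW_BYTE_ACCESS holds and the field
   is not volatile, the second loop widens the choice to SImode, since
   bits 3..12 still fall within a single 32-bit unit.  */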

/* Gets minimal and maximal values for MODE (signed or unsigned depending on
   SIGN).  The returned constants are made to be usable in TARGET_MODE.  */

void
get_mode_bounds (enum machine_mode mode, int sign,
                 enum machine_mode target_mode,
                 rtx *mmin, rtx *mmax)
{
  unsigned size = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT min_val, max_val;

  gcc_assert (size <= HOST_BITS_PER_WIDE_INT);

  if (sign)
    {
      min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
    }
  else
    {
      min_val = 0;
      max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
    }

  *mmin = gen_int_mode (min_val, target_mode);
  *mmax = gen_int_mode (max_val, target_mode);
}
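
/* Editorial example (not in the original sources): for QImode (8 bits)
   the signed bounds are -(1 << 7) = -128 and (1 << 7) - 1 = 127; the
   unsigned bounds are 0 and ((1 << 7) << 1) - 1 = 255.  The two-step
   shift in the unsigned case avoids shifting by the full width of
   HOST_WIDE_INT, which would be undefined behavior.  */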

#include "gt-stor-layout.h"