/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "attribs.h"
#include "debug.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);

/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
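
/* For illustration: for a C variable-length array type such as char[n],
   the size is the non-constant expression n * BITS_PER_UNIT.  Inside a
   function, variable_size wraps such an expression in a SAVE_EXPR so it
   is evaluated once and reused.  This sketch describes typical behavior
   and is not part of the function's contract.  */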

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}
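
/* For illustration: given a self-referential size such as
   <PLACEHOLDER_EXPR>.n * 8, the code above builds, roughly,

     static inline T SZ0 (T p0) { return p0 * 8; }

   where T stands for the size's type, and returns the call
   SZ0 (<PLACEHOLDER_EXPR>.n) in its place.  The names SZ0, p0 and T
   are illustrative only; the exact trees are built by the code
   above.  */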

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}

/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_eq (GET_MODE_PRECISION (mode), size))
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_eq (int_n_data[i].bitsize, size)
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
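
/* For illustration: on a typical target where SImode is the 32-bit
   integer mode, mode_for_size (32, MODE_INT, 0) yields SImode, while
   mode_for_size (24, MODE_INT, 0) yields nothing unless the target
   defines a 24-bit integer mode.  This is a sketch of typical
   behavior, not a guarantee for any particular target.  */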

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_ge (GET_MODE_PRECISION (mode), size))
      break;

  gcc_assert (mode != VOIDmode);

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_ge (int_n_data[i].bitsize, size)
          && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  return mode;
}

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}
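
/* For illustration: on a target where SFmode is a 32-bit float,
   int_mode_for_mode (SFmode) yields SImode, the integer mode of the
   same size, whereas BLKmode yields no mode at all.  The 32-bit
   assumption is for the sake of the example.  */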

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}
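
/* For illustration: for a 128-bit vector mode such as V4SFmode, the
   search above typically returns the first 128-bit integer vector
   mode in the target's mode list (V16QImode on some targets),
   provided the target has registers for it and supports it.  Which
   mode wins is target-dependent; this is only a sketch.  */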

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
        && GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}
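
/* For illustration: mode_for_vector (SImode, 4) yields V4SImode when
   the target defines that vector mode; failing that, because SImode
   is an integer mode, it may fall back to a 128-bit scalar mode such
   as TImode if one exists and has registers.  Both outcomes are
   target-dependent assumptions made for the example.  */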

/* Return the mode for a vector that has NUNITS integer elements of
   INT_BITS bits each, if such a mode exists.  The mode can be either
   an integer mode or a vector mode.  */

opt_machine_mode
mode_for_int_vector (unsigned int int_bits, poly_uint64 nunits)
{
  scalar_int_mode int_mode;
  machine_mode vec_mode;
  if (int_mode_for_size (int_bits, 0).exists (&int_mode)
      && mode_for_vector (int_mode, nunits).exists (&vec_mode))
    return vec_mode;
  return opt_machine_mode ();
}

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
        return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
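
/* For illustration, assuming BITS_PER_UNIT == 8 and no target array
   modes: an array of four 8-bit elements (SIZE of 32 bits) typically
   gets SImode from mode_for_size_tree, while a three-element, 24-bit
   array normally falls back to BLKmode.  A sketch of typical behavior
   only.  */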

/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
        {
          /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
             as if PTRDIFF_MAX had been specified, with the value
             being that on the target rather than the host.  */
          unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
          if (max_size == HOST_WIDE_INT_MAX)
            max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

          if (compare_tree_int (size, max_size) > 0)
            warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
                     "maximum object size %wu",
                     decl, size, max_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}

/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}

/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
                            TYPE_PRECISION (bitsizetype),
                            TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}

/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, bits_from_bytes (offset),
                                 bitsize_unit_node));
}
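
/* For illustration, with BITS_PER_UNIT == 8: bit_from_pos (5, 3)
   folds to the constant 43, i.e. 3 + 5 * 8 bits.  */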

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}

/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  tree toff_align = bitsize_int (off_align);
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype, TREE_OPERAND (pos, 0)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype,
                                           size_binop (FLOOR_DIV_EXPR, pos,
                                                       toff_align)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}
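
/* For illustration, with BITS_PER_UNIT == 8: splitting POS = 75 at
   OFF_ALIGN = 32 gives *POFFSET = (75 / 32) * 4 = 8 bytes and
   *PBITPOS = 75 % 32 = 11 bits.  */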

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree offset, bitpos;
      pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
      *poffset = size_binop (PLUS_EXPR, *poffset, offset);
      *pbitpos = bitpos;
    }
}

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if (!is_bitfield
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          if (!is_bitfield && DECL_PACKED (field))
            type_align = desired_align;
          else
            type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Issue a warning if the record alignment, RECORD_ALIGN, is less than
   the field alignment of FIELD or FIELD isn't aligned.  */

static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  unsigned int warn_if_not_align = 0;

  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
        warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
        opt_w = OPT_Wif_not_aligned;
    }

  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
             record_align, context, warn_if_not_align);

  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
        warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
                 field, off, context, warn_if_not_align);
      else
        warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
                 field, off, context, warn_if_not_align);
    }
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}

/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > tree_to_uhwi (TYPE_SIZE (type)) / align);
}
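
/* For illustration: an 8-bit field placed at byte 3, bit 4 relative
   to a 32-bit-aligned, 32-bit TYPE gives OFFSET % ALIGN == 28, and
   (28 + 8 + 31) / 32 == 2 exceeds 32 / 32 == 1, so the field would
   span two alignment units and the function returns nonzero.  */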

/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (VAR_P (field))
    {
      vec_safe_push (rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
         maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      handle_warn_if_not_align (field, rli->record_align);
      return;
    }

  if (AGGREGATE_TYPE_P (type)
      && TYPE_TYPELESS_STORAGE (type))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (tree_fits_uhwi_p (rli->offset))
    known_align = (BITS_PER_UNIT
                   * least_bit_hwi (tree_to_uhwi (rli->offset)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
        {
          if (TYPE_ALIGN (type) > desired_align)
            {
              if (STRICT_ALIGNMENT)
                warning (OPT_Wattributes, "packed attribute causes "
                         "inefficient alignment for %q+D", field);
              /* Don't warn if DECL_PACKED was set by the type.  */
              else if (!TYPE_PACKED (rli->t))
                warning (OPT_Wattributes, "packed attribute is "
                         "unnecessary for %q+D", field);
            }
        }
      else
        rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?  */
  if (known_align < desired_align
      && (! targetm.ms_bitfield_layout_p (rli->t)
          || rli->prev_field == NULL))
    {
      /* No, we need to skip space before this field.
         Bump the cumulative size to multiple of field alignment.  */

      if (!targetm.ms_bitfield_layout_p (rli->t)
          && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION
          && !TYPE_ARTIFICIAL (rli->t))
        warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
         the bit position.  */
      if (desired_align < rli->offset_align)
        rli->bitpos = round_up (rli->bitpos, desired_align);
      else
        {
          /* First adjust OFFSET by the partial bits, then align.  */
          rli->offset
            = size_binop (PLUS_EXPR, rli->offset,
                          fold_convert (sizetype,
                                        size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                                    bitsize_unit_node)));
          rli->bitpos = bitsize_zero_node;

          rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
        }

      if (! TREE_CONSTANT (rli->offset))
        rli->offset_align = desired_align;
    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
          /* Enter for these packed fields only to issue a warning.  */
          || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
         than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        {
          if (DECL_PACKED (field))
            {
              if (warn_packed_bitfield_compat == 1)
                inform
                  (input_location,
                   "offset of packed bit-field %qD has changed in GCC 4.4",
                   field);
            }
          else
            rli->bitpos = round_up (rli->bitpos, type_align);
        }

      if (! DECL_PACKED (field))
        TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);

      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
                                  TYPE_WARN_IF_NOT_ALIGN (type));
    }

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && tree_fits_uhwi_p (DECL_SIZE (field))
      && tree_fits_uhwi_p (rli->offset)
      && tree_fits_uhwi_p (TYPE_SIZE (type)))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
      HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
      HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
        type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

      if (maximum_field_alignment != 0)
        type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
         statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
        type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
         Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
        rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
      SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
                                  TYPE_WARN_IF_NOT_ALIGN (type));
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
        When a bit field is inserted into a packed record, the whole
        size of the underlying type is used by one or more same-size
        adjacent bitfields.  (That is, if it's long:3, 32 bits is
        used in the record, and any additional adjacent long bitfields are
        packed into the same chunk of 32 bits.  However, if the size
        changes, a new field of that size is allocated.)  In an unpacked
        record, this is the same as using alignment, but not equivalent
        when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation.  */
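
  /* For illustration, assuming a 32-bit long and 16-bit short: under
     this scheme the fields "long a : 3; long b : 3;" share one 32-bit
     unit (29 bits remain after A, so B fits), whereas a following
     "short c : 9;" has a different type size and therefore starts a
     fresh 16-bit unit.  The widths are assumptions for the example.  */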

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
        {
          bool realign_p = known_align < desired_align;

          /* If both are bitfields, nonzero, and the same size, this is
             the middle of a run.  Zero declared size fields are special
             and handled as "end of run".  (Note: it's nonzero declared
             size, but equal type sizes!)  (Since we know that both
             the current and previous fields are bitfields by the
             time we check it, DECL_SIZE must be present for both.)  */
          if (DECL_BIT_FIELD_TYPE (field)
              && !integer_zerop (DECL_SIZE (field))
              && !integer_zerop (DECL_SIZE (rli->prev_field))
              && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
              && tree_fits_uhwi_p (TYPE_SIZE (type))
              && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
            {
              /* We're in the middle of a run of equal type size fields; make
                 sure we realign if we run out of bits.  (Not decl size,
                 type size!)  */
              HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));

              if (rli->remaining_in_alignment < bitsize)
                {
                  HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));

                  /* out of bits; bump up to next 'word'.  */
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                  rli->prev_field = field;
                  if (typesize < bitsize)
                    rli->remaining_in_alignment = 0;
                  else
                    rli->remaining_in_alignment = typesize - bitsize;
                }
              else
                {
                  rli->remaining_in_alignment -= bitsize;
                  realign_p = false;
                }
            }
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
                 size, we have to "use up" the rest of the bits of the type
                 size.

                 Compute the new position as the sum of the size for the prior
                 type and where we first started working on that type.
                 Note: since the beginning of the field was aligned then
                 of course the end will be too.  No round needed.  */

              if (!integer_zerop (DECL_SIZE (rli->prev_field)))
                {
                  rli->bitpos
                    = size_binop (PLUS_EXPR, rli->bitpos,
                                  bitsize_int (rli->remaining_in_alignment));
                }
              else
                /* We "use up" size zero fields; the code below should behave
                   as if the prior field was not a bitfield.  */
                prev_saved = NULL;

              /* Cause a new bitfield to be captured, either this time (if
                 currently a bitfield) or next time we see one.  */
              if (!DECL_BIT_FIELD_TYPE (field)
                  || integer_zerop (DECL_SIZE (field)))
                rli->prev_field = NULL;
            }

          /* Does this field automatically have alignment it needs by virtue
             of the fields that precede it and the record's own alignment?  */
          if (realign_p)
            {
              /* If the alignment is still within offset_align, just align
                 the bit position.  */
              if (desired_align < rli->offset_align)
                rli->bitpos = round_up (rli->bitpos, desired_align);
              else
                {
                  /* First adjust OFFSET by the partial bits, then align.  */
                  tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
                                       bitsize_unit_node);
                  rli->offset = size_binop (PLUS_EXPR, rli->offset,
                                            fold_convert (sizetype, d));
                  rli->bitpos = bitsize_zero_node;

                  rli->offset = round_up (rli->offset,
                                          desired_align / BITS_PER_UNIT);
                }

              if (! TREE_CONSTANT (rli->offset))
                rli->offset_align = desired_align;
            }

          normalize_rli (rli);
        }

      /* If we're starting a new run of same type size bitfields
         (or a run of non-bitfields), set up the "first of the run"
         fields.

         That is, if the current field is not a bitfield, or if there
         was a prior bitfield and the type sizes differ, or if there
         wasn't a prior bitfield and the size of the current field is
         nonzero.

         Note: we must be sure to test ONLY the type size if there was
         a prior bitfield and ONLY for the current field being zero if
         there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
          || (prev_saved != NULL
              ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
              : !integer_zerop (DECL_SIZE (field))))
        {
          /* Never smaller than a byte for compatibility.  */
          unsigned int type_align = BITS_PER_UNIT;

          /* (When not a bitfield), we could be seeing a flex array (with
             no DECL_SIZE).  Since we won't be using remaining_in_alignment
             until we see a bitfield (and come by here again) we just skip
             calculating it.  */
          if (DECL_SIZE (field) != NULL
              && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
              && tree_fits_uhwi_p (DECL_SIZE (field)))
            {
              unsigned HOST_WIDE_INT bitsize
                = tree_to_uhwi (DECL_SIZE (field));
              unsigned HOST_WIDE_INT typesize
                = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));

              if (typesize < bitsize)
                rli->remaining_in_alignment = 0;
              else
                rli->remaining_in_alignment = typesize - bitsize;
            }

          /* Now align (conventionally) for the new type.  */
          if (! DECL_PACKED (field))
            type_align = TYPE_ALIGN (TREE_TYPE (field));

          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);

          rli->bitpos = round_up (rli->bitpos, type_align);

          /* If we really aligned, don't allow subsequent bitfields
             to undo that.  */
          rli->prev_field = NULL;
        }
    }
1606
1607 /* Offset so far becomes the position of this field after normalizing. */
1608 normalize_rli (rli);
1609 DECL_FIELD_OFFSET (field) = rli->offset;
1610 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1611 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1612 handle_warn_if_not_align (field, rli->record_align);
1613
1614 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */
1615 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1616 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1617
1618 /* If this field ended up more aligned than we thought it would be (we
1619 approximate this by seeing if its position changed), lay out the field
1620 again; perhaps we can use an integral mode for it now. */
1621 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1622 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1623 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1624 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1625 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1626 actual_align = (BITS_PER_UNIT
1627 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1628 else
1629 actual_align = DECL_OFFSET_ALIGN (field);
1630 /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1631 Store / extract bit-field operations will check the alignment of the
1632 record against the mode of bit fields. */
1633
1634 if (known_align != actual_align)
1635 layout_decl (field, actual_align);
1636
1637 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1638 rli->prev_field = field;
1639
1640 /* Now add size of this field to the size of the record. If the size is
1641 not constant, treat the field as being a multiple of bytes and just
1642 adjust the offset, resetting the bit position. Otherwise, apportion the
1643 size amongst the bit position and offset. First handle the case of an
1644 unspecified size, which can happen when we have an invalid nested struct
1645 definition, such as struct j { struct j { int i; } }. The error message
1646 is printed in finish_struct. */
1647 if (DECL_SIZE (field) == 0)
1648 /* Do nothing. */;
1649 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1650 || TREE_OVERFLOW (DECL_SIZE (field)))
1651 {
1652 rli->offset
1653 = size_binop (PLUS_EXPR, rli->offset,
1654 fold_convert (sizetype,
1655 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1656 bitsize_unit_node)));
1657 rli->offset
1658 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1659 rli->bitpos = bitsize_zero_node;
1660 rli->offset_align = MIN (rli->offset_align, desired_align);
1661
1662 if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
1663 bitsize_int (rli->offset_align)))
1664 {
1665 tree type = strip_array_types (TREE_TYPE (field));
1666 /* The above adjusts offset_align just based on the start of the
1667 field. The field might not have a size that is a multiple of
1668 that offset_align though. If the field is an array of fixed-size
1669 elements, assume there can be any multiple of those
1670 sizes. If it is a variable-length aggregate or an array of
1671 variable-length aggregates, assume the worst: that the end is
1672 just BITS_PER_UNIT aligned. */
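/* A sketch of the two cases (illustrative): for a variable-length
`int a[n]' the element size is 32 bits, so OFFSET_ALIGN may stay at
32 even though the total size is not constant; for an array of
variable-sized records only BITS_PER_UNIT can be assumed for the
end. */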
1673 if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
1674 {
1675 if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
1676 {
1677 unsigned HOST_WIDE_INT sz
1678 = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
1679 rli->offset_align = MIN (rli->offset_align, sz);
1680 }
1681 }
1682 else
1683 rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
1684 }
1685 }
1686 else if (targetm.ms_bitfield_layout_p (rli->t))
1687 {
1688 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1689
1690 /* If FIELD is the last field and doesn't end at the full length
1691 of the type, then pad the struct out to the full length of the
1692 last type. */
1693 if (DECL_BIT_FIELD_TYPE (field)
1694 && !integer_zerop (DECL_SIZE (field)))
1695 {
1696 /* We have to scan, because non-field DECLS are also here. */
1697 tree probe = field;
1698 while ((probe = DECL_CHAIN (probe)))
1699 if (TREE_CODE (probe) == FIELD_DECL)
1700 break;
1701 if (!probe)
1702 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1703 bitsize_int (rli->remaining_in_alignment));
1704 }
1705
1706 normalize_rli (rli);
1707 }
1708 else
1709 {
1710 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1711 normalize_rli (rli);
1712 }
1713 }
1714
1715 /* Assuming that all the fields have been laid out, this function uses
1716 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1717 indicated by RLI. */
1718
1719 static void
1720 finalize_record_size (record_layout_info rli)
1721 {
1722 tree unpadded_size, unpadded_size_unit;
1723
1724 /* Now we want just byte and bit offsets, so set the offset alignment
1725 to be a byte and then normalize. */
1726 rli->offset_align = BITS_PER_UNIT;
1727 normalize_rli (rli);
1728
1729 /* Determine the desired alignment. */
1730 #ifdef ROUND_TYPE_ALIGN
1731 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1732 rli->record_align));
1733 #else
1734 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
1735 #endif
1736
1737 /* Compute the size so far. Be sure to allow for extra bits in the
1738 size in bytes. We have guaranteed above that it will be no more
1739 than a single byte. */
1740 unpadded_size = rli_size_so_far (rli);
1741 unpadded_size_unit = rli_size_unit_so_far (rli);
1742 if (! integer_zerop (rli->bitpos))
1743 unpadded_size_unit
1744 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1745
1746 /* Round the size up to be a multiple of the required alignment. */
1747 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1748 TYPE_SIZE_UNIT (rli->t)
1749 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1750
1751 if (TREE_CONSTANT (unpadded_size)
1752 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1753 && input_location != BUILTINS_LOCATION
1754 && !TYPE_ARTIFICIAL (rli->t))
1755 warning (OPT_Wpadded, "padding struct size to alignment boundary");
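
/* E.g. (an illustrative case, assuming 8-bit units and 32-bit int
alignment): `struct s { int i; char c; };' has an unpadded size of
5 bytes that is rounded up to 8, so the warning above fires. */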
1756
1757 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1758 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1759 && TREE_CONSTANT (unpadded_size))
1760 {
1761 tree unpacked_size;
1762
1763 #ifdef ROUND_TYPE_ALIGN
1764 rli->unpacked_align
1765 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1766 #else
1767 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1768 #endif
1769
1770 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1771 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1772 {
1773 if (TYPE_NAME (rli->t))
1774 {
1775 tree name;
1776
1777 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1778 name = TYPE_NAME (rli->t);
1779 else
1780 name = DECL_NAME (TYPE_NAME (rli->t));
1781
1782 if (STRICT_ALIGNMENT)
1783 warning (OPT_Wpacked, "packed attribute causes inefficient "
1784 "alignment for %qE", name);
1785 else
1786 warning (OPT_Wpacked,
1787 "packed attribute is unnecessary for %qE", name);
1788 }
1789 else
1790 {
1791 if (STRICT_ALIGNMENT)
1792 warning (OPT_Wpacked,
1793 "packed attribute causes inefficient alignment");
1794 else
1795 warning (OPT_Wpacked, "packed attribute is unnecessary");
1796 }
1797 }
1798 }
1799 }
1800
1801 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1802
1803 void
1804 compute_record_mode (tree type)
1805 {
1806 tree field;
1807 machine_mode mode = VOIDmode;
1808
1809 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1810 However, if possible, we use a mode that fits in a register
1811 instead, in order to allow for better optimization down the
1812 line. */
1813 SET_TYPE_MODE (type, BLKmode);
1814
1815 if (! tree_fits_uhwi_p (TYPE_SIZE (type)))
1816 return;
1817
1818 /* A record which has any BLKmode members must itself be
1819 BLKmode; it can't go in a register, unless the member is
1820 BLKmode only because it isn't aligned. */
1821 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1822 {
1823 if (TREE_CODE (field) != FIELD_DECL)
1824 continue;
1825
1826 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1827 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1828 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1829 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1830 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1831 || ! tree_fits_uhwi_p (bit_position (field))
1832 || DECL_SIZE (field) == 0
1833 || ! tree_fits_uhwi_p (DECL_SIZE (field)))
1834 return;
1835
1836 /* If this field is the whole struct, remember its mode so
1837 that, say, we can put a double in a class into a DF
1838 register instead of forcing it to live in the stack. */
1839 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))
1840 /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
1841 wider types (e.g. int32), despite precision being less. Ensure
1842 that the TYPE_MODE of the struct does not get set to the partial
1843 int mode if there is a wider type also in the struct. */
1844 && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
1845 GET_MODE_PRECISION (mode)))
1846 mode = DECL_MODE (field);
1847
1848 /* On some targets, it is sub-optimal to access an aligned
1849 BLKmode structure as a scalar. */
1850 if (targetm.member_type_forces_blk (field, mode))
1851 return;
1852 }
1853
1854 /* If we only have one real field, use its mode if that mode's size
1855 matches the type's size. This generally only applies to RECORD_TYPE.
1856 For UNION_TYPE, if the widest field is MODE_INT then use that mode.
1857 If the widest field is MODE_PARTIAL_INT, and the union will be passed
1858 by reference, then use that mode. */
1859 poly_uint64 type_size;
1860 if ((TREE_CODE (type) == RECORD_TYPE
1861 || (TREE_CODE (type) == UNION_TYPE
1862 && (GET_MODE_CLASS (mode) == MODE_INT
1863 || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
1864 && targetm.calls.pass_by_reference (pack_cumulative_args (0),
1865 mode, type, 0)))))
1866 && mode != VOIDmode
1867 && poly_int_tree_p (TYPE_SIZE (type), &type_size)
1868 && known_eq (GET_MODE_BITSIZE (mode), type_size))
1869 ;
1870 else
1871 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
1872
1873 /* If structure's known alignment is less than what the scalar
1874 mode would need, and it matters, then stick with BLKmode. */
1875 if (mode != BLKmode
1876 && STRICT_ALIGNMENT
1877 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1878 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
1879 {
1880 /* If this is the only reason this type is BLKmode, then
1881 don't force containing types to be BLKmode. */
1882 TYPE_NO_FORCE_BLK (type) = 1;
1883 mode = BLKmode;
1884 }
1885
1886 SET_TYPE_MODE (type, mode);
1887 }
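
/* Illustrative examples: `struct s { double d; };' has a single field
spanning the whole record, so it gets DFmode and can live in a
floating-point register, while `struct t { char buf[64]; };' is wider
than MAX_FIXED_MODE_SIZE on typical targets and keeps BLKmode. */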
1888
1889 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1890 out. */
1891
1892 static void
1893 finalize_type_size (tree type)
1894 {
1895 /* Normally, use the alignment corresponding to the mode chosen.
1896 However, where strict alignment is not required, avoid
1897 over-aligning structures, since most compilers do not do this
1898 alignment. */
1899 if (TYPE_MODE (type) != BLKmode
1900 && TYPE_MODE (type) != VOIDmode
1901 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
1902 {
1903 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1904
1905 /* Don't override a larger alignment requirement coming from a user
1906 alignment of one of the fields. */
1907 if (mode_align >= TYPE_ALIGN (type))
1908 {
1909 SET_TYPE_ALIGN (type, mode_align);
1910 TYPE_USER_ALIGN (type) = 0;
1911 }
1912 }
1913
1914 /* Do machine-dependent extra alignment. */
1915 #ifdef ROUND_TYPE_ALIGN
1916 SET_TYPE_ALIGN (type,
1917 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
1918 #endif
1919
1920 /* If we failed to find a simple way to calculate the unit size
1921 of the type, find it by division. */
1922 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1923 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1924 result will fit in sizetype. We will get more efficient code using
1925 sizetype, so we force a conversion. */
1926 TYPE_SIZE_UNIT (type)
1927 = fold_convert (sizetype,
1928 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1929 bitsize_unit_node));
1930
1931 if (TYPE_SIZE (type) != 0)
1932 {
1933 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1934 TYPE_SIZE_UNIT (type)
1935 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1936 }
1937
1938 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1939 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1940 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1941 if (TYPE_SIZE_UNIT (type) != 0
1942 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1943 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1944
1945 /* Handle empty records as per the x86-64 psABI. */
1946 TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);
1947
1948 /* Also layout any other variants of the type. */
1949 if (TYPE_NEXT_VARIANT (type)
1950 || type != TYPE_MAIN_VARIANT (type))
1951 {
1952 tree variant;
1953 /* Record layout info of this variant. */
1954 tree size = TYPE_SIZE (type);
1955 tree size_unit = TYPE_SIZE_UNIT (type);
1956 unsigned int align = TYPE_ALIGN (type);
1957 unsigned int precision = TYPE_PRECISION (type);
1958 unsigned int user_align = TYPE_USER_ALIGN (type);
1959 machine_mode mode = TYPE_MODE (type);
1960 bool empty_p = TYPE_EMPTY_P (type);
1961
1962 /* Copy it into all variants. */
1963 for (variant = TYPE_MAIN_VARIANT (type);
1964 variant != 0;
1965 variant = TYPE_NEXT_VARIANT (variant))
1966 {
1967 TYPE_SIZE (variant) = size;
1968 TYPE_SIZE_UNIT (variant) = size_unit;
1969 unsigned valign = align;
1970 if (TYPE_USER_ALIGN (variant))
1971 valign = MAX (valign, TYPE_ALIGN (variant));
1972 else
1973 TYPE_USER_ALIGN (variant) = user_align;
1974 SET_TYPE_ALIGN (variant, valign);
1975 TYPE_PRECISION (variant) = precision;
1976 SET_TYPE_MODE (variant, mode);
1977 TYPE_EMPTY_P (variant) = empty_p;
1978 }
1979 }
1980 }
1981
1982 /* Return a new underlying object for a bitfield started with FIELD. */
1983
1984 static tree
1985 start_bitfield_representative (tree field)
1986 {
1987 tree repr = make_node (FIELD_DECL);
1988 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1989 /* Force the representative to begin at a BITS_PER_UNIT aligned
1990 boundary - C++ may use tail-padding of a base object to
1991 continue packing bits so the bitfield region does not start
1992 at bit zero (see g++.dg/abi/bitfield5.C for example).
1993 Unallocated bits may happen for other reasons as well,
1994 for example Ada which allows explicit bit-granular structure layout. */
1995 DECL_FIELD_BIT_OFFSET (repr)
1996 = size_binop (BIT_AND_EXPR,
1997 DECL_FIELD_BIT_OFFSET (field),
1998 bitsize_int (~(BITS_PER_UNIT - 1)));
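/* E.g. (illustrative, with BITS_PER_UNIT == 8): a bitfield starting
at bit offset 13 gets a representative starting at bit offset 8. */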
1999 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
2000 DECL_SIZE (repr) = DECL_SIZE (field);
2001 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
2002 DECL_PACKED (repr) = DECL_PACKED (field);
2003 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
2004 /* There are no indirect accesses to this field. If we introduce
2005 some then they have to use the record alias set. This makes
2006 sure to properly conflict with [indirect] accesses to addressable
2007 fields of the bitfield group. */
2008 DECL_NONADDRESSABLE_P (repr) = 1;
2009 return repr;
2010 }
2011
2012 /* Finish up a bitfield group that was started by creating the underlying
2013 object REPR with the last field in the bitfield group FIELD. */
2014
2015 static void
2016 finish_bitfield_representative (tree repr, tree field)
2017 {
2018 unsigned HOST_WIDE_INT bitsize, maxbitsize;
2019 tree nextf, size;
2020
2021 size = size_diffop (DECL_FIELD_OFFSET (field),
2022 DECL_FIELD_OFFSET (repr));
2023 while (TREE_CODE (size) == COMPOUND_EXPR)
2024 size = TREE_OPERAND (size, 1);
2025 gcc_assert (tree_fits_uhwi_p (size));
2026 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
2027 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2028 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
2029 + tree_to_uhwi (DECL_SIZE (field)));
2030
2031 /* Round up bitsize to multiples of BITS_PER_UNIT. */
2032 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
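/* Worked example (illustrative): if REPR starts at bit 8 of its byte
position and FIELD is a 3-bit field at bit 17 with the same byte
position, then bitsize = 0 * 8 + 17 - 8 + 3 = 12, rounded up to 16. */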
2033
2034 /* Now nothing tells us how to pad out bitsize ... */
2035 nextf = DECL_CHAIN (field);
2036 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
2037 nextf = DECL_CHAIN (nextf);
2038 if (nextf)
2039 {
2040 tree maxsize;
2041 /* If there was an error, the field may not be laid out
2042 correctly. Don't bother to do anything. */
2043 if (TREE_TYPE (nextf) == error_mark_node)
2044 return;
2045 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
2046 DECL_FIELD_OFFSET (repr));
2047 if (tree_fits_uhwi_p (maxsize))
2048 {
2049 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2050 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
2051 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2052 /* If the group ends within a bitfield, NEXTF does not need to be
2053 aligned to BITS_PER_UNIT; thus round up. */
2054 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2055 }
2056 else
2057 maxbitsize = bitsize;
2058 }
2059 else
2060 {
2061 /* Note that if the C++ FE sets up tail-padding to be re-used it
2062 creates an as-base variant of the type with TYPE_SIZE adjusted
2063 accordingly. So it is safe to include tail-padding here. */
2064 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
2065 (DECL_CONTEXT (field));
2066 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
2067 /* We cannot generally rely on maxsize to fold to an integer constant,
2068 so use bitsize as fallback for this case. */
2069 if (tree_fits_uhwi_p (maxsize))
2070 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2071 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2072 else
2073 maxbitsize = bitsize;
2074 }
2075
2076 /* This holds because we don't artificially break up the
2077 representative in the middle of a large bitfield into different,
2078 possibly overlapping representatives, and because all
2079 representatives start at a byte offset. */
2080 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
2081
2082 /* Find the smallest nice mode to use. */
2083 opt_scalar_int_mode mode_iter;
2084 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2085 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
2086 break;
2087
2088 scalar_int_mode mode;
2089 if (!mode_iter.exists (&mode)
2090 || GET_MODE_BITSIZE (mode) > maxbitsize
2091 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
2092 {
2093 /* We really want a BLKmode representative only as a last resort,
2094 considering the member b in
2095 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
2096 Otherwise we simply want to split the representative up
2097 allowing for overlaps within the bitfield region as required for
2098 struct { int a : 7; int b : 7;
2099 int c : 10; int d; } __attribute__((packed));
2100 [0, 15] HImode for a and b, [8, 23] HImode for c. */
2101 DECL_SIZE (repr) = bitsize_int (bitsize);
2102 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
2103 SET_DECL_MODE (repr, BLKmode);
2104 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
2105 bitsize / BITS_PER_UNIT);
2106 }
2107 else
2108 {
2109 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
2110 DECL_SIZE (repr) = bitsize_int (modesize);
2111 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
2112 SET_DECL_MODE (repr, mode);
2113 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
2114 }
2115
2116 /* Remember whether the bitfield group is at the end of the
2117 structure or not. */
2118 DECL_CHAIN (repr) = nextf;
2119 }
2120
2121 /* Compute and set FIELD_DECLs for the underlying objects we should
2122 use for bitfield access for the structure T. */
2123
2124 void
2125 finish_bitfield_layout (tree t)
2126 {
2127 tree field, prev;
2128 tree repr = NULL_TREE;
2129
2130 /* Unions would be special: for the ease of type-punning optimizations
2131 we could use the underlying type as a hint for the representative
2132 if the bitfield would fit and the representative would not exceed
2133 the union in size. */
2134 if (TREE_CODE (t) != RECORD_TYPE)
2135 return;
2136
2137 for (prev = NULL_TREE, field = TYPE_FIELDS (t);
2138 field; field = DECL_CHAIN (field))
2139 {
2140 if (TREE_CODE (field) != FIELD_DECL)
2141 continue;
2142
2143 /* In the C++ memory model, consecutive bit fields in a structure are
2144 considered one memory location and updating a memory location
2145 may not store into adjacent memory locations. */
2146 if (!repr
2147 && DECL_BIT_FIELD_TYPE (field))
2148 {
2149 /* Start new representative. */
2150 repr = start_bitfield_representative (field);
2151 }
2152 else if (repr
2153 && ! DECL_BIT_FIELD_TYPE (field))
2154 {
2155 /* Finish off new representative. */
2156 finish_bitfield_representative (repr, prev);
2157 repr = NULL_TREE;
2158 }
2159 else if (DECL_BIT_FIELD_TYPE (field))
2160 {
2161 gcc_assert (repr != NULL_TREE);
2162
2163 /* Zero-size bitfields finish off a representative and
2164 do not have a representative themselves. This is
2165 required by the C++ memory model. */
2166 if (integer_zerop (DECL_SIZE (field)))
2167 {
2168 finish_bitfield_representative (repr, prev);
2169 repr = NULL_TREE;
2170 }
2171
2172 /* We assume that the DECL_FIELD_OFFSETs of the representative and
2173 each bitfield member are either both constants or equal.
2174 This is because we need to be able to compute the bit-offset
2175 of each field relative to the representative in get_bit_range
2176 during RTL expansion.
2177 If these constraints are not met, simply force a new
2178 representative to be generated. That will at most
2179 generate worse code but still maintain correctness with
2180 respect to the C++ memory model. */
2181 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2182 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2183 || operand_equal_p (DECL_FIELD_OFFSET (repr),
2184 DECL_FIELD_OFFSET (field), 0)))
2185 {
2186 finish_bitfield_representative (repr, prev);
2187 repr = start_bitfield_representative (field);
2188 }
2189 }
2190 else
2191 continue;
2192
2193 if (repr)
2194 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2195
2196 prev = field;
2197 }
2198
2199 if (repr)
2200 finish_bitfield_representative (repr, prev);
2201 }
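
/* Illustrative sketch of the grouping: in

struct s { int a : 3; int b : 5; int : 0; int c : 6; };

A and B share one representative, the zero-size bitfield closes that
group, and C gets a fresh one, so stores to C never touch the memory
location of A and B. */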
2202
2203 /* Do all of the work required to layout the type indicated by RLI,
2204 once the fields have been laid out. This function will call `free'
2205 for RLI, unless FREE_P is false. Passing a value other than false
2206 for FREE_P is bad practice; this option only exists to support the
2207 G++ 3.2 ABI. */
2208
2209 void
2210 finish_record_layout (record_layout_info rli, int free_p)
2211 {
2212 tree variant;
2213
2214 /* Compute the final size. */
2215 finalize_record_size (rli);
2216
2217 /* Compute the TYPE_MODE for the record. */
2218 compute_record_mode (rli->t);
2219
2220 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2221 finalize_type_size (rli->t);
2222
2223 /* Compute bitfield representatives. */
2224 finish_bitfield_layout (rli->t);
2225
2226 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
2227 With C++ templates, it is too early to do this when the attribute
2228 is being parsed. */
2229 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2230 variant = TYPE_NEXT_VARIANT (variant))
2231 {
2232 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2233 TYPE_REVERSE_STORAGE_ORDER (variant)
2234 = TYPE_REVERSE_STORAGE_ORDER (rli->t);
2235 }
2236
2237 /* Lay out any static members. This is done now because their type
2238 may use the record's type. */
2239 while (!vec_safe_is_empty (rli->pending_statics))
2240 layout_decl (rli->pending_statics->pop (), 0);
2241
2242 /* Clean up. */
2243 if (free_p)
2244 {
2245 vec_free (rli->pending_statics);
2246 free (rli);
2247 }
2248 }
2249
2250
2251 /* Finish processing a builtin RECORD_TYPE, TYPE. Its name is
2252 NAME; its fields are chained in reverse order on FIELDS.
2253
2254 If ALIGN_TYPE is non-null, it is given the same alignment as
2255 ALIGN_TYPE. */
2256
2257 void
2258 finish_builtin_struct (tree type, const char *name, tree fields,
2259 tree align_type)
2260 {
2261 tree tail, next;
2262
2263 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2264 {
2265 DECL_FIELD_CONTEXT (fields) = type;
2266 next = DECL_CHAIN (fields);
2267 DECL_CHAIN (fields) = tail;
2268 }
2269 TYPE_FIELDS (type) = tail;
2270
2271 if (align_type)
2272 {
2273 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
2274 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2275 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2276 TYPE_WARN_IF_NOT_ALIGN (align_type));
2277 }
2278
2279 layout_type (type);
2280 #if 0 /* not yet, should get fixed properly later */
2281 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2282 #else
2283 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2284 TYPE_DECL, get_identifier (name), type);
2285 #endif
2286 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2287 layout_decl (TYPE_NAME (type), 0);
2288 }
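
/* Example use (a hypothetical sketch, not a call taken from this file):

tree t = make_node (RECORD_TYPE);
tree f = build_decl (BUILTINS_LOCATION, FIELD_DECL,
get_identifier ("ptr"), ptr_type_node);
finish_builtin_struct (t, "__my_desc", f, NULL_TREE);

Note that FIELDS is supplied in reverse order; the loop above
reverses the chain into TYPE_FIELDS. */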
2289
2290 /* Calculate the mode, size, and alignment for TYPE.
2291 For an array type, calculate the element separation as well.
2292 Record TYPE on the chain of permanent or temporary types
2293 so that dbxout will find out about it.
2294
2295 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2296 layout_type does nothing on such a type.
2297
2298 If the type is incomplete, its TYPE_SIZE remains zero. */
2299
2300 void
2301 layout_type (tree type)
2302 {
2303 gcc_assert (type);
2304
2305 if (type == error_mark_node)
2306 return;
2307
2308 /* We don't want finalize_type_size to copy an alignment attribute to
2309 variants that don't have it. */
2310 type = TYPE_MAIN_VARIANT (type);
2311
2312 /* Do nothing if type has been laid out before. */
2313 if (TYPE_SIZE (type))
2314 return;
2315
2316 switch (TREE_CODE (type))
2317 {
2318 case LANG_TYPE:
2319 /* This kind of type is the responsibility
2320 of the language-specific code. */
2321 gcc_unreachable ();
2322
2323 case BOOLEAN_TYPE:
2324 case INTEGER_TYPE:
2325 case ENUMERAL_TYPE:
2326 {
2327 scalar_int_mode mode
2328 = smallest_int_mode_for_size (TYPE_PRECISION (type));
2329 SET_TYPE_MODE (type, mode);
2330 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2331 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
2332 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2333 break;
2334 }
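
/* For example (illustrative): PRECISION 7 yields QImode here, so
TYPE_SIZE becomes 8 bits and TYPE_SIZE_UNIT 1 byte. */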
2335
2336 case REAL_TYPE:
2337 {
2338 /* Allow the caller to choose the type mode, which is how decimal
2339 floats are distinguished from binary ones. */
2340 if (TYPE_MODE (type) == VOIDmode)
2341 SET_TYPE_MODE
2342 (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
2343 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
2344 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2345 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2346 break;
2347 }
2348
2349 case FIXED_POINT_TYPE:
2350 {
2351 /* TYPE_MODE (type) has been set already. */
2352 scalar_mode mode = SCALAR_TYPE_MODE (type);
2353 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2354 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2355 break;
2356 }
2357
2358 case COMPLEX_TYPE:
2359 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2360 SET_TYPE_MODE (type,
2361 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));
2362
2363 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2364 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2365 break;
2366
2367 case VECTOR_TYPE:
2368 {
2369 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
2370 tree innertype = TREE_TYPE (type);
2371
2372 /* Find an appropriate mode for the vector type. */
2373 if (TYPE_MODE (type) == VOIDmode)
2374 SET_TYPE_MODE (type,
2375 mode_for_vector (SCALAR_TYPE_MODE (innertype),
2376 nunits).else_blk ());
2377
2378 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2379 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2380 /* Several boolean vector elements may fit in a single unit. */
2381 if (VECTOR_BOOLEAN_TYPE_P (type)
2382 && type->type_common.mode != BLKmode)
2383 TYPE_SIZE_UNIT (type)
2384 = size_int (GET_MODE_SIZE (type->type_common.mode));
2385 else
2386 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2387 TYPE_SIZE_UNIT (innertype),
2388 size_int (nunits));
2389 TYPE_SIZE (type) = int_const_binop
2390 (MULT_EXPR,
2391 bits_from_bytes (TYPE_SIZE_UNIT (type)),
2392 bitsize_int (BITS_PER_UNIT));
2393
2394 /* For vector types, we do not default to the mode's alignment.
2395 Instead, query a target hook, defaulting to natural alignment.
2396 This prevents ABI changes depending on whether or not native
2397 vector modes are supported. */
2398 SET_TYPE_ALIGN (type, targetm.vector_alignment (type));
2399
2400 /* However, if the underlying mode requires a bigger alignment than
2401 what the target hook provides, we cannot use the mode. For now,
2402 simply reject that case. */
2403 gcc_assert (TYPE_ALIGN (type)
2404 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2405 break;
2406 }
2407
2408 case VOID_TYPE:
2409 /* This is an incomplete type and so doesn't have a size. */
2410 SET_TYPE_ALIGN (type, 1);
2411 TYPE_USER_ALIGN (type) = 0;
2412 SET_TYPE_MODE (type, VOIDmode);
2413 break;
2414
2415 case OFFSET_TYPE:
2416 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2417 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2418 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2419 integral, which may be an __intN. */
2420 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
2421 TYPE_PRECISION (type) = POINTER_SIZE;
2422 break;
2423
2424 case FUNCTION_TYPE:
2425 case METHOD_TYPE:
2426 /* It's hard to see what the mode and size of a function ought to
2427 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2428 make it consistent with that. */
2429 SET_TYPE_MODE (type,
2430 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
2431 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2432 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2433 break;
2434
2435 case POINTER_TYPE:
2436 case REFERENCE_TYPE:
2437 {
2438 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2439 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2440 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2441 TYPE_UNSIGNED (type) = 1;
2442 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2443 }
2444 break;
2445
2446 case ARRAY_TYPE:
2447 {
2448 tree index = TYPE_DOMAIN (type);
2449 tree element = TREE_TYPE (type);
2450
2451 /* We need to know both bounds in order to compute the size. */
2452 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2453 && TYPE_SIZE (element))
2454 {
2455 tree ub = TYPE_MAX_VALUE (index);
2456 tree lb = TYPE_MIN_VALUE (index);
2457 tree element_size = TYPE_SIZE (element);
2458 tree length;
2459
2460 /* Make sure that an array of zero-sized elements is zero-sized
2461 regardless of its extent. */
2462 if (integer_zerop (element_size))
2463 length = size_zero_node;
2464
2465 /* The computation should happen in the original signedness so
2466 that (possible) negative values are handled appropriately
2467 when determining overflow. */
2468 else
2469 {
2470 /* ??? When it is obvious that the range is signed,
2471 represent it using ssizetype. */
2472 if (TREE_CODE (lb) == INTEGER_CST
2473 && TREE_CODE (ub) == INTEGER_CST
2474 && TYPE_UNSIGNED (TREE_TYPE (lb))
2475 && tree_int_cst_lt (ub, lb))
2476 {
2477 lb = wide_int_to_tree (ssizetype,
2478 offset_int::from (wi::to_wide (lb),
2479 SIGNED));
2480 ub = wide_int_to_tree (ssizetype,
2481 offset_int::from (wi::to_wide (ub),
2482 SIGNED));
2483 }
2484 length
2485 = fold_convert (sizetype,
2486 size_binop (PLUS_EXPR,
2487 build_int_cst (TREE_TYPE (lb), 1),
2488 size_binop (MINUS_EXPR, ub, lb)));
2489 }
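/* E.g. (illustrative): an index domain of [2, 5] gives
length = (5 - 2) + 1 = 4, so an array of four 32-bit ints ends
up with a TYPE_SIZE of 128 bits. */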
2490
2491 /* ??? We have no way to distinguish a null-sized array from an
2492 array spanning the whole sizetype range, so we arbitrarily
2493 decide that [0, -1] is the only valid representation. */
2494 if (integer_zerop (length)
2495 && TREE_OVERFLOW (length)
2496 && integer_zerop (lb))
2497 length = size_zero_node;
2498
2499 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2500 bits_from_bytes (length));
2501
2502 /* If we know the size of the element, calculate the total size
2503 directly, rather than do some division thing below. This
2504 optimization helps Fortran assumed-size arrays (where the
2505 size of the array is determined at runtime) substantially. */
2506 if (TYPE_SIZE_UNIT (element))
2507 TYPE_SIZE_UNIT (type)
2508 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2509 }
2510
2511 /* Now round the alignment and size,
2512 using machine-dependent criteria if any. */
2513
2514 unsigned align = TYPE_ALIGN (element);
2515 if (TYPE_USER_ALIGN (type))
2516 align = MAX (align, TYPE_ALIGN (type));
2517 else
2518 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2519 if (!TYPE_WARN_IF_NOT_ALIGN (type))
2520 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2521 TYPE_WARN_IF_NOT_ALIGN (element));
2522 #ifdef ROUND_TYPE_ALIGN
2523 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
2524 #else
2525 align = MAX (align, BITS_PER_UNIT);
2526 #endif
2527 SET_TYPE_ALIGN (type, align);
2528 SET_TYPE_MODE (type, BLKmode);
2529 if (TYPE_SIZE (type) != 0
2530 && ! targetm.member_type_forces_blk (type, VOIDmode)
2531 /* BLKmode elements force BLKmode aggregate;
2532 else extract/store fields may lose. */
2533 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2534 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2535 {
2536 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2537 TYPE_SIZE (type)));
2538 if (TYPE_MODE (type) != BLKmode
2539 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2540 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2541 {
2542 TYPE_NO_FORCE_BLK (type) = 1;
2543 SET_TYPE_MODE (type, BLKmode);
2544 }
2545 }
2546 if (AGGREGATE_TYPE_P (element))
2547 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
2548 /* When the element size is constant, check that it is at least as
2549 large as the element alignment. */
2550 if (TYPE_SIZE_UNIT (element)
2551 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2552 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2553 TYPE_ALIGN_UNIT. */
2554 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2555 && !integer_zerop (TYPE_SIZE_UNIT (element))
2556 && compare_tree_int (TYPE_SIZE_UNIT (element),
2557 TYPE_ALIGN_UNIT (element)) < 0)
2558 error ("alignment of array elements is greater than element size");
2559 break;
2560 }
2561
2562 case RECORD_TYPE:
2563 case UNION_TYPE:
2564 case QUAL_UNION_TYPE:
2565 {
2566 tree field;
2567 record_layout_info rli;
2568
2569 /* Initialize the layout information. */
2570 rli = start_record_layout (type);
2571
2572 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2573 in the reverse order in building the COND_EXPR that denotes
2574 its size. We reverse them again later. */
2575 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2576 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2577
2578 /* Place all the fields. */
2579 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2580 place_field (rli, field);
2581
2582 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2583 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2584
2585 /* Finish laying out the record. */
2586 finish_record_layout (rli, /*free_p=*/true);
2587 }
2588 break;
2589
2590 default:
2591 gcc_unreachable ();
2592 }
2593
2594 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2595 records and unions, finish_record_layout already called this
2596 function. */
2597 if (!RECORD_OR_UNION_TYPE_P (type))
2598 finalize_type_size (type);
2599
2600 /* We should never see alias sets on incomplete aggregates. And we
2601 should not call layout_type on aggregates that are already laid out. */
2602 if (AGGREGATE_TYPE_P (type))
2603 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2604 }
2605
2606 /* Return the least alignment required for type TYPE. */
2607
2608 unsigned int
2609 min_align_of_type (tree type)
2610 {
2611 unsigned int align = TYPE_ALIGN (type);
2612 if (!TYPE_USER_ALIGN (type))
2613 {
2614 align = MIN (align, BIGGEST_ALIGNMENT);
2615 #ifdef BIGGEST_FIELD_ALIGNMENT
2616 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2617 #endif
2618 unsigned int field_align = align;
2619 #ifdef ADJUST_FIELD_ALIGN
2620 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
2621 #endif
2622 align = MIN (align, field_align);
2623 }
2624 return align / BITS_PER_UNIT;
2625 }
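
/* E.g. (an illustrative sketch): for `double' on a 32-bit target whose
ADJUST_FIELD_ALIGN caps double fields at 32 bits, this returns 4 even
though TYPE_ALIGN (double) is 64. */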
2626
2627 /* Create and return a type for signed integers of PRECISION bits. */
2628
2629 tree
2630 make_signed_type (int precision)
2631 {
2632 tree type = make_node (INTEGER_TYPE);
2633
2634 TYPE_PRECISION (type) = precision;
2635
2636 fixup_signed_type (type);
2637 return type;
2638 }
2639
2640 /* Create and return a type for unsigned integers of PRECISION bits. */
2641
2642 tree
2643 make_unsigned_type (int precision)
2644 {
2645 tree type = make_node (INTEGER_TYPE);
2646
2647 TYPE_PRECISION (type) = precision;
2648
2649 fixup_unsigned_type (type);
2650 return type;
2651 }
2652
2653 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2654 and SATP. */
2655
2656 tree
2657 make_fract_type (int precision, int unsignedp, int satp)
2658 {
2659 tree type = make_node (FIXED_POINT_TYPE);
2660
2661 TYPE_PRECISION (type) = precision;
2662
2663 if (satp)
2664 TYPE_SATURATING (type) = 1;
2665
2666 /* Lay out the type: set its alignment, size, etc. */
2667 TYPE_UNSIGNED (type) = unsignedp;
2668 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
2669 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2670 layout_type (type);
2671
2672 return type;
2673 }
2674
2675 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2676 and SATP. */
2677
2678 tree
2679 make_accum_type (int precision, int unsignedp, int satp)
2680 {
2681 tree type = make_node (FIXED_POINT_TYPE);
2682
2683 TYPE_PRECISION (type) = precision;
2684
2685 if (satp)
2686 TYPE_SATURATING (type) = 1;
2687
2688 /* Lay out the type: set its alignment, size, etc. */
2689 TYPE_UNSIGNED (type) = unsignedp;
2690 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
2691 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2692 layout_type (type);
2693
2694 return type;
2695 }
2696
2697 /* Initialize sizetypes so layout_type can use them. */
2698
2699 void
2700 initialize_sizetypes (void)
2701 {
2702 int precision, bprecision;
2703
2704 /* Get sizetype's precision from the SIZETYPE target macro. */
2705 if (strcmp (SIZETYPE, "unsigned int") == 0)
2706 precision = INT_TYPE_SIZE;
2707 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2708 precision = LONG_TYPE_SIZE;
2709 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2710 precision = LONG_LONG_TYPE_SIZE;
2711 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2712 precision = SHORT_TYPE_SIZE;
2713 else
2714 {
2715 int i;
2716
2717 precision = -1;
2718 for (i = 0; i < NUM_INT_N_ENTS; i++)
2719 if (int_n_enabled_p[i])
2720 {
2721 char name[50];
2722 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2723
2724 if (strcmp (name, SIZETYPE) == 0)
2725 {
2726 precision = int_n_data[i].bitsize;
2727 }
2728 }
2729 if (precision == -1)
2730 gcc_unreachable ();
2731 }
2732
2733 bprecision
2734 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
2735 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
2736 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2737 bprecision = HOST_BITS_PER_DOUBLE_INT;
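/* E.g. (illustrative): with a 64-bit sizetype and 8-bit units this
requests 64 + 3 + 1 = 68 bits, which widens to a 128-bit mode
precision on targets that provide such a mode. */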
2738
2739 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2740 sizetype = make_node (INTEGER_TYPE);
2741 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2742 TYPE_PRECISION (sizetype) = precision;
2743 TYPE_UNSIGNED (sizetype) = 1;
2744 bitsizetype = make_node (INTEGER_TYPE);
2745 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2746 TYPE_PRECISION (bitsizetype) = bprecision;
2747 TYPE_UNSIGNED (bitsizetype) = 1;
2748
2749 /* Now layout both types manually. */
2750 scalar_int_mode mode = smallest_int_mode_for_size (precision);
2751 SET_TYPE_MODE (sizetype, mode);
2752 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
2753 TYPE_SIZE (sizetype) = bitsize_int (precision);
2754 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
2755 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2756
2757 mode = smallest_int_mode_for_size (bprecision);
2758 SET_TYPE_MODE (bitsizetype, mode);
2759 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
2760 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2761 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
2762 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2763
2764 /* Create the signed variants of *sizetype. */
2765 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2766 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2767 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2768 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2769 }
2770
2771 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2772 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2773 for TYPE, based on PRECISION and the signedness SGN.
2774 PRECISION need not correspond to a width supported
2775 natively by the hardware; for example, on a machine with 8-bit,
2776 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2777 61. */
2778
2779 void
2780 set_min_and_max_values_for_integral_type (tree type,
2781 int precision,
2782 signop sgn)
2783 {
2784 /* For bitfields with zero width we end up creating integer types
2785 with zero precision. Don't assign any minimum/maximum values
2786 to those types; they don't have any valid value. */
2787 if (precision < 1)
2788 return;
2789
2790 TYPE_MIN_VALUE (type)
2791 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2792 TYPE_MAX_VALUE (type)
2793 = wide_int_to_tree (type, wi::max_value (precision, sgn));
2794 }
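
/* E.g. (illustrative): PRECISION 4 gives [-8, 7] for SIGNED and
[0, 15] for UNSIGNED. */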
2795
2796 /* Set the extreme values of TYPE based on its precision in bits,
2797 then lay it out. Used when make_signed_type won't do
2798 because the tree code is not INTEGER_TYPE. */
2799
2800 void
2801 fixup_signed_type (tree type)
2802 {
2803 int precision = TYPE_PRECISION (type);
2804
2805 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2806
2807 /* Lay out the type: set its alignment, size, etc. */
2808 layout_type (type);
2809 }
2810
2811 /* Set the extreme values of TYPE based on its precision in bits,
2812 then lay it out. This is used both in `make_unsigned_type'
2813 and for enumeral types. */
2814
2815 void
2816 fixup_unsigned_type (tree type)
2817 {
2818 int precision = TYPE_PRECISION (type);
2819
2820 TYPE_UNSIGNED (type) = 1;
2821
2822 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2823
2824 /* Lay out the type: set its alignment, size, etc. */
2825 layout_type (type);
2826 }
2827
2828 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2829 starting at BITPOS.
2830
2831 BITREGION_START is the bit position of the first bit in this
2832 sequence of bit fields. BITREGION_END is the last bit in this
2833 sequence. If these two fields are non-zero, we should restrict the
2834 memory access to that range. Otherwise, we are allowed to touch
2835 any adjacent non bit-fields.
2836
2837 ALIGN is the alignment of the underlying object in bits.
2838 VOLATILEP says whether the bitfield is volatile. */
2839
2840 bit_field_mode_iterator
2841 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2842 poly_int64 bitregion_start,
2843 poly_int64 bitregion_end,
2844 unsigned int align, bool volatilep)
2845 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
2846 m_bitpos (bitpos), m_bitregion_start (bitregion_start),
2847 m_bitregion_end (bitregion_end), m_align (align),
2848 m_volatilep (volatilep), m_count (0)
2849 {
2850 if (known_eq (m_bitregion_end, 0))
2851 {
2852 /* We can assume that any aligned chunk of ALIGN bits that overlaps
2853 the bitfield is mapped and won't trap, provided that ALIGN isn't
2854 too large. The cap is the biggest required alignment for data,
2855 or at least the word size. And force one such chunk at least. */
2856 unsigned HOST_WIDE_INT units
2857 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2858 if (bitsize <= 0)
2859 bitsize = 1;
2860 HOST_WIDE_INT end = bitpos + bitsize + units - 1;
2861 m_bitregion_end = end - end % units - 1;
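/* E.g. (illustrative, units == 32): a 4-bit field at bitpos 70 gives
end = 70 + 4 + 31 = 105 and m_bitregion_end = 105 - 9 - 1 = 95,
i.e. the aligned 32-bit chunks up to bit 95 may be touched. */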
2862 }
2863 }
2864
2865 /* Calls to this function return successively larger modes that can be used
2866 to represent the bitfield. Return true if another bitfield mode is
2867 available, storing it in *OUT_MODE if so. */
2868
2869 bool
2870 bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
2871 {
2872 scalar_int_mode mode;
2873 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
2874 {
2875 unsigned int unit = GET_MODE_BITSIZE (mode);
2876
2877 /* Skip modes that don't have full precision. */
2878 if (unit != GET_MODE_PRECISION (mode))
2879 continue;
2880
2881 /* Stop if the mode is too wide to handle efficiently. */
2882 if (unit > MAX_FIXED_MODE_SIZE)
2883 break;
2884
2885 /* Don't deliver more than one multiword mode; the smallest one
2886 should be used. */
2887 if (m_count > 0 && unit > BITS_PER_WORD)
2888 break;
2889
2890 /* Skip modes that are too small. */
2891 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
2892 unsigned HOST_WIDE_INT subend = substart + m_bitsize;
2893 if (subend > unit)
2894 continue;
2895
2896 /* Stop if the mode goes outside the bitregion. */
2897 HOST_WIDE_INT start = m_bitpos - substart;
2898 if (maybe_ne (m_bitregion_start, 0)
2899 && maybe_lt (start, m_bitregion_start))
2900 break;
2901 HOST_WIDE_INT end = start + unit;
2902 if (maybe_gt (end, m_bitregion_end + 1))
2903 break;
2904
2905 /* Stop if the mode requires too much alignment. */
2906 if (GET_MODE_ALIGNMENT (mode) > m_align
2907 && targetm.slow_unaligned_access (mode, m_align))
2908 break;
2909
2910 *out_mode = mode;
2911 m_mode = GET_MODE_WIDER_MODE (mode);
2912 m_count++;
2913 return true;
2914 }
2915 return false;
2916 }
2917
2918 /* Return true if smaller modes are generally preferred for this kind
2919 of bitfield. */
2920
2921 bool
2922 bit_field_mode_iterator::prefer_smaller_modes ()
2923 {
2924 return (m_volatilep
2925 ? targetm.narrow_volatile_bitfield ()
2926 : !SLOW_BYTE_ACCESS);
2927 }
2928
2929 /* Find the best machine mode to use when referencing a bit field of length
2930 BITSIZE bits starting at BITPOS.
2931
2932 BITREGION_START is the bit position of the first bit in this
2933 sequence of bit fields. BITREGION_END is the last bit in this
2934 sequence. If these two fields are non-zero, we should restrict the
2935 memory access to that range. Otherwise, we are allowed to touch
2936 any adjacent non bit-fields.
2937
2938 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
2939 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
2940 doesn't want to apply a specific limit.
2941
2942 If no mode meets all these conditions, we return VOIDmode.
2943
2944 The underlying object is known to be aligned to a boundary of ALIGN bits.
2945
2946 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2947 smallest mode meeting these conditions.
2948
2949 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2950 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2951 all the conditions.
2952
2953 If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
2954 decide which of the above modes should be used. */
2955
2956 bool
2957 get_best_mode (int bitsize, int bitpos,
2958 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
2959 unsigned int align,
2960 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
2961 scalar_int_mode *best_mode)
2962 {
2963 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
2964 bitregion_end, align, volatilep);
2965 scalar_int_mode mode;
2966 bool found = false;
2967 while (iter.next_mode (&mode)
2968 /* ??? For historical reasons, reject modes that would normally
2969 receive greater alignment, even if unaligned accesses are
2970 acceptable. This has both advantages and disadvantages.
2971 Removing this check means that something like:
2972
2973 struct s { unsigned int x; unsigned int y; };
2974 int f (struct s *s) { return s->x == 0 && s->y == 0; }
2975
2976 can be implemented using a single load and compare on
2977 64-bit machines that have no alignment restrictions.
2978 For example, on powerpc64-linux-gnu, we would generate:
2979
2980 ld 3,0(3)
2981 cntlzd 3,3
2982 srdi 3,3,6
2983 blr
2984
2985 rather than:
2986
2987 lwz 9,0(3)
2988 cmpwi 7,9,0
2989 bne 7,.L3
2990 lwz 3,4(3)
2991 cntlzw 3,3
2992 srwi 3,3,5
2993 extsw 3,3
2994 blr
2995 .p2align 4,,15
2996 .L3:
2997 li 3,0
2998 blr
2999
3000 However, accessing more than one field can make life harder
3001 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
3002 has a series of unsigned short copies followed by a series of
3003 unsigned short comparisons. With this check, both the copies
3004 and comparisons remain 16-bit accesses and FRE is able
3005 to eliminate the latter. Without the check, the comparisons
3006 can be done using 2 64-bit operations, which FRE isn't able
3007 to handle in the same way.
3008
3009 Either way, it would probably be worth disabling this check
3010 during expand. One particular example where removing the
3011 check would help is the get_best_mode call in store_bit_field.
3012 If we are given a memory bitregion of 128 bits that is aligned
3013 to a 64-bit boundary, and the bitfield we want to modify is
3014 in the second half of the bitregion, this check causes
3015 store_bitfield to turn the memory into a 64-bit reference
3016 to the _first_ half of the region. We later use
3017 adjust_bitfield_address to get a reference to the correct half,
3018 but doing so looks to adjust_bitfield_address as though we are
3019 moving past the end of the original object, so it drops the
3020 associated MEM_EXPR and MEM_OFFSET. Removing the check
3021 causes store_bit_field to keep a 128-bit memory reference,
3022 so that the final bitfield reference still has a MEM_EXPR
3023 and MEM_OFFSET. */
3024 && GET_MODE_ALIGNMENT (mode) <= align
3025 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
3026 {
3027 *best_mode = mode;
3028 found = true;
3029 if (iter.prefer_smaller_modes ())
3030 break;
3031 }
3032
3033 return found;
3034 }
3035
3036 /* Get the minimal and maximal values for MODE (signed or unsigned depending on
3037 SIGN). The returned constants are made usable in TARGET_MODE. */
3038
3039 void
3040 get_mode_bounds (scalar_int_mode mode, int sign,
3041 scalar_int_mode target_mode,
3042 rtx *mmin, rtx *mmax)
3043 {
3044 unsigned size = GET_MODE_PRECISION (mode);
3045 unsigned HOST_WIDE_INT min_val, max_val;
3046
3047 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
3048
3049 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
3050 if (mode == BImode)
3051 {
3052 if (STORE_FLAG_VALUE < 0)
3053 {
3054 min_val = STORE_FLAG_VALUE;
3055 max_val = 0;
3056 }
3057 else
3058 {
3059 min_val = 0;
3060 max_val = STORE_FLAG_VALUE;
3061 }
3062 }
3063 else if (sign)
3064 {
3065 min_val = -(HOST_WIDE_INT_1U << (size - 1));
3066 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
3067 }
3068 else
3069 {
3070 min_val = 0;
3071 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
3072 }
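
/* E.g. (illustrative): an 8-bit mode yields [-128, 127] when SIGN is
nonzero and [0, 255] otherwise. */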
3073
3074 *mmin = gen_int_mode (min_val, target_mode);
3075 *mmax = gen_int_mode (max_val, target_mode);
3076 }
3077
3078 #include "gt-stor-layout.h"
3079