/* Default target hook functions.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The migration of target macros to target hooks works as follows:

   1. Create a target hook that uses the existing target macros to
      implement the same functionality.

   2. Convert all the MI files to use the hook instead of the macro.

   3. Repeat for a majority of the remaining target macros.  This will
      take some time.

   4. Tell target maintainers to start migrating.

   5. Eventually convert the backends to override the hook instead of
      defining the macros.  This will take some time too.

   6. TBD when, poison the macros.  Unmigrated targets will break at
      this point.

   Note that we expect steps 1-3 to be done by the people that
   understand what the MI does with each macro, and step 5 to be done
   by the target maintainers for their respective targets.

   Note that steps 1 and 2 don't have to be done together, but no
   target can override the new hook until step 2 is complete for it.

   Once the macros are poisoned, we will revert to the old migration
   rules - migrate the macro, callers, and targets all at once.  This
   comment can thus be removed at that point.  */
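
/* As a rough illustration of step 5 (using a hypothetical port "foo"),
   the backend stops defining the legacy macro in foo.h and instead
   overrides the hook in foo.c, ahead of the final

       struct gcc_target targetm = TARGET_INITIALIZER;

   for example:

       #undef  TARGET_LEGITIMATE_ADDRESS_P
       #define TARGET_LEGITIMATE_ADDRESS_P foo_legitimate_address_p

   Hooks the port does not redefine keep their defaults, many of which
   live in this file.  */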

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
#include "backend.h"
#include "emit-rtl.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "expmed.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
#include "function-abi.h"
#include "attribs.h"
#include "asan.h"
#include "emit-rtl.h"

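/* The default implementation of TARGET_LEGITIMATE_ADDRESS_P: defer to the
   target's GO_IF_LEGITIMATE_ADDRESS macro where it is still defined.  */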
94 bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
96 			      rtx addr ATTRIBUTE_UNUSED,
97 			      bool strict ATTRIBUTE_UNUSED)
98 {
99 #ifdef GO_IF_LEGITIMATE_ADDRESS
100   /* Defer to the old implementation using a goto.  */
101   if (strict)
102     return strict_memory_address_p (mode, addr);
103   else
104     return memory_address_p (mode, addr);
105 #else
106   gcc_unreachable ();
107 #endif
108 }
109 
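/* The default implementation of TARGET_ASM_EXTERNAL_LIBCALL, forwarding
   to the ASM_OUTPUT_EXTERNAL_LIBCALL macro when the target defines it.  */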
110 void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
112 {
113 #ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
114   ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
115 #endif
116 }
117 
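/* The default implementation of TARGET_UNSPEC_MAY_TRAP_P: treat an UNSPEC
   with a scalar floating-point mode as possibly trapping when
   -ftrapping-math is in effect, otherwise check each operand with
   may_trap_p_1.  */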
118 int
default_unspec_may_trap_p (const_rtx x, unsigned flags)
120 {
121   int i;
122 
123   /* Any floating arithmetic may trap.  */
124   if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math))
125     return 1;
126 
127   for (i = 0; i < XVECLEN (x, 0); ++i)
128     {
129       if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
130 	return 1;
131     }
132 
133   return 0;
134 }
135 
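/* The default implementation of TARGET_PROMOTE_FUNCTION_MODE.  It only
   promotes when FOR_RETURN is 2, i.e. (as this writer understands it) when
   the return convention is entirely under the compiler's control.  */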
136 machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
138 			       machine_mode mode,
139 			       int *punsignedp ATTRIBUTE_UNUSED,
140 			       const_tree funtype ATTRIBUTE_UNUSED,
141 			       int for_return ATTRIBUTE_UNUSED)
142 {
143   if (type != NULL_TREE && for_return == 2)
144     return promote_mode (type, mode, punsignedp);
145   return mode;
146 }
147 
148 machine_mode
default_promote_function_mode_always_promote (const_tree type,
150 					      machine_mode mode,
151 					      int *punsignedp,
152 					      const_tree funtype ATTRIBUTE_UNUSED,
153 					      int for_return ATTRIBUTE_UNUSED)
154 {
155   return promote_mode (type, mode, punsignedp);
156 }
157 
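/* The default implementation of TARGET_CC_MODES_COMPATIBLE: two condition
   code modes are only compatible if they are identical.  */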
158 machine_mode
default_cc_modes_compatible (machine_mode m1, machine_mode m2)
160 {
161   if (m1 == m2)
162     return m1;
163   return VOIDmode;
164 }
165 
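/* The default implementation of TARGET_RETURN_IN_MEMORY: values whose
   mode is BLKmode are returned in memory.  */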
166 bool
default_return_in_memory (const_tree type,
168 			  const_tree fntype ATTRIBUTE_UNUSED)
169 {
170   return (TYPE_MODE (type) == BLKmode);
171 }
172 
173 rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
175 			    machine_mode mode ATTRIBUTE_UNUSED)
176 {
177   return x;
178 }
179 
180 bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
182 					 machine_mode)
183 {
184   return false;
185 }
186 
187 bool
default_const_not_ok_for_debug_p (rtx x)
189 {
190   if (GET_CODE (x) == UNSPEC)
191     return true;
192   return false;
193 }
194 
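/* The default implementation of TARGET_EXPAND_BUILTIN_SAVEREGS, for
   targets that do not support __builtin_saveregs.  */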
195 rtx
default_expand_builtin_saveregs (void)
197 {
198   error ("%<__builtin_saveregs%> not supported by this target");
199   return const0_rtx;
200 }
201 
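/* The default implementation of TARGET_SETUP_INCOMING_VARARGS does
   nothing.  */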
202 void
default_setup_incoming_varargs (cumulative_args_t,
204 				const function_arg_info &, int *, int)
205 {
206 }
207 
208 /* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE.  */
209 
210 rtx
default_builtin_setjmp_frame_value (void)
212 {
213   return virtual_stack_vars_rtx;
214 }
215 
216 /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */
217 
218 bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
220 {
221   return false;
222 }
223 
224 bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
226 {
227   return (targetm.calls.setup_incoming_varargs
228 	  != default_setup_incoming_varargs);
229 }
230 
231 scalar_int_mode
default_eh_return_filter_mode (void)
233 {
234   return targetm.unwind_word_mode ();
235 }
236 
237 scalar_int_mode
default_libgcc_cmp_return_mode (void)
239 {
240   return word_mode;
241 }
242 
243 scalar_int_mode
default_libgcc_shift_count_mode (void)
245 {
246   return word_mode;
247 }
248 
249 scalar_int_mode
default_unwind_word_mode (void)
251 {
252   return word_mode;
253 }
254 
255 /* The default implementation of TARGET_SHIFT_TRUNCATION_MASK.  */
256 
257 unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
259 {
260   return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
261 }
262 
263 /* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL.  */
264 
265 unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
267 {
268   return have_insn_for (DIV, mode) ? 3 : 2;
269 }
270 
271 /* The default implementation of TARGET_MODE_REP_EXTENDED.  */
272 
273 int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
275 {
276   return UNKNOWN;
277 }
278 
279 /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */
280 
281 bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
283 {
284   return true;
285 }
286 
287 /* Return machine mode for non-standard suffix
288    or VOIDmode if non-standard suffixes are unsupported.  */
289 machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
291 {
292   return VOIDmode;
293 }
294 
295 /* The generic C++ ABI specifies this is a 64-bit value.  */
296 tree
default_cxx_guard_type (void)
298 {
299   return long_long_integer_type_node;
300 }
301 
302 /* Returns the size of the cookie to use when allocating an array
303    whose elements have the indicated TYPE.  Assumes that it is already
304    known that a cookie is needed.  */
305 
306 tree
default_cxx_get_cookie_size (tree type)
308 {
309   tree cookie_size;
310 
311   /* We need to allocate an additional max (sizeof (size_t), alignof
312      (true_type)) bytes.  */
313   tree sizetype_size;
314   tree type_align;
315 
316   sizetype_size = size_in_bytes (sizetype);
317   type_align = size_int (TYPE_ALIGN_UNIT (type));
318   if (tree_int_cst_lt (type_align, sizetype_size))
319     cookie_size = sizetype_size;
320   else
321     cookie_size = type_align;
322 
323   return cookie_size;
324 }
325 
326 /* Return true if a parameter must be passed by reference.  This version
327    of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */
328 
329 bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
331 					   const function_arg_info &arg)
332 {
333   return targetm.calls.must_pass_in_stack (arg);
334 }
335 
336 /* Return true if a parameter follows callee copies conventions.  This
337    version of the hook is true for all named arguments.  */
338 
339 bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
341 {
342   return arg.named;
343 }
344 
345 /* Emit to STREAM the assembler syntax for insn operand X.  */
346 
347 void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
349 		       int code ATTRIBUTE_UNUSED)
350 {
351 #ifdef PRINT_OPERAND
352   PRINT_OPERAND (stream, x, code);
353 #else
354   gcc_unreachable ();
355 #endif
356 }
357 
358 /* Emit to STREAM the assembler syntax for an insn operand whose memory
359    address is X.  */
360 
361 void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
363 			       machine_mode /*mode*/,
364 			       rtx x ATTRIBUTE_UNUSED)
365 {
366 #ifdef PRINT_OPERAND_ADDRESS
367   PRINT_OPERAND_ADDRESS (stream, x);
368 #else
369   gcc_unreachable ();
370 #endif
371 }
372 
373 /* Return true if CODE is a valid punctuation character for the
374    `print_operand' hook.  */
375 
376 bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
378 {
379 #ifdef PRINT_OPERAND_PUNCT_VALID_P
380   return PRINT_OPERAND_PUNCT_VALID_P (code);
381 #else
382   return false;
383 #endif
384 }
385 
386 /* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  */
387 tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
389 {
390   const char *skipped = name + (*name == '*' ? 1 : 0);
391   const char *stripped = targetm.strip_name_encoding (skipped);
392   if (*name != '*' && user_label_prefix[0])
393     stripped = ACONCAT ((user_label_prefix, stripped, NULL));
394   return get_identifier (stripped);
395 }
396 
397 /* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE.  */
398 
399 machine_mode
default_translate_mode_attribute (machine_mode mode)
401 {
402   return mode;
403 }
404 
405 /* True if MODE is valid for the target.  By "valid", we mean able to
406    be manipulated in non-trivial ways.  In particular, this means all
407    the arithmetic is supported.
408 
409    By default we guess this means that any C type is supported.  If
410    we can't map the mode back to a type that would be available in C,
411    then reject it.  Special case, here, is the double-word arithmetic
412    supported by optabs.c.  */
413 
414 bool
default_scalar_mode_supported_p (scalar_mode mode)
416 {
417   int precision = GET_MODE_PRECISION (mode);
418 
419   switch (GET_MODE_CLASS (mode))
420     {
421     case MODE_PARTIAL_INT:
422     case MODE_INT:
423       if (precision == CHAR_TYPE_SIZE)
424 	return true;
425       if (precision == SHORT_TYPE_SIZE)
426 	return true;
427       if (precision == INT_TYPE_SIZE)
428 	return true;
429       if (precision == LONG_TYPE_SIZE)
430 	return true;
431       if (precision == LONG_LONG_TYPE_SIZE)
432 	return true;
433       if (precision == 2 * BITS_PER_WORD)
434 	return true;
435       return false;
436 
437     case MODE_FLOAT:
438       if (precision == FLOAT_TYPE_SIZE)
439 	return true;
440       if (precision == DOUBLE_TYPE_SIZE)
441 	return true;
442       if (precision == LONG_DOUBLE_TYPE_SIZE)
443 	return true;
444       return false;
445 
446     case MODE_DECIMAL_FLOAT:
447     case MODE_FRACT:
448     case MODE_UFRACT:
449     case MODE_ACCUM:
450     case MODE_UACCUM:
451       return false;
452 
453     default:
454       gcc_unreachable ();
455     }
456 }
457 
458 /* Return true if libgcc supports floating-point mode MODE (known to
459    be supported as a scalar mode).  */
460 
461 bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
463 {
464   switch (mode)
465     {
466 #ifdef HAVE_SFmode
467     case E_SFmode:
468 #endif
469 #ifdef HAVE_DFmode
470     case E_DFmode:
471 #endif
472 #ifdef HAVE_XFmode
473     case E_XFmode:
474 #endif
475 #ifdef HAVE_TFmode
476     case E_TFmode:
477 #endif
478       return true;
479 
480     default:
481       return false;
482     }
483 }
484 
485 /* Return the machine mode to use for the type _FloatN, if EXTENDED is
486    false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
487    supported.  */
488 opt_scalar_float_mode
default_floatn_mode (int n, bool extended)
490 {
491   if (extended)
492     {
493       opt_scalar_float_mode cand1, cand2;
494       scalar_float_mode mode;
495       switch (n)
496 	{
497 	case 32:
498 #ifdef HAVE_DFmode
499 	  cand1 = DFmode;
500 #endif
501 	  break;
502 
503 	case 64:
504 #ifdef HAVE_XFmode
505 	  cand1 = XFmode;
506 #endif
507 #ifdef HAVE_TFmode
508 	  cand2 = TFmode;
509 #endif
510 	  break;
511 
512 	case 128:
513 	  break;
514 
515 	default:
516 	  /* Those are the only valid _FloatNx types.  */
517 	  gcc_unreachable ();
518 	}
519       if (cand1.exists (&mode)
520 	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
521 	  && targetm.scalar_mode_supported_p (mode)
522 	  && targetm.libgcc_floating_mode_supported_p (mode))
523 	return cand1;
524       if (cand2.exists (&mode)
525 	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
526 	  && targetm.scalar_mode_supported_p (mode)
527 	  && targetm.libgcc_floating_mode_supported_p (mode))
528 	return cand2;
529     }
530   else
531     {
532       opt_scalar_float_mode cand;
533       scalar_float_mode mode;
534       switch (n)
535 	{
536 	case 16:
537 	  /* Always enable _Float16 if we have basic support for the mode.
538 	     Targets can control the range and precision of operations on
539 	     the _Float16 type using TARGET_C_EXCESS_PRECISION.  */
540 #ifdef HAVE_HFmode
541 	  cand = HFmode;
542 #endif
543 	  break;
544 
545 	case 32:
546 #ifdef HAVE_SFmode
547 	  cand = SFmode;
548 #endif
549 	  break;
550 
551 	case 64:
552 #ifdef HAVE_DFmode
553 	  cand = DFmode;
554 #endif
555 	  break;
556 
557 	case 128:
558 #ifdef HAVE_TFmode
559 	  cand = TFmode;
560 #endif
561 	  break;
562 
563 	default:
564 	  break;
565 	}
566       if (cand.exists (&mode)
567 	  && REAL_MODE_FORMAT (mode)->ieee_bits == n
568 	  && targetm.scalar_mode_supported_p (mode)
569 	  && targetm.libgcc_floating_mode_supported_p (mode))
570 	return cand;
571     }
572   return opt_scalar_float_mode ();
573 }
574 
/* Define this to return true if the _FloatN and _FloatNx built-in functions
   should implicitly enable the built-in function without the __builtin_ prefix
   in addition to the normal built-in function with the __builtin_ prefix.  The
   default is to only enable built-in functions without the __builtin_ prefix
   for the GNU C language.  The argument FUNC is the enum built_in_function
   id of the function to be enabled.  */
581 
582 bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
584 {
585   static bool first_time_p = true;
586   static bool c_or_objective_c;
587 
588   if (first_time_p)
589     {
590       first_time_p = false;
591       c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
592     }
593 
594   return c_or_objective_c;
595 }
596 
/* Make some target macros usable by target-independent code.  */
598 bool
targhook_words_big_endian (void)
600 {
601   return !!WORDS_BIG_ENDIAN;
602 }
603 
604 bool
targhook_float_words_big_endian (void)
606 {
607   return !!FLOAT_WORDS_BIG_ENDIAN;
608 }
609 
610 /* True if the target supports floating-point exceptions and rounding
611    modes.  */
612 
613 bool
default_float_exceptions_rounding_supported_p (void)
615 {
616 #ifdef HAVE_adddf3
617   return HAVE_adddf3;
618 #else
619   return false;
620 #endif
621 }
622 
623 /* True if the target supports decimal floating point.  */
624 
625 bool
default_decimal_float_supported_p (void)
627 {
628   return ENABLE_DECIMAL_FLOAT;
629 }
630 
631 /* True if the target supports fixed-point arithmetic.  */
632 
633 bool
default_fixed_point_supported_p (void)
635 {
636   return ENABLE_FIXED_POINT;
637 }
638 
639 /* True if the target supports GNU indirect functions.  */
640 
641 bool
default_has_ifunc_p (void)
643 {
644   return HAVE_GNU_INDIRECT_FUNCTION;
645 }
646 
647 /* Return true if we predict the loop LOOP will be transformed to a
648    low-overhead loop, otherwise return false.
649 
650    By default, false is returned, as this hook's applicability should be
651    verified for each target.  Target maintainers should re-define the hook
652    if the target can take advantage of it.  */
653 
654 bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
656 {
657   return false;
658 }
659 
/* Return NULL if INSN is valid within a low-overhead loop, otherwise return
   an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL.  A called function may clobber any special registers required
   for low-overhead looping.  Additionally, some targets (e.g., PPC) use the
   count register for branch-on-table instructions.  We reject the doloop
   pattern in these cases.  */
669 
670 const char *
default_invalid_within_doloop (const rtx_insn *insn)
672 {
673   if (CALL_P (insn))
674     return "Function call in loop.";
675 
676   if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
677     return "Computed branch in the loop.";
678 
679   return NULL;
680 }
681 
682 /* Mapping of builtin functions to vectorized variants.  */
683 
684 tree
default_builtin_vectorized_function (unsigned int, tree, tree)
686 {
687   return NULL_TREE;
688 }
689 
690 /* Mapping of target builtin functions to vectorized variants.  */
691 
692 tree
default_builtin_md_vectorized_function (tree, tree, tree)
694 {
695   return NULL_TREE;
696 }
697 
698 /* Default vectorizer cost model values.  */
699 
700 int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
702                                     tree vectype,
703                                     int misalign ATTRIBUTE_UNUSED)
704 {
705   switch (type_of_cost)
706     {
707       case scalar_stmt:
708       case scalar_load:
709       case scalar_store:
710       case vector_stmt:
711       case vector_load:
712       case vector_store:
713       case vec_to_scalar:
714       case scalar_to_vec:
715       case cond_branch_not_taken:
716       case vec_perm:
717       case vec_promote_demote:
718         return 1;
719 
720       case unaligned_load:
721       case unaligned_store:
722         return 2;
723 
724       case cond_branch_taken:
725         return 3;
726 
727       case vec_construct:
728 	return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;
729 
730       default:
731         gcc_unreachable ();
732     }
733 }
734 
735 /* Reciprocal.  */
736 
737 tree
default_builtin_reciprocal (tree)
739 {
740   return NULL_TREE;
741 }
742 
743 bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
745 					  const function_arg_info &)
746 {
747   return false;
748 }
749 
750 bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
752 					 const function_arg_info &)
753 {
754   return true;
755 }
756 
757 int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
759 				     const function_arg_info &)
760 {
761   return 0;
762 }
763 
764 void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
766 				tree ATTRIBUTE_UNUSED)
767 {
768 }
769 
770 void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
772 {
773   gcc_unreachable ();
774 }
775 
776 /* Default implementation of TARGET_FUNCTION_ARG_OFFSET.  */
777 
778 HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
780 {
781   return 0;
782 }
783 
784 /* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
785    upward, but pad short args downward on big-endian machines.  */
786 
787 pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
789 {
790   if (!BYTES_BIG_ENDIAN)
791     return PAD_UPWARD;
792 
793   unsigned HOST_WIDE_INT size;
794   if (mode == BLKmode)
795     {
796       if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
797 	return PAD_UPWARD;
798       size = int_size_in_bytes (type);
799     }
800   else
801     /* Targets with variable-sized modes must override this hook
802        and handle variable-sized modes explicitly.  */
803     size = GET_MODE_SIZE (mode).to_constant ();
804 
805   if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
806     return PAD_DOWNWARD;
807 
808   return PAD_UPWARD;
809 }
810 
811 rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
813 {
814   gcc_unreachable ();
815 }
816 
817 rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
819 {
820   gcc_unreachable ();
821 }
822 
823 unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
825 			       const_tree type ATTRIBUTE_UNUSED)
826 {
827   return PARM_BOUNDARY;
828 }
829 
830 unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
832 				     const_tree type ATTRIBUTE_UNUSED)
833 {
834   return PARM_BOUNDARY;
835 }
836 
837 void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
839 {
840 }
841 
842 const char *
hook_invalid_arg_for_unprototyped_fn (
844 	const_tree typelist ATTRIBUTE_UNUSED,
845 	const_tree funcdecl ATTRIBUTE_UNUSED,
846 	const_tree val ATTRIBUTE_UNUSED)
847 {
848   return NULL;
849 }
850 
851 /* Initialize the stack protection decls.  */
852 
853 /* Stack protection related decls living in libgcc.  */
854 static GTY(()) tree stack_chk_guard_decl;
855 
856 tree
default_stack_protect_guard (void)
858 {
859   tree t = stack_chk_guard_decl;
860 
861   if (t == NULL)
862     {
863       rtx x;
864 
865       t = build_decl (UNKNOWN_LOCATION,
866 		      VAR_DECL, get_identifier ("__stack_chk_guard"),
867 		      ptr_type_node);
868       TREE_STATIC (t) = 1;
869       TREE_PUBLIC (t) = 1;
870       DECL_EXTERNAL (t) = 1;
871       TREE_USED (t) = 1;
872       TREE_THIS_VOLATILE (t) = 1;
873       DECL_ARTIFICIAL (t) = 1;
874       DECL_IGNORED_P (t) = 1;
875 
876       /* Do not share RTL as the declaration is visible outside of
877 	 current function.  */
878       x = DECL_RTL (t);
879       RTX_FLAG (x, used) = 1;
880 
881       stack_chk_guard_decl = t;
882     }
883 
884   return t;
885 }
886 
887 static GTY(()) tree stack_chk_fail_decl;
888 
889 tree
default_external_stack_protect_fail (void)
891 {
892   tree t = stack_chk_fail_decl;
893 
894   if (t == NULL_TREE)
895     {
896       t = build_function_type_list (void_type_node, NULL_TREE);
897       t = build_decl (UNKNOWN_LOCATION,
898 		      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
899       TREE_STATIC (t) = 1;
900       TREE_PUBLIC (t) = 1;
901       DECL_EXTERNAL (t) = 1;
902       TREE_USED (t) = 1;
903       TREE_THIS_VOLATILE (t) = 1;
904       TREE_NOTHROW (t) = 1;
905       DECL_ARTIFICIAL (t) = 1;
906       DECL_IGNORED_P (t) = 1;
907       DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
908       DECL_VISIBILITY_SPECIFIED (t) = 1;
909 
910       stack_chk_fail_decl = t;
911     }
912 
913   return build_call_expr (t, 0);
914 }
915 
916 tree
default_hidden_stack_protect_fail (void)
918 {
919 #ifndef HAVE_GAS_HIDDEN
920   return default_external_stack_protect_fail ();
921 #else
922   tree t = stack_chk_fail_decl;
923 
924   if (!flag_pic)
925     return default_external_stack_protect_fail ();
926 
927   if (t == NULL_TREE)
928     {
929       t = build_function_type_list (void_type_node, NULL_TREE);
930       t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
931 		      get_identifier ("__stack_chk_fail_local"), t);
932       TREE_STATIC (t) = 1;
933       TREE_PUBLIC (t) = 1;
934       DECL_EXTERNAL (t) = 1;
935       TREE_USED (t) = 1;
936       TREE_THIS_VOLATILE (t) = 1;
937       TREE_NOTHROW (t) = 1;
938       DECL_ARTIFICIAL (t) = 1;
939       DECL_IGNORED_P (t) = 1;
940       DECL_VISIBILITY_SPECIFIED (t) = 1;
941       DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;
942 
943       stack_chk_fail_decl = t;
944     }
945 
946   return build_call_expr (t, 0);
947 #endif
948 }
949 
950 bool
hook_bool_const_rtx_commutative_p (const_rtx x,
952 				   int outer_code ATTRIBUTE_UNUSED)
953 {
954   return COMMUTATIVE_P (x);
955 }
956 
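/* The default implementation of TARGET_FUNCTION_VALUE, wrapping the old
   FUNCTION_VALUE macro.  */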
957 rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
959 			const_tree fn_decl_or_type,
960 			bool outgoing ATTRIBUTE_UNUSED)
961 {
962   /* The old interface doesn't handle receiving the function type.  */
963   if (fn_decl_or_type
964       && !DECL_P (fn_decl_or_type))
965     fn_decl_or_type = NULL;
966 
967 #ifdef FUNCTION_VALUE
968   return FUNCTION_VALUE (ret_type, fn_decl_or_type);
969 #else
970   gcc_unreachable ();
971 #endif
972 }
973 
974 rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
976 		       const_rtx fun ATTRIBUTE_UNUSED)
977 {
978 #ifdef LIBCALL_VALUE
979   return LIBCALL_VALUE (MACRO_MODE (mode));
980 #else
981   gcc_unreachable ();
982 #endif
983 }
984 
985 /* The default hook for TARGET_FUNCTION_VALUE_REGNO_P.  */
986 
987 bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
989 {
990 #ifdef FUNCTION_VALUE_REGNO_P
991   return FUNCTION_VALUE_REGNO_P (regno);
992 #else
993   gcc_unreachable ();
994 #endif
995 }
996 
997 /* The default hook for TARGET_ZERO_CALL_USED_REGS.  */
998 
999 HARD_REG_SET
default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
1001 {
1002   gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));
1003 
1004   for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1005     if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
1006       {
1007 	rtx_insn *last_insn = get_last_insn ();
1008 	machine_mode mode = GET_MODE (regno_reg_rtx[regno]);
1009 	rtx zero = CONST0_RTX (mode);
1010 	rtx_insn *insn = emit_move_insn (regno_reg_rtx[regno], zero);
1011 	if (!valid_insn_p (insn))
1012 	  {
1013 	    static bool issued_error;
1014 	    if (!issued_error)
1015 	      {
1016 		issued_error = true;
1017 		sorry ("%qs not supported on this target",
1018 			"-fzero-call-used-regs");
1019 	      }
1020 	    delete_insns_since (last_insn);
1021 	  }
1022       }
1023   return need_zeroed_hardregs;
1024 }
1025 
1026 rtx
default_internal_arg_pointer (void)
1028 {
1029   /* If the reg that the virtual arg pointer will be translated into is
1030      not a fixed reg or is the stack pointer, make a copy of the virtual
1031      arg pointer, and address parms via the copy.  The frame pointer is
1032      considered fixed even though it is not marked as such.  */
1033   if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
1034        || ! (fixed_regs[ARG_POINTER_REGNUM]
1035 	     || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
1036     return copy_to_reg (virtual_incoming_args_rtx);
1037   else
1038     return virtual_incoming_args_rtx;
1039 }
1040 
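/* The default implementation of TARGET_STATIC_CHAIN: use the
   STATIC_CHAIN_REGNUM (and, where defined, STATIC_CHAIN_INCOMING_REGNUM)
   macros, or report that nested functions are unsupported.  */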
1041 rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
1043 {
1044   if (incoming_p)
1045     {
1046 #ifdef STATIC_CHAIN_INCOMING_REGNUM
1047       return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
1048 #endif
1049     }
1050 
1051 #ifdef STATIC_CHAIN_REGNUM
1052   return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
1053 #endif
1054 
1055   {
1056     static bool issued_error;
1057     if (!issued_error)
1058       {
1059 	issued_error = true;
1060 	sorry ("nested functions not supported on this target");
1061       }
1062 
    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
1065     return gen_rtx_MEM (Pmode, stack_pointer_rtx);
1066   }
1067 }
1068 
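/* The default implementation of TARGET_TRAMPOLINE_INIT, for targets with
   no trampoline support.  */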
1069 void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
1071 			 rtx ARG_UNUSED (r_chain))
1072 {
1073   sorry ("nested function trampolines not supported on this target");
1074 }
1075 
1076 poly_int64
default_return_pops_args (tree, tree, poly_int64)
1078 {
1079   return 0;
1080 }
1081 
1082 reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
1084 					 reg_class_t cl,
1085 					 reg_class_t best_cl ATTRIBUTE_UNUSED)
1086 {
1087   return cl;
1088 }
1089 
1090 extern bool
default_lra_p (void)
1092 {
1093   return true;
1094 }
1095 
1096 int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
1098 {
1099   return 0;
1100 }
1101 
1102 extern bool
default_register_usage_leveling_p (void)
1104 {
1105   return false;
1106 }
1107 
1108 extern bool
default_different_addr_displacement_p (void)
1110 {
1111   return false;
1112 }
1113 
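/* The default implementation of TARGET_SECONDARY_RELOAD, built on the
   older SECONDARY_INPUT_RELOAD_CLASS / SECONDARY_OUTPUT_RELOAD_CLASS
   macros and the reload_in/reload_out optabs.  */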
1114 reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
1116 			  reg_class_t reload_class_i ATTRIBUTE_UNUSED,
1117 			  machine_mode reload_mode ATTRIBUTE_UNUSED,
1118 			  secondary_reload_info *sri)
1119 {
1120   enum reg_class rclass = NO_REGS;
1121   enum reg_class reload_class = (enum reg_class) reload_class_i;
1122 
1123   if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
1124     {
1125       sri->icode = sri->prev_sri->t_icode;
1126       return NO_REGS;
1127     }
1128 #ifdef SECONDARY_INPUT_RELOAD_CLASS
1129   if (in_p)
1130     rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
1131 					   MACRO_MODE (reload_mode), x);
1132 #endif
1133 #ifdef SECONDARY_OUTPUT_RELOAD_CLASS
1134   if (! in_p)
1135     rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
1136 					    MACRO_MODE (reload_mode), x);
1137 #endif
1138   if (rclass != NO_REGS)
1139     {
1140       enum insn_code icode
1141 	= direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
1142 				reload_mode);
1143 
1144       if (icode != CODE_FOR_nothing
1145 	  && !insn_operand_matches (icode, in_p, x))
1146 	icode = CODE_FOR_nothing;
1147       else if (icode != CODE_FOR_nothing)
1148 	{
1149 	  const char *insn_constraint, *scratch_constraint;
1150 	  enum reg_class insn_class, scratch_class;
1151 
1152 	  gcc_assert (insn_data[(int) icode].n_operands == 3);
1153 	  insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
1154 	  if (!*insn_constraint)
1155 	    insn_class = ALL_REGS;
1156 	  else
1157 	    {
1158 	      if (in_p)
1159 		{
1160 		  gcc_assert (*insn_constraint == '=');
1161 		  insn_constraint++;
1162 		}
1163 	      insn_class = (reg_class_for_constraint
1164 			    (lookup_constraint (insn_constraint)));
1165 	      gcc_assert (insn_class != NO_REGS);
1166 	    }
1167 
1168 	  scratch_constraint = insn_data[(int) icode].operand[2].constraint;
1169 	  /* The scratch register's constraint must start with "=&",
1170 	     except for an input reload, where only "=" is necessary,
1171 	     and where it might be beneficial to re-use registers from
1172 	     the input.  */
1173 	  gcc_assert (scratch_constraint[0] == '='
1174 		      && (in_p || scratch_constraint[1] == '&'));
1175 	  scratch_constraint++;
1176 	  if (*scratch_constraint == '&')
1177 	    scratch_constraint++;
1178 	  scratch_class = (reg_class_for_constraint
1179 			   (lookup_constraint (scratch_constraint)));
1180 
1181 	  if (reg_class_subset_p (reload_class, insn_class))
1182 	    {
1183 	      gcc_assert (scratch_class == rclass);
1184 	      rclass = NO_REGS;
1185 	    }
1186 	  else
1187 	    rclass = insn_class;
1188 
1189         }
1190       if (rclass == NO_REGS)
1191 	sri->icode = icode;
1192       else
1193 	sri->t_icode = icode;
1194     }
1195   return rclass;
1196 }
1197 
1198 /* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE.  */
1199 
1200 machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
1202 {
1203   if (!targetm.lra_p ()
1204       && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
1205       && INTEGRAL_MODE_P (mode))
1206     return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
1207   return mode;
1208 }
1209 
1210 /* By default, if flag_pic is true, then neither local nor global relocs
1211    should be placed in readonly memory.  */
1212 
1213 int
default_reloc_rw_mask (void)
1215 {
1216   return flag_pic ? 3 : 0;
1217 }
1218 
/* By default, address diff vectors are generated
   for jump tables when flag_pic is true.  */
1221 
1222 bool
default_generate_pic_addr_diff_vec (void)
1224 {
1225   return flag_pic;
1226 }
1227 
1228 /* By default, do no modification. */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
1230 					 tree id)
1231 {
1232    return id;
1233 }
1234 
1235 /* The default implementation of TARGET_STATIC_RTX_ALIGNMENT.  */
1236 
1237 HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
1239 {
1240   return GET_MODE_ALIGNMENT (mode);
1241 }
1242 
1243 /* The default implementation of TARGET_CONSTANT_ALIGNMENT.  */
1244 
1245 HOST_WIDE_INT
default_constant_alignment (const_tree, HOST_WIDE_INT align)
1247 {
1248   return align;
1249 }
1250 
1251 /* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
1252    to at least BITS_PER_WORD but otherwise makes no changes.  */
1253 
1254 HOST_WIDE_INT
constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
1256 {
1257   if (TREE_CODE (exp) == STRING_CST)
1258     return MAX (align, BITS_PER_WORD);
1259   return align;
1260 }
1261 
1262 /* Default to natural alignment for vector types, bounded by
1263    MAX_OFILE_ALIGNMENT.  */
1264 
1265 HOST_WIDE_INT
default_vector_alignment (const_tree type)
1267 {
1268   unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
1269   tree size = TYPE_SIZE (type);
1270   if (tree_fits_uhwi_p (size))
1271     align = tree_to_uhwi (size);
1272   if (align >= MAX_OFILE_ALIGNMENT)
1273     return MAX_OFILE_ALIGNMENT;
1274   return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type)));
1275 }
1276 
1277 /* The default implementation of
1278    TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT.  */
1279 
1280 poly_uint64
default_preferred_vector_alignment (const_tree type)
1282 {
1283   return TYPE_ALIGN (type);
1284 }
1285 
1286 /* By default assume vectors of element TYPE require a multiple of the natural
1287    alignment of TYPE.  TYPE is naturally aligned if IS_PACKED is false.  */
1288 bool
default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
1290 {
1291   return ! is_packed;
1292 }
1293 
/* By default, assume that a target supports misaligned memory accesses of
   any factor if it supports the movmisalign pattern.
   IS_PACKED is true if the memory access is defined in a packed struct.  */
1297 bool
default_builtin_support_vector_misalignment (machine_mode mode,
1299 					     const_tree type
1300 					     ATTRIBUTE_UNUSED,
1301 					     int misalignment
1302 					     ATTRIBUTE_UNUSED,
1303 					     bool is_packed
1304 					     ATTRIBUTE_UNUSED)
1305 {
1306   if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
1307     return true;
1308   return false;
1309 }
1310 
1311 /* By default, only attempt to parallelize bitwise operations, and
1312    possibly adds/subtracts using bit-twiddling.  */
1313 
1314 machine_mode
default_preferred_simd_mode (scalar_mode)
1316 {
1317   return word_mode;
1318 }
1319 
1320 /* By default do not split reductions further.  */
1321 
1322 machine_mode
default_split_reduction (machine_mode mode)
1324 {
1325   return mode;
1326 }
1327 
1328 /* By default only the preferred vector mode is tried.  */
1329 
1330 unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
1332 {
1333   return 0;
1334 }
1335 
1336 /* The default implementation of TARGET_VECTORIZE_RELATED_MODE.  */
1337 
1338 opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
1340 				scalar_mode element_mode,
1341 				poly_uint64 nunits)
1342 {
1343   machine_mode result_mode;
1344   if ((maybe_ne (nunits, 0U)
1345        || multiple_p (GET_MODE_SIZE (vector_mode),
1346 		      GET_MODE_SIZE (element_mode), &nunits))
1347       && mode_for_vector (element_mode, nunits).exists (&result_mode)
1348       && VECTOR_MODE_P (result_mode)
1349       && targetm.vector_mode_supported_p (result_mode))
1350     return result_mode;
1351 
1352   return opt_machine_mode ();
1353 }
1354 
1355 /* By default a vector of integers is used as a mask.  */
1356 
1357 opt_machine_mode
default_get_mask_mode (machine_mode mode)
1359 {
1360   return related_int_vector_mode (mode);
1361 }
1362 
1363 /* By default consider masked stores to be expensive.  */
1364 
1365 bool
default_empty_mask_is_expensive (unsigned ifn)
1367 {
1368   return ifn == IFN_MASK_STORE;
1369 }
1370 
1371 /* By default, the cost model accumulates three separate costs (prologue,
1372    loop body, and epilogue) for a vectorized loop or block.  So allocate an
1373    array of three unsigned ints, set it to zero, and return its address.  */
1374 
1375 void *
default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
1377 {
1378   unsigned *cost = XNEWVEC (unsigned, 3);
1379   cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
1380   return cost;
1381 }
1382 
1383 /* By default, the cost model looks up the cost of the given statement
1384    kind and mode, multiplies it by the occurrence count, accumulates
1385    it into the cost specified by WHERE, and returns the cost added.  */
1386 
1387 unsigned
default_add_stmt_cost (class vec_info *vinfo, void *data, int count,
1389 		       enum vect_cost_for_stmt kind,
1390 		       class _stmt_vec_info *stmt_info, tree vectype,
1391 		       int misalign,
1392 		       enum vect_cost_model_location where)
1393 {
1394   unsigned *cost = (unsigned *) data;
1395   unsigned retval = 0;
1396   int stmt_cost = targetm.vectorize.builtin_vectorization_cost (kind, vectype,
1397 								misalign);
1398    /* Statements in an inner loop relative to the loop being
1399       vectorized are weighted more heavily.  The value here is
1400       arbitrary and could potentially be improved with analysis.  */
1401   if (where == vect_body && stmt_info
1402       && stmt_in_inner_loop_p (vinfo, stmt_info))
1403     count *= 50;  /* FIXME.  */
1404 
1405   retval = (unsigned) (count * stmt_cost);
1406   cost[where] += retval;
1407 
1408   return retval;
1409 }
1410 
1411 /* By default, the cost model just returns the accumulated costs.  */
1412 
1413 void
default_finish_cost (void *data, unsigned *prologue_cost,
1415 		     unsigned *body_cost, unsigned *epilogue_cost)
1416 {
1417   unsigned *cost = (unsigned *) data;
1418   *prologue_cost = cost[vect_prologue];
1419   *body_cost     = cost[vect_body];
1420   *epilogue_cost = cost[vect_epilogue];
1421 }
1422 
1423 /* Free the cost data.  */
1424 
1425 void
default_destroy_cost_data (void *data)
1427 {
1428   free (data);
1429 }
1430 
1431 /* Determine whether or not a pointer mode is valid. Assume defaults
1432    of ptr_mode or Pmode - can be overridden.  */
1433 bool
default_valid_pointer_mode (scalar_int_mode mode)
1435 {
1436   return (mode == ptr_mode || mode == Pmode);
1437 }
1438 
/* Determine whether the memory reference specified by REF may alias
   the C library's errno location.  */
1441 bool
default_ref_may_alias_errno (ao_ref *ref)
1443 {
1444   tree base = ao_ref_base (ref);
1445   /* The default implementation assumes the errno location is
1446      a declaration of type int or is always accessed via a
1447      pointer to int.  We assume that accesses to errno are
1448      not deliberately obfuscated (even in conforming ways).  */
1449   if (TYPE_UNSIGNED (TREE_TYPE (base))
1450       || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
1451     return false;
1452   /* The default implementation assumes an errno location declaration
1453      is never defined in the current compilation unit and may not be
1454      aliased by a local variable.  */
1455   if (DECL_P (base)
1456       && DECL_EXTERNAL (base)
1457       && !TREE_STATIC (base))
1458     return true;
1459   else if (TREE_CODE (base) == MEM_REF
1460 	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1461     {
1462       struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
1463       return !pi || pi->pt.anything || pi->pt.nonlocal;
1464     }
1465   return false;
1466 }
1467 
1468 /* Return the mode for a pointer to a given ADDRSPACE,
1469    defaulting to ptr_mode for all address spaces.  */
1470 
1471 scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
1473 {
1474   return ptr_mode;
1475 }
1476 
1477 /* Return the mode for an address in a given ADDRSPACE,
1478    defaulting to Pmode for all address spaces.  */
1479 
1480 scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
1482 {
1483   return Pmode;
1484 }
1485 
1486 /* Named address space version of valid_pointer_mode.
1487    To match the above, the same modes apply to all address spaces.  */
1488 
1489 bool
default_addr_space_valid_pointer_mode (scalar_int_mode mode,
1491 				       addr_space_t as ATTRIBUTE_UNUSED)
1492 {
1493   return targetm.valid_pointer_mode (mode);
1494 }
1495 
1496 /* Some places still assume that all pointer or address modes are the
1497    standard Pmode and ptr_mode.  These optimizations become invalid if
1498    the target actually supports multiple different modes.  For now,
1499    we disable such optimizations on such targets, using this function.  */
1500 
1501 bool
target_default_pointer_address_modes_p (void)
1503 {
1504   if (targetm.addr_space.address_mode != default_addr_space_address_mode)
1505     return false;
1506   if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
1507     return false;
1508 
1509   return true;
1510 }
1511 
1512 /* Named address space version of legitimate_address_p.
1513    By default, all address spaces have the same form.  */
1514 
1515 bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
1517 					 bool strict,
1518 					 addr_space_t as ATTRIBUTE_UNUSED)
1519 {
1520   return targetm.legitimate_address_p (mode, mem, strict);
1521 }
1522 
1523 /* Named address space version of LEGITIMIZE_ADDRESS.
1524    By default, all address spaces have the same form.  */
1525 
1526 rtx
default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
1528 				       addr_space_t as ATTRIBUTE_UNUSED)
1529 {
1530   return targetm.legitimize_address (x, oldx, mode);
1531 }
1532 
1533 /* The default hook for determining if one named address space is a subset of
1534    another and to return which address space to use as the common address
1535    space.  */
1536 
1537 bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
1539 {
1540   return (subset == superset);
1541 }
1542 
1543 /* The default hook for determining if 0 within a named address
1544    space is a valid address.  */
1545 
1546 bool
default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
1548 {
1549   return false;
1550 }
1551 
1552 /* The default hook for debugging the address space is to return the
1553    address space number to indicate DW_AT_address_class.  */
1554 int
default_addr_space_debug (addr_space_t as)
1556 {
1557   return as;
1558 }
1559 
1560 /* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
1561    Don't complain about any address space.  */
1562 
1563 void
default_addr_space_diagnose_usage (addr_space_t, location_t)
1565 {
1566 }
1567 
1568 
1569 /* The default hook for TARGET_ADDR_SPACE_CONVERT. This hook should never be
1570    called for targets with only a generic address space.  */
1571 
1572 rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
1574 			    tree from_type ATTRIBUTE_UNUSED,
1575 			    tree to_type ATTRIBUTE_UNUSED)
1576 {
1577   gcc_unreachable ();
1578 }
1579 
/* The default implementation of TARGET_HARD_REGNO_NREGS.  */
1581 
1582 unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
1584 {
1585   /* Targets with variable-sized modes must provide their own definition
1586      of this hook.  */
1587   return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
1588 }
1589 
1590 bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
1592 {
1593   return true;
1594 }
1595 
1596 /* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P.  */
1597 
1598 bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
1600 				  addr_space_t addrspace ATTRIBUTE_UNUSED)
1601 {
1602   return false;
1603 }
1604 
1605 extern bool default_new_address_profitable_p (rtx, rtx);
1606 
1607 
1608 /* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P.  */
1609 
1610 bool
default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
1612 				  rtx_insn *insn ATTRIBUTE_UNUSED,
1613 				  rtx new_addr ATTRIBUTE_UNUSED)
1614 {
1615   return true;
1616 }
1617 
1618 bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
1620 					 tree ARG_UNUSED (name),
1621 					 tree ARG_UNUSED (args),
1622 					 int ARG_UNUSED (flags))
1623 {
1624   warning (OPT_Wattributes,
1625 	   "target attribute is not supported on this machine");
1626 
1627   return false;
1628 }
1629 
1630 bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
1632 				    tree ARG_UNUSED (pop_target))
1633 {
1634   /* If args is NULL the caller is handle_pragma_pop_options ().  In that case,
1635      emit no warning because "#pragma GCC pop_target" is valid on targets that
1636      do not have the "target" pragma.  */
1637   if (args)
1638     warning (OPT_Wpragmas,
1639 	     "%<#pragma GCC target%> is not supported for this machine");
1640 
1641   return false;
1642 }
1643 
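/* The default implementation of TARGET_CAN_INLINE_P: allow inlining only
   when caller and callee have the same target options.  */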
1644 bool
default_target_can_inline_p (tree caller, tree callee)
1646 {
1647   tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
1648   tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
1649   if (! callee_opts)
1650     callee_opts = target_option_default_node;
1651   if (! caller_opts)
1652     caller_opts = target_option_default_node;
1653 
1654   /* If both caller and callee have attributes, assume that if the
1655      pointer is different, the two functions have different target
1656      options since build_target_option_node uses a hash table for the
1657      options.  */
1658   return callee_opts == caller_opts;
1659 }
1660 
1661 /* If the machine does not have a case insn that compares the bounds,
1662    this means extra overhead for dispatch tables, which raises the
1663    threshold for using them.  */
1664 
1665 unsigned int
default_case_values_threshold (void)
1667 {
1668   return (targetm.have_casesi () ? 4 : 5);
1669 }
1670 
1671 bool
default_have_conditional_execution (void)
1673 {
1674   return HAVE_conditional_execution;
1675 }
1676 
/* By default we assume that C99 functions are present at run time,
   but sincos is not.  */
1679 bool
default_libc_has_function (enum function_class fn_class,
1681 			   tree type ATTRIBUTE_UNUSED)
1682 {
1683   if (fn_class == function_c94
1684       || fn_class == function_c99_misc
1685       || fn_class == function_c99_math_complex)
1686     return true;
1687 
1688   return false;
1689 }
1690 
1691 /* By default assume that libc has not a fast implementation.  */
1692 
1693 bool
default_libc_has_fast_function(int fcode ATTRIBUTE_UNUSED)1694 default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
1695 {
1696   return false;
1697 }
1698 
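/* An implementation of TARGET_LIBC_HAS_FUNCTION for GNU libc: assume every
   queried function class is available at run time.  */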
bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
		       tree type ATTRIBUTE_UNUSED)
{
  return true;
}

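/* An implementation of TARGET_LIBC_HAS_FUNCTION for targets whose C library
   provides none of the queried function classes.  */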
bool
no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
			  tree type ATTRIBUTE_UNUSED)
{
  return false;
}

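/* The default for the transactional-memory load/store builtin hooks: no
   TM load or store builtin is available for TYPE.  */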
tree
default_builtin_tm_load_store (tree ARG_UNUSED (type))
{
  return NULL_TREE;
}

/* Compute cost of moving registers to/from memory.  */

int
default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
#ifndef MEMORY_MOVE_COST
  return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
  return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}

/* Compute cost of moving data from a register of class FROM to one of
   TO, using MODE.  */

int
default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t from ATTRIBUTE_UNUSED,
			    reg_class_t to ATTRIBUTE_UNUSED)
{
#ifndef REGISTER_MOVE_COST
  return 2;
#else
  return REGISTER_MOVE_COST (MACRO_MODE (mode),
			     (enum reg_class) from, (enum reg_class) to);
#endif
}

/* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS.  */

bool
default_slow_unaligned_access (machine_mode, unsigned int)
{
  return STRICT_ALIGNMENT;
}

/* The default implementation of TARGET_ESTIMATED_POLY_VALUE.  */

HOST_WIDE_INT
default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
{
  return x.coeffs[0];
}

/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
   behavior.  SPEED_P is true if we are compiling for speed.  */

unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
{
  unsigned int move_ratio;
#ifdef MOVE_RATIO
  move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
  move_ratio = 2;
#else /* No cpymem patterns, pick a default.  */
  move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
  return move_ratio;
}

/* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
   used; return FALSE if the cpymem/setmem optab should be expanded, or
   a call to memcpy emitted.  */

bool
default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
					unsigned int alignment,
					enum by_pieces_operation op,
					bool speed_p)
{
  unsigned int max_size = 0;
  unsigned int ratio = 0;

  switch (op)
    {
    case CLEAR_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = CLEAR_RATIO (speed_p);
      break;
    case MOVE_BY_PIECES:
      max_size = MOVE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case SET_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = SET_RATIO (speed_p);
      break;
    case STORE_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case COMPARE_BY_PIECES:
      max_size = COMPARE_MAX_PIECES;
      /* Pick a likely default, just as in get_move_ratio.  */
      ratio = speed_p ? 15 : 3;
      break;
    }

  return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
}

/* This hook controls code generation for expanding a memcmp operation by
   pieces.  Return 1 for the normal pattern of compare/jump after each pair
   of loads, or a higher number to reduce the number of branches.  */

int
default_compare_by_pieces_branch_ratio (machine_mode)
{
  return 1;
}

/* Helper for default_print_patchable_function_entry and other
   print_patchable_function_entry hook implementations.  */

void
default_print_patchable_function_entry_1 (FILE *file,
					  unsigned HOST_WIDE_INT
					  patch_area_size,
					  bool record_p,
					  unsigned int flags)
{
  const char *nop_templ = 0;
  int code_num;
  rtx_insn *my_nop = make_insn_raw (gen_nop ());

  /* We use the template alone, relying on the (currently sane) assumption
     that the NOP template does not have variable operands.  */
  code_num = recog_memoized (my_nop);
  nop_templ = get_insn_template (code_num, my_nop);

  if (record_p && targetm_common.have_named_sections)
    {
      char buf[256];
      static int patch_area_number;
      section *previous_section = in_section;
      const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);

      gcc_assert (asm_op != NULL);
      patch_area_number++;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);

      switch_to_section (get_section ("__patchable_function_entries",
				      flags, current_function_decl));
      assemble_align (POINTER_SIZE);
      fputs (asm_op, file);
      assemble_name_raw (file, buf);
      fputc ('\n', file);

      switch_to_section (previous_section);
      ASM_OUTPUT_LABEL (file, buf);
    }

  unsigned i;
  for (i = 0; i < patch_area_size; ++i)
    output_asm_insn (nop_templ, NULL);
}

/* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
   entry.  If RECORD_P is true and the target supports named sections,
   the location of the NOPs will be recorded in a special object section
   called "__patchable_function_entries".  This routine may be called
   twice per function to put NOPs before and after the function
   entry.  */

void
default_print_patchable_function_entry (FILE *file,
					unsigned HOST_WIDE_INT patch_area_size,
					bool record_p)
{
  unsigned int flags = SECTION_WRITE | SECTION_RELRO;
  if (HAVE_GAS_SECTION_LINK_ORDER)
    flags |= SECTION_LINK_ORDER;
  default_print_patchable_function_entry_1 (file, patch_area_size, record_p,
					    flags);
}

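/* Return true if the function profiling code should be emitted before the
   prologue, i.e. if the target defines PROFILE_BEFORE_PROLOGUE.  */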
bool
default_profile_before_prologue (void)
{
#ifdef PROFILE_BEFORE_PROLOGUE
  return true;
#else
  return false;
#endif
}

/* The default implementation of TARGET_PREFERRED_RELOAD_CLASS.  */

reg_class_t
default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
				reg_class_t rclass)
{
#ifdef PREFERRED_RELOAD_CLASS
  return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
#else
  return rclass;
#endif
}

/* The default implementation of TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

reg_class_t
default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
				       reg_class_t rclass)
{
  return rclass;
}

/* The default implementation of TARGET_PREFERRED_RENAME_CLASS.  */
reg_class_t
default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
{
  return NO_REGS;
}

/* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P.  */

bool
default_class_likely_spilled_p (reg_class_t rclass)
{
  return (reg_class_size[(int) rclass] == 1);
}

/* The default implementation of TARGET_CLASS_MAX_NREGS.  */

unsigned char
default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
  return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
					  MACRO_MODE (mode));
#else
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  unsigned int size = GET_MODE_SIZE (mode).to_constant ();
  return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}

/* Determine the debugging unwind mechanism for the target.  */

enum unwind_info_type
default_debug_unwind_info (void)
{
  /* If the target wants to force the use of dwarf2 unwind info, let it.  */
  /* ??? Change all users to the hook, then poison this.  */
#ifdef DWARF2_FRAME_INFO
  if (DWARF2_FRAME_INFO)
    return UI_DWARF2;
#endif

  /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
#ifdef DWARF2_DEBUGGING_INFO
  if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
    return UI_DWARF2;
#endif

  return UI_NONE;
}

/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   must define this hook.  */

unsigned int
default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
{
  gcc_unreachable ();
}

/* Determine the correct mode for a Dwarf frame register that represents
   register REGNO.  */

machine_mode
default_dwarf_frame_reg_mode (int regno)
{
  machine_mode save_mode = reg_raw_mode[regno];

  if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
					      regno, save_mode))
    save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
  return save_mode;
}

/* To be used by targets where reg_raw_mode doesn't return the right
   mode for registers used in apply_builtin_return and apply_builtin_arg.  */

fixed_size_mode
default_get_reg_raw_mode (int regno)
{
  /* Targets must override this hook if the underlying register is
     variable-sized.  */
  return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}

/* Return true if a leaf function should stay leaf even with profiling
   enabled.  */

bool
default_keep_leaf_when_profiled ()
{
  return false;
}

/* Return true if the state of option OPTION should be stored in PCH files
   and checked by default_pch_valid_p.  Store the option's current state
   in STATE if so.  */

static inline bool
option_affects_pch_p (int option, struct cl_option_state *state)
{
  if ((cl_options[option].flags & CL_TARGET) == 0)
    return false;
  if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
    return false;
  if (option_flag_var (option, &global_options) == &target_flags)
    if (targetm.check_pch_target_flags)
      return false;
  return get_option_state (&global_options, option, state);
}

/* Default version of get_pch_validity.
   By default, every flag difference is fatal; that will be mostly right for
   most targets, but completely right for very few.  */

void *
default_get_pch_validity (size_t *sz)
{
  struct cl_option_state state;
  size_t i;
  char *result, *r;

  *sz = 2;
  if (targetm.check_pch_target_flags)
    *sz += sizeof (target_flags);
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      *sz += state.size;

  result = r = XNEWVEC (char, *sz);
  r[0] = flag_pic;
  r[1] = flag_pie;
  r += 2;
  if (targetm.check_pch_target_flags)
    {
      memcpy (r, &target_flags, sizeof (target_flags));
      r += sizeof (target_flags);
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	memcpy (r, state.data, state.size);
	r += state.size;
      }

  return result;
}

/* Return a message which says that a PCH file was created with a different
   setting of OPTION.  */

static const char *
pch_option_mismatch (const char *option)
{
  return xasprintf (_("created and used with differing settings of '%s'"),
		    option);
}

/* Default version of pch_valid_p.  */

const char *
default_pch_valid_p (const void *data_p, size_t len)
{
  struct cl_option_state state;
  const char *data = (const char *)data_p;
  size_t i;

  /* -fpic and -fpie also usually make a PCH invalid.  */
  if (data[0] != flag_pic)
    return _("created and used with different settings of %<-fpic%>");
  if (data[1] != flag_pie)
    return _("created and used with different settings of %<-fpie%>");
  data += 2;

  /* Check target_flags.  */
  if (targetm.check_pch_target_flags)
    {
      int tf;
      const char *r;

      memcpy (&tf, data, sizeof (target_flags));
      data += sizeof (target_flags);
      len -= sizeof (target_flags);
      r = targetm.check_pch_target_flags (tf);
      if (r != NULL)
	return r;
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	if (memcmp (data, state.data, state.size) != 0)
	  return pch_option_mismatch (cl_options[i].opt_text);
	data += state.size;
	len -= state.size;
      }

  return NULL;
}

/* Default version of cstore_mode.  */

scalar_int_mode
default_cstore_mode (enum insn_code icode)
{
  return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
}

/* Default version of member_type_forces_blk.  */

bool
default_member_type_forces_blk (const_tree, machine_mode)
{
  return false;
}

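/* Default versions of the pointer-bounds hooks.  The defaults simply abort;
   targets that pass pointer bounds must provide their own implementations.  */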
rtx
default_load_bounds_for_arg (rtx addr ATTRIBUTE_UNUSED,
			     rtx ptr ATTRIBUTE_UNUSED,
			     rtx bnd ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

void
default_store_bounds_for_arg (rtx val ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      rtx bounds ATTRIBUTE_UNUSED,
			      rtx to ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

rtx
default_load_returned_bounds (rtx slot ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

void
default_store_returned_bounds (rtx slot ATTRIBUTE_UNUSED,
			       rtx bounds ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Default version of canonicalize_comparison.  */

void
default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}

/* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

void
default_atomic_assign_expand_fenv (tree *, tree *, tree *)
{
}

#ifndef PAD_VARARGS_DOWN
#define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
#endif

/* Build an indirect-ref expression over the given TREE, which represents a
   piece of a va_arg() expansion.  */
tree
build_va_arg_indirect_ref (tree addr)
{
  addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
  return addr;
}

/* The "standard" implementation of va_arg: read the value from the
   current (padded) address and increment by the (padded) size.  */

tree
std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			  gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* All of the alignment and movement below is for args-grow-up machines.
     As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
     implement their own specialized gimplify_va_arg_expr routines.  */
  if (ARGS_GROW_DOWNWARD)
    gcc_unreachable ();

  indirect = pass_va_arg_by_reference (type);
  if (indirect)
    type = build_pointer_type (type);

  if (targetm.calls.split_complex_arg
      && TREE_CODE (type) == COMPLEX_TYPE
      && targetm.calls.split_complex_arg (type))
    {
      tree real_part, imag_part;

      real_part = std_gimplify_va_arg_expr (valist,
					    TREE_TYPE (type), pre_p, NULL);
      real_part = get_initialized_tmp_var (real_part, pre_p);

      imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
					    TREE_TYPE (type), pre_p, NULL);
      imag_part = get_initialized_tmp_var (imag_part, pre_p);

      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    }

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align
      && !TYPE_EMPTY_P (type)
      && !integer_zerop (TYPE_SIZE (type)))
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
			       valist_tmp,
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);
    }

  /* Compute the rounded size of the type.  */
  type_size = arg_size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
			   rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}

/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   not support nested low-overhead loops.  */

bool
can_use_doloop_if_innermost (const widest_int &, const widest_int &,
			     unsigned int loop_depth, bool)
{
  return loop_depth == 1;
}

/* Default implementation of TARGET_OPTAB_SUPPORTED_P.  */

bool
default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
  return true;
}

/* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST.  */

unsigned int
default_max_noce_ifcvt_seq_cost (edge e)
{
  bool predictable_p = predictable_edge_p (e);

  if (predictable_p)
    {
      if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
	return param_max_rtl_if_conversion_predictable_cost;
    }
  else
    {
      if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
	return param_max_rtl_if_conversion_unpredictable_cost;
    }

  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}

/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */

unsigned int
default_min_arithmetic_precision (void)
{
  return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}

/* Default implementation of TARGET_C_EXCESS_PRECISION.  */

enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
{
  return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
}

/* Default implementation of
   TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.  */
HOST_WIDE_INT
default_stack_clash_protection_alloca_probe_range (void)
{
  return 0;
}

/* The default implementation of TARGET_EARLY_REMAT_MODES.  */

void
default_select_early_remat_modes (sbitmap)
{
}

/* The default implementation of TARGET_PREFERRED_ELSE_VALUE.  */

tree
default_preferred_else_value (unsigned, tree type, unsigned, tree *)
{
  return build_zero_cst (type);
}

/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.  */
bool
default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
{
#ifdef HAVE_speculation_barrier
  return active ? HAVE_speculation_barrier : true;
#else
  return false;
#endif
}

/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   that can be used on targets that never have speculative execution.  */
bool
speculation_safe_value_not_needed (bool active)
{
  return !active;
}

/* Default implementation of the speculation-safe-load builtin.  This
   implementation simply copies val to result and generates a
   speculation_barrier insn, if such a pattern is defined.  */
rtx
default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
				rtx result, rtx val,
				rtx failval ATTRIBUTE_UNUSED)
{
  emit_move_insn (result, val);

#ifdef HAVE_speculation_barrier
  /* Assume the target knows what it is doing: if it defines a
     speculation barrier, but it is not enabled, then assume that one
     isn't needed.  */
  if (HAVE_speculation_barrier)
    emit_insn (gen_speculation_barrier ());
#endif

  return result;
}

/* How many bits to shift in order to access the tag bits.
   The default is to store the tag in the top 8 bits of a 64-bit pointer,
   hence shifting 56 bits will leave just the tag.  */
#define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8)
#define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT)

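/* The default implementation of TARGET_MEMTAG_CAN_TAG_ADDRESSES.  */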
bool
default_memtag_can_tag_addresses ()
{
  return false;
}

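/* The default implementation of TARGET_MEMTAG_TAG_SIZE: tags occupy 8 bits.  */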
uint8_t
default_memtag_tag_size ()
{
  return 8;
}

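/* The default implementation of TARGET_MEMTAG_GRANULE_SIZE: one tag covers
   16 bytes.  */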
uint8_t
default_memtag_granule_size ()
{
  return 16;
}

/* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG.  */
rtx
default_memtag_insert_random_tag (rtx untagged, rtx target)
{
  gcc_assert (param_hwasan_instrument_stack);
  if (param_hwasan_random_frame_tag)
    {
      rtx fn = init_one_libfunc ("__hwasan_generate_tag");
      rtx new_tag = emit_library_call_value (fn, NULL_RTX, LCT_NORMAL, QImode);
      return targetm.memtag.set_tag (untagged, new_tag, target);
    }
  else
    {
      /* NOTE: The kernel API does not have __hwasan_generate_tag exposed.
	 In the future we may add the option to emit random tags with inline
	 instrumentation instead of function calls.  This would be the same
	 between the kernel and userland.  */
      return untagged;
    }
}

/* The default implementation of TARGET_MEMTAG_ADD_TAG.  */
rtx
default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
{
  /* Need to look into what the most efficient code sequence is.
     This is a code sequence that would be emitted *many* times, so we
     want it as small as possible.

     There are two places where tag overflow is a question:
       - Tagging the shadow stack.
	  (both tagging and untagging).
       - Tagging addressable pointers.

     We need to ensure both behaviors are the same (i.e. that the tag that
     ends up in a pointer after "overflowing" the tag bits with a tag addition
     is the same that ends up in the shadow space).

     The aim is that the behavior of tag addition should follow modulo
     wrapping in both instances.

     The libhwasan code doesn't have any path that increments a pointer's tag,
     which means it has no opinion on what happens when a tag increment
     overflows (and hence we can choose our own behavior).  */

  offset += ((uint64_t)tag_offset << HWASAN_SHIFT);
  return plus_constant (Pmode, base, offset);
}

/* The default implementation of TARGET_MEMTAG_SET_TAG.  */
rtx
default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
{
  gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
  tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
			     /* unsignedp = */1, OPTAB_WIDEN);
  rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
				 /* unsignedp = */1, OPTAB_DIRECT);
  gcc_assert (ret);
  return ret;
}

/* The default implementation of TARGET_MEMTAG_EXTRACT_TAG.  */
rtx
default_memtag_extract_tag (rtx tagged_pointer, rtx target)
{
  rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
				 HWASAN_SHIFT_RTX, target,
				 /* unsignedp = */0,
				 OPTAB_DIRECT);
  rtx ret = gen_lowpart (QImode, tag);
  gcc_assert (ret);
  return ret;
}

/* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER.  */
rtx
default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
{
  rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
  rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
					   tag_mask, target, true,
					   OPTAB_DIRECT);
  gcc_assert (untagged_base);
  return untagged_base;
}

#include "gt-targhooks.h"