1 /* GCC backend functions for C-SKY targets.
2 Copyright (C) 2018-2021 Free Software Foundation, Inc.
3 Contributed by C-SKY Microsystems and Mentor Graphics.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "memmodel.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "rtl.h"
30 #include "tree.h"
31 #include "cfghooks.h"
32 #include "df.h"
33 #include "tm_p.h"
34 #include "stringpool.h"
35 #include "attribs.h"
36 #include "optabs.h"
37 #include "regs.h"
38 #include "emit-rtl.h"
39 #include "recog.h"
40 #include "cgraph.h"
41 #include "c-family/c-common.h"
42 #include "cpplib.h"
43 #include "diagnostic-core.h"
44 #include "alias.h"
45 #include "fold-const.h"
46 #include "stor-layout.h"
47 #include "calls.h"
48 #include "varasm.h"
49 #include "output.h"
50 #include "insn-attr.h"
51 #include "flags.h"
52 #include "reload.h"
53 #include "explow.h"
54 #include "expr.h"
55 #include "cfgrtl.h"
56 #include "sched-int.h"
57 #include "common/common-target.h"
58 #include "langhooks.h"
59 #include "intl.h"
60 #include "libfuncs.h"
61 #include "opts.h"
62 #include "dumpfile.h"
63 #include "target-globals.h"
64 #include "builtins.h"
65 #include "tm-constrs.h"
66 #include "rtl-iter.h"
67 #include "pass_manager.h"
68 #include "tree-pass.h"
69 #include "context.h"
70
71 /* This file should be included last. */
72 #include "target-def.h"
73
74 /* Stack and register size macros. */
75
/* Number of whole words needed to hold SIZE bytes, rounding up.  */
#define CSKY_NUM_WORDS(SIZE) \
  (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
/* Number of hard registers needed to hold a value of mode MODE.  */
#define CSKY_NUM_REGS(MODE) \
  CSKY_NUM_WORDS (GET_MODE_SIZE (MODE))
/* SIZE in bytes rounded up to a whole multiple of the word size.  */
#define CSKY_STACK_ALIGN(SIZE) \
  (CSKY_NUM_WORDS (SIZE) * UNITS_PER_WORD)

/* Offsets and range macros.  */

/* Maximum scaled offset reachable by a 16-bit load/store (presumably
   a 5-bit field scaled by the access size -- confirm against ISA).  */
#define CSKY_LD16_MAX_OFFSET(MODE) \
  (31 * GET_MODE_SIZE (MODE))
/* Maximum scaled offset reachable by a 32-bit load/store (presumably
   a 12-bit field scaled by the access size -- confirm against ISA).  */
#define CSKY_LD32_MAX_OFFSET(MODE) \
  (4095 * GET_MODE_SIZE (MODE))
/* Mask covering every byte offset reachable by a 16-bit load/store
   of mode MODE.  */
#define CSKY_LD16_OFFSET_MASK(MODE) \
  (CSKY_LD16_MAX_OFFSET (MODE) + GET_MODE_SIZE (MODE) - 1)

/* Maximum immediates accepted by the 16-bit addi/subi forms.  */
#define CSKY_ADDI16_MAX_IMM	  256
#define CSKY_SUBI16_MAX_IMM	  256

/* Prefix used for constant-pool (minipool) labels.  */
#define CSKY_CONSTPOOL_LABEL_PREFIX   "LCP"
96
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  /* Registers r0-r7: accessible to the 16-bit "mini" encodings.  */
  MINI_REGS, MINI_REGS, MINI_REGS, MINI_REGS,
  MINI_REGS, MINI_REGS, MINI_REGS, MINI_REGS,
  /* Registers r8-r15; r14 is the stack pointer and gets its own class.  */
  LOW_REGS, LOW_REGS, LOW_REGS, LOW_REGS,
  LOW_REGS, LOW_REGS, SP_REGS, LOW_REGS,
  /* Registers r16-r31.  */
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  /* Reserved.  */
  RESERVE_REGS,
  /* CC,HI,LO registers.  */
  C_REGS, HI_REGS, LO_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  RESERVE_REGS, RESERVE_REGS, RESERVE_REGS, RESERVE_REGS,
  /* Vector registers.  */
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  V_REGS, V_REGS, V_REGS, V_REGS,
  /* Reserved.  */
  RESERVE_REGS, RESERVE_REGS,
  /* Register epc.  */
  OTHER_REGS
};
131
/* Array mapping GCC register numbers to debugger (DWARF/stabs) register
   numbers; an entry of -1 denotes INVALID_REGNUM (no corresponding
   debug register).
   TODO: document the numbering scheme this mapping follows.  */
const int csky_dbx_regno[FIRST_PSEUDO_REGISTER] =
{
  /* r0-r31 map to themselves.  */
  0, 1, 2, 3, 4, 5, 6, 7,
  8, 9, 10, 11, 12, 13, 14, 15,
  16, 17, 18, 19, 20, 21, 22, 23,
  24, 25, 26, 27, 28, 29, 30, 31,
  /* hi/lo are 36/37; most remaining registers have no debug number.  */
  -1, -1, 36, 37, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  /* Vector registers map to 56-71; epc (last entry) maps to 72.  */
  -1, -1, -1, -1, 56, 57, 58, 59,
  60, 61, 62, 63, 64, 65, 66, 67,
  68, 69, 70, 71, -1, -1, 72
};
147
/* Table of machine attributes.  */

/* Handlers for the attributes below; defined later in this file.  */
static tree csky_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree csky_handle_isr_attribute (tree *, tree, tree, int, bool *);

static const struct attribute_spec csky_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  /* "naked": the compiler emits no prologue or epilogue for the
     function.  */
  { "naked", 0, 0, true, false, false, false, csky_handle_fndecl_attribute, NULL },
  /* Interrupt Service Routines have special prologue and epilogue
     requirements.  The optional argument selects the ISR flavor.  */
  { "interrupt", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
  { "isr", 0, 1, false, false, false, false, csky_handle_isr_attribute, NULL },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
161
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Records if LR has to be saved for far jumps.  */
  int far_jump_used;
  /* Records the type of the current function (a CSKY_FT_* value;
     see the macros below).  */
  unsigned long func_type;
  /* Record if the function has a variable argument list.  */
  int uses_anonymous_args;

  /* Stack frame layout information.  If frame_init_p is true,
     these fields have been initialized and don't need to be
     recomputed.  */
  unsigned int reg_mask;	/* non-volatile reg saves */
  int arg_size;			/* stdarg spills (bytes) */
  int reg_size;			/* non-volatile reg saves (bytes) */
  int local_size;		/* locals */
  int outbound_size;		/* arg overflow on calls out */
  int frame_size;		/* total static size of stack frame */
  /* Offsets of the locals, saved-register, and argument areas within
     the frame -- presumably relative to the frame base; confirm in
     the frame-layout code.  */
  int local_offset;
  int reg_offset;
  int arg_offset;
  /* Nonzero once the fields above have been computed.  */
  int frame_init_p;

} machine_function;
188
/* These macros are for the func_type values above.  The low three bits
   encode the basic type; bit 2 (CSKY_FT_INTERRUPT) is set in every
   interrupt flavor (ISR, FIQ, EXCEPTION), and CSKY_FT_NAKED is an
   independent flag bit above the type field.  */
#define CSKY_FT_TYPE_MASK   ((1 << 3) - 1)
#define CSKY_FT_UNKNOWN	    0		  /* Type not been determined */
#define CSKY_FT_NORMAL	    1		  /* Normal function */
#define CSKY_FT_ISR	    4		  /* Interrupt service routine */
#define CSKY_FT_FIQ	    5		  /* Fast interrupt service routine */
#define CSKY_FT_EXCEPTION   6		  /* Exception handler */
#define CSKY_FT_INTERRUPT   (1 << 2)	  /* overlap CSKY_FT_ISR */
#define CSKY_FT_NAKED	    (1 << 3)	  /* No prologue and epilogue */
/* Extract the basic type / test the flag bits of a func_type value.  */
#define CSKY_FUNCTION_TYPE(t)	      ((t) & CSKY_FT_TYPE_MASK)
#define CSKY_FUNCTION_IS_INTERRUPT(t) ((t) & CSKY_FT_INTERRUPT)
#define CSKY_FUNCTION_IS_NAKED(t)     ((t) & CSKY_FT_NAKED)
201
/* Description of one CPU or architecture entry in the tables below.  */
struct csky_processors
{
  /* User-visible name, as accepted by -mcpu=/-march=.  */
  const char *const name;
  /* Core identifier.  */
  enum csky_processor_type core;
  /* Architecture name string.  */
  const char *arch;
  /* Base architecture the core implements.  */
  enum csky_base_architecture base_arch;
  /* ISA feature bits provided by this core/architecture.  */
  enum csky_isa_feature isa_bits[CSKY_ISA_FEATURE_GET (max)];
};
210
/* Table of all supported cores, expanded from the CSKY_CORE entries
   in csky_cores.def and terminated by a sentinel with a NULL name.  */
static struct csky_processors all_cores[] =
{
#undef CSKY_CORE
#define CSKY_CORE(NAME, CORE, X, ARCH, ISA) \
  {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
  {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_CORE
  {NULL, TARGET_CPU_csky_none, NULL, CSKY_BASE_ARCH_NONE, \
  {CSKY_ISA_FEATURE_GET (none)}}
};
222
/* Table of all supported architectures, expanded from the CSKY_ARCH
   entries in csky_cores.def and terminated by a NULL-name sentinel.  */
static struct csky_processors all_architectures[] =
{
#undef CSKY_ARCH
#define CSKY_ARCH(NAME, CORE, ARCH, ISA) \
  {NAME, TARGET_CPU_##CORE, #ARCH, CSKY_BASE_ARCH_##ARCH, \
  {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_ARCH
  {NULL, TARGET_CPU_csky_none, NULL, CSKY_BASE_ARCH_NONE, \
  {CSKY_ISA_FEATURE_GET (none)}}
};
234
/* Description of one FPU variant: its user-visible name and the ISA
   feature bits it provides.  */
struct csky_fpu_desc
{
  const char *name;
  enum csky_isa_feature isa_bits[CSKY_ISA_FEATURE_GET (max)];
};

/* Table of all supported FPUs, expanded from the CSKY_FPU entries in
   csky_cores.def.  */
static const struct csky_fpu_desc all_fpus[] =
{
#undef CSKY_FPU
#define CSKY_FPU(NAME, CNAME, ISA) \
  {NAME, {ISA CSKY_ISA_FEATURE_GET (none)}},
#include "csky_cores.def"
#undef CSKY_FPU
};
249
/* Active target architecture.  */
struct csky_build_target
{
  /* Name of the target CPU, if known, or NULL if the target CPU was not
     specified by the user (and inferred from the -march option).  */
  const char *core_name;
  /* Name of the target ARCH.  NULL if there is a selected CPU.  */
  const char *arch_name;
  /* Preprocessor substring (never NULL).  */
  const char *arch_pp_name;
  /* CPU identifier for the core we're compiling for (architecturally).  */
  enum csky_processor_type arch_core;
  /* The base architecture value.  */
  enum csky_base_architecture base_arch;
  /* Bitmap encapsulating the isa_bits for the target environment.  */
  sbitmap isa;
};

/* The single, global description of the target we are building for.  */
struct csky_build_target csky_active_target;
269
270 /* The following are used in the .md file as equivalents to bits. */
271 int csky_arch_isa_features[CSKY_ISA_FEATURE_GET (max)] = {0};
272
273 /* The highest CSKY architecture version supported by the target. */
274 enum csky_base_architecture csky_base_arch = CSKY_TARGET_ARCH_GET (NONE);
275
276 /* Forward definitions of types. */
277 typedef struct minipool_node Mnode;
278 typedef struct minipool_fixup Mfix;
279
280 static GTY(()) int tls_labelno;
281
282
283 /* Maximum constant offset that can be added/subtracted from SP in a
284 single instruction. For ck801, this is for addsp/subsp, otherwise
285 it is the range of addi/subi. */
286 #define CSKY_MAX_SP_ADJUST \
287 (CSKY_TARGET_ARCH (CK801) ? 508 : 4096)
288
289
290 /* Implement TARGET_CPU_CPP_BUILTINS. */
291
292 #define builtin_define(MACRO) cpp_define (pfile, MACRO)
293
294 void
csky_cpu_cpp_builtins(cpp_reader * pfile)295 csky_cpu_cpp_builtins (cpp_reader *pfile)
296 {
297 const char *arch_name = csky_active_target.arch_pp_name;
298 char *pp_name = (char *) alloca (1 + strlen (arch_name) + 4);
299 sprintf (pp_name, "__%s__", arch_name);
300 builtin_define (pp_name);
301
302 builtin_define ("__csky__=2");
303 builtin_define ("__CSKY__=2");
304 builtin_define ("__ckcore__=2");
305 builtin_define ("__CKCORE__=2");
306
307 builtin_define ("__CSKYABIV2__");
308 builtin_define ("__cskyabiv2__");
309 builtin_define ("__CSKYABI__=2");
310 builtin_define ("__cskyabi__=2");
311
312 if (TARGET_BIG_ENDIAN)
313 {
314 builtin_define ("__ckcoreBE__");
315 builtin_define ("__cskyBE__");
316 builtin_define ("__cskybe__");
317 builtin_define ("__CSKYBE__");
318 }
319 else
320 {
321 builtin_define ("__ckcoreLE__");
322 builtin_define ("__cskyLE__");
323 builtin_define ("__cskyle__");
324 builtin_define ("__CSKYLE__");
325 }
326
327 if (TARGET_HARD_FLOAT)
328 {
329 builtin_define ("__csky_hard_float__");
330 builtin_define ("__CSKY_HARD_FLOAT__");
331 if (TARGET_HARD_FLOAT_ABI)
332 {
333 builtin_define ("__csky_hard_float_abi__");
334 builtin_define ("__CSKY_HARD_FLOAT_ABI__");
335 }
336 if (TARGET_SINGLE_FPU)
337 {
338 builtin_define ("__csky_hard_float_fpu_sf__");
339 builtin_define ("__CSKY_HARD_FLOAT_FPU_SF__");
340 }
341 }
342 else
343 {
344 builtin_define ("__csky_soft_float__");
345 builtin_define ("__CSKY_SOFT_FLOAT__");
346 }
347
348 if (CSKY_ISA_FEATURE (fpv2_sf))
349 {
350 builtin_define ("__csky_fpuv2__");
351 builtin_define ("__CSKY_FPUV2__");
352 }
353
354 if (TARGET_ELRW)
355 {
356 builtin_define ("__csky_elrw__");
357 builtin_define ("__CSKY_ELRW__");
358 }
359 if (TARGET_ISTACK)
360 {
361 builtin_define ("__csky_istack__");
362 builtin_define ("__CSKY_ISTACK__");
363 }
364 if (TARGET_MP)
365 {
366 builtin_define ("__csky_mp__");
367 builtin_define ("__CSKY_MP__");
368 }
369 if (TARGET_CP)
370 {
371 builtin_define ("__csky_cp__");
372 builtin_define ("__CSKY_CP__");
373 }
374 if (TARGET_CACHE)
375 {
376 builtin_define ("__csky_cache__");
377 builtin_define ("__CSKY_CACHE__");
378 }
379 if (TARGET_SECURITY)
380 {
381 builtin_define ("__csky_security__");
382 builtin_define ("__CSKY_SECURITY__");
383 }
384 if (TARGET_TRUST)
385 {
386 builtin_define ("__csky_trust__");
387 builtin_define ("__CSKY_TRUST__");
388 }
389 if (TARGET_DSP)
390 {
391 builtin_define ("__csky_dsp__");
392 builtin_define ("__CSKY_DSP__");
393 }
394 if (TARGET_EDSP)
395 {
396 builtin_define ("__csky_edsp__");
397 builtin_define ("__CSKY_EDSP__");
398 }
399 if (TARGET_VDSP)
400 {
401 builtin_define ("__csky_vdsp__");
402 builtin_define ("__CSKY_VDSP__");
403 }
404 }
405
406
407 /******************************************************************
408 * Storage Layout *
409 ******************************************************************/
410
411
412 #undef TARGET_PROMOTE_FUNCTION_MODE
413 #define TARGET_PROMOTE_FUNCTION_MODE \
414 default_promote_function_mode_always_promote
415
416 #undef TARGET_CONSTANT_ALIGNMENT
417 #define TARGET_CONSTANT_ALIGNMENT csky_constant_alignment
418
419
420 /******************************************************************
421 * Stack Layout and Calling Conventions *
422 ******************************************************************/
423
424 #undef TARGET_CAN_ELIMINATE
425 #define TARGET_CAN_ELIMINATE csky_can_eliminate
426
427 #undef TARGET_FUNCTION_ARG
428 #define TARGET_FUNCTION_ARG csky_function_arg
429
430 #undef TARGET_FUNCTION_ARG_ADVANCE
431 #define TARGET_FUNCTION_ARG_ADVANCE csky_function_arg_advance
432
433 #undef TARGET_FUNCTION_VALUE
434 #define TARGET_FUNCTION_VALUE csky_function_value
435
436 #undef TARGET_LIBCALL_VALUE
437 #define TARGET_LIBCALL_VALUE csky_libcall_value
438
439 #undef TARGET_FUNCTION_VALUE_REGNO_P
440 #define TARGET_FUNCTION_VALUE_REGNO_P csky_function_value_regno_p
441
442 #undef TARGET_SPLIT_COMPLEX_ARG
443 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
444
445 #undef TARGET_PROMOTE_PROTOTYPES
446 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
447
448 #undef TARGET_MUST_PASS_IN_STACK
449 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
450
451 #undef TARGET_ARG_PARTIAL_BYTES
452 #define TARGET_ARG_PARTIAL_BYTES csky_arg_partial_bytes
453
454 #undef TARGET_PASS_BY_REFERENCE
455 #define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack
456
457 #undef TARGET_ASM_OUTPUT_MI_THUNK
458 #define TARGET_ASM_OUTPUT_MI_THUNK csky_output_mi_thunk
459
460 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
461 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
462 hook_bool_const_tree_hwi_hwi_const_tree_true
463
464 #undef TARGET_ASM_FUNCTION_PROLOGUE
465 #define TARGET_ASM_FUNCTION_PROLOGUE csky_output_function_prologue
466
467 #undef TARGET_ASM_FUNCTION_EPILOGUE
468 #define TARGET_ASM_FUNCTION_EPILOGUE csky_output_function_epilogue
469
470 #undef TARGET_WARN_FUNC_RETURN
471 #define TARGET_WARN_FUNC_RETURN csky_warn_func_return
472
473 #undef TARGET_RETURN_IN_MEMORY
474 #define TARGET_RETURN_IN_MEMORY csky_return_in_memory
475
476
477 /******************************************************************
478 * Implementing the Varargs Macros *
479 ******************************************************************/
480
481
482 #undef TARGET_SETUP_INCOMING_VARARGS
483 #define TARGET_SETUP_INCOMING_VARARGS csky_setup_incoming_varargs
484
485
486 /******************************************************************
487 * Implicit Calls to Library Routines *
488 ******************************************************************/
489
490
491 #undef TARGET_INIT_LIBFUNCS
492 #define TARGET_INIT_LIBFUNCS csky_init_libfuncs
493
494
495 /******************************************************************
496 * Dividing the Output into Sections (Texts, Data, . . . ) *
497 ******************************************************************/
498
499
500 #undef TARGET_HAVE_TLS
501 #define TARGET_HAVE_TLS TARGET_CSKY_LINUX
502
503
504 /******************************************************************
505 * Defining target-specific uses of __attribute__ *
506 ******************************************************************/
507
508
509 #undef TARGET_ATTRIBUTE_TABLE
510 #define TARGET_ATTRIBUTE_TABLE csky_attribute_table
511
512 #undef TARGET_OPTION_OVERRIDE
513 #define TARGET_OPTION_OVERRIDE csky_option_override
514
515
516 /* Implement the BRANCH_COST target macro. */
517
518 int
csky_default_branch_cost(bool speed_p ATTRIBUTE_UNUSED,bool predictable_p ATTRIBUTE_UNUSED)519 csky_default_branch_cost (bool speed_p ATTRIBUTE_UNUSED,
520 bool predictable_p ATTRIBUTE_UNUSED)
521 {
522 return csky_branch_cost;
523 }
524
525 bool
csky_default_logical_op_non_short_circuit(void)526 csky_default_logical_op_non_short_circuit (void)
527 {
528 return BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2;
529 }
530
531 /******************************************************************
532 * Register Usage *
533 ******************************************************************/
534
535 #undef TARGET_HARD_REGNO_NREGS
536 #define TARGET_HARD_REGNO_NREGS csky_hard_regno_nregs
537
538 #undef TARGET_HARD_REGNO_MODE_OK
539 #define TARGET_HARD_REGNO_MODE_OK csky_hard_regno_mode_ok
540
541 #undef TARGET_MODES_TIEABLE_P
542 #define TARGET_MODES_TIEABLE_P csky_modes_tieable_p
543
544 #undef TARGET_CAN_CHANGE_MODE_CLASS
545 #define TARGET_CAN_CHANGE_MODE_CLASS csky_can_change_mode_class
546
547 #undef TARGET_CONDITIONAL_REGISTER_USAGE
548 #define TARGET_CONDITIONAL_REGISTER_USAGE csky_conditional_register_usage
549
550 #undef TARGET_CLASS_LIKELY_SPILLED_P
551 #define TARGET_CLASS_LIKELY_SPILLED_P csky_class_likely_spilled_p
552
553 #undef TARGET_PREFERRED_RELOAD_CLASS
554 #define TARGET_PREFERRED_RELOAD_CLASS csky_preferred_reload_class
555
556 #undef TARGET_CLASS_MAX_NREGS
557 #define TARGET_CLASS_MAX_NREGS csky_class_max_nregs
558
559 #undef TARGET_SECONDARY_RELOAD
560 #define TARGET_SECONDARY_RELOAD csky_secondary_reload
561
562 #undef TARGET_SPILL_CLASS
563 #define TARGET_SPILL_CLASS csky_spill_class
564
565
566 /******************************************************************
567 * Addressing Modes *
568 ******************************************************************/
569
570
571 #undef TARGET_CANNOT_FORCE_CONST_MEM
572 #define TARGET_CANNOT_FORCE_CONST_MEM csky_cannot_force_const_mem
573
574 #undef TARGET_LEGITIMATE_CONSTANT_P
575 #define TARGET_LEGITIMATE_CONSTANT_P csky_legitimate_constant_p
576
577 #undef TARGET_LEGITIMIZE_ADDRESS
578 #define TARGET_LEGITIMIZE_ADDRESS csky_legitimize_address
579
580 #undef TARGET_LEGITIMATE_ADDRESS_P
581 #define TARGET_LEGITIMATE_ADDRESS_P csky_legitimate_address_p
582
583
584 /******************************************************************
585 * Others *
586 ******************************************************************/
587
588
589 #undef TARGET_CANNOT_COPY_INSN_P
590 #define TARGET_CANNOT_COPY_INSN_P csky_cannot_copy_insn_p
591
592
593 /******************************************************************
594 * Assembler Format *
595 ******************************************************************/
596
597
598 #undef TARGET_PRINT_OPERAND
599 #define TARGET_PRINT_OPERAND csky_print_operand
600
601 #undef TARGET_PRINT_OPERAND_ADDRESS
602 #define TARGET_PRINT_OPERAND_ADDRESS csky_print_operand_address
603
604 #undef TARGET_ASM_UNALIGNED_HI_OP
605 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
606
607 #undef TARGET_ASM_UNALIGNED_SI_OP
608 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
609
610 #undef TARGET_DWARF_REGISTER_SPAN
611 #define TARGET_DWARF_REGISTER_SPAN csky_dwarf_register_span
612
613
614 /******************************************************************
615 * Miscellaneous Parameters *
616 ******************************************************************/
617
618
619 #undef TARGET_MACHINE_DEPENDENT_REORG
620 #define TARGET_MACHINE_DEPENDENT_REORG csky_reorg
621
622 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
623 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS csky_allocate_stack_slots_for_args
624
625 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
626 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
627
628
629 /******************************************************************
630 * Trampolines for Nested Functions *
631 ******************************************************************/
632
633
634 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
635 #define TARGET_ASM_TRAMPOLINE_TEMPLATE csky_asm_trampoline_template
636 #undef TARGET_TRAMPOLINE_INIT
637 #define TARGET_TRAMPOLINE_INIT csky_trampoline_init
638
639 /* The low bit is ignored by jsr and jmp instructions so is safe to use. */
640 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
641 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
642
643 /******************************************************************
644 * Describing Relative Costs of Operations *
645 ******************************************************************/
646
647
648 #undef TARGET_REGISTER_MOVE_COST
649 #define TARGET_REGISTER_MOVE_COST csky_register_move_cost
650
651 #undef TARGET_MEMORY_MOVE_COST
652 #define TARGET_MEMORY_MOVE_COST csky_memory_move_cost
653
654 #undef TARGET_RTX_COSTS
655 #define TARGET_RTX_COSTS csky_rtx_costs
656
657 #undef TARGET_ADDRESS_COST
658 #define TARGET_ADDRESS_COST csky_address_cost
659
660
661 /******************************************************************
662 * Anchor address *
663 ******************************************************************/
664
665
666 /* FIXME: the max offset is related to mode size, the following is
667 defined according to SImode. How to deal with HImode and
668 QImode, and should the min offset be defined? */
669 #undef TARGET_MAX_ANCHOR_OFFSET
670 #define TARGET_MAX_ANCHOR_OFFSET \
671 ((TARGET_MINI_REGISTERS && optimize_size) ? 127 : 4095)
672
673
674 /******************************************************************
675 * Condition Code Status *
676 ******************************************************************/
677
678
679 #undef TARGET_FIXED_CONDITION_CODE_REGS
680 #define TARGET_FIXED_CONDITION_CODE_REGS csky_fixed_condition_code_regs
681
682
683 /******************************************************************
684 * Adjusting the Instruction Scheduler *
685 ******************************************************************/
686
687
688 #undef TARGET_SCHED_ISSUE_RATE
689 #define TARGET_SCHED_ISSUE_RATE csky_sched_issue_rate
690
691 #undef TARGET_SCHED_ADJUST_COST
692 #define TARGET_SCHED_ADJUST_COST csky_sched_adjust_cost
693
694
695 /* The declaration of functions. */
696 static void push_csky_minipool_fix (rtx_insn *, HOST_WIDE_INT, rtx *,
697 machine_mode, rtx);
698 static void csky_print_operand (FILE *stream, rtx x, int code);
699
700
/* Define a table to map ISR attribute arguments onto function type
   modifiers.  */

/* One mapping entry: the attribute argument string and the CSKY_FT_*
   value it selects.  */
typedef struct
{
  const char *const arg;
  const unsigned long return_value;
} isr_attribute_entry;

/* Recognized ISR attribute arguments, in both spellings; the table is
   terminated by a NULL sentinel mapping to a normal function.  */
static const isr_attribute_entry isr_attribute_map[] =
{
  {"irq", CSKY_FT_ISR },
  {"IRQ", CSKY_FT_ISR },
  {"fiq", CSKY_FT_FIQ },
  {"FIQ", CSKY_FT_FIQ },
  {NULL, CSKY_FT_NORMAL }
};
718
719
720 /* Return the function type of the current function, if it has not been
721 determined, return CSKY_FT_UNKNOWN. */
722
723 static unsigned long
get_csky_isr_type(tree argument)724 get_csky_isr_type (tree argument)
725 {
726 const isr_attribute_entry *ptr;
727 const char *arg;
728
729 /* if argument is NULL, set default value ISR. */
730 if (argument == NULL_TREE)
731 return CSKY_FT_ISR;
732
733 if (TREE_VALUE (argument) == NULL_TREE
734 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
735 return CSKY_FT_UNKNOWN;
736
737 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
738
739 for (ptr = isr_attribute_map; ptr->arg != NULL; ptr++)
740 if (strcmp (arg, ptr->arg) == 0)
741 return ptr->return_value;
742
743 return CSKY_FT_UNKNOWN;
744 }
745
746 /* Classify cfun as a normal function or some sort of interrupt
747 handler, and set the corresponding bits in cfun->machine->func_type. */
748
749 static unsigned long
get_csky_current_func_type(void)750 get_csky_current_func_type (void)
751 {
752 if (CSKY_FUNCTION_TYPE (cfun->machine->func_type) == CSKY_FT_UNKNOWN)
753 {
754 unsigned long type = CSKY_FT_UNKNOWN;
755 tree a;
756 tree attr;
757
758 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
759
760 attr = DECL_ATTRIBUTES (current_function_decl);
761 a = lookup_attribute ("naked", attr);
762 if (a != NULL_TREE)
763 type |= CSKY_FT_NAKED;
764 a = lookup_attribute ("isr", attr);
765 if (a == NULL_TREE)
766 a = lookup_attribute ("interrupt", attr);
767 if (a == NULL_TREE)
768 type |= CSKY_FT_NORMAL;
769 else
770 type |= get_csky_isr_type (TREE_VALUE (a));
771
772 cfun->machine->func_type = type;
773 }
774
775 return cfun->machine->func_type;
776 }
777
/* These typedefs are located at the start of this file, so that
   they can be used in the prototypes there.  This comment is to
   remind readers of that fact so that the following structures
   can be understood more easily.

     typedef struct minipool_node    Mnode;
     typedef struct minipool_fixup   Mfix;  */

/* One constant-pool entry in the minipool being assembled.  */
struct minipool_node
{
  /* Doubly linked chain of entries.  */
  Mnode *next;
  Mnode *prev;
  /* The maximum offset into the code that this entry can be placed.  While
     pushing fixes for forward references, all entries are sorted in order
     of increasing max_address.  */
  HOST_WIDE_INT max_address;
  /* Similarly for an entry inserted for a backwards ref.  */
  HOST_WIDE_INT min_address;
  /* The number of fixes referencing this entry.  This can become zero
     if we "unpush" an entry.  In this case we ignore the entry when we
     come to emit the code.  */
  int refcount;
  /* The offset from the start of the minipool.  */
  HOST_WIDE_INT offset;
  /* The value in table.  */
  rtx value;
  /* The mode of value.  */
  machine_mode mode;
  /* The size of the value.  */
  int fix_size;
};
810
/* A fixup records one constant operand in the insn stream that must be
   replaced by a reference into the minipool.  */
struct minipool_fixup
{
  /* Singly linked chain of fixes, in insn-stream order.  */
  Mfix *next;
  /* The insn referencing the constant.  */
  rtx_insn *insn;
  /* Estimated address of INSN within the function.  */
  HOST_WIDE_INT address;
  /* Location of the operand within INSN to be patched.  */
  rtx *loc;
  /* Mode of the constant operand.  */
  machine_mode mode;
  /* Size in bytes of the pool entry needed for the constant.  */
  int fix_size;
  /* The constant itself.  */
  rtx value;
  /* The pool entry assigned to this fix, once placed.  */
  Mnode *minipool;
  /* Maximum forward/backward distances from ADDRESS at which the pool
     entry can be placed -- presumably derived from the load-offset
     range; confirm in push_csky_minipool_fix.  */
  HOST_WIDE_INT forwards;
  HOST_WIDE_INT backwards;
};
824
/* Head and tail of the doubly linked list of entries in the minipool
   currently being built.  */
static Mnode *minipool_vector_head;
static Mnode *minipool_vector_tail;
/* Label emitted at the start of the current minipool.  */
static rtx minipool_vector_label;
/* Counter used to generate unique constant-pool label numbers.  */
static HOST_WIDE_INT constpool_label_no = 0;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *minipool_startobj;
/* The linked list of all minipool fixes required for this function.  */
Mfix *minipool_fix_head;
Mfix *minipool_fix_tail;
/* The fix entry for the current minipool, once it has been placed.  */
Mfix *minipool_barrier;
838
/* Allow GC scanning of the minipool obstack.
   NOTE(review): as written this only initializes the obstack and
   records its base; no GC roots appear to be registered here --
   confirm against the analogous function in other backends.  */
static void
csky_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
846
847 /* Implement TARGET_CONSTANT_ALIGNMENT.
848 Make strings word-aligned so strcpy from constants will be faster. */
849 static HOST_WIDE_INT
csky_constant_alignment(const_tree exp,HOST_WIDE_INT align)850 csky_constant_alignment (const_tree exp, HOST_WIDE_INT align)
851 {
852 if (TREE_CODE (exp) == STRING_CST
853 && !optimize_size
854 && align < BITS_PER_WORD)
855 return BITS_PER_WORD;
856 return align;
857 }
858
859 /* Record that there is a natural barrier in the insn stream at
860 ADDRESS. */
861
862 static void
push_csky_minipool_barrier(rtx_insn * insn,HOST_WIDE_INT address)863 push_csky_minipool_barrier (rtx_insn *insn, HOST_WIDE_INT address)
864 {
865 Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));
866
867 fix->insn = insn;
868 fix->address = address;
869
870 fix->next = NULL;
871 if (minipool_fix_head != NULL)
872 minipool_fix_tail->next = fix;
873 else
874 minipool_fix_head = fix;
875
876 minipool_fix_tail = fix;
877 }
878
879 /* Compute the size of a vector jump table. */
880
881 static HOST_WIDE_INT
get_csky_jump_table_size(rtx insn)882 get_csky_jump_table_size (rtx insn)
883 {
884 /* ADDR_VECs only take room if read-only data does into the text
885 section. */
886 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
887 {
888 rtx body = PATTERN (insn);
889 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
890 HOST_WIDE_INT size;
891 HOST_WIDE_INT modesize;
892
893 modesize = GET_MODE_SIZE (GET_MODE (body));
894 size = modesize * XVECLEN (body, elt);
895 switch (modesize)
896 {
897 case 1:
898 /* Round up size of TBB table to a halfword boundary. */
899 size = (size + 1) & ~(HOST_WIDE_INT)1;
900 break;
901 case 2:
902 /* No padding necessary for TBH. */
903 break;
904 case 4:
905 break;
906 default:
907 gcc_unreachable ();
908 }
909 return size;
910 }
911
912 return 0;
913 }
914
915
/* Scan INSN and note any of its operands that need fixing.
   If DO_PUSHES is false we do not actually push any of the fixups
   needed.  The function returns TRUE if any fixups were needed/pushed.
   ADDRESS is the estimated address of INSN, forwarded to
   push_csky_minipool_fix for range calculations.  */

static bool
note_csky_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address,
			     int do_pushes)
{
  bool result = false;
  int opno;

  /* Extract operands and record the matched alternative; which_op_alt
     below relies on this having been done.  */
  extract_constrain_insn (insn);

  if (recog_data.n_alternatives == 0)
    return false;

  /* Fill in recog_op_alt with information about the constraints of
     this insn.  */
  preprocess_constraints (insn);

  const operand_alternative *op_alt = which_op_alt ();
  for (opno = 0; opno < recog_data.n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs.  */
      if (recog_data.operand_type[opno] != OP_IN)
	continue;

      /* If this alternative is a memory reference, then any mention
	 of constants in this alternative is really to fool reload
	 into allowing us to accept one there.  We need to fix them up
	 now so that we output the right code.  */
      if (op_alt[opno].memory_ok)
	{
	  rtx op = recog_data.operand[opno];

	  if (CONSTANT_P (op))
	    {
	      if (do_pushes)
		push_csky_minipool_fix (insn, address,
					recog_data.operand_loc[opno],
					recog_data.operand_mode[opno], op);
	      result = true;
	    }
	}
    }

  return result;
}
964
965
/* Add a constant to the minipool for a forward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.
   FIX describes the referencing insn, the constant, and the placement
   constraints.  */

static Mnode *
add_csky_minipool_forward_ref (Mfix *fix)
{
  /* If set, max_mp is the first pool_entry that has a lower
     constraint than the one we are trying to add.  */
  Mnode *max_mp = NULL;
  HOST_WIDE_INT max_address = fix->address + fix->forwards;
  Mnode *mp;

  /* If the minipool starts before the end of FIX->INSN then this FIX
     cannot be placed into the current pool.  Furthermore, adding the
     new constant pool entry may cause the pool to start FIX_SIZE bytes
     earlier.  */
  if (minipool_vector_head
      && (fix->address + get_attr_length (fix->insn)
	  >= minipool_vector_head->max_address - fix->fix_size))
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
	  && fix->mode == mp->mode
	  && (GET_CODE (fix->value) != CODE_LABEL
	      || (CODE_LABEL_NUMBER (fix->value)
		  == CODE_LABEL_NUMBER (mp->value)))
	  && rtx_equal_p (fix->value, mp->value))
	{
	  /* More than one fix references this entry.  */
	  mp->refcount++;
	  return mp;
	}

      /* Note the insertion point if necessary.  */
      if (max_mp == NULL && mp->max_address > max_address)
	max_mp = mp;
    }

  /* The value is not currently in the minipool, so we need to create
     a new entry for it.  If MAX_MP is NULL, the entry will be put on
     the end of the list since the placement is less constrained than
     any existing entry.  Otherwise, we insert the new fix before
     MAX_MP and, if necessary, adjust the constraints on the other
     entries.  */
  mp = XNEW (Mnode);
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  /* Not yet required for a backwards ref.  */
  mp->min_address = -65536;

  if (max_mp == NULL)
    {
      /* Append at the tail; also create the pool label if this is the
	 very first entry.  */
      mp->max_address = max_address;
      mp->next = NULL;
      mp->prev = minipool_vector_tail;

      if (mp->prev == NULL)
	{
	  minipool_vector_head = mp;
	  minipool_vector_label
	    = gen_csky_constpool_label (gen_rtx_CONST_INT (VOIDmode,
							   constpool_label_no++));
	}
      else
	mp->prev->next = mp;

      minipool_vector_tail = mp;
    }
  else
    {
      /* Insert before MAX_MP, tightening our own constraint if the
	 entries that follow force the pool start earlier.  */
      if (max_address > max_mp->max_address - mp->fix_size)
	mp->max_address = max_mp->max_address - mp->fix_size;
      else
	mp->max_address = max_address;

      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;
      if (mp->prev != NULL)
	mp->prev->next = mp;
      else
	minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
	 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}
1072
1073
1074 /* Return the cost of forcibly inserting a barrier after INSN. */
1075
1076 static int
get_csky_barrier_cost(rtx_insn * insn)1077 get_csky_barrier_cost (rtx_insn *insn)
1078 {
1079 /* Basing the location of the pool on the loop depth is preferable,
1080 but at the moment, the basic block information seems to be
1081 corrupt by this stage of the compilation. */
1082 int base_cost = 50;
1083 rtx next = next_nonnote_insn (insn);
1084
1085 if (next != NULL && GET_CODE (next) == CODE_LABEL)
1086 base_cost -= 20;
1087
1088 switch (GET_CODE (insn))
1089 {
1090 case CODE_LABEL:
1091 /* It will always be better to place the table before the label, rather
1092 than after it. */
1093 return 50;
1094
1095 case INSN:
1096 case CALL_INSN:
1097 return base_cost;
1098
1099 case JUMP_INSN:
1100 return base_cost - 10;
1101
1102 default:
1103 return base_cost + 10;
1104 }
1105 }
1106
1107
/* Find the best place in the insn stream in the range
   (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
   Create the barrier by inserting a jump and add a new fix entry for
   it.  */
static Mfix *
create_csky_fix_barrier (Mfix *fix, Mfix *fix_next,
			 HOST_WIDE_INT max_address)
{
  rtx_barrier *barrier;
  /* Start scanning from FIX's insn, or the start of the function when
     FIX is NULL.  */
  rtx_insn *from = (fix ? fix->insn : get_insns ());
  /* The instruction after which we will insert the jump.  */
  rtx_insn *selected = NULL;
  int selected_cost;
  /* The address at which the jump instruction will be placed.  */
  HOST_WIDE_INT selected_address = 0;
  Mfix *new_fix;
  /* Running byte address of the insn being scanned.  */
  HOST_WIDE_INT count = (fix ? fix->address : 0);
  HOST_WIDE_INT max_count = max_address;
  rtx_code_label *label = gen_label_rtx ();

  selected_cost = get_csky_barrier_cost (from);

  /* Scan forward, remembering the cheapest in-range insertion point.
     Ties go to the later insn (<=), pushing the pool as far forward
     as possible.  */
  while (from && count < max_count)
    {
      int new_cost;
      rtx_jump_table_data *table;

      /* Count the length of this insn.  */
      count += get_attr_length (from);

      /* If there is a jump table, add its length.  */
      if (tablejump_p (from, NULL, &table))
	{
	  count += get_csky_jump_table_size (table);

	  /* Jump tables aren't in a basic block, so base the cost on
	     the dispatch insn.  If we select this location, we will
	     still put the pool after the table.  */
	  new_cost = get_csky_barrier_cost (from);

	  if (count < max_count
	      && (!selected || new_cost <= selected_cost))
	    {
	      selected = table;
	      selected_cost = new_cost;
	      selected_address = count;
	    }

	  /* Continue after the dispatch table.  */
	  from = NEXT_INSN (table);
	  continue;
	}

      new_cost = get_csky_barrier_cost (from);

      if (count < max_count
	  && (!selected || new_cost <= selected_cost))
	{
	  selected = from;
	  selected_cost = new_cost;
	  selected_address = count;
	}

      from = NEXT_INSN (from);
    }

  /* Make sure that we found a place to insert the jump.  */
  gcc_assert (selected);

  /* Create a new JUMP_INSN that branches around a barrier, so that
     execution falls past the pool that will be dumped there.  */
  from = emit_jump_insn_after (gen_jump (label), selected);
  JUMP_LABEL (from) = label;
  barrier = emit_barrier_after (from);
  emit_label_after (label, barrier);

  /* Create a minipool barrier entry for the new barrier and link it
     into the fix list right after FIX (or at the given successor).  */
  new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
  new_fix->insn = barrier;
  new_fix->address = selected_address;
  if (fix)
    {
      new_fix->next = fix->next;
      fix->next = new_fix;
    }
  else
    new_fix->next = fix_next;

  return new_fix;
}
1197
1198
1199 /* Print a symbolic form of the constant X to the dump file F.
1200 This is used for dump output for -mconstpool in the target-dependent
1201 reorg pass. */
1202
1203 static void
print_csky_value(FILE * f,rtx x)1204 print_csky_value (FILE *f, rtx x)
1205 {
1206 switch (GET_CODE (x))
1207 {
1208 case CONST_INT:
1209 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
1210 return;
1211
1212 case CONST_DOUBLE:
1213 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
1214 return;
1215
1216 case CONST_VECTOR:
1217 {
1218 int i;
1219
1220 fprintf (f, "<");
1221 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
1222 {
1223 fprintf (f, HOST_WIDE_INT_PRINT_HEX,
1224 INTVAL (CONST_VECTOR_ELT (x, i)));
1225 if (i < (CONST_VECTOR_NUNITS (x) - 1))
1226 fputc (',', f);
1227 }
1228 fprintf (f, ">");
1229 }
1230 return;
1231
1232 case CONST_STRING:
1233 fprintf (f, "\"%s\"", XSTR (x, 0));
1234 return;
1235
1236 case SYMBOL_REF:
1237 fprintf (f, "`%s'", XSTR (x, 0));
1238 return;
1239
1240 case LABEL_REF:
1241 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
1242 return;
1243
1244 case CONST:
1245 print_csky_value (f, XEXP (x, 0));
1246 return;
1247
1248 case PLUS:
1249 print_csky_value (f, XEXP (x, 0));
1250 fprintf (f, "+");
1251 print_csky_value (f, XEXP (x, 1));
1252 return;
1253
1254 case PC:
1255 fprintf (f, "pc");
1256 return;
1257
1258 default:
1259 fprintf (f, "????");
1260 return;
1261 }
1262 }
1263
1264
/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn since the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  */

static void
push_csky_minipool_fix (rtx_insn *insn, HOST_WIDE_INT address, rtx *loc,
			machine_mode mode, rtx value)
{
  /* Reach of the 16-bit lrw insn, with/without the elrw extension.  */
#define CSKY_ELRW16_RANGE  1400
#define CSKY_LRW16_RANGE   700
#define CSKY_CONSTANT_POOL_RANGE  (TARGET_ELRW ? CSKY_ELRW16_RANGE \
					       : CSKY_LRW16_RANGE)

  /* Fixes less than a word need padding out to a word boundary.  */
#define CSKY_MINIPOOL_FIX_SIZE(mode) \
  (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)

  Mfix *fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (*fix));

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->fix_size = CSKY_MINIPOOL_FIX_SIZE (mode);
  fix->value = value;
  /* Only forward references are used: pools are always dumped after
     the referencing insn.  */
  fix->forwards = CSKY_CONSTANT_POOL_RANGE;
  fix->backwards = 0;
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  /* Describe the fix in the dump file for -fdump-rtl-mach etc.  */
  if (dump_file)
    {
      fprintf (dump_file,
	       ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
	       GET_MODE_NAME (mode),
	       INSN_UID (insn), (unsigned long) address,
	       -1 * (long)fix->backwards, (long)fix->forwards);
      print_csky_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
1322
1323
1324 /* Fill in the offsets for minipool entries. */
1325
1326 static void
assign_csky_minipool_offsets(Mfix * barrier)1327 assign_csky_minipool_offsets (Mfix *barrier)
1328 {
1329 HOST_WIDE_INT offset = 0;
1330 Mnode *mp;
1331
1332 minipool_barrier = barrier;
1333
1334 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
1335 {
1336 mp->offset = offset;
1337
1338 if (mp->refcount > 0)
1339 offset += mp->fix_size;
1340 }
1341 }
1342
1343
/* Output the literal table after SCAN.  Returns the total size in
   bytes of the constants emitted, and resets the pool state so a new
   pool can be accumulated.  */

static HOST_WIDE_INT
dump_csky_minipool (rtx_insn *scan)
{
  Mnode *mp;
  Mnode *nmp;
  HOST_WIDE_INT pool_length = 0;

  if (dump_file)
    fprintf (dump_file,
	     ";; Emitting minipool after insn %u;\
	     address %ld; align %d (bytes)\n",
	     INSN_UID (scan), (unsigned long) minipool_barrier->address, 4);

  /* Align the pool and emit the label that the fixed-up loads
     reference.  */
  scan = emit_insn_after (gen_align_4 (), scan);
  scan = emit_insn_after (minipool_vector_label, scan);

  /* Emit each referenced entry in order; free every node (referenced
     or not) as we go.  */
  for (mp = minipool_vector_head; mp != NULL; mp = nmp)
    {
      if (mp->refcount > 0)
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, ";; Offset %u, min %ld, max %ld ",
		       (unsigned) mp->offset, (unsigned long) mp->min_address,
		       (unsigned long) mp->max_address);
	      print_csky_value (dump_file, mp->value);
	      fputc ('\n', dump_file);
	    }

	  /* Entries are word- or doubleword-sized only; see
	     CSKY_MINIPOOL_FIX_SIZE.  */
	  switch (mp->fix_size)
	    {
	    case 4:
	      scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
	      pool_length += 4;
	      break;
	    case 8:
	      scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
	      pool_length += 8;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      nmp = mp->next;
      free (mp);
    }

  /* Reset for the next pool and terminate the pool with a barrier.  */
  minipool_vector_head = minipool_vector_tail = NULL;
  scan = emit_barrier_after (scan);

  return pool_length;
}
1399
1400 /* Return true if INSN is a minipool load or instruction that will be
1401 converted to one. It is assumed that INSN has type attribute "load". */
1402
1403 bool
csky_minipool_load_p(rtx_insn * insn)1404 csky_minipool_load_p (rtx_insn *insn)
1405 {
1406 rtx op1, addr;
1407
1408 extract_insn_cached (insn);
1409
1410 op1 = recog_data.operand[1];
1411
1412 /* This is a constant that has not yet been turned into
1413 a minipool load. */
1414 if (CONSTANT_P (op1))
1415 return true;
1416
1417 /* Constant pool loads are label_refs. */
1418 if (GET_CODE (op1) == ZERO_EXTEND || GET_CODE (op1) == SIGN_EXTEND)
1419 op1 = XEXP (op1, 0);
1420 if (GET_CODE (op1) != MEM)
1421 return false;
1422 addr = XEXP (op1, 0);
1423 if (GET_CODE (addr) == PLUS && CONST_INT_P (XEXP (addr, 1)))
1424 addr = XEXP (addr, 0);
1425 return GET_CODE (addr) == LABEL_REF;
1426 }
1427
1428
1429 /* Compute the attribute "length" of push or pop insn, according to
1430 the registers it uses. */
1431
1432 int
csky_compute_pushpop_length(rtx * operands)1433 csky_compute_pushpop_length (rtx *operands)
1434 {
1435 rtx parallel_op = operands[2];
1436 /* Initialize to elements number of PARALLEL. */
1437 unsigned indx = XVECLEN (parallel_op, 0) - 1;
1438 unsigned first_indx = 0;
1439 unsigned regno = REGNO (operands[1]);
1440
1441 if (regno > CSKY_LR_REGNUM)
1442 return 4;
1443
1444 /* Check each register in the list. */
1445 for (; indx > first_indx; indx--)
1446 {
1447 regno = REGNO (XEXP (XVECEXP (parallel_op, 0, indx), 0));
1448 /* If a register number higher than 15 is included, a 32-bit insn
1449 is used. */
1450 if (regno > CSKY_LR_REGNUM)
1451 return 4;
1452 }
1453
1454 return 2;
1455 }
1456
/* Emit constant pools for -mconstpool.  Scans the whole function,
   records every constant operand that must come from a pool, then
   repeatedly builds a pool of in-range constants, places it after a
   (possibly newly created) barrier, and rewrites the referencing
   insns to load from the pool.  */
static void
csky_emit_constant_pools (void)
{
  rtx_insn *insn;
  HOST_WIDE_INT address = 0;
  Mfix *fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  insn = get_insns ();
  gcc_assert (NOTE_P (insn));

  /* Scan the insns and record the operands that need fixing.  */
  for (insn = next_nonnote_insn (insn); insn;
       insn = next_nonnote_insn (insn))
    {
      if (BARRIER_P (insn))
	push_csky_minipool_barrier (insn, address);
      else if (INSN_P (insn))
	{
	  rtx_jump_table_data *table;

	  note_csky_invalid_constants (insn, address, true);
	  address += get_attr_length (insn);

	  /* If the insn is a vector jump, add the size of the table
	     and skip the table.  */
	  if (tablejump_p (insn, NULL, &table))
	    {
	      address += get_csky_jump_table_size (table);
	      insn = table;
	    }
	}
    }

  fix = minipool_fix_head;

  /* Now scan the fixups and perform the required changes.  Each
     iteration of this loop emits one pool.  */
  while (fix)
    {
      Mfix *ftmp;
      Mfix *last_added_fix;
      Mfix *last_barrier = NULL;
      Mfix *this_fix;
      Mnode *mp;
      bool has_pending_const = false;

      /* Check if there is any pending constant not processed.  */
      for (mp = minipool_vector_head; mp; mp = mp->next)
	if (mp->refcount > 0)
	  {
	    has_pending_const = true;
	    break;
	  }

      /* If no pending constant, skip over barrier insns.  */
      if (has_pending_const == false)
	{
	  while (fix && BARRIER_P (fix->insn))
	    fix = fix->next;
	  if (fix == NULL)
	    break;
	}

      last_added_fix = NULL;

      /* Greedily add fixes to the current pool until one no longer
	 fits, remembering the last barrier seen along the way.  */
      for (ftmp = fix; ftmp; ftmp = ftmp->next)
	{
	  if (BARRIER_P (ftmp->insn))
	    {
	      if (minipool_vector_head
		  && ftmp->address >= minipool_vector_head->max_address)
		break;

	      last_barrier = ftmp;
	    }
	  else
	    {
	      ftmp->minipool = add_csky_minipool_forward_ref (ftmp);
	      if (ftmp->minipool == NULL)
		break;
	    }
	  last_added_fix = ftmp;  /* Keep track of the last fix added.  */
	}

      /* If the last added fix is a barrier, dump minipool after it.  */
      if (last_added_fix && BARRIER_P (last_added_fix->insn))
	ftmp = last_barrier;
      else
	{
	  /* ftmp is first fix that we can't fit into this pool.
	     Insert a new barrier in the code somewhere between the previous
	     fix and this one, and arrange to jump around it.  */
	  HOST_WIDE_INT max_address;

	  /* The last item on the list of fixes must be a barrier, so
	     we can never run off the end of the list of fixes without
	     last_barrier being set.  */
	  gcc_assert (ftmp);

	  /* Check that there isn't another fix that is in range that
	     we couldn't fit into this pool because the pool was
	     already too large: we need to put the pool before such an
	     instruction.  The pool itself may come just after the
	     fix because create_csky_fix_barrier also allows space for a
	     jump instruction.  */
	  max_address = minipool_vector_head->max_address;
	  if (ftmp->address < max_address)
	    max_address = ftmp->address + 1;
	  last_barrier = create_csky_fix_barrier (last_added_fix, ftmp,
						  max_address);
	}

      assign_csky_minipool_offsets (last_barrier);

      /* Scan over the fixes we have identified for this pool, fixing them
	 up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp != this_fix;
	   this_fix = this_fix->next)
	{
	  if (GET_CODE (this_fix->insn) != BARRIER)
	    {
	      /* Replace the constant operand with a pool reference:
		 a MEM at the pool label plus the entry's offset.  */
	      rtx addr
		= plus_constant (Pmode,
				 gen_rtx_LABEL_REF (VOIDmode,
						    minipool_vector_label),
				 this_fix->minipool->offset);
	      rtx insn_body = PATTERN (this_fix->insn);
	      rtx src = XEXP (insn_body, 1);
	      *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
	      if (GET_CODE (this_fix->value) == SYMBOL_REF)
		emit_insn_after (gen_rtx_UNSPEC_VOLATILE (VOIDmode,
							  gen_rtvec (1, src),
							  VUNSPEC_SYMBOL_REF),
				 this_fix->insn);
	    }
	}
      dump_csky_minipool (last_barrier->insn);
      fix = ftmp;
      if (fix->next == NULL)
	break;
    }

  /* Free the minipool memory.  */
  obstack_free (&minipool_obstack, minipool_startobj);
}
1606
1607
1608 /* Implement TARGET_MACHINE_DEPENDENT_REORG. This handles
1609 -mconstpool output. */
1610
1611 static void
csky_reorg(void)1612 csky_reorg (void)
1613 {
1614 if (TARGET_CONSTANT_POOL)
1615 csky_emit_constant_pools ();
1616 }
1617
1618
1619 /* Check to see if the current function contains a branch insn with the
1620 far jump attribute set. Such a function uses the LR register. */
1621
1622 static bool
csky_far_jump_used_p(void)1623 csky_far_jump_used_p (void)
1624 {
1625 rtx_insn *insn;
1626 if (cfun->machine->far_jump_used)
1627 return true;
1628
1629 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1630 if (GET_CODE (insn) == JUMP_INSN
1631 /* Ignore tablejump patterns. */
1632 && GET_CODE (PATTERN (insn)) != ADDR_VEC
1633 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
1634 && get_attr_far_jump (insn) == FAR_JUMP_YES)
1635 {
1636 cfun->machine->far_jump_used = 1;
1637 return true;
1638 }
1639 return false;
1640 }
1641
1642
/* Return the mask of registers used by the current function.  Set
   COUNT to the number of registers used.  */

static unsigned int
get_csky_live_regs (int *count)
{
  int reg;
  unsigned int live_regs_mask = 0;

  *count = 0;
  for (reg = 0; reg < CSKY_NGPR_REGS; reg++)
    {
      bool save = false;

      /* Ignore unsupported registers: CK801 lacks r9-r12, and
	 CK801/802/803 have nothing above r15.  */
      if (CSKY_TARGET_ARCH (CK801) && reg > 8 && reg < 13)
	continue;
      if ((CSKY_TARGET_ARCH (CK801)
	   || CSKY_TARGET_ARCH (CK802)
	   || CSKY_TARGET_ARCH (CK803))
	  && reg > 15)
	break;

      /* Caller-saved registers marked as used.  */
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	save = true;

      /* Frame pointer marked used.  */
      else if (frame_pointer_needed && reg == FRAME_POINTER_REGNUM)
	save = true;

      /* This is required for CK801/802 where FP is a fixed reg, otherwise
	 we end up with no FP value available to the DWARF-2 unwinder.  */
      else if (crtl->calls_eh_return && reg == FRAME_POINTER_REGNUM)
	save = true;

      /* CK801/802 also need special handling for LR because it's clobbered
	 by far jumps.  */
      else if ((CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
	       && reg == CSKY_LR_REGNUM
	       && (!crtl->is_leaf || csky_far_jump_used_p ()))
	save = true;

      /* Register is used for EH data return.  */
      else if (crtl->calls_eh_return
	       && reg >= CSKY_FIRST_EH_RETDATA_REGNUM
	       && reg <= CSKY_LAST_EH_RETDATA_REGNUM)
	save = true;

      /* We need a temporary reg to hold the offset for adjusting the SP
	 for a large stack frame.  Note this is a plain `if', not part
	 of the `else if' chain above, so it applies even when one of
	 the earlier conditions also matched.  */
      if (reg == CSKY_STACKADJUST_REGNUM
	  && cfun->machine->reg_offset > CSKY_MAX_SP_ADJUST * 2)
	save = true;

      /* Add reg to the mask.  */
      if (save)
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }
  return live_regs_mask;
}
1707
/* Compute the stack frame layout, storing sizes of the various pieces
   in cfun->machine.

   Stack frames constructed in the prologue look like:
			    ... caller's frame ...
	incoming SP ->	caller's outbound argument overflow
			argument spill
	optional FP ->	register save
			local variables
			alloca() space
	adjusted SP ->	outbound argument overflow

   with SP/FP pointing at the base (low address) of the respective area,
   and each area aligned to a word boundary.  */

static void
csky_layout_stack_frame (void)
{
  machine_function *infp = cfun->machine;
  int reg_count;

  /* Layout is computed once and cached after reload.  */
  if (infp->frame_init_p)
    return;

  /* Get sizes of local variables & outbound arguments.  Offsets are
     measured up from the adjusted SP (low address).  */
  infp->outbound_size = CSKY_STACK_ALIGN (crtl->outgoing_args_size);
  infp->local_offset = infp->outbound_size;
  infp->local_size = CSKY_STACK_ALIGN (get_frame_size ());
  infp->reg_offset = infp->local_offset + infp->local_size;

  /* Now compute size of argument spill + saved regs.  These do not
     need explicit alignment since they are already word-sized.  */
  infp->reg_mask = get_csky_live_regs (&reg_count);
  infp->reg_size = reg_count * UNITS_PER_WORD;
  infp->arg_offset = infp->reg_offset + infp->reg_size;
  infp->arg_size = crtl->args.pretend_args_size;
  infp->frame_size = infp->arg_offset + infp->arg_size;
  /* Only mark the layout final once reload has settled register
     usage; before that it may still change.  */
  infp->frame_init_p = reload_completed;
}
1747
1748 /* Implement TARGET_CAN_ELIMINATE. */
1749 static bool
csky_can_eliminate(const int from ATTRIBUTE_UNUSED,const int to)1750 csky_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
1751 {
1752 if (to == STACK_POINTER_REGNUM)
1753 return !frame_pointer_needed;
1754 return true;
1755 }
1756
1757 /* Worker function for INITIAL_ELIMINATION_OFFSET macro.
1758 Define the offset between two registers, one to be eliminated, and
1759 the other its replacement, at the start of a routine. */
1760
1761 HOST_WIDE_INT
csky_initial_elimination_offset(int from,int to)1762 csky_initial_elimination_offset (int from, int to)
1763 {
1764 int offset;
1765
1766 csky_layout_stack_frame ();
1767
1768 /* Set OFFSET to the offset to the initial stack pointer. */
1769 switch (from)
1770 {
1771 case FRAME_POINTER_REGNUM:
1772 offset = cfun->machine->reg_offset;
1773 break;
1774
1775 case ARG_POINTER_REGNUM:
1776 offset = cfun->machine->arg_offset;
1777 break;
1778
1779 default:
1780 gcc_unreachable ();
1781 }
1782
1783 /* If we are asked for the offset to the frame pointer instead,
1784 then subtract the difference between the frame pointer and stack
1785 pointer. */
1786 if (to == FRAME_POINTER_REGNUM)
1787 offset -= cfun->machine->reg_offset;
1788 return offset;
1789 }
1790
1791
1792 /* Determine where to put an argument to a function.
1793 Value is zero to push the argument on the stack,
1794 or a hard register in which to store the argument.
1795
1796 CUM is a variable of type CUMULATIVE_ARGS which gives info about
1797 the preceding args and about the function being called.
1798 ARG is a description of the argument. */
1799 static rtx
csky_function_arg(cumulative_args_t pcum_v,const function_arg_info & arg)1800 csky_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
1801 {
1802 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1803 int reg = pcum->reg;
1804 machine_mode mode = arg.mode;
1805
1806 if (FUNCTION_VARG_MODE_P(mode)
1807 && !pcum->is_stdarg)
1808 {
1809 reg = pcum->freg;
1810
1811 if (reg < CSKY_NPARM_FREGS)
1812 return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM + reg);
1813 else
1814 return NULL_RTX;
1815 }
1816
1817 if (reg < CSKY_NPARM_REGS)
1818 return gen_rtx_REG (mode, CSKY_FIRST_PARM_REGNUM + reg);
1819
1820 return NULL_RTX;
1821 }
1822
1823
1824 /* Return the number of registers (words) needed to pass an argument of
1825 MODE and TYPE. */
1826
1827 static int
csky_num_arg_regs(machine_mode mode,const_tree type,bool is_stdarg)1828 csky_num_arg_regs (machine_mode mode, const_tree type, bool is_stdarg)
1829 {
1830 int size;
1831
1832 if (type && mode == BLKmode)
1833 size = int_size_in_bytes (type);
1834 else
1835 size = GET_MODE_SIZE (mode);
1836
1837 if (TARGET_HARD_FLOAT_ABI
1838 && !is_stdarg)
1839 {
1840 if (CSKY_VREG_MODE_P(mode)
1841 && !TARGET_SINGLE_FPU)
1842 return ((CSKY_NUM_WORDS (size) + 1) / 2);
1843 }
1844
1845 return CSKY_NUM_WORDS (size);
1846 }
1847
1848
1849 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
1850
1851 static void
csky_function_arg_advance(cumulative_args_t pcum_v,const function_arg_info & arg)1852 csky_function_arg_advance (cumulative_args_t pcum_v,
1853 const function_arg_info &arg)
1854 {
1855 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1856 int *reg = &pcum->reg;
1857 machine_mode mode = arg.mode;
1858
1859 int param_size = csky_num_arg_regs (mode, arg.type, pcum->is_stdarg);
1860 int param_regs_nums = CSKY_NPARM_REGS;
1861
1862 if (FUNCTION_VARG_MODE_P(mode)
1863 && !pcum->is_stdarg)
1864 {
1865 reg = &pcum->freg;
1866 param_regs_nums = CSKY_NPARM_FREGS;
1867 }
1868
1869 if (*reg + param_size > param_regs_nums)
1870 *reg = param_regs_nums;
1871 else
1872 *reg += param_size;
1873 }
1874
1875
/* Implement TARGET_FUNCTION_VALUE.  Return the rtx describing where
   a value of TYPE is returned from FUNC.  */
static rtx
csky_function_value (const_tree type, const_tree func,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  int unsignedp ATTRIBUTE_UNUSED;
  int size;

  mode = TYPE_MODE (type);
  size = int_size_in_bytes (type);

  /* Hard-float-eligible values come back in the first VFP register.  */
  if (FUNCTION_VARG_MODE_P (mode))
    {
      mode = promote_function_mode (type, mode, &unsignedp, func, 1);
      return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM);
    }

  /* Since we promote return types, we must promote the mode here too.  */
  if (INTEGRAL_TYPE_P (type))
    {
      mode = promote_function_mode (type, mode, &unsignedp, func, 1);
      return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
    }

  /* A two-word BLKmode value is returned split across the first two
     return registers, described by a PARALLEL.  */
  if (mode == BLKmode && size > UNITS_PER_WORD
      && size <= UNITS_PER_WORD * 2)
    {
      rtx ret_regs[2];
      ret_regs[0] = gen_rtx_EXPR_LIST (SImode,
				       gen_rtx_REG (SImode,
						    CSKY_FIRST_RET_REGNUM),
				       GEN_INT (0 * UNITS_PER_WORD));
      ret_regs[1] = gen_rtx_EXPR_LIST (SImode,
				       gen_rtx_REG (SImode,
						    CSKY_FIRST_RET_REGNUM + 1),
				       GEN_INT (1 * UNITS_PER_WORD));

      rtvec vec = gen_rtvec (2, ret_regs[0], ret_regs[1]);

      return gen_rtx_PARALLEL (mode, vec);
    }

  /* Everything else is returned in r0.  */
  return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
}
1921
1922
1923 /* Implement TARGET_LIBCALL_VALUE. */
1924 static rtx
csky_libcall_value(machine_mode mode,const_rtx libcall ATTRIBUTE_UNUSED)1925 csky_libcall_value (machine_mode mode,
1926 const_rtx libcall ATTRIBUTE_UNUSED)
1927 {
1928 if (FUNCTION_VARG_MODE_P(mode))
1929 {
1930 return gen_rtx_REG (mode, CSKY_FIRST_VFP_REGNUM);
1931 }
1932 return gen_rtx_REG (mode, CSKY_FIRST_RET_REGNUM);
1933 }
1934
1935
1936 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
1937 On C-SKY, only r0 can return results. */
1938
1939 static bool
csky_function_value_regno_p(const unsigned int regno)1940 csky_function_value_regno_p (const unsigned int regno)
1941 {
1942 if (regno == CSKY_FIRST_RET_REGNUM
1943 || (TARGET_HARD_FLOAT_ABI
1944 && regno == CSKY_FIRST_VFP_REGNUM))
1945 return true;
1946 return false;
1947 }
1948
1949
1950 /* Return an RTX indicating where the return address to the
1951 calling function can be found. */
1952 rtx
csky_return_addr(int count,rtx frame ATTRIBUTE_UNUSED)1953 csky_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
1954 {
1955 if (count != 0)
1956 return NULL_RTX;
1957
1958 return get_hard_reg_initial_val (Pmode, CSKY_LR_REGNUM);
1959 }
1960
1961
1962 /* Implement TARGET_ARG_PARTIAL_BYTES.
1963 Return the number of bytes at the beginning of an argument
1964 that must be put in registers. The value must be zero for arguments
1965 that are passed entirely in registers or
1966 that are entirely pushed on the stack. */
1967 static int
csky_arg_partial_bytes(cumulative_args_t pcum_v,const function_arg_info & arg)1968 csky_arg_partial_bytes (cumulative_args_t pcum_v, const function_arg_info &arg)
1969 {
1970 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1971 int param_size = csky_num_arg_regs (arg.mode, arg.type, pcum->is_stdarg);
1972 int reg = pcum->reg;
1973
1974 if (FUNCTION_VARG_MODE_P(arg.mode)
1975 && !pcum->is_stdarg)
1976 return 0;
1977
1978 if (reg < CSKY_NPARM_REGS
1979 && reg + param_size > CSKY_NPARM_REGS)
1980 return (CSKY_NPARM_REGS - reg) * UNITS_PER_WORD;
1981
1982 return 0;
1983 }
1984
1985
1986 /* Implement TARGET_SETUP_INCOMING_VARARGS.
1987 On C-Sky the copy from the argument registers to the stack is emitted
1988 by the prologue hooks, so here we just have to note how much stack space
1989 to save. */
1990
1991 static void
csky_setup_incoming_varargs(cumulative_args_t pcum_v,const function_arg_info & arg,int * pretend_size,int second_time ATTRIBUTE_UNUSED)1992 csky_setup_incoming_varargs (cumulative_args_t pcum_v,
1993 const function_arg_info &arg,
1994 int *pretend_size,
1995 int second_time ATTRIBUTE_UNUSED)
1996 {
1997 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
1998 CUMULATIVE_ARGS local_cum;
1999 cumulative_args_t local_cum_v = pack_cumulative_args (&local_cum);
2000 int regs_to_push;
2001
2002 cfun->machine->uses_anonymous_args = 1;
2003 local_cum = *pcum;
2004 csky_function_arg_advance (local_cum_v, arg);
2005 regs_to_push = CSKY_NPARM_REGS - local_cum.reg;
2006 if (regs_to_push)
2007 *pretend_size = regs_to_push * UNITS_PER_WORD;
2008 }
2009
2010
/* Implement TARGET_ASM_OUTPUT_MI_THUNK.
   Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */

static void
csky_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta,
		      HOST_WIDE_INT vcall_offset,
		      tree function)
{
  const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
  /* Register holding the `this' pointer, and two scratch regs.  */
  const char *thiz = "a0";
  const char *reg0 = "t0";
  const char *reg1 = "t1";
  int maxoff = 4096;		/* Constant range for addi/subi.  */

  assemble_start_function (thunk, fnname);
  final_start_function (emit_barrier (), file, 1);

  rtx fnaddr = XEXP (DECL_RTL (function), 0);

  if (CSKY_TARGET_ARCH (CK801))
    {
      /* CK801 can't use t registers and has only 16-bit addi/subi.
	 Use callee-saved l0/l1 instead, saving them on the stack for
	 as long as they are needed.  */
      reg0 = "l0";
      reg1 = "l1";
      maxoff = 256;
      if (vcall_offset > maxoff || vcall_offset < -maxoff)
	fprintf (file, "\tpush\tl0, l1\n");
      else if (delta > maxoff || delta < -maxoff)
	fprintf (file, "\tpush\tl0\n");
    }

  /* When the function returns an aggregate in memory, a0 holds the
     return slot and `this' is in a1 instead.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    thiz = "a1";

  /* Add delta to this_rtx.  Out-of-range deltas go through a scratch
     register loaded with lrw.  */
  if (delta != 0)
    {
      if (delta > maxoff || delta < -maxoff)
	{
	  fprintf (file, "\tlrw\t%s, %ld\n", reg0, (long)delta);
	  fprintf (file, "\taddu\t%s, %s, %s\n", thiz, thiz, reg0);
	}
      else
	fprintf (file, "\t%s\t%s, %s, %ld\n",
		 (delta > 0 ? "addi" : "subi"), thiz, thiz,
		 (long)(delta > 0 ? delta : -delta));
    }

  /* If needed, add *(*this_rtx + vcall_offset) to this_rtx.  */
  if (vcall_offset != 0)
    {
      /* Load the vtable pointer.  */
      fprintf (file, "\tld.w\t%s, (%s, 0)\n", reg0, thiz);

      if (vcall_offset > maxoff || vcall_offset < -maxoff)
	{
	  fprintf (file, "\tlrw\t%s, %ld\n", reg1, (long)vcall_offset);
	  fprintf (file, "\taddu\t%s, %s, %s\n", reg0, reg0, reg1);
	}
      else
	fprintf (file, "\t%s\t%s, %s, %ld\n",
		 (vcall_offset > 0 ? "addi" : "subi"), reg0, reg0,
		 (long)(vcall_offset > 0 ? vcall_offset : -vcall_offset));

      /* Load the offset and add it to this_rtx	 */
      fprintf (file, "\tld.w\t%s, (%s, 0)\n", reg0, reg0);
      fprintf (file, "\taddu\t%s, %s, %s\n", thiz, thiz, reg0);
    }

  /* We must pop the scratch regs individually instead of using the
     "pop" insn, which also does a return.  */
  if (CSKY_TARGET_ARCH (CK801))
    {
      if (vcall_offset > maxoff || vcall_offset < -maxoff)
	{
	  fprintf (file, "\tld.w\tl0, (sp, 0)\n");
	  fprintf (file, "\tld.w\tl1, (sp, 4)\n");
	  fprintf (file, "\taddi\t sp, sp, 8\n");
	}
      else if (delta > maxoff || delta < -maxoff)
	{
	  fprintf (file, "\tld.w\tl0, (sp, 0)\n");
	  fprintf (file, "\taddi\tsp, sp, 4\n");
	}
    }

  /* Tail-jump to the target function.  */
  fprintf (file, "\tjbr\t");
  output_addr_const (file, fnaddr);
  fprintf (file, "\n");

  final_end_function ();
  assemble_end_function (thunk, fnname);
}
2105
2106
2107 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE.
2108 Conditionally modify five variables fixed_regs, call_used_regs, global_regs,
2109 reg_names, and reg_class_contents, to take into account any dependence of
2110 these register sets on target flags.
2111
2112 CK801 has registers r0-r8 and r13-r15. CK802 and CK803 have registers
2113 r0-r15 (the "low" registers). Other cpus use registers r0-r31 with
2114 -mhigh-registers, otherwise also only r0-r15.
2115
2116 CK801 only has 16-bit instructions, most of which can only reference
2117 r0-r7 (the "mini" registers). So we mark regs outside that range as
2118 fixed. -msmart can be used on other arch variants to force the same
2119 behavior because it results in smaller code size.
2120
2121 TODO: investigate whether it's beneficial to use r8-r13 as a spill
2122 class when TARGET_MINI_REGISTERS instead of making them unusable by
2123 the register allocator. */
2124
2125 static void
csky_conditional_register_usage(void)2126 csky_conditional_register_usage (void)
2127 {
2128 /* Only use mini registers in smart mode or 801. */
2129 if (TARGET_MINI_REGISTERS)
2130 {
2131 int i;
2132
2133 for (i = (CSKY_LAST_MINI_REGNUM + 1); i < 32; i++)
2134 {
2135 fixed_regs[i] = 1;
2136 call_used_regs[i] = 1;
2137 }
2138 }
2139 /* For some targets, the high registers are not supported.
2140 CPUs other than ck801/ck802/ck803 use high registers
2141 depending on -mhigh-registers option. */
2142 else if (CSKY_TARGET_ARCH (CK802)
2143 || CSKY_TARGET_ARCH (CK803)
2144 || !TARGET_HIGH_REGISTERS)
2145 {
2146 int i;
2147
2148 for (i = CSKY_FIRST_HIGH_REGNUM; i <= CSKY_LAST_HIGH_REGNUM; i++)
2149 {
2150 fixed_regs[i] = 1;
2151 call_used_regs[i] = 1;
2152 }
2153 }
2154
2155 /* On CK801/CK802 we must mark lr as a fixed register because it is
2156 used to implement far jumps.
2157 FIXME: perhaps there should be a command-line option controlling
2158 use of lr for far jumps on ck802 when !TARGET_MINI_REGS, when
2159 you really want lr to be available to the register allocator and
2160 you know there are no far jumps in the code. */
2161 if (CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
2162 {
2163 fixed_regs[CSKY_LR_REGNUM] = 1;
2164 call_used_regs[CSKY_LR_REGNUM] = 0;
2165 }
2166
2167 /* The hi/lo registers are only supported in dsp mode. */
2168 if (!TARGET_DSP)
2169 {
2170 fixed_regs[CSKY_HI_REGNUM] = 1;
2171 call_used_regs[CSKY_HI_REGNUM] = 1;
2172
2173 fixed_regs[CSKY_LO_REGNUM] = 1;
2174 call_used_regs[CSKY_LO_REGNUM] = 1;
2175 }
2176
2177 /* The V_REGS are only supported in hard float mode. */
2178 if (!TARGET_HARD_FLOAT)
2179 {
2180 int regno;
2181
2182 for (regno = CSKY_FIRST_VFP_REGNUM;
2183 regno <= CSKY_LAST_VFP_REGNUM; regno++)
2184 {
2185 fixed_regs[regno] = 1;
2186 call_used_regs[regno] = 1;
2187 }
2188 }
2189
2190 /* In pic mode, the gb register is not available for register
2191 allocation. Since gb is not clobbered by function
2192 calls, set its call_used_regs to 0. */
2193 if (flag_pic)
2194 {
2195 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2196 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0;
2197 }
2198 }
2199
2200 /* Implement TARGET_HARD_REGNO_NREGS. */
2201 static unsigned int
csky_hard_regno_nregs(unsigned int regno,machine_mode mode)2202 csky_hard_regno_nregs (unsigned int regno, machine_mode mode)
2203 {
2204 if (regno >= CSKY_FIRST_VFP_REGNUM && !CSKY_TARGET_ARCH (CK803))
2205 return 1;
2206 else
2207 return CSKY_NUM_REGS (mode);
2208 }
2209
/* Implement TARGET_HARD_REGNO_MODE_OK.  Return true if REGNO is a
   valid register for holding a quantity of type MODE.  */

static bool
csky_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  int nregs = CSKY_NUM_REGS (mode);

  /* We can't handle more than doubleword sizes for any register.  */
  if (nregs > 2)
    return false;

  /* For general registers, return true if mode is one word size.
     When the size is larger than one word size, there should
     be two successive hard registers to put the data.  */
  if (regno < CSKY_NGPR_REGS)
    {
      if (nregs < 2)
	return true;
      else if (TARGET_MINI_REGISTERS)
	/* A doubleword needs REGNO+1 to also be a mini register.  */
	return (regno < CSKY_LAST_MINI_REGNUM);
      else if (CSKY_TARGET_ARCH (CK802)
	       || CSKY_TARGET_ARCH (CK803)
	       || !TARGET_HIGH_REGISTERS)
	/* Without high register, r15 cannot hold doubleword data.  */
	return (regno < (CSKY_SP_REGNUM - 1));
      else
	return (regno < (CSKY_SP_REGNUM - 1)
		|| (regno >= CSKY_LR_REGNUM
		    && regno < CSKY_LAST_HIGH_UNFIXED_REGNUM));
    }
  else if (regno == CSKY_CC_REGNUM)
    /* The condition-code register holds only CCmode.  */
    return (mode == CCmode);
  else if (regno == CSKY_HI_REGNUM || regno == CSKY_LO_REGNUM)
    {
      /* Don't allocate hi,lo register for float data even
	 if in dsp mode, because it will cause high cost
	 to reload data from hi,lo register.  */
      if (!TARGET_DSP || mode == SFmode || mode == DFmode)
	return false;
      else if (nregs == 2)
	/* A doubleword must start in HI so it occupies the HI/LO
	   pair.  */
	return (regno == CSKY_HI_REGNUM);
      else
	return true;
    }
  else if (CSKY_VREG_P (regno) && TARGET_HARD_FLOAT)
    return true;

  return false;
}
2260
2261 /* Implement TARGET_MODES_TIEABLE_P. We can't tie DFmode with other modes
2262 when V_REGs might be in use because those registers mess with the stored
2263 bits. */
2264 static bool
csky_modes_tieable_p(machine_mode mode1,machine_mode mode2)2265 csky_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2266 {
2267 return !(TARGET_HARD_FLOAT
2268 && mode1 != mode2
2269 && (mode1 == DFmode || mode2 == DFmode));
2270 }
2271
2272 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.
2273 V_REG registers can't do subreg as all values are reformatted to
2274 internal precision. */
2275 static bool
csky_can_change_mode_class(machine_mode from,machine_mode to,reg_class_t rclass)2276 csky_can_change_mode_class (machine_mode from,
2277 machine_mode to,
2278 reg_class_t rclass)
2279 {
2280 return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to)
2281 || !reg_classes_intersect_p (V_REGS, rclass));
2282 }
2283
2284 /* Implement TARGET_CLASS_LIKELY_SPILLED_P.
2285 We need to define this for MINI_REGS when we only use r0 - r7.
2286 Otherwise we can end up using r0-r4 for function arguments, and don't
2287 have enough left over to do doubleword arithmetic. */
2288
2289 static bool
csky_class_likely_spilled_p(reg_class_t rclass)2290 csky_class_likely_spilled_p (reg_class_t rclass)
2291 {
2292 if ((TARGET_MINI_REGISTERS && rclass == MINI_REGS)
2293 || rclass == C_REGS)
2294 return true;
2295
2296 return false;
2297 }
2298
2299
2300 /* Implement TARGET_PREFERRED_RELOAD_CLASS.
2301 Given an rtx X being reloaded into a reg required to be
2302 in class CLASS, return the class of reg to actually use.
2303 In general this is just CLASS. */
2304
2305 static reg_class_t
csky_preferred_reload_class(rtx x,reg_class_t rclass)2306 csky_preferred_reload_class (rtx x, reg_class_t rclass)
2307 {
2308 if (TARGET_HARD_FLOAT
2309 && CONST_DOUBLE_P (x)
2310 && (GET_MODE (x) == DFmode || GET_MODE (x) == SFmode)
2311 && rclass == NO_REGS)
2312 return GENERAL_REGS;
2313 return rclass;
2314 }
2315
2316
2317 /* Implement TARGET_CLASS_MAX_NREGS.
2318 Return the maximum number of consecutive registers of class rclass needed
2319 to hold a value of mode mode.
2320 On the csky, this is the size of MODE in words,
2321 except in the FP regs, where a single reg is always enough. */
2322
2323 static unsigned char
csky_class_max_nregs(reg_class_t rclass,machine_mode mode)2324 csky_class_max_nregs (reg_class_t rclass, machine_mode mode)
2325 {
2326 if (rclass == V_REGS)
2327 return 1;
2328 else
2329 return CSKY_NUM_REGS (mode);
2330 }
2331
2332
/* Implement TARGET_SECONDARY_RELOAD.
   If copying a register of RCLASS from/to X requires an intermediate
   register, the hook should return the REGISTER_CLASS required for this
   intermediate register.
   If no intermediate register is required, it should return NO_REGS.
   If more than one intermediate register is required, describe the one
   that is closest in the copy chain to the reload register.  */

reg_class_t
csky_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
		       reg_class_t rclass,
		       machine_mode mode,
		       secondary_reload_info *sri ATTRIBUTE_UNUSED)
{
  /* -1 means "no hard register identified".  */
  int regno = -1;

  /* Extract the real regno from X.  */
  if (GET_CODE (x) == SIGN_EXTEND)
    {
      int off = 0;

      x = XEXP (x, 0);

      if (reg_renumber)
	regno = true_regnum (x);
      else
	{
	  /* Peel off nested SUBREGs, accumulating the register offset
	     contributed at each level, to reach the underlying REG.  */
	  while (GET_CODE (x) == SUBREG)
	    {
	      off += subreg_regno_offset (REGNO (SUBREG_REG (x)),
					  GET_MODE (SUBREG_REG (x)),
					  SUBREG_BYTE (x), GET_MODE (x));
	      x = SUBREG_REG (x);
	    }

	  if (GET_CODE (x) == REG)
	    regno = REGNO (x) + off;
	}
    }
  else if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  /* We always require a general register when copying anything to
     HI/LO_REGNUM, except when copying an SImode value from HI/LO_REGNUM
     to a general register, or when copying from register 0.  */
  if ((rclass == HILO_REGS || rclass == LO_REGS || rclass == HI_REGS)
      && !CSKY_GENERAL_REGNO_P (regno))
    return GENERAL_REGS;

  if (rclass == V_REGS && !CSKY_GENERAL_REGNO_P (regno))
    {
      /* Reload between vector reg and memory does not need an
	 intermediate register.  */
      if (MEM_P (x) && (mode == SFmode || mode == DFmode))
	return NO_REGS;
      else
	return GENERAL_REGS;
    }

  return NO_REGS;
}
2394
2395 /* Implement TARGET_SPILL_CLASS.
2396 Try spilling to a larger register class before spilling to memory. */
2397
2398 static reg_class_t
csky_spill_class(reg_class_t rclass,machine_mode mode ATTRIBUTE_UNUSED)2399 csky_spill_class (reg_class_t rclass, machine_mode mode ATTRIBUTE_UNUSED)
2400 {
2401 if ((rclass == MINI_REGS && !TARGET_MINI_REGISTERS)
2402 || (rclass == LOW_REGS && TARGET_HIGH_REGISTERS))
2403 return GENERAL_REGS;
2404 return NO_REGS;
2405 }
2406
2407 /* Convert a static initializer array of feature bits to sbitmap
2408 representation. */
2409 static void
csky_initialize_isa(sbitmap isa,const enum csky_isa_feature * isa_bits)2410 csky_initialize_isa (sbitmap isa, const enum csky_isa_feature *isa_bits)
2411 {
2412 bitmap_clear (isa);
2413 while (*isa_bits != CSKY_ISA_FEATURE_GET (none))
2414 bitmap_set_bit (isa, *(isa_bits++));
2415 }
2416
2417
/* Configure a build target TARGET from the user-specified options OPTS and
   OPTS_SET.  Fills in TARGET's ISA bitmap and the core/arch name and ID
   fields from the -mcpu=/-march= selections, falling back to
   TARGET_CPU_DEFAULT when neither was given.  */
static void
csky_configure_build_target (struct csky_build_target *target,
			     struct cl_target_option *opts,
			     struct gcc_options *opts_set)
{
  const struct csky_processors *csky_selected_tune = NULL;
  struct csky_processors *csky_selected_cpu = NULL;
  struct csky_processors *csky_selected_arch = NULL;
  /* Accumulates the architecture's ISA bits; freed before returning.  */
  sbitmap all_sbits = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));
  bitmap_clear (all_sbits);

  bitmap_clear (target->isa);
  target->core_name = NULL;
  target->arch_name = NULL;

  /* Honor -march= and -mcpu= only when explicitly set.  */
  if (opts_set->x_csky_arch_option)
    csky_selected_arch = &all_architectures[opts->x_csky_arch_option];

  if (opts_set->x_csky_cpu_option)
    {
      csky_selected_cpu = &all_cores[opts->x_csky_cpu_option];
      csky_selected_tune = &all_cores[opts->x_csky_cpu_option];
    }

  if (csky_selected_cpu)
    {
      /* TODO: support combination of features
	 between different cpu & arch, should based on arch.  */
      if (csky_selected_arch
	  && (csky_selected_cpu->base_arch != csky_selected_arch->base_arch))
	warning (0, "cpu %s is not based on arch %s, ignoring the arch",
		 csky_selected_cpu->name, csky_selected_arch->name);
      if (!csky_selected_arch)
	csky_selected_arch = &all_architectures[csky_selected_cpu->base_arch];
      csky_initialize_isa (all_sbits, csky_selected_arch->isa_bits);
      target->core_name = csky_selected_cpu->name;
    }
  else if (csky_selected_arch)
    {
      /* -march= without -mcpu=: the architecture entry doubles as the
	 CPU selection.  */
      csky_selected_cpu = csky_selected_arch;
      target->arch_name = csky_selected_arch->name;
    }
  else /* If the user did not specify a processor, choose one for them.  */
    {
      csky_selected_cpu = &all_cores[TARGET_CPU_DEFAULT];
      csky_selected_arch = &all_architectures[csky_selected_cpu->base_arch];
      csky_initialize_isa (all_sbits, csky_selected_arch->isa_bits);
      target->core_name = csky_selected_cpu->name;
    }

  /* The selected cpu may be an architecture, so lookup tuning by core ID.  */
  if (!csky_selected_tune)
    csky_selected_tune = &all_cores[csky_selected_cpu->core];
  gcc_assert (csky_selected_tune);

  gcc_assert (csky_selected_arch);
  gcc_assert (csky_selected_cpu);
  /* The final ISA is the union of the CPU's and the architecture's
     feature bits.  */
  csky_initialize_isa (target->isa, csky_selected_cpu->isa_bits);
  bitmap_ior (target->isa, target->isa, all_sbits);

  /* Finish initializing the target structure.  */
  target->arch_pp_name = csky_selected_cpu->arch;
  target->base_arch = csky_selected_cpu->base_arch;
  target->arch_core = csky_selected_cpu->core;

  sbitmap_free (all_sbits);
}
2487
2488
/* Implement TARGET_OPTION_OVERRIDE.  Validate and reconcile the
   command-line options against the selected CPU/arch, fill in
   csky_active_target, and resolve all tri-state (-1 = unset) target
   flags to concrete values.  */

static void
csky_option_override (void)
{
  csky_active_target.isa = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));

  /* Create the default target_options structure.  We need this early
     to configure the overall build target.  */
  target_option_default_node = target_option_current_node
    = build_target_option_node (&global_options, &global_options_set);

  csky_configure_build_target (&csky_active_target,
			       TREE_TARGET_OPTION (target_option_default_node),
			       &global_options_set);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  csky_base_arch = csky_active_target.base_arch;

  /* PIC is only implemented for CK807/CK810; turn it off (with a
     warning) everywhere else rather than generating bad code.  */
  if (flag_pic && !(CSKY_TARGET_ARCH (CK810) || CSKY_TARGET_ARCH (CK807)))
    {
      flag_pic = 0;
      warning (0, "%qs is not supported by arch %s",
	       "-fPIC", csky_active_target.arch_pp_name);
    }

  /* Check floating-point options for consistency.  */
  if (TARGET_HARD_FLOAT)
    {
      const struct csky_fpu_desc *csky_selected_fpu = NULL;

      if (csky_fpu_index == TARGET_FPU_auto)
	{
	  /* No explicit -mfpu=: infer a default FPU name from the
	     configured default and other float-related options.  */
	  const char *target_fpu_name;
	  bool ok;
	  int fpu_index;

#ifdef CSKY_FPUTYPE_DEFAULT
	  target_fpu_name = CSKY_FPUTYPE_DEFAULT;
#else
	  target_fpu_name = "fpv2";
#endif

	  if (csky_active_target.core_name != NULL
	      && !strchr (csky_active_target.core_name, 'f'))
	    /* Core name lacks an 'f' suffix: no FPU variant.  */
	    target_fpu_name = "auto";
	  else if (CSKY_TARGET_ARCH (CK803) || !TARGET_DOUBLE_FLOAT)
	    target_fpu_name = "fpv2_sf";
	  else if (TARGET_DOUBLE_FLOAT && TARGET_FDIVDU)
	    target_fpu_name = "fpv2_divd";

	  ok = opt_enum_arg_to_value (OPT_mfpu_, target_fpu_name, &fpu_index,
				      CL_TARGET);
	  gcc_assert (ok);
	  csky_fpu_index = (enum csky_fpu_type) fpu_index;
	}

      if (CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
	error ("%qs is not supported by arch %s",
	       "-mhard-float", csky_active_target.arch_pp_name);
      else if (csky_fpu_index == TARGET_FPU_auto)
	error ("%<-mhard-float%> is not supported by the selected CPU");
      else
	{
	  /* Merge the selected FPU's feature bits into the active
	     target's ISA bitmap.  */
	  csky_selected_fpu = &all_fpus[csky_fpu_index];
	  sbitmap fpu_bits = sbitmap_alloc (CSKY_ISA_FEATURE_GET (max));
	  csky_initialize_isa (fpu_bits, csky_selected_fpu->isa_bits);

	  bitmap_ior (csky_active_target.isa, csky_active_target.isa,
		      fpu_bits);

	  sbitmap_free (fpu_bits);
	}
    }
  else
    {
      /* Soft float: the double-float and fdivdu suboptions are
	 meaningless; warn if they were given explicitly.  */
      if (TARGET_DOUBLE_FLOAT > 0)
	warning (0, "%<-mdouble-float%> ignored without %<-mhard-float%>");
      TARGET_DOUBLE_FLOAT = 0;
      if (TARGET_FDIVDU > 0)
	warning (0, "%<-mfdivdu%> ignored without %<-mhard-float%>");
      TARGET_FDIVDU = 0;
    }

  /* Extended LRW instructions are enabled by default on CK801, disabled
     otherwise.  */
  if (TARGET_ELRW == -1)
    TARGET_ELRW = CSKY_TARGET_ARCH (CK801);

  /* DSP is enabled either by the processor feature or -mdsp
     command-line option.  There is no -mno-dsp option as the assembler
     doesn't take one.  */
  if (!TARGET_DSP)
    TARGET_DSP = CSKY_ISA_FEATURE (dsp);

  /* There's both -mdiv and -mno-div.  Take default from processor if
     neither is specified explicitly.  */
  if (TARGET_DIV == -1)
    TARGET_DIV = CSKY_ISA_FEATURE (div);

  /* TARGET_CONSTANT_POOL is mandatory for CK801 and CK802 and optional
     for other CPUs.
     The reason why the compiler has to generate constant pools for CK801/2
     instead of deferring to the assembler is that these cores don't have a
     long branch instruction other than jbsr, which clobbers lr.  So for
     the compiler to correctly save/restore lr it has to know whether there
     are long branches, which depends on having accurate branch length
     counts, which in turn depends on having control over where constant
     pools are placed.  */
  if ((CSKY_TARGET_ARCH (CK801) || CSKY_TARGET_ARCH (CK802))
      && !TARGET_CONSTANT_POOL)
    error ("%qs is not supported by arch %s",
	   "-mno-constpool", csky_active_target.arch_pp_name);
  else if (TARGET_CONSTANT_POOL == -1)
    TARGET_CONSTANT_POOL = (CSKY_TARGET_ARCH (CK801)
			    || CSKY_TARGET_ARCH (CK802));

  /* TARGET_MINI_REGISTERS is mandatory for CK801, the default for CK802,
     and optional for other CPUs.  TARGET_HIGH_REGISTERS is incompatible
     with TARGET_MINI_REGISTERS, is not supported by CK801/802/803,
     and is the default for other processors.
     See csky_conditional_register_usage.  */
  if (TARGET_MINI_REGISTERS > 0 && TARGET_HIGH_REGISTERS > 0)
    error ("%<-msmart%> is incompatible with %<-mhigh-registers%>");
  else if (CSKY_TARGET_ARCH (CK801)
	   || CSKY_TARGET_ARCH (CK802)
	   || CSKY_TARGET_ARCH (CK803))
    {
      if (CSKY_TARGET_ARCH (CK801)
	  || (CSKY_TARGET_ARCH (CK802) && TARGET_MINI_REGISTERS == -1))
	TARGET_MINI_REGISTERS = 1;
      else if (TARGET_MINI_REGISTERS == -1)
	TARGET_MINI_REGISTERS = 0;
      if (TARGET_HIGH_REGISTERS > 0)
	warning (0, "%qs is not supported by arch %s",
		 "-mhigh-registers", csky_active_target.arch_pp_name);
      TARGET_HIGH_REGISTERS = 0;
    }
  else
    {
      if (TARGET_MINI_REGISTERS == -1)
	TARGET_MINI_REGISTERS = 0;
      if (TARGET_HIGH_REGISTERS == -1)
	TARGET_HIGH_REGISTERS = !TARGET_MINI_REGISTERS;
    }

  /* -mmultiple-stld is the default for everything but CK801, which
     doesn't support it.  */
  if (CSKY_TARGET_ARCH (CK801))
    {
      if (TARGET_MULTIPLE_STLD > 0)
	warning (0, "%qs is not supported by arch %s",
		 "-mmultiple-stld", csky_active_target.arch_pp_name);
      TARGET_MULTIPLE_STLD = 0;
    }

  /* Initialize boolean versions of the architectural flags, for use
     in the .md file.  */

#undef CSKY_ISA
#define CSKY_ISA(IDENT, DESC)						  \
  {									  \
    csky_arch_isa_features[CSKY_ISA_FEATURE_GET (IDENT)] =		  \
      bitmap_bit_p (csky_active_target.isa, CSKY_ISA_FEATURE_GET (IDENT)); \
  }
#include "csky_isa.def"
#undef CSKY_ISA

  /* TODO */

  /* Resynchronize the saved target options.  */
  cl_target_option_save (TREE_TARGET_OPTION (target_option_default_node),
			 &global_options, &global_options_set);

#ifdef ENABLE_TPF_DEBUG
  /* Don't emit DWARF4 unless specifically selected.  The TPF
     debuggers do not yet support DWARF 3/4.
     NOTE(review): TPF is an S/390 operating system; this block looks
     copied from another backend — confirm it is ever enabled here.  */
  if (!global_options_set.x_dwarf_strict)
    dwarf_strict = 1;
  if (!global_options_set.x_dwarf_version)
    dwarf_version = 3;
#endif

  /* Don't run the scheduler before reload by default,
     since it tends to increase register pressure.  */
  if (!global_options_set.x_flag_schedule_insns)
    flag_schedule_insns = 0;

  csky_add_gc_roots ();
}
2682
2683
2684 /* Return TRUE if X contains any references to TLS symbols. */
2685
2686 bool
csky_tls_referenced_p(rtx x)2687 csky_tls_referenced_p (rtx x)
2688 {
2689 if (!TARGET_TLS)
2690 return false;
2691
2692 subrtx_iterator::array_type array;
2693 FOR_EACH_SUBRTX (iter, array, x, ALL)
2694 {
2695 const_rtx x = *iter;
2696 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2697 return true;
2698
2699 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
2700 TLS offsets, not real symbol references. */
2701 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
2702 iter.skip_subrtxes ();
2703 }
2704 return false;
2705 }
2706
2707
2708 /* Implement TARGET_CANNOT_FORCE_CONST_MEM.
2709 Determine if it's legal to put X into the constant pool. This
2710 is not possible for the address of thread-local symbols, which
2711 is checked above. */
2712
2713 static bool
csky_cannot_force_const_mem(machine_mode mode ATTRIBUTE_UNUSED,rtx x)2714 csky_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED,
2715 rtx x)
2716 {
2717 return csky_tls_referenced_p (x);
2718 }
2719
2720
2721 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns nonzero if the
2722 constant value X is a legitimate general operand.
2723 It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2724
2725 static bool
csky_legitimate_constant_p(machine_mode mode,rtx x)2726 csky_legitimate_constant_p (machine_mode mode, rtx x)
2727 {
2728 return (!csky_cannot_force_const_mem (mode, x)
2729 && CONSTANT_P (x));
2730 }
2731
2732
2733 /* Return true if X is valid as an CSKY addressing register. */
2734
2735 static bool
is_csky_address_register_rtx_p(rtx x,int strict_p)2736 is_csky_address_register_rtx_p (rtx x, int strict_p)
2737 {
2738 int regno;
2739
2740 if (!x)
2741 return false;
2742 if (!REG_P (x))
2743 return false;
2744
2745 regno = REGNO (x);
2746
2747 if (strict_p)
2748 return (CSKY_GENERAL_REGNO_P (regno)
2749 || CSKY_GENERAL_REGNO_P (reg_renumber[regno]));
2750 else
2751 return CSKY_GENERAL_REGNO_P (regno) || regno >= FIRST_PSEUDO_REGISTER;
2752 }
2753
2754
2755 /* Return TRUE if X is a thread-local symbol. */
2756
2757 static bool
csky_tls_symbol_p(rtx x)2758 csky_tls_symbol_p (rtx x)
2759 {
2760 if (!TARGET_TLS)
2761 return false;
2762
2763 if (GET_CODE (x) != SYMBOL_REF)
2764 return false;
2765
2766 return SYMBOL_REF_TLS_MODEL (x) != 0;
2767 }
2768
2769
/* Handle lazy initialization of __tls_get_addr libfunc.
   GTY(()) marks the cached rtx as a garbage-collection root.  */
static GTY(()) rtx tls_get_addr_libfunc;

/* Return the libfunc rtx for __tls_get_addr, creating it on first
   use.  */
static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}
2780
2781
/* Emit a call to __tls_get_addr.  X is the TLS symbol, REG a scratch
   register, RELOC the relocation kind to attach.  The resulting
   address is stored in *VALUEP, and the emitted insn sequence is
   returned for the caller to wrap in a libcall block.  */

static rtx_insn *
csky_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
{
  rtx label, labelno, unspec, tmp;
  rtx_insn *insns;

  start_sequence ();

  /* Each TLS reference gets a fresh label number so the PC-relative
     relocation pairs with the correct add instruction.  */
  labelno = GEN_INT (tls_labelno++);
  label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_TLS_LABEL);
  unspec = gen_rtx_UNSPEC (Pmode,
			   gen_rtvec (3, x, GEN_INT (reloc), label),
			   UNSPEC_TLS);
  tmp = gen_reg_rtx (SImode);
  /* Build the relocated reference in REG, then add the label address
     to form the argument passed to __tls_get_addr.  */
  emit_move_insn (reg, unspec);
  emit_move_insn (tmp, label);
  emit_insn (gen_addsi3 (reg, reg, tmp));
  *valuep = emit_library_call_value (get_tls_get_addr (),
				     NULL_RTX, LCT_PURE, /* LCT_CONST?  */
				     Pmode, reg, Pmode);
  insns = get_insns ();
  end_sequence ();
  return insns;
}
2808
2809 /* Helper function for csky_legitimize_address, to handle the TLS cases.
2810 REG is a scratch register and may be null. */
2811
2812 rtx
csky_legitimize_tls_address(rtx x,rtx reg)2813 csky_legitimize_tls_address (rtx x, rtx reg)
2814 {
2815 rtx dest, tp, label, labelno, unspec, ret, eqv, addend, tmp;
2816 rtx_insn *insns;
2817 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
2818
2819 if (!reg)
2820 reg = gen_reg_rtx (SImode);
2821
2822 switch (model)
2823 {
2824 case TLS_MODEL_GLOBAL_DYNAMIC:
2825 insns = csky_call_tls_get_addr (x, reg, &ret, TLS_GD32);
2826 dest = gen_reg_rtx (Pmode);
2827 emit_libcall_block (insns, dest, ret, x);
2828 return dest;
2829
2830 case TLS_MODEL_LOCAL_DYNAMIC:
2831 insns = csky_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
2832
2833 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2834 share the LDM result with other LD model accesses. */
2835 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_TLS);
2836 dest = gen_reg_rtx (Pmode);
2837 emit_libcall_block (insns, dest, ret, eqv);
2838
2839 /* Load the addend. */
2840 addend = gen_rtx_UNSPEC (Pmode,
2841 gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
2842 UNSPEC_TLS);
2843 addend = force_reg (SImode, addend);
2844 return gen_rtx_PLUS (Pmode, dest, addend);
2845
2846 case TLS_MODEL_INITIAL_EXEC:
2847 labelno = GEN_INT (tls_labelno++);
2848 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_TLS_LABEL);
2849 unspec = gen_rtx_UNSPEC (Pmode,
2850 gen_rtvec (3, x, GEN_INT (TLS_IE32), label),
2851 UNSPEC_TLS);
2852 tmp = gen_reg_rtx (SImode);
2853 emit_move_insn (reg, unspec);
2854 emit_move_insn (tmp, label);
2855 emit_insn (gen_addsi3 (reg, reg, tmp));
2856 emit_move_insn (reg, gen_const_mem (Pmode, reg));
2857 tp = gen_rtx_REG (SImode, CSKY_TLS_REGNUM);
2858 return gen_rtx_PLUS (Pmode, tp, reg);
2859
2860 case TLS_MODEL_LOCAL_EXEC:
2861 unspec = gen_rtx_UNSPEC (Pmode,
2862 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
2863 UNSPEC_TLS);
2864 emit_move_insn (reg, unspec);
2865 tp = gen_rtx_REG (SImode, CSKY_TLS_REGNUM);
2866 return gen_rtx_PLUS (Pmode, tp, reg);
2867
2868 default:
2869 abort ();
2870 }
2871 }
2872
2873
/* Implement TARGET_LEGITIMIZE_ADDRESS.  Try to rewrite address X for
   MODE into a form the target's load/store instructions can encode.  */

static rtx
csky_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  /* TLS symbols need the full model-specific access sequence.  */
  if (csky_tls_symbol_p (x))
    return csky_legitimize_tls_address (x, NULL_RTX);

  if (GET_CODE (x) == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (is_csky_address_register_rtx_p (xop0, 0)
	  && CONST_INT_P (xop1))
	{
	  HOST_WIDE_INT offset = INTVAL (xop1);

	  /* Try to replace ld32 rx,(ry, offset), to addi16 rz, oimm8
	     and ld16 rx,(rz, new_ld_offset) to avoid emitting a
	     32-bit ld, but this addi has a range limitation.  */
	  if (optimize_size
	      && offset > CSKY_LD16_MAX_OFFSET (mode)
	      && offset <= (CSKY_ADDI16_MAX_IMM
			    + CSKY_LD16_MAX_OFFSET (mode)))
	    {
	      /* Keep the low bits as the ld16 displacement and fold
		 the remainder into the base register.  */
	      HOST_WIDE_INT new_ld_offset
		= offset & CSKY_LD16_OFFSET_MASK (mode);

	      xop0 = force_operand (plus_constant (Pmode, xop0,
						   offset - new_ld_offset),
				    NULL_RTX);
	      x = plus_constant (Pmode, xop0, new_ld_offset);
	    }
	  else if (offset < 0 && offset >= (-CSKY_SUBI16_MAX_IMM))
	    /* Small negative offset: computable with a 16-bit subi.  */
	    x = force_operand (x, NULL_RTX);
	  else if (offset > CSKY_LD16_MAX_OFFSET (mode)
		   || offset < 0)
	    {
	      /* For the remaining cases, force the constant into a
		 register.  */
	      xop1 = force_reg (SImode, xop1);
	      x = gen_rtx_PLUS (SImode, xop0, xop1);
	    }
	}

      /* If the index is store in register, force the
	 base to register.  */
      if (is_csky_address_register_rtx_p (xop1, 0)
	  && !is_csky_address_register_rtx_p (xop0, 0))
	{
	  xop0 = force_operand (xop0, NULL_RTX);
	  x = gen_rtx_PLUS (SImode, xop0, xop1);
	}
    }
  /* Make sure to take full advantage of the pre-indexed addressing mode
     with absolute addresses which often allows for the base register to
     be factorized for multiple adjacent memory references, and it might
     even allows for the mini pool to be avoided entirely.  */
  else if (CONST_INT_P (x) && optimize > 0)
    {
      HOST_WIDE_INT mask, base, index;
      rtx base_reg;

      /* Split the absolute address into a register-resident base and
	 a small in-range displacement.  */
      mask = CSKY_LD16_OFFSET_MASK (mode);
      base = INTVAL (x) & ~mask;
      index = INTVAL (x) & mask;
      base_reg = force_reg (SImode, GEN_INT (base));
      x = plus_constant (Pmode, base_reg, index);
    }

  return x;
}
2948
2949
2950 /* Return nonzero if INDEX is valid for an address index operand.
2951 ck801 use 16 bits ld
2952 ck802 use 16 and 32 bits ld
2953 others use ld and ldr. */
2954
2955 static int
ck801_legitimate_index_p(machine_mode mode,rtx index,int strict_p ATTRIBUTE_UNUSED)2956 ck801_legitimate_index_p (machine_mode mode, rtx index,
2957 int strict_p ATTRIBUTE_UNUSED)
2958 {
2959 enum rtx_code code = GET_CODE (index);
2960
2961 /* When the mode size is larger than 4, we may use two ld instruction
2962 to get data, the index and (index+1) should be valid. */
2963 if (GET_MODE_SIZE (mode) >= 8)
2964 return (code == CONST_INT
2965 && INTVAL (index) < CSKY_LD16_MAX_OFFSET (SImode)
2966 && INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);
2967
2968 if (code == CONST_INT && GET_MODE_SIZE (mode) > 0
2969 && INTVAL (index) <= CSKY_LD16_MAX_OFFSET (mode)
2970 && INTVAL (index) >= 0)
2971 return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);
2972
2973 return 0;
2974 }
2975
2976
2977 static int
ck802_legitimate_index_p(machine_mode mode,rtx index,int strict_p ATTRIBUTE_UNUSED)2978 ck802_legitimate_index_p (machine_mode mode, rtx index,
2979 int strict_p ATTRIBUTE_UNUSED)
2980 {
2981 enum rtx_code code = GET_CODE (index);
2982
2983 /* When the mode size is larger than 4, we may use two ld instruction
2984 to get data, the index and (index+1) should be valid. */
2985 if (GET_MODE_SIZE (mode) >= 8)
2986 return (code == CONST_INT
2987 && INTVAL (index) < CSKY_LD32_MAX_OFFSET (SImode)
2988 && INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);
2989
2990 if (code == CONST_INT && GET_MODE_SIZE (mode) > 0
2991 && INTVAL (index) <= CSKY_LD32_MAX_OFFSET (mode)
2992 && INTVAL (index) >= 0)
2993 return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);
2994
2995 return 0;
2996 }
2997
2998
2999 /* The instruction ldr rz, (rx, ry << i), i can be 0,1,2,3.
3000 Check that SHIFT is valid, that the code is MULT, and that
3001 the shift is a power of 2. */
3002
3003 static bool
is_ldr_shift_p(HOST_WIDE_INT shift,enum rtx_code code)3004 is_ldr_shift_p (HOST_WIDE_INT shift, enum rtx_code code)
3005 {
3006 if (code == ASHIFT)
3007 return (shift >= 0 && shift <= 3);
3008 else if (code == MULT)
3009 return (shift == 1
3010 || shift == 2
3011 || shift == 4
3012 || shift == 8);
3013 else
3014 return false;
3015 }
3016
3017
/* Index validation for CK807/CK810 and similar: these accept both
   immediate-offset ld and register-index ldr forms.  */
static int
ck810_legitimate_index_p (machine_mode mode, rtx index, int strict_p)
{
  enum rtx_code code = GET_CODE (index);

  /* Hard-float loads/stores only take a small word-aligned constant
     offset.  */
  if (TARGET_HARD_FLOAT
      && (mode == SFmode || mode == DFmode))
    return (code == CONST_INT && INTVAL (index) < 1024
	    && INTVAL (index) >= 0
	    && (INTVAL (index) & 3) == 0);

  if (code == CONST_INT)
    {
      /* When the mode size is larger than 4, we may use two ld instruction
	 to get data, the index and (index+1) should be valid.  */
      if (GET_MODE_SIZE (mode) >= 8)
	return (INTVAL (index) < CSKY_LD32_MAX_OFFSET (SImode)
		&& INTVAL (index) >= 0 && (INTVAL (index) & 3) == 0);

      /* Naturally-aligned offset within the 32-bit ld range.  */
      if (GET_MODE_SIZE (mode) > 0
	  && INTVAL (index) <= CSKY_LD32_MAX_OFFSET (mode)
	  && INTVAL (index) >= 0)
	return ((INTVAL (index) % GET_MODE_SIZE (mode)) == 0);
    }
  /* Allow ld.w rx, (gb, sym@got) when -fpic specially.  */
  else if (code == UNSPEC)
    return (flag_pic == 1
	    && (XINT (index, 1) == UNSPEC_PIC_SYMBOL_PLT
		|| XINT (index, 1) == UNSPEC_PIC_SYMBOL_GOT));
  /* The follow index is for ldr instruction, the ldr cannot
     load dword data, so the mode size should not be larger than
     4.  */
  else if (GET_MODE_SIZE (mode) <= 4)
    {
      /* Plain register index, or register scaled by a valid ldr
	 shift/multiplier.  */
      if (is_csky_address_register_rtx_p (index, strict_p))
	return 1;
      else if (code == MULT || code == ASHIFT)
	{
	  rtx xiop0 = XEXP (index, 0);
	  rtx xiop1 = XEXP (index, 1);

	  /* FIXME can the xiop1 be the reg and xiop0 be the int when mult?  */
	  return (is_csky_address_register_rtx_p (xiop0, strict_p)
		  && CONST_INT_P (xiop1)
		  && is_ldr_shift_p (INTVAL (xiop1), code));
	}
    }

  return 0;
}
3068
3069
3070 static int
csky_legitimate_index_p(machine_mode mode,rtx index,int strict_p)3071 csky_legitimate_index_p (machine_mode mode, rtx index, int strict_p)
3072 {
3073 if (CSKY_TARGET_ARCH (CK801))
3074 return ck801_legitimate_index_p (mode, index, strict_p);
3075 else if (CSKY_TARGET_ARCH (CK802))
3076 return ck802_legitimate_index_p (mode, index, strict_p);
3077 else
3078 return ck810_legitimate_index_p (mode, index, strict_p);
3079 }
3080
3081
3082 /* Implement TARGET_LEGITIMATE_ADDRESS_P.
3083 Recognizes RTL expressions that are valid memory addresses for an
3084 instruction. The MODE argument is the machine mode for the MEM
3085 expression that wants to use this address.
3086
3087 It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should
3088 convert common non-canonical forms to canonical form so that they will
3089 be recognized. */
3090
3091 static bool
csky_legitimate_address_p(machine_mode mode,rtx addr,bool strict_p)3092 csky_legitimate_address_p (machine_mode mode, rtx addr, bool strict_p)
3093 {
3094 enum rtx_code code = GET_CODE (addr);
3095
3096 /* Match the RTX form emitted for constant pool references.
3097 After reload constants split into minipools will have addresses
3098 from a LABEL_REF. */
3099 if (reload_completed
3100 && ((code == LABEL_REF)
3101 || (code == CONST
3102 && GET_CODE (XEXP (addr, 0)) == PLUS
3103 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == LABEL_REF
3104 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))))
3105 return 1;
3106
3107 if (is_csky_address_register_rtx_p (addr, strict_p))
3108 return 1;
3109 /* It is a pc-relative load, may be generated for constpool. */
3110 else if (GET_CODE (addr) == LABEL_REF)
3111 return 1;
3112
3113 if (code == PLUS)
3114 {
3115 rtx xop0 = XEXP (addr, 0);
3116 rtx xop1 = XEXP (addr, 1);
3117
3118 return ((is_csky_address_register_rtx_p (xop0, strict_p)
3119 && csky_legitimate_index_p (mode, xop1, strict_p))
3120 || (is_csky_address_register_rtx_p (xop1, strict_p)
3121 && csky_legitimate_index_p (mode, xop0, strict_p)));
3122 }
3123
3124 return 0;
3125 }
3126
3127
3128 /* Functions to save and restore machine-specific function data. */
3129
3130 static struct machine_function *
csky_init_machine_status(void)3131 csky_init_machine_status (void)
3132 {
3133 struct machine_function *machine;
3134
3135 machine = ggc_cleared_alloc<machine_function> ();
3136
3137 #if CSKY_FT_UNKNOWN != 0
3138 machine->func_type = CSKY_FT_UNKNOWN;
3139 #endif
3140 return machine;
3141 }
3142
3143
/* Implement INIT_EXPANDERS.  */

void
csky_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.
     The hook is invoked by the middle end whenever a new function
     body is set up.  */
  init_machine_status = csky_init_machine_status;
}
3152
3153
3154 /* Implement TARGET_CANNOT_COPY_INSN_P.
3155 We must not copy any rtx that uses a pc-relative address. */
3156
3157 static bool
csky_cannot_copy_insn_p(rtx_insn * insn)3158 csky_cannot_copy_insn_p (rtx_insn *insn)
3159 {
3160 subrtx_iterator::array_type array;
3161 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
3162 {
3163 const_rtx x = *iter;
3164 if (GET_CODE (x) == UNSPEC
3165 && (XINT (x, 1) == UNSPEC_TLS_LABEL
3166 || XINT (x, 1) == UNSPEC_PIC_SYMBOL_GOTPC_GRS))
3167 return true;
3168 }
3169 return false;
3170 }
3171
3172
/* Extract the parts of an RTL expression that is a valid memory address
   for an instruction.  Return FALSE if it is an invalid memory address.  */

/* Decomposed form of an address, filled in by decompose_csky_address.
   Components that are absent are left NULL_RTX / 0.  */
struct csky_address
{
  /* Base register, scaled index register, symbol, constant-pool label,
     and displacement (CONST_INT or UNSPEC) parts of the address.  */
  rtx base, index, symbol, label, disp;
  /* Scale factor applied to INDEX (1 when the index is unscaled).  */
  HOST_WIDE_INT scale;
};
3181
/* Decompose ADDR into the base/index/label/disp/scale fields of *OUT.
   Return true on success, false if ADDR is not a recognized address
   form.  NOTE: the symbol field is never set here; only callers that
   check addr.symbol rely on its NULL initialization.  */
static bool
decompose_csky_address (rtx addr, struct csky_address *out)
{
  rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
  HOST_WIDE_INT scale = 1;
  rtx scale_rtx = NULL_RTX;
  int i;

  /* Start from a clean slate; absent parts stay NULL/0.  */
  out->base = out->index = out->symbol = out->label = out->disp = NULL_RTX;
  out->scale = 0;

  /* A bare register is just a base.  */
  if (REG_P (addr))
    {
      out->base = addr;
      return true;
    }

  /* A bare label: pc-relative constant-pool reference.  */
  if (GET_CODE (addr) == LABEL_REF)
    {
      out->label = addr;
      return true;
    }

  /* Strip a CONST wrapper before looking at the sum.  */
  if (GET_CODE (addr) == CONST)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == PLUS)
    {
      rtx addends[2], op;

      addends[0] = XEXP (addr, 0);
      addends[1] = XEXP (addr, 1);

      /* label + offset: constant-pool entry with displacement.  */
      if (GET_CODE (addends[0]) == LABEL_REF && CONST_INT_P (addends[1]))
	{
	  out->label = addends[0];
	  out->disp = addends[1];
	  return true;
	}

      /* Canonicalize so a register addend, if any, is seen first and
	 becomes the base rather than the index.  */
      if (!REG_P (addends[0]))
	std::swap (addends[0], addends[1]);

      for (i = 0; i < 2; ++i)
	{
	  op = addends[i];
	  switch (GET_CODE (op))
	    {
	    case REG:
	      /* First register is the base, second the index.  */
	      if (!base)
		base = op;
	      else if (!index)
		index = op;
	      else
		return false;
	      break;
	    case CONST_INT:
	    case UNSPEC:
	      /* At most one displacement (constant or PIC unspec).  */
	      if (disp)
		return false;
	      disp = op;
	      break;
	    case MULT:
	      /* reg * const scale; allow the operands in either order.  */
	      if (index)
		return false;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      if (!CONST_INT_P (index) && !CONST_INT_P (scale_rtx))
		return false;
	      else if (CONST_INT_P (index))
		std::swap (index, scale_rtx);
	      scale = INTVAL (scale_rtx);
	      break;
	    case ASHIFT:
	      /* reg << const: scale is the corresponding power of two.  */
	      if (index)
		return false;
	      index = XEXP (op, 0);
	      scale_rtx = XEXP (op, 1);
	      if (!CONST_INT_P (scale_rtx))
		return false;
	      scale = scale << INTVAL (scale_rtx);
	      break;
	    default:
	      return false;
	    }
	}
    }

  /* Every remaining form needs a base register.  */
  if (!base)
    return false;

  out->base = base;
  out->index = index;
  out->disp = disp;
  out->scale = scale;

  return true;
}
3280
3281 /* Helper function for the csky_simple_mem_operand predicate. Returns
3282 true if OP is an address of the form reg + displacement. */
3283
3284 bool
csky_simple_addr_operand_p(rtx op)3285 csky_simple_addr_operand_p (rtx op)
3286 {
3287 struct csky_address addr;
3288
3289 if (!decompose_csky_address (op, &addr))
3290 return false;
3291
3292 /* FIXME The PIC related code.
3293 Check if load the symbol address from got table. */
3294 if (addr.disp && GET_CODE (addr.disp) == UNSPEC)
3295 return false;
3296 if (!addr.index && !addr.symbol)
3297 return true;
3298 return false;
3299 }
3300
3301
3302 /* Print the UNSPEC operand in X to the STREAM. */
3303
3304 static void
csky_output_pic_addr_const(FILE * stream,rtx x,int code)3305 csky_output_pic_addr_const (FILE *stream, rtx x, int code)
3306 {
3307
3308 if (GET_CODE (x) != UNSPEC)
3309 return;
3310
3311 if (UNSPEC_TLS == XINT (x, 1))
3312 {
3313 /* FIXME It is not reached */
3314 return;
3315 }
3316
3317 csky_print_operand (stream, XVECEXP (x, 0, 0), code);
3318
3319 switch (XINT (x, 1))
3320 {
3321 case UNSPEC_PIC_SYMBOL_GOTOFF:
3322 fputs ("@GOTOFF", stream);
3323 break;
3324 case UNSPEC_PIC_SYMBOL_PLT:
3325 fputs ("@PLT", stream);
3326 break;
3327 case UNSPEC_PIC_SYMBOL_GOT:
3328 fputs ("@GOT", stream);
3329 break;
3330 case UNSPEC_PIC_SYMBOL_GOTPC:
3331 fputs ("@GOTPC", stream);
3332 break;
3333 case UNSPEC_PIC_SYMBOL_BSR:
3334 break;
3335 default:
3336 break;
3337 }
3338 }
3339
3340
3341 /* Output the constpool label according to the rtx expression X. */
3342
3343 static void
csky_output_constpool_label(FILE * stream,rtx x)3344 csky_output_constpool_label (FILE *stream, rtx x)
3345 {
3346 char buf[15];
3347
3348 gcc_assert (GET_CODE (x) == LABEL_REF);
3349 x = XEXP (x, 0);
3350
3351 if (GET_CODE (x) == UNSPEC_VOLATILE && XINT (x, 1) == VUNSPEC_POOL_LABEL)
3352 {
3353 ASM_GENERATE_INTERNAL_LABEL (buf, CSKY_CONSTPOOL_LABEL_PREFIX,
3354 INTVAL (XVECEXP (x, 0, 0)));
3355 assemble_name (stream, buf);
3356 }
3357 }
3358
3359
/* Implement TARGET_PRINT_OPERAND_ADDRESS.
   Print address X in assembler syntax to STREAM.  The bracketed forms
   are pc-relative/symbolic references; the parenthesized forms are
   register-based (base, disp) or (base, index << scale) addresses.  */

static void
csky_print_operand_address (FILE *stream,
			    machine_mode mode ATTRIBUTE_UNUSED,
			    rtx x)
{

  struct csky_address addr;

  decompose_csky_address (x, &addr);

  /* Constant-pool label plus displacement: [label+disp].  */
  if (addr.label && addr.disp && GET_CODE (addr.disp) == CONST_INT)
    {
      fprintf (stream, "[");
      csky_output_constpool_label (stream, addr.label);
      fprintf (stream, "+%d]", (int) INTVAL (addr.disp));
    }
  /* Bare constant-pool label: [label].  */
  else if (addr.label)
    {
      fprintf (stream, "[");
      csky_output_constpool_label (stream, addr.label);
      fprintf (stream, "]");
    }
  /* Symbol plus displacement: [sym+disp].  NOTE(review):
     decompose_csky_address never sets addr.symbol, so these two
     branches look unreachable from here — confirm.  */
  else if (addr.symbol && addr.disp && GET_CODE (addr.disp) == CONST_INT)
    {
      fprintf (stream, "[");
      output_addr_const (stream, addr.symbol);
      fprintf (stream, "+%d]", (int) INTVAL (addr.disp));
    }
  else if (addr.symbol)
    {
      fprintf (stream, "[");
      output_addr_const (stream, addr.symbol);
      fprintf (stream, "]");
    }
  /* Base register with constant displacement: (reg, disp).  */
  else if (addr.disp && GET_CODE (addr.disp) == CONST_INT)
    fprintf (stream, "(%s, %d)",
	     reg_names[REGNO (addr.base)], (int) INTVAL (addr.disp));
  /* PIC unspec displacement; gb-relative uses bracket syntax.  */
  else if (addr.disp && GET_CODE (addr.disp) == UNSPEC)
    {
      if (REGNO (addr.base) != CSKY_GB_REGNUM)
	fprintf (stream, "(%s, ", reg_names[REGNO (addr.base)]);
      else
	fprintf (stream, "[");
      csky_output_pic_addr_const (stream, addr.disp, 0);
      fprintf (stream, "%s", (REGNO (addr.base) != CSKY_GB_REGNUM)
	       ? ")" : "]");
    }
  /* Scaled index: (base, index << log2(scale)).  */
  else if (addr.index)
    fprintf (stream, "(%s, %s << %d)",
	     reg_names[REGNO (addr.base)], reg_names[REGNO (addr.index)],
	     exact_log2 ((int) (addr.scale)));
  /* Plain base register: (reg, 0).  */
  else
    fprintf (stream, "(%s, 0)", reg_names[REGNO (addr.base)]);
}
3416
3417
/* Implement TARGET_PRINT_OPERAND.
   Print operand X (an rtx) in assembler syntax to file STREAM
   according to modifier CODE.

   'N'	print the log2(X+1), mainly used for bmaski
   'P'	print the log2(X)
   'Q'	print the log2(~X)
   'O'	print a decimal number
   'M'	print a decimal number as its negative
   'R'	print the next register or memory location along, i.e. the lsw in
	a double word value
   'H'	print the high 16 bits of a constant.  */

static void
csky_print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      /* All-ones masks to 0 (bmaski with size 0 produces -1);
	 otherwise log2 of the value plus one, modulo 32.  */
      if ((INTVAL (x) & 0xffffffff) == 0xffffffff)
	fprintf (stream, "0");
      else
	fprintf (stream, "%d",
		 (int) exact_log2 ((INTVAL (x) & 0xffffffff) + 1) % 32);
      break;
    case 'P':
      fprintf (stream, "%d",
	       (int) exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (stream, "%d",
	       (int) exact_log2 (~INTVAL (x) & 0xffffffff));
      break;
    case 'O':
      fprintf (stream, "%d", (int) INTVAL (x));
      break;
    case 'M':
      fprintf (stream, "%d", (int) (-INTVAL (x)));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], stream);
	  break;
	case MEM:
	  /* Address of the word 4 bytes further on.  */
	  csky_print_operand_address
	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'H':
      fprintf (stream, "%ld", (long)((INTVAL (x) & 0xFFFF0000) >> 16));
      break;
    default:
      /* No modifier: print registers, memory, PIC unspecs, or any
	 other constant in its natural form.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], stream);
	  break;
	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;
	case UNSPEC:
	  csky_output_pic_addr_const (stream, x, code);
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
3494
3495
3496
3497 /* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS. */
3498
3499 static bool
csky_allocate_stack_slots_for_args(void)3500 csky_allocate_stack_slots_for_args (void)
3501 {
3502 /* Naked functions should not allocate stack slots for arguments. */
3503 return !CSKY_FUNCTION_IS_NAKED (get_csky_current_func_type ());
3504 }
3505
3506
3507 /* Can we generate a constant with a single instruction, without using
3508 lrw? */
3509
3510 static int
const_ok_for_cskyv2(HOST_WIDE_INT value)3511 const_ok_for_cskyv2 (HOST_WIDE_INT value)
3512 {
3513 /* Try exact power of two. It can be generated by bgeni. */
3514 if (CSKY_CONST_OK_FOR_Ub (value))
3515 return 1;
3516
3517 /* Try exact power of two - 1. It can be generated by bmaski. */
3518 if (CSKY_CONST_OK_FOR_Uc (value) && value != -1)
3519 return 1;
3520
3521 /* Try if it can be generated by movi. */
3522 if (CSKY_CONST_OK_FOR_I (value))
3523 return 1;
3524
3525 /* The constant can be generated by movih.
3526 Notice that movih is a 32-bit instruction. */
3527 if (CSKY_CONST_OK_FOR_MOVIH (value))
3528 return 1;
3529
3530 return 0;
3531 }
3532
3533
/* Tricks for synthesizing constants from values that can be directly
   manipulated by machine instructions.  Returned by
   try_csky_constant_tricks to tell the output code which fixup
   instruction (if any) follows the initial constant load.  */

enum csky_inline_const_type
{
  IC_UNINLINABLE = 0, /* Not inlineable */
  IC_SINGLE,	      /* Single instruction */
  IC_APPEND_NOT,      /* Single instruction followed by a not */
  IC_APPEND_ADDI,     /* Single insn followed by an addi */
  IC_APPEND_SUBI,     /* Single insn followed by a subi */
  IC_BGENI_ADDI,      /* Single insn(bgeni) followed by an addi */
  IC_BGENI_SUBI,      /* Single insn(bgeni) followed by a subi */
  IC_APPEND_BSETI,    /* Single insn followed by bseti */
  IC_APPEND_MOVI,     /* Single insn followed by movi */
  IC_APPEND_BCLRI,    /* Single insn followed by bclri */
  IC_APPEND_ROTLI,    /* Single insn followed by rotli */
  IC_APPEND_LSLI,     /* Single insn followed by lsli */
  IC_APPEND_IXH,      /* Single insn followed by ixh */
  IC_APPEND_IXW       /* Single insn followed by ixw */
};
3554
3555
3556 /* Try tricks to load a constant inline and return the trick number if
3557 success, or IC_UNINLINABLE. */
3558
3559 static enum csky_inline_const_type
try_csky_constant_tricks(HOST_WIDE_INT value,HOST_WIDE_INT * x,HOST_WIDE_INT * y)3560 try_csky_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT *x,
3561 HOST_WIDE_INT *y)
3562 {
3563 HOST_WIDE_INT i, value_invert;
3564 unsigned HOST_WIDE_INT bit, shf, rot, lobits, hibits;
3565
3566 value &= 0xffffffff;
3567 value_invert = ~value & 0xffffffff;
3568
3569 if (const_ok_for_cskyv2 (value))
3570 {
3571 *x = value;
3572 return IC_SINGLE;
3573 }
3574
3575 /* Since movih is 32 bits, do not use it here, better code may
3576 be generated later. */
3577 if (const_ok_for_cskyv2 (value_invert)
3578 && !CSKY_CONST_OK_FOR_MOVIH (value_invert))
3579 {
3580 *x = value_invert;
3581 return IC_APPEND_NOT;
3582 }
3583
3584 /* One immediate generate instruction, and one 16-bit subi or addi. */
3585 for (i = 1; i <= 32; i++)
3586 {
3587 if (const_ok_for_cskyv2 (value - i)
3588 && !CSKY_CONST_OK_FOR_MOVIH (value - i))
3589 {
3590 *x = value - i;
3591 *y = i;
3592 return IC_APPEND_ADDI;
3593 }
3594
3595 if (const_ok_for_cskyv2 (value + i)
3596 && !CSKY_CONST_OK_FOR_MOVIH (value - i))
3597 {
3598 *x = value + i;
3599 *y = i;
3600 return IC_APPEND_SUBI;
3601 }
3602 }
3603
3604 /* Generate bgeni + addi. */
3605 if (CSKY_CONST_OK_FOR_Ub (value & 0xfffff000))
3606 {
3607 *x = (value & 0xfffff000);
3608 *y = (value & 0xfff);
3609 return IC_BGENI_ADDI;
3610 }
3611
3612 /* Generate bgeni + subi. */
3613 lobits = value & 0xfff;
3614 hibits = (unsigned HOST_WIDE_INT)(value & 0xfffff000) + (1 << 12);
3615 if (exact_log2 (hibits) >= 1
3616 && exact_log2 (hibits) <= 30
3617 && lobits != 0)
3618 {
3619 *x = hibits;
3620 *y = (0x1000 - lobits);
3621 return IC_BGENI_SUBI;
3622 }
3623
3624 /* One immediate generate instruction, and one bseti or bclri. */
3625 bit = 0x80000000ULL;
3626 for (i = 0; i <= 31; i++)
3627 {
3628 if (const_ok_for_cskyv2 (value & ~bit)
3629 && !CSKY_CONST_OK_FOR_MOVIH (value & ~bit))
3630 {
3631 *y = bit;
3632 *x = (value & ~bit);
3633 return IC_APPEND_BSETI;
3634 }
3635
3636 if (const_ok_for_cskyv2 (value | bit)
3637 && !CSKY_CONST_OK_FOR_MOVIH (value | bit))
3638 {
3639 *y = ~bit & 0xffffffff;
3640 *x = value | bit;
3641 return IC_APPEND_BCLRI;
3642 }
3643
3644 bit >>= 1;
3645 }
3646
3647 /* One immediate generate instruction, and one rotli or lsli. */
3648 shf = value;
3649 rot = value;
3650 for (i = 1; i < 31; i++)
3651 {
3652 int c;
3653
3654 /* Rotate left. */
3655 c = rot << 31;
3656 rot >>= 1;
3657 rot &= 0x7FFFFFFF;
3658 rot |= c;
3659
3660 if (const_ok_for_cskyv2 (rot) && !CSKY_CONST_OK_FOR_MOVIH (rot))
3661 {
3662 *y = i;
3663 *x = rot;
3664 return IC_APPEND_ROTLI;
3665 }
3666
3667 /* Can't use logical shift when low order bit is one. */
3668 if (shf & 1)
3669 shf = 0;
3670 else
3671 shf >>= 1;
3672
3673 if (shf != 0 && const_ok_for_cskyv2 (shf)
3674 && !CSKY_CONST_OK_FOR_MOVIH (shf))
3675 {
3676 *y = i;
3677 *x = shf;
3678 return IC_APPEND_LSLI;
3679 }
3680 }
3681
3682 /* One immediate generate instruction, and one ixh. */
3683 if (CSKY_ISA_FEATURE (E2)
3684 && (value % 3) == 0
3685 && const_ok_for_cskyv2 (value / 3)
3686 && !CSKY_CONST_OK_FOR_MOVIH (value / 3))
3687 {
3688 *x = value / 3;
3689 return IC_APPEND_IXH;
3690 }
3691
3692 /* One immediate generate instruction, and one ixw. */
3693 if (CSKY_ISA_FEATURE (E2)
3694 && (value % 5) == 0
3695 && const_ok_for_cskyv2 (value / 5)
3696 && !CSKY_CONST_OK_FOR_MOVIH (value / 5))
3697 {
3698 *x = value / 5;
3699 return IC_APPEND_IXW;
3700 }
3701
3702 /* Generate movih + bseti. */
3703 if (CSKY_CONST_OK_FOR_Ub (value & 0xffff))
3704 {
3705 *x = value & 0xffff0000;
3706 *y = value & 0xffff;
3707 return IC_APPEND_BSETI;
3708 }
3709
3710 /* Generate movih + not. */
3711 if (CSKY_CONST_OK_FOR_MOVIH (value_invert))
3712 {
3713 *x = value_invert;
3714 return IC_APPEND_NOT;
3715 }
3716
3717 /* One movih, and one 16bits addi or subi. */
3718 for (i = 1; i <= 32; i++)
3719 {
3720 if (CSKY_CONST_OK_FOR_MOVIH (value - i))
3721 {
3722 *x = value - i;
3723 *y = i;
3724 return IC_APPEND_ADDI;
3725 }
3726
3727 if (CSKY_CONST_OK_FOR_MOVIH (value + i))
3728 {
3729 *x = value + i;
3730 *y = i;
3731 return IC_APPEND_SUBI;
3732 }
3733 }
3734
3735 /* One movih, and one bseti or bclri. */
3736 bit = 0x80000000ULL;
3737 for (i = 0; i <= 31; i++)
3738 {
3739 if (CSKY_CONST_OK_FOR_MOVIH (value & ~bit))
3740 {
3741 *y = bit;
3742 *x = value & ~bit;
3743 return IC_APPEND_BSETI;
3744 }
3745
3746 if (CSKY_CONST_OK_FOR_MOVIH (value | bit))
3747 {
3748 *y = ~bit & 0xffffffff;
3749 *x = value | bit;
3750 return IC_APPEND_BCLRI;
3751 }
3752
3753 bit >>= 1;
3754 }
3755
3756 /* One movih, and one rotli or lsli. */
3757 shf = value;
3758 rot = value;
3759 for (i = 1; i < 31; i++)
3760 {
3761 int c;
3762
3763 /* Rotate left. */
3764 c = rot << 31;
3765 rot >>= 1;
3766 rot &= 0x7FFFFFFF;
3767 rot |= c;
3768
3769 if (CSKY_CONST_OK_FOR_MOVIH (rot))
3770 {
3771 *y = i;
3772 *x = rot;
3773 return IC_APPEND_ROTLI;
3774 }
3775
3776 /* Can't use logical shift when low order bit is one. */
3777 if (shf & 1)
3778 shf = 0;
3779 else
3780 shf >>= 1;
3781
3782 if (shf != 0 && CSKY_CONST_OK_FOR_MOVIH (shf))
3783 {
3784 *y = i;
3785 *x = shf;
3786 return IC_APPEND_LSLI;
3787 }
3788 }
3789
3790 return IC_UNINLINABLE;
3791 }
3792
3793
/* Actually output a constant using a trick.
   OPERANDS[0] is the destination register, OPERANDS[1] the CONST_INT
   to synthesize.  Emits the one- or two-instruction sequence chosen by
   try_csky_constant_tricks and returns the empty string.
   FIXME: I think this would be better handled by a splitter than at the
   asm output level.  */

static const char *
csky_output_inline_const (machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  enum csky_inline_const_type trick_type;
  rtx out_operands[3];
  char buf[256];
  char load_op[128];
  const char *dst_fmt;
  HOST_WIDE_INT value = INTVAL (operands[1]);
  int ivalue = (int) value;
  unsigned int uvalue = (unsigned int) value;

  trick_type = try_csky_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_csky_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_type != IC_UNINLINABLE);

  /* Operands: 0 = dst, 1 = load immedate., 2 = adjust immedate.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  if (trick_type != IC_SINGLE && trick_type != IC_APPEND_NOT)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  For big-endian DImode, the
     constant goes into the low word, which is %R0.  */
  if (mode == DImode && TARGET_BIG_ENDIAN)
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  /* Build the initial load instruction into LOAD_OP.
     Try movi16: 0~31,movi32: 0~65535.  */
  if (CSKY_CONST_OK_FOR_I (x))
    sprintf (load_op, "movi\t%s, %%1", dst_fmt);
  /* Try exact power of two - 1.  */
  else if (CSKY_CONST_OK_FOR_Uc (x))
    sprintf (load_op, "bmaski\t%s, %%N1", dst_fmt);
  /* Try movih.  */
  else if (CSKY_CONST_OK_FOR_MOVIH (x))
    sprintf (load_op, "movih\t%s, %%H1", dst_fmt);
  else
    {
      /* try_csky_constant_tricks only returns bases loadable by one of
	 the instructions above.  */
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  /* Append the fixup instruction for the chosen trick; the trailing
     comment shows the full constant in decimal and hex.  */
  switch (trick_type)
    {
    case IC_SINGLE:
      strcpy (buf, load_op);
      break;
    /* Add instruction 'not'.  */
    case IC_APPEND_NOT:
      sprintf (buf, "%s\n\tnot\t%s, %s\t// %d 0x%x", load_op, dst_fmt,
	       dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'addi'.  */
    case IC_APPEND_ADDI:
      sprintf (buf, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'subi'.  */
    case IC_APPEND_SUBI:
      sprintf (buf, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'addi', the last instruction is bgeni.  */
    case IC_BGENI_ADDI:
      sprintf (buf, "%s\n\taddi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'subi', the last instruction is bgeni.  */
    case IC_BGENI_SUBI:
      sprintf (buf, "%s\n\tsubi\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'bseti'.  */
    case IC_APPEND_BSETI:
      sprintf (buf, "%s\n\tbseti\t%s, %s, %%P2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'movi'.  */
    case IC_APPEND_MOVI:
      sprintf (buf, "%s\n\tmovi\t%s, %%2\t// %d 0x%x", load_op, dst_fmt,
	       ivalue, uvalue);
      break;
    /* Add instruction 'bclri'.  */
    case IC_APPEND_BCLRI:
      sprintf (buf, "%s\n\tbclri\t%s, %s, %%Q2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'rotli'.  */
    case IC_APPEND_ROTLI:
      sprintf (buf, "%s\n\trotli\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'lsli'.  */
    case IC_APPEND_LSLI:
      sprintf (buf, "%s\n\tlsli\t%s, %s, %%2\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'ixh'.  */
    case IC_APPEND_IXH:
      sprintf (buf, "%s\n\tixh\t%s, %s, %s\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    /* Add instruction 'ixw'.  */
    case IC_APPEND_IXW:
      sprintf (buf, "%s\n\tixw\t%s, %s, %s\t// %d 0x%x", load_op,
	       dst_fmt, dst_fmt, dst_fmt, ivalue, uvalue);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
3917
3918 /* This is a helper function for the Uo constraint for movsi patterns. */
3919
3920 bool
csky_inlinable_constant(HOST_WIDE_INT value)3921 csky_inlinable_constant (HOST_WIDE_INT value)
3922 {
3923 HOST_WIDE_INT x, y;
3924 return (!(CSKY_TARGET_ARCH (CK802) || CSKY_TARGET_ARCH (CK801))
3925 && try_csky_constant_tricks (value, &x, &y));
3926 }
3927
3928
3929 /* Return true if the constant VAL can be expressed by an 8-bit constant
3930 with a shift value, filling in *BASE and *SHIFT. */
3931
3932 bool
csky_shifted_imm8_constant(unsigned HOST_WIDE_INT val,unsigned int * base,unsigned int * shift)3933 csky_shifted_imm8_constant (unsigned HOST_WIDE_INT val,
3934 unsigned int *base, unsigned int *shift)
3935 {
3936 unsigned HOST_WIDE_INT mask = 0xff;
3937 int i;
3938 val = val & (unsigned HOST_WIDE_INT) 0xffffffffu;
3939 if (val == 0)
3940 return 0;
3941
3942 for (i = 0; i < 25; i++)
3943 if ((val & (mask << i)) == val)
3944 {
3945 if (base)
3946 *base = (unsigned int) (val >> i);
3947 if (shift)
3948 *shift = (unsigned int) i;
3949 return true;
3950 }
3951
3952 return false;
3953 }
3954
3955
/* Output a move of a word or less value.  Returns the assembler
   template for the move, "#" to ask for a split, or emits the sequence
   directly (via csky_output_inline_const) and returns "".  */

const char *
csky_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		  machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];
  struct csky_address op0, op1;

  if (REG_P (dst))
    {
      /* The situation mov reg to reg.  */
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* hilo registers exchange their places,
	     and their order of Dimode as same as other
	     general registers in LITTLE_ENDIAN mode.  */
	  if (TARGET_BIG_ENDIAN)
	    {
	      if (dstreg == CSKY_HI_REGNUM)
		return "mthi\t%1";
	      else if (dstreg == CSKY_LO_REGNUM)
		return "mtlo\t%1";
	      else if (srcreg == CSKY_HI_REGNUM)
		return "mfhi\t%0";
	      else if (srcreg == CSKY_LO_REGNUM)
		return "mflo\t%0";
	    }
	  else
	    {
	      if (dstreg == CSKY_HI_REGNUM)
		return "mtlo\t%1";
	      else if (dstreg == CSKY_LO_REGNUM)
		return "mthi\t%1";
	      else if (srcreg == CSKY_HI_REGNUM)
		return "mflo\t%0";
	      else if (srcreg == CSKY_LO_REGNUM)
		return "mfhi\t%0";
	    }

	  /* FPU register moves: vreg<->vreg, general->vreg, vreg->general.  */
	  if (CSKY_VREG_P (dstreg) && CSKY_VREG_P (srcreg))
	    return "fmovs\t%0, %1";
	  if (CSKY_VREG_P (dstreg))
	    return "fmtvrl\t%0, %1";
	  if (CSKY_VREG_P (srcreg))
	    return "fmfvrl\t%0, %1";

	  /* Reading the condition register needs mvc.  */
	  if (REGNO (src) == CSKY_CC_REGNUM)
	    return "mvc\t%0";
	  else
	    return "mov\t%0, %1";
	}
      /* The situation mov memory to reg.  */
      else if (GET_CODE (src) == MEM)
	{
	  decompose_csky_address (XEXP (src, 0), &op1);

	  /* Scaled-index addresses use the ldr family.  */
	  if (op1.index)
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
		return "ldr.h\t%0, %1";
	      case E_QImode:
		return "ldr.b\t%0, %1";
	      case E_SImode:
	      case E_SFmode:
		if (CSKY_VREG_P (REGNO (dst)))
		  return "fldrs\t%0, %1";
		else
		  return "ldr.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	  /* Generate lrw rx, [LABEL].  This happens when the compiler
	     generates constant pool references and uses lrw to get the
	     constant into memory.  */
	  else if (op1.label)
	    return "lrw\t%0, %1";
	  /* Generate lrs.w rx, [symbol@GOT/PLT].  */
	  else if (flag_pic == 1 && op1.disp && GET_CODE (op1.disp) == UNSPEC)
	    return "lrs.w\t%0, %1";
	  else
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
		return "ld.h\t%0, %1";
	      case E_QImode:
		return "ld.b\t%0, %1";
	      case E_SFmode:
	      case E_SImode:
		if (CSKY_VREG_P (REGNO (dst)))
		  return "flds\t%0, %1";
		else
		  return "ld.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	}
      /* The situation mov integer to reg.  */
      else if (GET_CODE (src) == CONST_INT ||
	       (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode))
	{
	  HOST_WIDE_INT x, y;
	  const REAL_VALUE_TYPE *d;
	  long l;

	  /* Convert an SFmode constant to its integer bit pattern so it
	     can be synthesized like any other 32-bit constant.  */
	  if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
	    {
	      d = CONST_DOUBLE_REAL_VALUE (src);
	      REAL_VALUE_TO_TARGET_SINGLE (*d, l);
	      operands[1] = GEN_INT (l);
	      src = operands[1];
	    }

	  if (try_csky_constant_tricks (INTVAL (src), &x, &y))
	    return csky_output_inline_const (SImode, operands);
	  /* Return '#' to split it.  */
	  else if (CSKY_CONST_OK_FOR_T (INTVAL (src)))
	    return "#";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (TARGET_ANCHOR && GET_CODE (src) == SYMBOL_REF)
	{
	  if (SYMBOL_REF_FUNCTION_P (src))
	    return "lrw\t%0, %1@BTEXT";
	  else
	    return "lrw\t%0, %1@BDATA";
	}
      else if (GET_CODE (src) == UNSPEC
	       && XINT (src, 1) == UNSPEC_PIC_SYMBOL_GRS)
	return "grs\t%0, %1";
      else
	return "lrw\t%0, %1";
    }
  else if (GET_CODE (dst) == MEM)
    {
      decompose_csky_address (XEXP (dst, 0), &op0);

      /* Scaled-index addresses use the str family.  */
      if (op0.index)
	switch (GET_MODE (src))
	  {
	  case E_HImode:
	    return "str.h\t%1, %0";
	  case E_QImode:
	    return "str.b\t%1, %0";
	  case E_SFmode:
	  case E_SImode:
	    if (CSKY_VREG_P (REGNO (src)))
	      return "fstrs\t%1, %0";
	    else
	      return "str.w\t%1, %0";
	  default:
	    gcc_unreachable ();
	  }
      else
	switch (GET_MODE (dst))
	  {
	  case E_HImode:
	    return "st.h\t%1, %0";
	  case E_QImode:
	    return "st.b\t%1, %0";
	  case E_SImode:
	  case E_SFmode:
	    if (CSKY_VREG_P (REGNO (src)))
	      return "fsts\t%1, %0";
	    else
	      return "st.w\t%1, %0";
	  default:
	    gcc_unreachable ();
	  }
    }

  gcc_unreachable ();
}
4135
4136
/* Output a move of a word or less value.  Specific for ck801, which
   has no FPU, hi/lo, or PIC forms; returns the assembler template or
   "#" to request a split.  */

const char *
csky_output_ck801_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
			machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];
  struct csky_address op1;

  if (REG_P (dst))
    {
      if (REG_P (src))
	return "mov\t%0, %1";
      else if (GET_CODE (src) == MEM)
	{
	  decompose_csky_address (XEXP (src, 0), &op1);

	  /* Generate lrw rx, [LABEL].  This happens when the compiler
	     generates constant pool references and uses lrw to get the
	     constant in memory.  */
	  if (op1.label)
	    return "lrw\t%0, %1";
	  else
	    switch (GET_MODE (src))
	      {
	      case E_HImode:
		return "ld.h\t%0, %1";
	      case E_QImode:
		return "ld.b\t%0, %1";
	      case E_SFmode:
	      case E_SImode:
		return "ld.w\t%0, %1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  /* movi is only available for the low registers (r0-r7);
	     higher destinations must load from the constant pool.  */
	  if (REGNO (dst) > 7)
	    return "lrw\t%0, %x1\t";
	  else if (CSKY_CONST_OK_FOR_N (INTVAL (src) + 1))
	    return "movi\t%0, %1";
	  /* Return '#' to split it.  */
	  else if (CSKY_CONST_OK_FOR_T (INTVAL (src)))
	    return "#";
	  else if (csky_shifted_imm8_constant (INTVAL (src), NULL, NULL))
	    return "#";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (GET_CODE (src) == CONST_DOUBLE && GET_MODE (src) == SFmode)
	{
	  const REAL_VALUE_TYPE *d;
	  long l;

	  /* Use the integer bit pattern of the float constant.  */
	  d = CONST_DOUBLE_REAL_VALUE (src);
	  REAL_VALUE_TO_TARGET_SINGLE (*d, l);
	  operands[1] = GEN_INT (l);
	  src = operands[1];

	  if (CSKY_CONST_OK_FOR_N (INTVAL (src) + 1))
	    return "movi\t%0, %1";
	  else
	    return "lrw\t%0, %x1\t";
	}
      else if (TARGET_ANCHOR && GET_CODE (src) == SYMBOL_REF)
	{
	  if (SYMBOL_REF_FUNCTION_P (src))
	    return "lrw\t%0, %1@BTEXT";
	  else
	    return "lrw\t%0, %1@BDATA";
	}
      else
	return "lrw\t%0, %1";
    }
  else if (GET_CODE (dst) == MEM)
    switch (GET_MODE (dst))
      {
      case E_HImode:
	return "st.h\t%1, %0";
      case E_QImode:
	return "st.b\t%1, %0";
      case E_SImode:
      case E_SFmode:
	return "st.w\t%1, %0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}
4229
4230
/* Return a sequence of instructions to perform DI or DF move.
   Since the CSKY cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.
   OPERANDS[0] is the destination, OPERANDS[1] the source; in the
   returned templates %R0/%R1 name the second register of the pair.  */

const char *
csky_output_movedouble (rtx operands[],
			machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (REG_P (dst))
    {
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* HI/LO accumulator moves need the dedicated mfhi/mflo or
	     mthi/mtlo instructions; which half goes where depends on
	     endianness.  */
	  if (CSKY_HILO_REG_P (srcreg))
	    {
	      if (TARGET_BIG_ENDIAN)
		return "mfhi\t%0\n\tmflo\t%R0";
	      else
		return "mfhi\t%R0\n\tmflo\t%0";
	    }
	  else if (CSKY_HILO_REG_P (dstreg))
	    {
	      if (TARGET_BIG_ENDIAN)
		return "mthi\t%1\n\tmtlo\t%R1";
	      else
		return "mthi\t%R1\n\tmtlo\t%1";
	    }
	  else if (CSKY_VREG_P (srcreg) && CSKY_VREG_P (dstreg))
	    return "fmovd\t%0, %1";
	  else if (CSKY_VREG_P (srcreg))
	    {
	      /* Since the vector registers in fpuv2_soft processors
		 like ck803f are 32 bits wide, just one insn is needed
		 to complete the move operation.  */
	      if (TARGET_SOFT_FPU)
		return "fmfvrl\t%0, %1";
	      else if (TARGET_BIG_ENDIAN)
		return "fmfvrh\t%0, %1\n\tfmfvrl\t%R0, %1";
	      else
		return "fmfvrh\t%R0, %1\n\tfmfvrl\t%0, %1";
	    }
	  else if (CSKY_VREG_P (dstreg))
	    {
	      if (TARGET_SOFT_FPU)
		return "fmtvrl\t%0, %1";
	      else if (TARGET_BIG_ENDIAN)
		return "fmtvrh\t%0, %1\n\tfmtvrl\t%0, %R1";
	      else
		return "fmtvrh\t%0, %R1\n\tfmtvrl\t%0, %1";
	    }

	  /* Ensure the second source not overwritten.  If the low
	     destination register is the high source register, copy the
	     high halves first.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0, %R1\n\tmov\t%0, %1";
	  else
	    return "mov\t%0, %1\n\tmov\t%R0, %R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;
	  struct csky_address op0;

	  decompose_csky_address (XEXP (src, 0), &op0);

	  /* Constant-pool reference: load both halves with lrw.  */
	  if (GET_CODE (memexp) == LABEL_REF
	      || (GET_CODE (memexp) == CONST
		  && GET_CODE (XEXP (memexp, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (memexp, 0), 0)) == LABEL_REF))
	    return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();


	  /* When FPUV2.  */
	  if (CSKY_VREG_P (dstreg))
	    {
	      if (op0.index)
		return "fldrd\t%0, %1";
	      else
		return "fldd\t%0, %1";
	    }
	  /* FIXME length attribute is wrong here.  */
	  if (dstreg == basereg)
	    /* Just load them in reverse order.  The first load would
	       otherwise clobber the base register.  */
	    return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
	  else
	    return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
	}
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Split the 64-bit constant into two 32-bit halves
	     (operands[2] low-output-first per split_double) and load
	     each half with the cheapest available instruction.  */
	  split_double (src, operands + 2, operands + 3);

	  if (CSKY_CONST_OK_FOR_I (INTVAL (operands[2])))
	    output_asm_insn ("movi\t%0, %2", operands);
	  else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands[2])))
	    output_asm_insn ("bmaski\t%0, %N2", operands);
	  else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands[2])))
	    output_asm_insn ("bgeni\t%0, %P2", operands);
	  else
	    output_asm_insn ("lrw\t%0, %2", operands);

	  if (CSKY_CONST_OK_FOR_I (INTVAL (operands[3])))
	    output_asm_insn ("movi\t%R0, %3", operands);
	  else if (CSKY_CONST_OK_FOR_Uc (INTVAL (operands[3])))
	    output_asm_insn ("bmaski\t%R0, %N3", operands);

	  else if (CSKY_CONST_OK_FOR_Ub (INTVAL (operands[3])))
	    output_asm_insn ("bgeni\t%R0, %P3", operands);
	  else
	    output_asm_insn ("lrw\t%R0, %3", operands);

	  return "";
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    {
      rtx memexp = XEXP (dst, 0);
      int srcreg = REGNO (src);
      int basereg = -1;
      struct csky_address op0;

      decompose_csky_address (XEXP (dst, 0), &op0);

      if (GET_CODE (memexp) == REG)
	basereg = REGNO (memexp);
      else if (GET_CODE (memexp) == PLUS)
	{
	  if (GET_CODE (XEXP (memexp, 0)) == REG)
	    basereg = REGNO (XEXP (memexp, 0));
	  else if (GET_CODE (XEXP (memexp, 1)) == REG)
	    basereg = REGNO (XEXP (memexp, 1));
	  else
	    gcc_unreachable ();
	}
      else
	gcc_unreachable ();

      /* When FPUV2.  */
      if (CSKY_VREG_P (srcreg))
	{
	  if (op0.index)
	    return "fstrd\t%1, %0";
	  else
	    return "fstd\t%1, %0";
	}
      /* FIXME length attribute is wrong here.  */
      if (srcreg == basereg)
	/* Just load them in reverse order.  */
	return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
      else
	return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
    }
  else
    gcc_unreachable ();
}
4406
4407
/* Output a DI or DF move for CK801.  Like csky_output_movedouble but
   without the HI/LO and FPU register cases; handles only GPR, memory
   and constant operands.  OPERANDS[0] is the destination, OPERANDS[1]
   the source; %R0/%R1 name the second register of each pair.  */

const char *
csky_output_ck801_movedouble (rtx operands[],
			      machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (REG_P (dst))
    {
      if (REG_P (src))
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  If the low
	     destination overlaps the high source, copy high halves
	     first.  */
	  if (srcreg + 1 == dstreg)
	    return "mov\t%R0, %R1\n\tmov\t%0, %1";
	  else
	    return "mov\t%0, %1\n\tmov\t%R0, %R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;
	  struct csky_address op0;

	  decompose_csky_address (XEXP (src, 0), &op0);

	  /* Constant-pool reference: load both halves with lrw.  */
	  if (GET_CODE (memexp) == LABEL_REF
	      || (GET_CODE (memexp) == CONST
		  && GET_CODE (XEXP (memexp, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (memexp, 0), 0)) == LABEL_REF))
	    return "lrw\t%0, [%1]\n\tlrw\t%R0, [%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* FIXME length attribute is wrong here.  */
	  if (dstreg == basereg)
	    /* Just load them in reverse order.  The first load would
	       otherwise clobber the base register.  */
	    return "ld.w\t%R0, %R1\n\tld.w\t%0, %1";
	  else
	    return "ld.w\t%0, %1\n\tld.w\t%R0, %R1";
	}
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Split the constant into two 32-bit halves; only low
	     registers can use the 16-bit movi (and the high half needs
	     dst+1, hence the tighter <= 6 check).  */
	  split_double (src, operands + 2, operands + 3);

	  if (REGNO (dst) <= 7
	      && CSKY_CONST_OK_FOR_N (INTVAL (operands[2]) + 1))
	    output_asm_insn ("movi\t%0, %2", operands);
	  else
	    output_asm_insn ("lrw\t%0, %2", operands);


	  if (REGNO (dst) <= 6
	      && CSKY_CONST_OK_FOR_N (INTVAL (operands[3]) + 1))
	    output_asm_insn ("movi\t%R0, %3", operands);
	  else
	    output_asm_insn ("lrw\t%R0, %3", operands);

	  return "";


	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    {
      rtx memexp = XEXP (dst, 0);
      int srcreg = REGNO (src);
      int basereg = -1;
      struct csky_address op0;

      decompose_csky_address (XEXP (dst, 0), &op0);

      if (GET_CODE (memexp) == REG)
	basereg = REGNO (memexp);
      else if (GET_CODE (memexp) == PLUS)
	{
	  if (GET_CODE (XEXP (memexp, 0)) == REG)
	    basereg = REGNO (XEXP (memexp, 0));
	  else if (GET_CODE (XEXP (memexp, 1)) == REG)
	    basereg = REGNO (XEXP (memexp, 1));
	  else
	    gcc_unreachable ();
	}
      else
	gcc_unreachable ();

      /* FIXME length attribute is wrong here.  */
      if (srcreg == basereg)
	/* Just load them in reverse order.  */
	return "st.w\t%R1, %R0\n\tst.w\t%1, %0";
      else
	return "st.w\t%1, %0\n\tst.w\t%R1, %R0";
    }
  else
    gcc_unreachable ();
}
4520
4521 /* Split operands for an AND expression when OPERANDS[2] is a constant.
4522 Note operands[0] is marked earlyclobber in this case and can be
4523 overwritten. Return true if "DONE", false otherwise. */
4524 bool
csky_split_and(rtx * operands)4525 csky_split_and (rtx *operands)
4526 {
4527 HOST_WIDE_INT mask = INTVAL (operands[2]);
4528 rtx not_value = GEN_INT (~mask);
4529 int i;
4530
4531 /* All zeros or all ones can be handled by a move instruction. */
4532 if (mask == 0)
4533 {
4534 emit_move_insn (operands[0], const0_rtx);
4535 return true;
4536 }
4537 if (mask == -1)
4538 {
4539 emit_move_insn (operands[0], operands[1]);
4540 return true;
4541 }
4542
4543 /* Check for constants that can be handled directly by the 32-bit andi
4544 instruction. */
4545 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (operands[2], SImode))
4546 return false;
4547
4548 /* Try to transform to andni instruction. */
4549 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (not_value, SImode))
4550 {
4551 emit_insn (gen_cskyv2_andnsi3 (operands[0], not_value, operands[1]));
4552 return true;
4553 }
4554
4555 /* If there are only one or two 0 bits in the constant, we can
4556 replace the operation with bclri instructions on those bits.
4557 Note CK801 has only the 16-bit bclri that operates on a single
4558 register, so we must count a move if we are post-reload. */
4559 if (popcount_hwi (~mask & 0xffffffff)
4560 <= (reload_completed && !CSKY_ISA_FEATURE (E2) ? 1 : 2))
4561 {
4562 rtx input = operands[1];
4563
4564 if (!CSKY_ISA_FEATURE (E2))
4565 {
4566 emit_move_insn (operands[0], input);
4567 input = operands[0];
4568 }
4569
4570 for (i = 0; i < 32; i++)
4571 if ((mask & (1 << i)) == 0x0)
4572 {
4573 emit_insn (gen_bclri (operands[0], input, GEN_INT (i)));
4574 input = operands[0];
4575 }
4576 return true;
4577 }
4578
4579 /* If the constant mask is outside the [0, 4095] range for
4580 constraint O, or if constraint O is not allowed (ck801),
4581 maybe the constant is a contiguous bit range that we can
4582 handle by bit extract (low bits) or shifts (high bits). */
4583 for (i = (CSKY_ISA_FEATURE (E2) ? 13 : 1); i < 32; i++)
4584 {
4585 if ((((HOST_WIDE_INT) 1) << i) - 1 == mask)
4586 {
4587 if (CSKY_ISA_FEATURE (2E3))
4588 emit_insn (gen_cskyv2_extzv (operands[0], operands[1],
4589 GEN_INT (i), const0_rtx));
4590 else
4591 {
4592 rtx shift = GEN_INT (32 - i);
4593 rtx reg = (reload_completed
4594 ? operands[0] : gen_reg_rtx (SImode));
4595
4596 emit_insn (gen_ashlsi3 (reg, operands[1], shift));
4597 emit_insn (gen_lshrsi3 (operands[0], reg, shift));
4598 }
4599 return true;
4600 }
4601 else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~mask)
4602 {
4603 rtx shift = GEN_INT (i);
4604 rtx reg = (reload_completed
4605 ? operands[0] : gen_reg_rtx (SImode));
4606
4607 emit_insn (gen_lshrsi3 (reg, operands[1], shift));
4608 emit_insn (gen_ashlsi3 (operands[0], reg, shift));
4609 return true;
4610 }
4611 }
4612
4613 /* If the constant is a negative number, it seems better to use
4614 andn and copy the NOT_VALUE to a register instead of the
4615 original value, since the NOT_VALUE is always smaller and thus
4616 more likely to be representable as a small constant.
4617 This transformation can only be done before reload because
4618 it requires a temporary. Hopefully register allocation can get
4619 rid of the extra move required for CK801. */
4620 if (!reload_completed && INTVAL (operands[2]) < 0)
4621 {
4622 rtx reg = copy_to_mode_reg (SImode, not_value);
4623
4624 if (CSKY_ISA_FEATURE (E2))
4625 emit_insn (gen_cskyv2_andnsi3 (operands[0], reg, operands[1]));
4626 else
4627 {
4628 emit_move_insn (operands[0], operands[1]);
4629 emit_insn (gen_ck801_andnsi3 (operands[0], reg, operands[0]));
4630 }
4631 return true;
4632 }
4633
4634 /* If the above ways are all not working, move the constant
4635 to a register. We can clobber operands[0] as it is
4636 marked earlyclobber in the insn constraints, but then we have to
4637 swap operands 1 and 2 to match the constraints on the 2-operand
4638 16-bit and instruction. */
4639 if (reload_completed)
4640 {
4641 emit_move_insn (operands[0], operands[2]);
4642 operands[2] = operands[1];
4643 operands[1] = operands[0];
4644 }
4645 else
4646 operands[2] = copy_to_mode_reg (SImode, operands[2]);
4647 return false;
4648 }
4649
4650 /* Split operands for an IOR expression when OPERANDS[2] is a constant.
4651 Note operands[0] is marked earlyclobber in this case and can be
4652 overwritten. Return true if "DONE", false otherwise. */
4653 bool
csky_split_ior(rtx * operands)4654 csky_split_ior (rtx *operands)
4655 {
4656 HOST_WIDE_INT mask = INTVAL (operands[2]);
4657 int i;
4658
4659 /* All zeros or all ones can be handled by a move instruction. */
4660 if (mask == 0)
4661 {
4662 emit_move_insn (operands[0], operands[1]);
4663 return true;
4664 }
4665 if (mask == -1)
4666 {
4667 emit_move_insn (operands[0], gen_int_mode (-1, SImode));
4668 return true;
4669 }
4670
4671 /* Check for constants that can be handled directly by the 32-bit ori
4672 instruction. */
4673 if (CSKY_ISA_FEATURE (E2) && csky_literal_I_operand (operands[2], SImode))
4674 return false;
4675
4676 /* If there are only one or two 1 bits in the value, we can replace
4677 the operation with bseti instructions to set those bits.
4678 Note CK801 has only the 16-bit bclri that operates on a single
4679 register, so we must count a move if we are post-reload. */
4680 if (popcount_hwi (mask & 0xffffffff)
4681 <= (reload_completed && !CSKY_ISA_FEATURE (E2) ? 1 : 2))
4682 {
4683 rtx input = operands[1];
4684
4685 if (!CSKY_ISA_FEATURE (E2))
4686 {
4687 emit_move_insn (operands[0], input);
4688 input = operands[0];
4689 }
4690
4691 for (i = 0; i < 32; i++)
4692 if (mask & (1 << i))
4693 {
4694 emit_insn (gen_bseti (operands[0], input, GEN_INT (i)));
4695 input = operands[0];
4696 }
4697 return true;
4698 }
4699
4700 /* If the above ways are all not working, move the constant
4701 to a register. We can clobber operands[0] as it is
4702 marked earlyclobber in the insn constraints, but then we have to
4703 swap operands 1 and 2 to match the constraints on the 2-operand
4704 16-bit ior instruction. */
4705 if (reload_completed)
4706 {
4707 emit_move_insn (operands[0], operands[2]);
4708 operands[2] = operands[1];
4709 operands[1] = operands[0];
4710 }
4711 else
4712 operands[2] = copy_to_mode_reg (SImode, operands[2]);
4713 return false;
4714 }
4715
4716
4717 /* Split operands for an XOR expression when OPERANDS[2] is a constant.
4718 Note operands[0] is marked earlyclobber in this case and can be
4719 overwritten. Return true if "DONE", false otherwise. */
4720 bool
csky_split_xor(rtx * operands)4721 csky_split_xor (rtx *operands)
4722 {
4723 HOST_WIDE_INT mask = INTVAL (operands[2]);
4724
4725 /* All zeros can be turned into move instruction. */
4726 if (mask == 0)
4727 {
4728 emit_move_insn (operands[0], operands[1]);
4729 return true;
4730 }
4731
4732 /* All ones can be turned into a bitwise not. */
4733 if (mask == -1)
4734 {
4735 if (CSKY_ISA_FEATURE (E2))
4736 emit_insn (gen_cskyv2_one_cmplsi2 (operands[0], operands[1]));
4737 else
4738 {
4739 emit_move_insn (operands[0], operands[1]);
4740 emit_insn (gen_ck801_one_cmplsi2 (operands[0], operands[0]));
4741 }
4742 return true;
4743 }
4744
4745 /* Check for constants that can be handled directly by the 32-bit xori
4746 instruction. */
4747 if (CSKY_ISA_FEATURE (E2) && csky_arith_O_operand (operands[2], SImode))
4748 return false;
4749
4750 /* If the above ways are all not working, move the constant
4751 to a register. We can clobber operands[0] as it is
4752 marked earlyclobber in the insn constraints, but then we have to
4753 swap operands 1 and 2 to match the constraints on the 2-operand
4754 16-bit ior instruction. */
4755 if (reload_completed)
4756 {
4757 emit_move_insn (operands[0], operands[2]);
4758 operands[2] = operands[1];
4759 operands[1] = operands[0];
4760 }
4761 else
4762 operands[2] = copy_to_mode_reg (SImode, operands[2]);
4763 return false;
4764 }
4765
4766
4767 /* Return true if X is an address form involving a symbol or label ref. */
4768 bool
csky_symbolic_address_p(rtx x)4769 csky_symbolic_address_p (rtx x)
4770 {
4771 switch (GET_CODE (x))
4772 {
4773 case SYMBOL_REF:
4774 case LABEL_REF:
4775 return 1;
4776 case CONST:
4777 x = XEXP (x, 0);
4778 return ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
4779 || GET_CODE (XEXP (x, 0)) == LABEL_REF)
4780 && GET_CODE (XEXP (x, 1)) == CONST_INT);
4781 default:
4782 return 0;
4783 }
4784 }
4785
4786
/* Emit a comparison instruction setting the CC register for the
   comparison (CODE OP0 OP1).  Since C-SKY has no direct comparisons
   for EQ/LE/GE/GTU/LTU, those are emitted as the inverted opposite
   comparison.  Return true if an inverted comparison is generated
   (i.e. the consumer must test the opposite condition).  */

bool
csky_emit_compare (enum rtx_code code, rtx op0, rtx op1)
{
  bool invert;
  rtx cc_reg = gen_rtx_REG (CCmode, CSKY_CC_REGNUM);

  /* First try to massage a constant OP1 into a form whose comparison
     maps onto an available instruction.  */
  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case GTU:
	  /* Unsigned (GTU 0) is the same as (NE 0); everything else is
	     converted below to LEU (reversed cmphs).  */
	  if (val == 0)
	    code = NE;
	  /* Check whether (GTU A imm) can become (GEU A imm + 1).  */
	  else if (TARGET_MINI_REGISTERS
		   ? CSKY_CONST_OK_FOR_J (val + 1)
		   : CSKY_CONST_OK_FOR_Uk (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = GEU;
	    }
	  break;
	/* Check whether (LE A imm) can become (LT A imm + 1),
	   or (GT A imm) can become (GE A imm + 1).  */
	case GT:
	case LE:
	  if (TARGET_MINI_REGISTERS
	      ? CSKY_CONST_OK_FOR_J (val + 1)
	      : CSKY_CONST_OK_FOR_Uk (val + 1))
	    {
	      op1 = GEN_INT (val + 1);
	      code = code == LE ? LT : GE;
	    }
	  break;

	default:
	  break;
	}
    }

  /* Non-integer constants (symbols, labels, ...) always go in a
     register.  */
  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (GET_MODE (op1), op1);

  /* cmpnei: 0-31 (K immediate)
     ti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    /* Use inverted condition, cmpne.  */
    case EQ:
      code = NE;
      invert = true;
      /* Fall through.  */
    /* Use normal condition, cmpne.  */
    case NE:
      if (GET_CODE (op1) == CONST_INT
	  && (TARGET_MINI_REGISTERS
	      ? !csky_literal_K_operand (op1, SImode)
	      : !csky_literal_I_operand (op1, SImode)))
	op1 = force_reg (SImode, op1);
      break;

    /* Use inverted condition, reversed cmplt.  */
    case LE:
      code = GT;
      invert = true;
      /* Fall through.  */
    /* Use normal condition, reversed cmplt.  */
    case GT:
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    /* Use inverted condition, cmplt.  */
    case GE:
      code = LT;
      invert = true;
      /* Fall through.  */
    /* Use normal condition, cmplt.  */
    case LT:
      /* covered by btsti x,31.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0
	  && (TARGET_MINI_REGISTERS
	      ? !csky_literal_J_operand (op1, SImode)
	      : !csky_literal_Uk_operand (op1, SImode)))
	op1 = force_reg (SImode, op1);
      break;

    /* Use inverted condition, cmple.  */
    case GTU:
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Fall through.  */
    /* Use normal condition, reversed cmphs.  */
    case LEU:
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    /* Use inverted condition, cmphs.  */
    case LTU:
      code = GEU;
      invert = true;
      /* Fall through.  */
    /* Use normal condition, cmphs.  */
    case GEU:
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0
	  && (TARGET_MINI_REGISTERS
	      ? !csky_literal_J_operand (op1, SImode)
	      : !csky_literal_Uk_operand (op1, SImode)))
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  /* Emit (set cc (code op0 op1)) with the possibly-adjusted CODE.  */
  emit_insn (gen_rtx_SET (cc_reg,
			  gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
4917
4918 /* Return true if push/pop can be used to save/restore all the registers
4919 indicated by MASK. We currently don't attempt to handle situations where
4920 some of the registers could be handled by push/pop and others saved and
4921 restored individually. */
4922
4923 static bool
csky_can_use_pushpop(unsigned int mask)4924 csky_can_use_pushpop (unsigned int mask)
4925 {
4926 int i;
4927 int end_reg;
4928
4929 if (!TARGET_PUSHPOP)
4930 return false;
4931
4932 if (mask == 0)
4933 return false;
4934
4935 /* Regs 0-3, 12-14, 18-27, 29-31 cannot be in the mask. */
4936 if (mask & 0xeffc700f)
4937 return false;
4938
4939 /* Regs in the range r4-r11 must be contiguous. */
4940 for (end_reg = 0, i = 11; i >= 4; i--)
4941 {
4942 if (!end_reg && (mask & (1 << i)))
4943 end_reg = i;
4944 if (end_reg && !(mask & (1 << i)))
4945 return false;
4946 }
4947
4948 /* Likewise for regs in the range r16-r17. */
4949 for (end_reg = 0, i = 17; i >= 16; i--)
4950 {
4951 if (!end_reg && (mask & (1 << i)))
4952 end_reg = i;
4953 if (end_reg && !(mask & (1 << i)))
4954 return false;
4955 }
4956
4957 return true;
4958 }
4959
4960
4961 /* Return true if store/load multiple instructions can be used to
4962 save/restore at least some of the registers indicated by MASK.
4963 Unlike the push/pop case, this does handle partial ranges.
4964 Set *BR and *ER to the beginning and end (respectively) of the
4965 register range that can be handled. */
4966
4967 static bool
csky_can_use_ldstm(int mask,int * br,int * er)4968 csky_can_use_ldstm (int mask, int *br, int *er)
4969 {
4970 int regno;
4971 int begin_reg = 0, end_reg = 0;
4972 int count = 0;
4973
4974 if (!TARGET_MULTIPLE_STLD)
4975 return false;
4976
4977 /* We'll only handle registers in the range 4-11, the contiguous range
4978 of caller-saved registers. Higher-numbered registers are handled
4979 individually in addition to this, but we'll give up on doing ldstm
4980 entirely if we need to save/restore the low-numbered EH registers. */
4981 if (mask & 0xf)
4982 return false;
4983
4984 for (regno = 4; regno <= 11; regno++)
4985 {
4986 if (mask & 1 << regno)
4987 {
4988 if (!begin_reg)
4989 begin_reg = regno;
4990 end_reg = regno;
4991 count++;
4992 }
4993 else if (begin_reg)
4994 break;
4995 }
4996
4997 if (count >= CSKY_MIN_MULTIPLE_STLD && count <= CSKY_MAX_MULTIPLE_STLD)
4998 {
4999 if (br)
5000 *br = begin_reg;
5001 if (er)
5002 *er = end_reg;
5003 return true;
5004 }
5005 return false;
5006 }
5007
5008
/* Return the assembler template for the current function's return:
   nothing for naked functions, ipop/nir for interrupt handlers,
   and a plain rts otherwise.  */

const char *
csky_output_return_instruction (void)
{
  unsigned long func_type = get_csky_current_func_type ();

  if (CSKY_FUNCTION_IS_NAKED (func_type))
    return "";

  return (CSKY_FUNCTION_IS_INTERRUPT (func_type)
	  ? "ipop\n\tnir\n"
	  : "rts\n");
}
5021
5022
/* Adjust the stack pointer by OFFSET bytes.  OFFSET is negative if this
   is in the prologue, positive if in the epilogue.  This may require
   multiple instructions and/or use of CSKY_STACKADJUST_REGNUM as
   a scratch register.  Emit CFA notes as appropriate.  */
static void
expand_csky_stack_adjust (int offset)
{
  rtx set;
  rtx_insn *insn;
  int size = (offset > 0 ? offset : -offset);

  if (offset == 0)
    return;

  /* If OFFSET is too large for addi/subi, load it into
     CSKY_STACKADJUST_REGNUM and use a register add/sub instead.
     This case is not mentioned in the ABI documentation, but it is
     supported by GDB prologue analysis provided that the instruction(s)
     to initialize CSKY_STACKADJUST_REGNUM appear directly before
     the sub.  Depending on the value of OFFSET, this might be a
     lrw instruction or the "tricks" used by csky_output_inline_const to
     encode special-case integer constants.  */
  if (size > CSKY_MAX_SP_ADJUST * 2)
    {
      rtx tmp, dwarf;

      /* We should have reserved the scratch register already in
	 csky_layout_stack_frame.  */
      gcc_assert (cfun->machine->reg_size != 0
		  && (cfun->machine->reg_mask
		      & (1 << CSKY_STACKADJUST_REGNUM)));

      /* Prevent the optimizer from reordering these instructions to
	 keep GDB happy.  */
      if (!flag_sched_prolog)
	emit_insn (gen_blockage ());

      tmp = gen_rtx_REG (SImode, CSKY_STACKADJUST_REGNUM);
      emit_move_insn (tmp, GEN_INT (size));

      if (offset > 0)
	set = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp);
      else
	set = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp);
      insn = emit_insn (set);
      RTX_FRAME_RELATED_P (insn) = 1;
      /* The register add/sub is not directly interpretable by the
	 unwinder; attach a note giving the net sp adjustment as a
	 constant instead.  */
      dwarf = gen_rtx_SET (stack_pointer_rtx,
			   plus_constant (Pmode, stack_pointer_rtx, offset));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);

      /* More make GDB happy.  */
      if (!flag_sched_prolog)
	emit_insn (gen_blockage ());
    }

  /* Use one or two addi or subi insns to adjust stack.  */
  else
    while (size)
      {
	int delta = (size > CSKY_MAX_SP_ADJUST
		     ? CSKY_MAX_SP_ADJUST : size);

	if (offset > 0)
	  set = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (delta));
	else
	  set = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx,
			    GEN_INT (delta));
	insn = emit_insn (set);
	RTX_FRAME_RELATED_P (insn) = 1;
	size -= delta;
      }
}
5096
5097
/* Generate and emit an insn that we will recognize as a push_multi,
   saving the registers in MASK (a bitmask over the GPRs) onto the
   stack.  Return the emitted insn.

   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */

static rtx
emit_csky_regs_push (unsigned long mask)
{
  int num_regs = 0;
  int i, j;
  rtx par;
  rtx dwarf;
  rtx tmp;
  int dwarf_par_index;

  /* Count the registers to be pushed.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    {
      if (mask & (1 << i))
	num_regs++;
    }

  /* The reg range for push is:r4-r11,r15-r17,r28.  */
  gcc_assert (num_regs && num_regs <= 12);

  /* For the body of the insn we are going to generate an UNSPEC in
     parallel with several USEs.  This allows the insn to be recognized
     by the push_multi pattern in the csky.md file.

     The body of the insn looks something like this:

       (parallel [
	   (set (mem:BLK (pre_modify:SI (reg:SI sp)
				        (const_int:SI <num>)))
	        (unspec:BLK [(reg:SI r4)] UNSPEC_PUSHPOP_MULT))
	   (use (reg:SI XX))
	   (use (reg:SI YY))
	   ...
	])

     For the frame note however, we try to be more explicit and actually
     show each register being stored into the stack frame, plus a (single)
     decrement of the stack pointer.  We do it this way in order to be
     friendly to the stack unwinding code, which only wants to see a single
     stack decrement per instruction.  The RTL we generate for the note looks
     something like this:

      (sequence [
	   (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
	   (set (mem:SI (reg:SI sp)) (reg:SI r4))
	   (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI XX))
	   (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI YY))
	   ...
	])

     FIXME:: In an ideal world the PRE_MODIFY would not exist and
	     instead we'd have a parallel expression detailing all
	     the stores to the various memory addresses so that debug
	     information is more up-to-date. Remember however while writing
	     this to take care of the constraints with the push instruction.

     Note also that this has to be taken care of for the VFP registers.

     For more see PR43399.  */

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_regs + 1));
  dwarf_par_index = 1;

  /* Build the first element of the PARALLEL (the SET with the
     PRE_MODIFY and the UNSPEC) from the lowest-numbered register in
     MASK, and the corresponding first store in the dwarf SEQUENCE.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs);
	tmp = gen_frame_mem (BLKmode,
			     gen_rtx_PRE_MODIFY (Pmode,
						 stack_pointer_rtx, addr));
	XVECEXP (par, 0, 0)
	  = gen_rtx_SET (tmp,
			 gen_rtx_UNSPEC (BLKmode,
					 gen_rtvec (1, reg),
					 UNSPEC_PUSHPOP_MULT));
	tmp = gen_rtx_SET (gen_frame_mem (SImode, stack_pointer_rtx),
			   reg);
	RTX_FRAME_RELATED_P (tmp) = 1;
	XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;

	break;
      }

  /* The remaining registers become USEs in the PARALLEL and explicit
     stores at increasing offsets in the dwarf SEQUENCE.  I continues
     from the loop above; J counts the registers already handled.  */
  for (j = 1, i++; j < num_regs; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, 4 * j);
	tmp = gen_rtx_SET (gen_frame_mem (SImode, addr), reg);
	RTX_FRAME_RELATED_P (tmp) = 1;
	XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
	XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
	j++;
      }

  par = emit_insn (par);

  /* Element 0 of the SEQUENCE is the single stack-pointer decrement
     the unwinder wants to see.  */
  tmp = gen_rtx_SET (stack_pointer_rtx,
		     plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);
  RTX_FRAME_RELATED_P (par) = 1;

  return par;
}
5213
5214
/* Generate and emit an insn pattern that we will recognize as a pop_multi.
   SAVED_REGS_MASK shows which registers need to be restored.

   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */

static void
emit_csky_regs_pop (unsigned long mask)
{
  int num_regs = 0;
  int i, j;
  rtx par;

  /* Count the registers to be popped.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      num_regs++;

  /* The reg range for push is:r4-r11,r15-r17,r28.  */
  gcc_assert (num_regs && num_regs <= 12);

  /* The first element is (return),
     the second element is
       (set (reg:SI 'first reg number')
	    (unspec:SI [(mem)] UNSPEC_PUSHPOP_MULT),
     the rest elements is (use (reg:SI 'rest reg number')),
     so the length should be number of register to be poped
     plus one.  */
  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs + 1));

  XVECEXP (par, 0, 0) = ret_rtx;

  /* Build element 1 from the lowest-numbered register in MASK: a load
     through a POST_MODIFY that bumps sp past the whole save area.  */
  for (i = 0; i < CSKY_NGPR_REGS; i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	rtx addr = plus_constant (Pmode, stack_pointer_rtx, 4 * num_regs);
	rtx tmp = gen_frame_mem (SImode,
				 gen_rtx_POST_MODIFY (Pmode,
						      stack_pointer_rtx, addr));
	XVECEXP (par, 0, 1)
	  = gen_rtx_SET (reg,
			 gen_rtx_UNSPEC (SImode,
					 gen_rtvec (1, tmp),
					 UNSPEC_PUSHPOP_MULT));
	break;
      }

  /* The remaining registers become USEs.  I continues from the loop
     above; J indexes the PARALLEL elements.  */
  for (j = 2, i++; j < (num_regs + 1); i++)
    if (mask & (1 << i))
      {
	rtx reg = gen_rtx_REG (SImode, i);
	XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
	j++;
      }

  /* The pattern contains a (return), so emit as a jump insn.  */
  par = emit_jump_insn (par);
}
5273
5274
/* Generate the function prologue: save callee-saved registers, set up
   the frame pointer if needed, allocate the local frame, and load the
   GOT base register for PIC.  Emits RTL (called from the prologue
   expander), with insns flagged RTX_FRAME_RELATED_P for DWARF CFI.  */

void
csky_expand_prologue (void)
{
  rtx_insn *insn;
  unsigned long func_type = get_csky_current_func_type ();
  unsigned int reg_mask;
  int reg_size;

  /* Naked functions get no compiler-generated prologue at all.  */
  if (CSKY_FUNCTION_IS_NAKED (func_type))
    {
      if (flag_stack_usage_info)
	current_function_static_stack_size = 0;
      return;
    }

  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;
  reg_size = cfun->machine->reg_size;

  /* Adjust stack pointer past argument overflow area.  */
  if (cfun->machine->arg_size != 0)
    {
      int offset = cfun->machine->arg_size;
      expand_csky_stack_adjust (- offset);

      /* If we have a parameter passed partially in regs and partially
	 in memory, the registers will have been stored to memory already
	 in function.c.  So we only need to copy varargs from registers
	 to stack.  */
      if (cfun->machine->uses_anonymous_args)
	{
	  /* Spill the anonymous-arg registers from the highest parameter
	     register downward, at decreasing stack offsets.  */
	  int rn = CSKY_FIRST_PARM_REGNUM + CSKY_NPARM_REGS - 1;
	  for (offset -= 4; offset >= 0; offset -= 4, rn--)
	    {
	      rtx dst = gen_frame_mem (SImode,
				       plus_constant (Pmode,
						      stack_pointer_rtx,
						      offset));
	      insn = emit_move_insn (dst, gen_rtx_REG (SImode, rn));
	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}
    }

  /* Push callee-saved registers to stack (the regs in reg_mask, which
     the matching epilogue restores).  */
  if (csky_can_use_pushpop (reg_mask))
    emit_csky_regs_push (reg_mask);
  else if (reg_size)
    {
      int sreg = -1, ereg = -1;
      /* stm covers the contiguous range sreg..ereg, if any.  */
      bool stm_p = csky_can_use_ldstm (reg_mask, &sreg, &ereg);
      int stm_regs = stm_p ? ereg - sreg + 1 : 0;
      int stm_size = stm_regs * 4;

      /* First adjust the SP to the low end of the register save area.  */
      expand_csky_stack_adjust (- reg_size);

      /* Emit individual register saves.  Even if we are going to emit an
	 stm, we may need to save individual registers above that too.  */
      if (reg_size > stm_size)
	{
	  int offset = reg_size - 4;
	  int regno = 31;
	  for ( ; regno > ereg; regno--)
	    if (reg_mask & (1 << regno))
	      {
		rtx dst = gen_rtx_MEM (SImode,
				       plus_constant (Pmode,
						      stack_pointer_rtx,
						      offset));
		rtx insn = emit_insn (gen_movsi (dst,
						 gen_rtx_REG (SImode, regno)));
		RTX_FRAME_RELATED_P (insn) = 1;
		/* Stop once we've reached the top of the stm block.  */
		if (offset == stm_size)
		  break;
		offset -= 4;
	      }
	}

      /* If possible, emit a stm to do a bulk store of sequential
	 registers to the stack.  Note that it is an error in the ABI
	 documentation that it doesn't list stm as a valid prologue
	 instruction.  */
      if (stm_p)
	{
	  rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (stm_regs));
	  int regno, slot;
	  for (regno = sreg, slot = 0; regno <= ereg; regno++, slot++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      rtx addr = plus_constant (Pmode, stack_pointer_rtx, slot * 4);
	      rtx set = gen_rtx_SET (gen_frame_mem (SImode, addr), reg);
	      RTX_FRAME_RELATED_P (set) = 1;
	      XVECEXP (par, 0, slot) = set;
	    }
	  insn = emit_insn (par);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  /* Initialize hard frame pointer, if necessary.  It points at the base
     of the register save area.  */
  if (frame_pointer_needed)
    {
      insn = emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* Reserve stack space for locals and outgoing args.  */
  expand_csky_stack_adjust (- cfun->machine->reg_offset);

  /* Put the GOT address in reg_gb for PIC, using R13 as a scratch.
     See section 4.7.1 in the ABI documentation,
     "Function Prologue for PIC".  */
  if (flag_pic && (reg_mask & (1 << PIC_OFFSET_TABLE_REGNUM)))
    {
      rtx l1 = gen_label_rtx ();
      rtx grs_label = gen_rtx_LABEL_REF (SImode, l1);
      rtx reg_gb = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
      rtx reg_temp = gen_rtx_REG (SImode, 13);

      rtx tmp0_unspec = gen_rtx_UNSPEC (Pmode,
					gen_rtvec (1, grs_label),
					UNSPEC_PIC_SYMBOL_GOTPC_GRS);
      rtx tmp1_unspec = gen_rtx_UNSPEC (Pmode,
					gen_rtvec (1, grs_label),
					UNSPEC_PIC_SYMBOL_GOTPC);

      /* grs gets the PC of the label; adding the label-to-GOT offset
	 yields the GOT base in reg_gb.  */
      emit_insn (gen_prologue_get_pc (tmp0_unspec));
      emit_move_insn (reg_temp, tmp1_unspec);
      emit_insn (gen_addsi3 (reg_gb, reg_gb, reg_temp));
    }

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun->machine->frame_size;

  /* Keep the scheduler from moving insns across the prologue.  */
  if (!flag_sched_prolog)
    emit_insn (gen_blockage ());
}
5416
/* Generate the function epilogue: undo the prologue's work — restore
   callee-saved registers, deallocate the frame, and return.  Emits RTL
   with REG_CFA_RESTORE notes so the unwinder tracks restores.  */

void
csky_expand_epilogue (void)
{
  unsigned long func_type = get_csky_current_func_type ();
  unsigned int reg_mask;
  int reg_size;
  int adjust;
  rtx_insn *insn;

  /* Keep the scheduler from moving insns across the epilogue.  */
  if (!flag_sched_prolog)
    emit_insn (gen_blockage ());

  /* Naked functions emit only a bare return.  */
  if (CSKY_FUNCTION_IS_NAKED (func_type))
    {
      emit_jump_insn (gen_simple_return ());
      return;
    }

  /* Get the frame information.  */
  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;
  reg_size = cfun->machine->reg_size;
  /* Final deallocation: register save area plus incoming-arg area.  */
  adjust = reg_size + cfun->machine->arg_size;

  /* Restore the SP to the base of the register save area.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    expand_csky_stack_adjust (cfun->machine->reg_offset);

  /* Restore the callee-saved registers.  The pop form cannot be used
     when there is still an argument area to pop, in interrupt handlers,
     or for EH returns, because it returns immediately.  */
  if (csky_can_use_pushpop (reg_mask)
      && cfun->machine->arg_size == 0
      && !CSKY_FUNCTION_IS_INTERRUPT (func_type)
      && !crtl->calls_eh_return)
    {
      /* Pop includes an implicit return, so we are done.  */
      emit_csky_regs_pop (reg_mask);
      return;
    }
  else if (reg_size)
    {
      int sreg = -1, ereg = -1;
      /* ldm covers the contiguous range sreg..ereg, if any.  */
      bool ldm_p = csky_can_use_ldstm (reg_mask, &sreg, &ereg);
      int ldm_regs = ldm_p ? ereg - sreg + 1 : 0;
      int ldm_size = ldm_regs * 4;

      /* Emit individual register loads.  Even if we are going to emit an
	 ldm, we may need to load individual registers above that too.  */
      if (reg_size > ldm_size)
	{
	  int offset = reg_size - 4;
	  int regno = 31;
	  for ( ; regno > ereg; regno--)
	    if (reg_mask & (1 << regno))
	      {
		rtx src = gen_frame_mem (SImode,
					 plus_constant (Pmode,
							stack_pointer_rtx,
							offset));
		rtx reg = gen_rtx_REG (SImode, regno);
		insn = emit_move_insn (reg, src);
		RTX_FRAME_RELATED_P (insn) = 1;
		add_reg_note (insn, REG_CFA_RESTORE, reg);
		/* Stop once we've reached the top of the ldm block.  */
		if (offset == ldm_size)
		  break;
		offset -= 4;
	      }
	}

      /* If possible, emit a ldm to do a bulk load of sequential
	 registers from the stack.  */
      if (ldm_p)
	{
	  rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (ldm_regs));
	  int regno, slot;
	  for (regno = sreg, slot = 0; regno <= ereg; regno++, slot++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      rtx addr = plus_constant (Pmode, stack_pointer_rtx, slot * 4);
	      rtx set = gen_rtx_SET (reg, gen_frame_mem (SImode, addr));
	      XVECEXP (par, 0, slot) = set;
	    }
	  insn = emit_insn (par);
	  RTX_FRAME_RELATED_P (insn) = 1;
	  /* Annotate every register restored by the ldm.  */
	  for (regno = sreg; regno <= ereg; regno++)
	    {
	      rtx reg = gen_rtx_REG (SImode, regno);
	      add_reg_note (insn, REG_CFA_RESTORE, reg);
	    }
	}
    }

  /* Emit the final stack pointer adjustment to deallocate the saved
     registers and incoming argument area.  */
  expand_csky_stack_adjust (adjust);

  /* Extra stack adjustment for exception handler return.  */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  /* Now we can return.  */
  emit_jump_insn (gen_simple_return ());
}
5525
5526
/* Output assembly prologue text: a comment identifying the function
   type, the ipush sequence for interrupt handlers, and an optional
   .stack_size directive.  The real prologue insns are emitted as RTL
   by csky_expand_prologue.  */

static void
csky_output_function_prologue (FILE *f)
{
  unsigned long func_type = get_csky_current_func_type ();

  switch ((int) CSKY_FUNCTION_TYPE (func_type))
    {
    default:
    case CSKY_FT_NORMAL:
      break;
    case CSKY_FT_INTERRUPT:
      {
	asm_fprintf (f, "\t# Interrupt Service Routine.\n");
	/* Disable interrupts and save the caller context.  */
	asm_fprintf (f, "\tnie\n\tipush\n");
	break;
      }
    case CSKY_FT_FIQ:
      asm_fprintf (f, "\t# Fast Interrupt Service Routine.\n");
      break;
    case CSKY_FT_EXCEPTION:
      asm_fprintf (f, "\t# CSKY Exception Handler.\n");
      break;
    case CSKY_FT_NAKED:
      /* Nothing further to emit for a naked function.  */
      asm_fprintf (f, "\t# Naked Function: prologue and epilogue \
provided by programmer.\n");
      return;
    }

  csky_layout_stack_frame ();

  /* Generate .stack_size function-name, size for callgraph;
     the default stack size is 0.  */
  if (TARGET_STACK_SIZE && cfun->machine->frame_size > 0)
    {
      gcc_assert (current_function_decl != NULL);
      const char *func_name =
	  IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl));
      /* A leading '*' means the name is already assembler-ready and
	 must be emitted without that marker.  */
      if (func_name[0] == '*')
	asm_fprintf (f, "\t.stack_size %s, %d\n",
		     &func_name[1], cfun->machine->frame_size);
      else
	asm_fprintf (f, "\t.stack_size %s, %d\n",
		     func_name, cfun->machine->frame_size);
    }
}
5572
5573
/* Output-epilogue hook counterpart of csky_output_function_prologue.
   Intentionally empty: the epilogue is emitted entirely as RTL by
   csky_expand_epilogue, so there is no extra text to print here.  */

static void
csky_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED)
{

}
5579
5580
/* Helper for csky_eh_return splitter: store the call frame exception
   handler address SOURCE in lr.  If lr was saved to the stack, write
   SOURCE into its save slot instead, using SCRATCH to build the
   address when the displacement is out of range.  */
void
csky_set_eh_return_address (rtx source, rtx scratch)
{
  HOST_WIDE_INT delta = 0;
  rtx basereg, addr;
  unsigned int reg_mask;

  csky_layout_stack_frame ();
  reg_mask = cfun->machine->reg_mask;

  if (reg_mask & (1 << CSKY_LR_REGNUM))
    {
      /* Find LR in the stack frame.  */
      int i = 0;

      if (frame_pointer_needed)
	{
	  /* With a frame pointer, it already points at the base of the
	     register save area.  */
	  basereg = frame_pointer_rtx;
	  delta = 0;
	}
      else
	{
	  basereg = stack_pointer_rtx;
	  delta = cfun->machine->reg_offset;
	}

      /* At this point, (basereg + delta) points at the low end of
	 the reg save area.  Regs are saved sequentially from low
	 to high from this address.  */
      for (i = 0; i < CSKY_LR_REGNUM; i++)
	if (reg_mask & (1 << i))
	  delta += 4;

      /* If the displacement exceeds what a load/store can encode
	 (16-bit forms on CK801, 32-bit forms otherwise), materialize
	 the full address in SCRATCH.  */
      if ((CSKY_TARGET_ARCH (CK801) && delta >= CSKY_LD16_MAX_OFFSET (Pmode))
	  || delta >= CSKY_LD32_MAX_OFFSET (Pmode))
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, basereg));
	  addr = scratch;
	}
      else
	addr = plus_constant (Pmode, basereg, delta);
      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
  else
    /* lr was not saved; set the register itself.  */
    emit_move_insn (gen_rtx_REG (Pmode, CSKY_LR_REGNUM), source);
}
5630
5631 /* Return TRUE if X references a SYMBOL_REF. */
5632
5633 bool
csky_symbol_mentioned_p(rtx x)5634 csky_symbol_mentioned_p (rtx x)
5635 {
5636 const char *fmt;
5637 int i;
5638
5639 if (GET_CODE (x) == SYMBOL_REF)
5640 return true;
5641
5642 fmt = GET_RTX_FORMAT (GET_CODE (x));
5643 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5644 {
5645 if (fmt[i] == 'E')
5646 {
5647 int j;
5648
5649 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5650 if (csky_symbol_mentioned_p (XVECEXP (x, i, j)))
5651 return true;
5652 }
5653 else if (fmt[i] == 'e' && csky_symbol_mentioned_p (XEXP (x, i)))
5654 return true;
5655 }
5656 return false;
5657 }
5658
5659
5660 /* Return TRUE if X references a LABEL_REF. */
5661
5662 bool
csky_label_mentioned_p(rtx x)5663 csky_label_mentioned_p (rtx x)
5664 {
5665 const char *fmt;
5666 int i;
5667
5668 if (GET_CODE (x) == LABEL_REF)
5669 return true;
5670
5671 fmt = GET_RTX_FORMAT (GET_CODE (x));
5672 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5673 {
5674 if (fmt[i] == 'E')
5675 {
5676 int j;
5677
5678 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5679 if (csky_label_mentioned_p (XVECEXP (x, i, j)))
5680 return true;
5681 }
5682 else if (fmt[i] == 'e' && csky_label_mentioned_p (XEXP (x, i)))
5683 return true;
5684 }
5685
5686 return false;
5687 }
5688
5689
5690 static bool
tls_unspec_mentioned_p(rtx x)5691 tls_unspec_mentioned_p (rtx x)
5692 {
5693 switch (GET_CODE (x))
5694 {
5695 case CONST:
5696 return tls_unspec_mentioned_p (XEXP (x, 0));
5697
5698 case UNSPEC:
5699 if (XINT (x, 1) == UNSPEC_TLS)
5700 return true;
5701
5702 /* Fall through. */
5703 default:
5704 return false;
5705 }
5706 }
5707
5708
5709 /* Implement LEGITIMATE_PIC_OPERAND_P. */
5710 bool
csky_legitimate_pic_operand_p(rtx x)5711 csky_legitimate_pic_operand_p (rtx x)
5712 {
5713 if (tls_unspec_mentioned_p (x))
5714 return true;
5715 if (csky_symbol_mentioned_p (x) || csky_label_mentioned_p (x))
5716 return false;
5717 return true;
5718 }
5719
5720 rtx
csky_legitimize_pic_address(rtx orig,rtx reg,bool gotrel_p)5721 csky_legitimize_pic_address (rtx orig, rtx reg, bool gotrel_p)
5722 {
5723 rtx pic_reg = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
5724 bool optimize_p = false;
5725
5726 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
5727 {
5728 rtx pic_ref, address, rtx_tmp;
5729 rtx insn;
5730 rtx pic_reg = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
5731 int subregs = 0;
5732
5733 if (reg == 0)
5734 {
5735 gcc_assert (can_create_pseudo_p ());
5736 reg = gen_reg_rtx (Pmode);
5737 subregs = 1;
5738 }
5739
5740 if (subregs)
5741 address = gen_reg_rtx (Pmode);
5742 else
5743 address = reg;
5744
5745 if (GET_CODE (orig) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (orig))
5746 {
5747 /* When gotrel_p generate sym@GOT, otherwise generate sym@PLT. */
5748 rtx_tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, orig),
5749 (gotrel_p
5750 ? UNSPEC_PIC_SYMBOL_GOT
5751 : UNSPEC_PIC_SYMBOL_PLT));
5752 optimize_p = gotrel_p;
5753 if (flag_pic != 1)
5754 {
5755 emit_move_insn (address, rtx_tmp);
5756 rtx_tmp = gen_rtx_MULT (Pmode, address, GEN_INT (1));
5757 }
5758 pic_ref = gen_const_mem (Pmode,
5759 gen_rtx_PLUS (Pmode, pic_reg, rtx_tmp));
5760 }
5761 else
5762 {
5763 /* bsr symbol */
5764 if (flag_pic == 1 && !gotrel_p)
5765 {
5766 pic_ref = gen_rtx_UNSPEC (Pmode,
5767 gen_rtvec (1, orig),
5768 UNSPEC_PIC_SYMBOL_BSR);
5769 return pic_ref;
5770 }
5771 /* grs rx, symbol */
5772 else if (flag_pic == 1 && (GET_CODE (orig) == SYMBOL_REF)
5773 && SYMBOL_REF_FUNCTION_P (orig))
5774 {
5775 pic_ref = gen_rtx_UNSPEC (Pmode,
5776 gen_rtvec (1, orig),
5777 UNSPEC_PIC_SYMBOL_GRS);
5778 return pic_ref;
5779 }
5780 /* lrw rx, symbol@GOTOFF; add rx, rx, gb */
5781 else
5782 {
5783 rtx_tmp = gen_rtx_UNSPEC (Pmode,
5784 gen_rtvec (1, orig),
5785 UNSPEC_PIC_SYMBOL_GOTOFF);
5786 emit_move_insn (address, rtx_tmp);
5787 pic_ref = gen_rtx_PLUS (Pmode, address, pic_reg);
5788 optimize_p = true;
5789 }
5790 }
5791
5792 insn = emit_move_insn (reg, pic_ref);
5793 /* Put a REG_EQUAL note on this insn,
5794 so that it can be optimized by loop. */
5795 if (optimize_p)
5796 set_unique_reg_note (insn, REG_EQUAL, orig);
5797
5798 return reg;
5799 }
5800 else if (GET_CODE (orig) == CONST)
5801 {
5802 rtx base, offset;
5803
5804 if (GET_CODE (XEXP (orig, 0)) == PLUS
5805 && XEXP (XEXP (orig, 0), 1) == pic_reg)
5806 return orig;
5807
5808 if (reg == 0)
5809 {
5810 gcc_assert (can_create_pseudo_p ());
5811 reg = gen_reg_rtx (Pmode);
5812 }
5813
5814 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
5815
5816 base = csky_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
5817 reg, gotrel_p);
5818 offset = csky_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
5819 base == reg ? 0 : reg, gotrel_p);
5820
5821 if (GET_CODE (offset) == CONST_INT)
5822 return plus_constant (Pmode, base, INTVAL (offset));
5823
5824 return gen_rtx_PLUS (Pmode, base, offset);
5825 }
5826
5827 return orig;
5828 }
5829
5830
5831 /* Functions to output assembly code for a function call. */
5832
5833 char *
csky_output_call(rtx * operands,int index)5834 csky_output_call (rtx *operands, int index)
5835 {
5836 static char buffer[20];
5837 rtx addr = operands[index];
5838
5839 if (REG_P (addr))
5840 sprintf (buffer, "jsr\t%%%d", index);
5841 else if (flag_pic && (GET_CODE (addr) == UNSPEC))
5842 sprintf (buffer, "bsr\t%%%d", index);
5843 else
5844 sprintf (buffer, "jbsr\t%%%d", index);
5845
5846 return buffer;
5847 }
5848
5849
/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
   Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.
   Note that STATIC_CHAIN_REGNUM is t1 (aka r12) on ck801 and
   t1 (r13) otherwise.  */

static void
csky_asm_trampoline_template (FILE *f)
{
  if (CSKY_ISA_FEATURE (2E3))
    {
      /* Load the static chain and jump to the real function via the
	 inline constant pool filled in by csky_trampoline_init.  */
      fprintf (f, "\tlrw\t%s, [.Lstatic_chain]\n",
	       reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\tjmpi\t[.Lfunc_address]\n");
      /* 2 32-bit insns = 8 bytes.  */
    }
  else if (CSKY_TARGET_ARCH (CK801))
    {
      /* It's hard to provide general support for trampolines on this
	 core.  We need a register other than the one holding the
	 static chain (r13) to hold the function pointer for the
	 indirect jump to it.  But ck801 has such a limited register set
	 there is no other call-clobbered scratch register available -- in
	 particular, this core does not have r12, which we use for the
	 ck802 case below.  If we use a callee-saved register like r4,
	 saving the old value on the stack screws up the stack frame
	 if there are overflow arguments pushed on the stack
	 by the caller.  In theory we could test for that and handle
	 limited cases with parameters that all fit in r0-r3 with no
	 stack overflow, but punt for now.  */
      sorry ("Nested function trampolines not supported on CK801.");
    }
  else
    {
      /* No jmpi: load the target into t1, then register-indirect jump.  */
      fprintf (f, "\tlrw\t%s, [.Lfunc_address]\n",
	       reg_names[CSKY_T1_REGNUM]);
      fprintf (f, "\tlrw\t%s, [.Lstatic_chain]\n",
	       reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\tjmp\t%s\n",
	       reg_names[CSKY_T1_REGNUM]);
      /* To align constant pool on a word boundary.  */
      fprintf (f, "\t.align 2\n");
      /* 2 32-bit lrw insns + 16-bit jump + 16-bit pad = 12 bytes.  */
    }

  /* The two placeholder words are overwritten per-instance by
     csky_trampoline_init.  */
  fprintf (f, ".Lstatic_chain:\n");
  fprintf (f, "\t.long 0\n");
  fprintf (f, ".Lfunc_address:\n");
  fprintf (f, "\t.long 0\n");
  /* 2 words of constant pool = 8 bytes.  */
}
5901
/* Worker function for TARGET_TRAMPOLINE_INIT.  Copy the template into
   M_TRAMP, then fill in the two variable words (static chain and
   target address) and flush the instruction cache.  */

static void
csky_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem, a_tramp;
  /* The constant pool is the last 8 bytes of the trampoline: the
     static chain word followed by the function address word.  */
  int pool = TRAMPOLINE_SIZE - 8;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* Overwrite the placeholder words from the template.  */
  mem = adjust_address (m_tramp, SImode, pool);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, pool + 4);
  emit_move_insn (mem, fnaddr);

  a_tramp = XEXP (m_tramp, 0);
  /* The trampoline contains freshly-written instructions, so the
     icache must be invalidated over its extent.  */
  maybe_emit_call_builtin___clear_cache (a_tramp,
					 plus_constant (Pmode,
							a_tramp,
							TRAMPOLINE_SIZE));
}
5925
5926
/* Emit a comparison insn for float values, mapping CODE onto a
   comparison the FPU supports and setting the CC register.
   Return true if the comparison sense is inverted, i.e. the caller
   must branch on the opposite condition.  */

bool
csky_emit_compare_float (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CSKY_CC_REGNUM);
  bool invert;
  machine_mode mode = GET_MODE (op1);

  /* After this, OP1 is either a register or literal zero.  */
  if (op1 != CONST0_RTX (mode))
    op1 = force_reg (mode, op1);

  invert = false;
  switch (code)
    {
    case EQ:
      /* EQ is done as inverted NE.  */
      code = NE;
      invert = true;
      break;

    case NE:
      break;
    case LE:
      /* NOTE(review): LE/GT against literal zero force the zero into a
	 register — presumably no compare-with-zero form exists for
	 these codes; confirm against the FPU patterns.  */
      if (op1 == CONST0_RTX (mode))
	op1 = force_reg (mode, op1);
      break;
    case GT:
      if (op1 == CONST0_RTX (mode))
	op1 = force_reg (mode, op1);
      break;
    case GE:
      break;
    case LT:
      /* LT against zero is done as inverted GE against zero.  */
      if (op1 == CONST0_RTX (mode))
	{
	  code = GE;
	  invert = true;
	}
      break;
    case UNORDERED:
      break;
    case ORDERED:
      /* ORDERED is the inverse of UNORDERED.  */
      code = UNORDERED;
      invert = true;
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return invert;
}
5982
5983 /* Support for the Q memory constraint. Returns true if OP is a MEM RTX
5984 with an address consisting of base + index or base + displacement. */
5985 bool
csky_valid_fpuv2_mem_operand(rtx op)5986 csky_valid_fpuv2_mem_operand (rtx op)
5987 {
5988 struct csky_address addr;
5989
5990 if (GET_CODE (op) != MEM)
5991 return false;
5992
5993 if (!decompose_csky_address (XEXP (op, 0), &addr))
5994 return false;
5995
5996 /* Verify base register. */
5997 if (!is_csky_address_register_rtx_p (addr.base, 0))
5998 return false;
5999
6000 /* Verify index operand. */
6001 if (addr.index)
6002 {
6003 if (!is_csky_address_register_rtx_p (addr.index, 0))
6004 return false;
6005
6006 if (addr.scale == 1 || addr.scale == 2 || addr.scale == 4
6007 || addr.scale == 8)
6008 return true;
6009
6010 return false;
6011 }
6012 /* Verify disp operand. */
6013 else if (addr.disp)
6014 {
6015 rtx disp = addr.disp;
6016
6017 if (!CONST_INT_P (disp))
6018 return false;
6019
6020 if (((unsigned) INTVAL (disp) % 4) == 0
6021 && (unsigned) INTVAL (disp) <= (unsigned) 1020)
6022 return true;
6023
6024 return false;
6025 }
6026 return true;
6027 }
6028
6029
6030 /* Returns the (interrupt) function type of the current
6031 function, or CSKY_FT_UNKNOWN if the type cannot be determined. */
6032
6033 static unsigned long
csky_isr_value(tree argument)6034 csky_isr_value (tree argument)
6035 {
6036 const isr_attribute_entry *ptr;
6037 const char *arg;
6038
6039 /* No argument - default to IRQ. */
6040 if (argument == NULL_TREE)
6041 return CSKY_FT_ISR;
6042
6043 /* Get the value of the argument. */
6044 if (TREE_VALUE (argument) == NULL_TREE
6045 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
6046 return CSKY_FT_UNKNOWN;
6047
6048 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
6049
6050 /* Check it against the list of known arguments. */
6051 for (ptr = isr_attribute_map; ptr->arg != NULL; ptr++)
6052 if (strcmp (arg, ptr->arg) == 0)
6053 return ptr->return_value;
6054
6055 /* An unrecognized interrupt type. */
6056 return CSKY_FT_UNKNOWN;
6057 }
6058
6059 /* Handle an attribute requiring a FUNCTION_DECL;
6060 arguments as in struct attribute_spec.handler. */
6061
6062 static tree
csky_handle_fndecl_attribute(tree * node,tree name,tree args ATTRIBUTE_UNUSED,int flags ATTRIBUTE_UNUSED,bool * no_add_attrs)6063 csky_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
6064 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6065 {
6066 if (TREE_CODE (*node) != FUNCTION_DECL)
6067 {
6068 warning (OPT_Wattributes, "%qE attribute only applies to functions",
6069 name);
6070 *no_add_attrs = true;
6071 }
6072
6073 return NULL_TREE;
6074 }
6075
/* Handle an "interrupt" or "isr" attribute;
   arguments as in struct attribute_spec.handler.  The attribute may be
   attached to a FUNCTION_DECL, a function/method type, or (via type
   copying) a pointer-to-function type.  */

static tree
csky_handle_isr_attribute (tree *node, tree name, tree args, int flags,
			   bool *no_add_attrs)
{

  /* Interrupt handlers require the alternate interrupt stack,
     so the attribute is meaningless without -mistack.  */
  if (!TARGET_ISTACK)
    {
      warning (OPT_Wattributes, "%qE attribute ignored without %<-mistack%>",
	       name);
      *no_add_attrs = true;
      return NULL_TREE;
    }

  if (DECL_P (*node))
    {
      /* On a declaration, only functions may carry the attribute.  */
      if (TREE_CODE (*node) != FUNCTION_DECL)
	{
	  warning (OPT_Wattributes, "%qE attribute only applies to functions",
		   name);
	  *no_add_attrs = true;
	}
    }
  else
    {
      if (TREE_CODE (*node) == FUNCTION_TYPE
	  || TREE_CODE (*node) == METHOD_TYPE)
	{
	  /* Directly on a function type: just validate the argument.  */
	  if (csky_isr_value (args) == CSKY_FT_UNKNOWN)
	    {
	      warning (OPT_Wattributes, "%qE attribute ignored", name);
	      *no_add_attrs = true;
	    }
	}
      else if (TREE_CODE (*node) == POINTER_TYPE
	       && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
		   || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
	       && csky_isr_value (args) != CSKY_FT_UNKNOWN)
	{
	  /* Pointer to function: push the attribute down onto a copy of
	     the pointed-to function type instead of the pointer type.  */
	  *node = build_variant_type_copy (*node);
	  TREE_TYPE (*node) = build_type_attribute_variant (TREE_TYPE (*node),
	      tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
	  *no_add_attrs = true;
	}
      else if (flags & ((int)ATTR_FLAG_DECL_NEXT
			| (int)ATTR_FLAG_FUNCTION_NEXT
			| (int)ATTR_FLAG_ARRAY_NEXT))
	{
	  /* A more appropriate node will be seen later in the declarator
	     chain; defer by handing the attribute back to the caller.  */
	  *no_add_attrs = true;
	  return tree_cons (name, args, NULL_TREE);
	}
      else
	warning (OPT_Wattributes, "%qE attribute ignored", name);
    }
  return NULL_TREE;
}
6134
6135
6136 /* Implement TARGET_REGISTER_MOVE_COST: compute extra cost of moving data
6137 between one register class and another. */
6138
6139 int
csky_register_move_cost(machine_mode mode ATTRIBUTE_UNUSED,reg_class_t from,reg_class_t to)6140 csky_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
6141 reg_class_t from, reg_class_t to)
6142 {
6143 #define GR_REG_CLASS_P(CLASS) \
6144 ((CLASS) == GENERAL_REGS || (CLASS) == MINI_REGS || (CLASS) == SP_REGS \
6145 || (CLASS) == LOW_REGS)
6146
6147 #define HILO_REG_CLASS_P(CLASS) \
6148 ((CLASS) == HI_REGS || (CLASS) == LO_REGS || (CLASS) == HILO_REGS)
6149
6150 #define V_REG_CLASS_P(CLASS) \
6151 ((CLASS) == V_REGS)
6152
6153 if (V_REG_CLASS_P (from) && V_REG_CLASS_P (to))
6154 return 2;
6155
6156 if ((V_REG_CLASS_P (from) && GR_REG_CLASS_P (to))
6157 || (GR_REG_CLASS_P (from) && V_REG_CLASS_P (to)))
6158 return 6;
6159
6160 if ((HILO_REG_CLASS_P (from) && GR_REG_CLASS_P (to))
6161 || (GR_REG_CLASS_P (from) && HILO_REG_CLASS_P (to)))
6162 return 16;
6163
6164 if (HILO_REG_CLASS_P (from) && HILO_REG_CLASS_P (to))
6165 return 32;
6166
6167 if ((HILO_REG_CLASS_P (from) && V_REG_CLASS_P (to))
6168 || (V_REG_CLASS_P (from) && HILO_REG_CLASS_P (to)))
6169 return 64;
6170
6171 return 2;
6172 }
6173
6174
6175 /* Implement TARGET_MEMORY_MOVE_COST: compute the cost of moving data
6176 between registers and memory. */
6177
6178 int
csky_memory_move_cost(machine_mode mode,reg_class_t rclass,bool in)6179 csky_memory_move_cost (machine_mode mode, reg_class_t rclass,
6180 bool in)
6181 {
6182 return (4 + memory_move_secondary_cost (mode, rclass, in));
6183 }
6184
6185
/* TARGET_RTX_COSTS helper for ck801/ck802.  Sets *TOTAL for code X;
   returns true when *TOTAL is final, false when the caller should
   also cost the sub-expressions.  */

static bool
ck802_ck801_rtx_costs (rtx x, int code, int outer_code, int *total,
		       bool speed)
{
  machine_mode mode = GET_MODE (x);
  switch (code)
    {
    /* Accessing memory costs quite a lot for first word;  */
    case MEM:
      *total = COSTS_N_INSNS (1 + CSKY_NUM_REGS (mode));
      return false;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Division is very expensive; discourage it heavily.  */
      *total = 100;
      return true;

    case ROTATE:
    case ROTATERT:
    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (speed)
	*total = 2;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MINUS:
    case PLUS:
      *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
      return false;

    case AND:
      {
	enum rtx_code subcode = GET_CODE (XEXP (x, 1));

	/* If subcode is "not", we'll try to combine it into e.g. "andn"
	   instruction, so give AND itself zero cost.  */
	if (subcode == NOT)
	  {
	    *total = 0;
	    return false;
	  }
      }
      /* Fall through.  */
    case XOR:
    case IOR:
      *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
      return false;

    case MULT:
      /* FIXME: is ixw supported on ck801/ck802?  */
      /* We can use "ix.h/w" insn to replace multiply by 2 or 4.
	 "ix.h/w" is a 32-bit insn, so let its cost be a little less than
	 "mult" insn.  */
      if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	{
	  unsigned HOST_WIDE_INT m
	    = (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)));
	  if ((m == 2 || m == 4) && outer_code == PLUS)
	    {
	      *total = 2;
	      return true;
	    }
	  else
	    {
	      /* Because mult is relatively slower than other operations,
		 we try to use other insns when optimizing for speed.
		 When optimizing for size, give it lower cost.  */
	      if (speed)
		{
		  *total = COSTS_N_INSNS (10 * CSKY_NUM_REGS (mode));
		  return true;
		}
	      /* For -Os, approximate the shift/add expansion length by
		 counting base-4 digits of the multiplier.  */
	      int cycle = 0;
	      while (m)
		{
		  m >>= 2;
		  cycle++;
		}
	      *total = COSTS_N_INSNS (1) + cycle;
	      return false;
	    }
	}
      if (!speed)
	*total = COSTS_N_INSNS (1);
      return false;

    case NEG:
      /* Usually, we use subtract from 0 to substitute for neg, and
	 it costs 1 extra insn to move 0 to a register.  */
      *total = COSTS_N_INSNS (2 * CSKY_NUM_REGS (mode));
      return false;

    case NOT:
      *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
      return false;

    case COMPARE:
      *total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
      return false;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* A byte-aligned 8-bit extract maps onto a single insn.  */
      if (REG_P (XEXP (x, 0))
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && INTVAL (XEXP (x, 1)) == 8
	  && INTVAL (XEXP (x, 2)) % 8 == 0)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      *total = COSTS_N_INSNS (CSKY_NUM_REGS (mode));
      return false;

    case CONST_INT:
      {
	/* The cost of a constant depends on the context it appears in
	   (OUTER_CODE) and on its magnitude, which determines whether a
	   16-bit or 32-bit insn (or a separate load) is needed.  */
	unsigned HOST_WIDE_INT t = (unsigned HOST_WIDE_INT) (INTVAL (x));

	if (outer_code == COMPARE)
	  {
	    if (t < 0x10000)
	      *total = 0;
	    else
	      *total = COSTS_N_INSNS (2);
	  }
	else if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	  {
	    /* "andi,xori,ori" are 32-bit insns, so let it cost a
	       little more.  */
	    if (t < 0x1000)
	      {
		/* Try replacing "andi" by "sextb/h", so let it cost more.  */
		if (outer_code == AND && (t == 0xff || t == 0xffff))
		  {
		    *total = 8;
		    return true;
		  }
		*total = 2;
	      }
	    else if (t < 0x10000)
	      *total = COSTS_N_INSNS (1);
	    else
	      *total = COSTS_N_INSNS (2);
	  }
	else if (outer_code == PLUS || outer_code == MINUS)
	  {
	    /* "addi/subi rx,ry,imm", if imm<9, it is more often a
	       16-bit insn.  If imm>=9, use "movi" insn; it's probably
	       less than "addi/subi".  */
	    if (t < 9)
	      *total = 0;
	    else if (t < 0x1000)
	      *total = 2;
	    else if (t < 0x10000)
	      *total = COSTS_N_INSNS (1);
	    else
	      *total = COSTS_N_INSNS (2);
	  }
	else if (outer_code == ROTATE || outer_code == ROTATERT
		 || outer_code == LSHIFTRT || outer_code == ASHIFTRT
		 || outer_code == ASHIFT)
	  {
	    /* In-range shift counts are folded into the insn.  */
	    if (t < 32)
	      *total = 0;
	    else
	      *total = COSTS_N_INSNS (2);
	  }
	else
	  {
	    /* NOTE(review): the inner if/else below is complete, so the
	       final "else" pairs with the outer "if (t < 0x10000)" —
	       the code parses as intended despite the missing braces.  */
	    if (t < 0x10000)
	      if (outer_code == SET && t < 256)
		*total = 0;
	      else
		*total = COSTS_N_INSNS (1);
	    else
	      *total = COSTS_N_INSNS (2);
	  }
      }
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* Symbolic constants require a constant-pool load.  */
      *total = COSTS_N_INSNS (3);
      return true;
    default:
      return false;
    }
}
6386
6387
/* TARGET_RTX_COSTS helper for ck803.  Sets *TOTAL for code X;
   returns true when *TOTAL is final, false when the caller should
   also cost the sub-expressions.  */

static bool
ck803_rtx_costs (rtx x, int code, int outer_code ATTRIBUTE_UNUSED,
		 int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case SET:
      /* Loads: an indexed address costs extra; a plain valid address
	 is one insn.  */
      if (MEM_P (XEXP (x, 1)))
	{
	  struct csky_address op1;
	  bool address_valid
	    = decompose_csky_address (XEXP (XEXP (x, 1), 0), &op1);
	  if (op1.index)
	    {
	      *total = COSTS_N_INSNS (3);
	      return true;
	    }
	  else if (address_valid)
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      /* reg = reg + reg is a single add.  */
      if (REG_P (XEXP (x, 0)) && (GET_CODE (XEXP (x, 1)) == PLUS))
	{
	  rtx sub_exp = XEXP (x, 1);
	  if (REG_P (XEXP (sub_exp, 0)) && REG_P (XEXP (sub_exp, 1)))
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      return false;
    case MULT:
      /* Multiply by a positive even 32-bit constant is cheap
	 (NOTE(review): the even test suggests a shift-based expansion
	 is expected — confirm against the md patterns).  */
      if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
	  if (val % 2 == 0 && val < 0xffffffff && val > 0)
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      return false;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* Symbolic constants require a constant-pool load.  */
      *total = COSTS_N_INSNS (3);
      return true;
    default:
      return false;
    }
}
6444
6445 /* TARGET_RTX_COSTS helper for ck807+ arches. */
6446
6447 static bool
ck807_ck810_rtx_costs(rtx x,int code,int outer_code ATTRIBUTE_UNUSED,int * total,bool speed ATTRIBUTE_UNUSED)6448 ck807_ck810_rtx_costs (rtx x, int code,
6449 int outer_code ATTRIBUTE_UNUSED,
6450 int *total, bool speed ATTRIBUTE_UNUSED)
6451 {
6452 switch (code)
6453 {
6454 case MULT:
6455 if (REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
6456 {
6457 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
6458 if (val % 2 == 0 && val < 0xffffffff && val > 0)
6459 {
6460 *total = COSTS_N_INSNS (1);
6461 return true;
6462 }
6463 }
6464 return false;
6465
6466 case CONST:
6467 case LABEL_REF:
6468 case SYMBOL_REF:
6469 *total = COSTS_N_INSNS (3);
6470 return true;
6471 default:
6472 return false;
6473 }
6474 }
6475
6476
6477 /* Implement TARGET_RTX_COSTS, to compute a (partial) cost for rtx X.
6478 Return true if the complete cost has been computed, and false if
6479 subexpressions should be scanned. In either case, *TOTAL contains
6480 the cost result. */
6481
6482 static bool
csky_rtx_costs(rtx x,machine_mode mode ATTRIBUTE_UNUSED,int outer_code,int opno ATTRIBUTE_UNUSED,int * total,bool speed)6483 csky_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
6484 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
6485 {
6486 int code = GET_CODE (x);
6487
6488 if (CSKY_TARGET_ARCH (CK802) || CSKY_TARGET_ARCH (CK801))
6489 return ck802_ck801_rtx_costs (x, code, outer_code, total, speed);
6490 else if (CSKY_TARGET_ARCH (CK803))
6491 return ck803_rtx_costs (x, code, outer_code, total, speed);
6492 else if (CSKY_TARGET_ARCH (CK807) || CSKY_TARGET_ARCH (CK810))
6493 return ck807_ck810_rtx_costs (x, code, outer_code, total, speed);
6494 else
6495 gcc_unreachable ();
6496 }
6497
6498 /* Emit assembly code for CASESI. This is only used on CK801 and CK802
6499 when optimizing for size, and uses helper functions in libgcc instead
6500 of doing the control transfer inline. */
6501
6502 const char *
csky_output_casesi(rtx * operands)6503 csky_output_casesi (rtx *operands)
6504 {
6505 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[0])));
6506
6507 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
6508
6509 switch (GET_MODE (diff_vec))
6510 {
6511 case E_QImode:
6512 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned
6513 ? "jbsr\t___gnu_csky_case_uqi"
6514 : "jbsr\t___gnu_csky_case_sqi");
6515 case E_HImode:
6516 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned
6517 ? "jbsr\t___gnu_csky_case_uhi"
6518 : "jbsr\t___gnu_csky_case_shi");
6519 case E_SImode:
6520 return "jbsr\t___gnu_csky_case_si";
6521 default:
6522 gcc_unreachable ();
6523 }
6524 }
6525
6526 /* Implement TARGET_SCHED_ISSUE_RATE. Lookup the issue rate in the
6527 per-core tuning structs. */
6528 static int
csky_sched_issue_rate(void)6529 csky_sched_issue_rate (void)
6530 {
6531 if (CSKY_TARGET_ARCH (CK810))
6532 return 2;
6533 else
6534 return 1;
6535 }
6536
6537
6538 /* This function implements the target macro TARGET_SCHED_ADJUST_COST.
6539 It corrects the value of COST based on the relationship between
6540 INSN and DEP through the dependence DEP_TYPE. It returns the new
6541 value. */
6542
6543 static int
csky_sched_adjust_cost(rtx_insn * insn,int dep_type,rtx_insn * dep,int cost,unsigned int dw ATTRIBUTE_UNUSED)6544 csky_sched_adjust_cost (rtx_insn *insn,
6545 int dep_type,
6546 rtx_insn *dep,
6547 int cost,
6548 unsigned int dw ATTRIBUTE_UNUSED)
6549 {
6550 if (dep_type == REG_DEP_ANTI || dep_type == REG_DEP_OUTPUT)
6551 return 0;
6552 /* The REG_DEP_TRUE situation. */
6553 else if (recog_memoized (insn) >= 0 && recog_memoized (dep) >= 0)
6554 {
6555 enum attr_type insn_type = get_attr_type (insn);
6556 if (CSKY_TARGET_ARCH (CK803))
6557 {
6558 /* The ld or st's base reg depends on the pre insn,
6559 it will delay 1 cycle. */
6560 if (insn_type == TYPE_LOAD || insn_type == TYPE_STORE)
6561 {
6562 rtx pattern = PATTERN (insn);
6563
6564 gcc_assert (GET_CODE (pattern) == SET);
6565 rtx addr = (insn_type == TYPE_LOAD
6566 ? SET_SRC (pattern) : SET_DEST (pattern));
6567
6568 enum rtx_code code = GET_CODE (addr);
6569 if (code == ZERO_EXTEND || code == SIGN_EXTEND)
6570 addr = XEXP (addr, 0);
6571 gcc_assert (GET_CODE (addr) == MEM);
6572
6573 rtx base = XEXP (addr, 0);
6574 rtx reg = NULL_RTX;
6575 if (REG_P (base))
6576 reg = base;
6577 if (GET_CODE (base) == PLUS
6578 && GET_CODE (XEXP (base, 0)) == REG)
6579 reg = XEXP (base, 0);
6580 if ((reg != NULL_RTX) && reg_set_p (reg, PATTERN (dep)))
6581 return 2;
6582 }
6583 }
6584 else if (CSKY_TARGET_ARCH (CK802))
6585 {
6586 if ((insn_type == TYPE_CALL_JSR || insn_type == TYPE_BRANCH_JMP)
6587 && get_attr_type (dep) != TYPE_LOAD)
6588 return 1;
6589
6590 if (insn_type == TYPE_LOAD || insn_type == TYPE_STORE)
6591 {
6592 rtx pattern = PATTERN (insn);
6593
6594 gcc_assert (GET_CODE (pattern) == SET);
6595
6596 rtx addr = (insn_type == TYPE_LOAD
6597 ? SET_SRC (pattern) : SET_DEST (pattern));
6598
6599 enum rtx_code code = GET_CODE (addr);
6600 if (code == ZERO_EXTEND || code == SIGN_EXTEND)
6601 addr = XEXP (addr, 0);
6602 gcc_assert (GET_CODE (addr) == MEM);
6603
6604 rtx base = XEXP (addr, 0);
6605 rtx reg = NULL_RTX;
6606 if (REG_P (base))
6607 reg = base;
6608 if (GET_CODE (base) == PLUS
6609 && GET_CODE (XEXP (base, 0)) == REG)
6610 reg = XEXP (base, 0);
6611 if ((reg != NULL_RTX) && reg_set_p (reg, PATTERN (dep))
6612 && get_attr_type (dep) != TYPE_LOAD)
6613 return 1;
6614
6615 if (insn_type == TYPE_STORE
6616 && reg_referenced_p (SET_SRC (pattern), PATTERN (dep)))
6617 return 1;
6618 }
6619 }
6620 }
6621 return cost;
6622 }
6623
6624 static bool
csky_warn_func_return(tree decl)6625 csky_warn_func_return (tree decl)
6626 {
6627 /* Naked functions are implemented entirely in assembly, including the
6628 return sequence, so suppress warnings about this. */
6629 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
6630 }
6631
6632
6633 /* Implement TARGET_RETURN_IN_MEMORY to decide whether TYPE should be
6634 returned in memory (true) or in a register (false).
6635 FNTYPE is the type of the function making the call. */
6636 static bool
csky_return_in_memory(const_tree type,const_tree fntype ATTRIBUTE_UNUSED)6637 csky_return_in_memory (const_tree type,
6638 const_tree fntype ATTRIBUTE_UNUSED)
6639 {
6640 const HOST_WIDE_INT size = int_size_in_bytes (type);
6641 return (size == -1 || size > 2 * UNITS_PER_WORD);
6642 }
6643
6644
6645 /* Implement TARGET_DWARF_REGISTER_SPAN.
6646 Dwarf models VFP registers as 64-bit or 128-bit registers default.
6647 GCC models tham as 32-bit registers, so we need to describe this to
6648 the DWARF generation code. Other registers can use the default. */
6649 static rtx
csky_dwarf_register_span(rtx rtl)6650 csky_dwarf_register_span (rtx rtl)
6651 {
6652 machine_mode mode;
6653 unsigned regno;
6654 rtx parts[16];
6655 int nregs;
6656 int i;
6657
6658 regno = REGNO (rtl);
6659 if (!CSKY_VREG_P (regno))
6660 return NULL_RTX;
6661
6662 mode = GET_MODE (rtl);
6663 if (GET_MODE_SIZE (mode) < 8)
6664 return NULL_RTX;
6665
6666 if (TARGET_SOFT_FPU)
6667 {
6668 nregs = GET_MODE_SIZE (mode) / 4;
6669 for (i = 0; i < nregs; i += 2)
6670 if (TARGET_BIG_ENDIAN)
6671 {
6672 parts[i] = gen_rtx_REG (SImode, regno + i + 1);
6673 parts[i + 1] = gen_rtx_REG (SImode, regno + i);
6674 }
6675 else
6676 {
6677 parts[i] = gen_rtx_REG (SImode, regno + i);
6678 parts[i + 1] = gen_rtx_REG (SImode, regno + i + 1);
6679 }
6680 }
6681 else
6682 {
6683 /* FIXME: dwarf2 considers all general registers to be the same
6684 as the CPU bit width. Transform the 64-bit FPU registers to
6685 32 bits here, and we will modify the unwind processing to
6686 fit CSKY architecture later. */
6687 nregs = GET_MODE_SIZE (mode) / 8;
6688 for (i = 0; i < nregs; i++)
6689 parts[i] = gen_rtx_REG (SImode, regno + i);
6690 }
6691
6692 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nregs , parts));
6693 }
6694
/* Implement TARGET_INIT_LIBFUNCS.  Register out-of-line atomics for
   Linux targets, and when TARGET_LIBCCRT is set, replace the default
   libgcc routine names with the __csky_ccrt_* equivalents from the
   C-SKY compiler runtime.  */

static void
csky_init_libfuncs (void)
{
  /* Register out-of-line __sync_* helpers on Linux targets.  */
  if (TARGET_CSKY_LINUX)
    init_sync_libfuncs (UNITS_PER_WORD);
  /* Everything below applies only to the C-SKY runtime library.  */
  if (!TARGET_LIBCCRT)
    return;

/* Build the runtime routine name for SYM.  */
#define CSKY_GCC_SYM(sym) "__csky_ccrt_" # sym

  /* Integer routines.  */

  /* Arithmetic functions.  */
  set_optab_libfunc (ashl_optab, DImode, CSKY_GCC_SYM (ashldi3));
  set_optab_libfunc (ashr_optab, DImode, CSKY_GCC_SYM (ashrdi3));
  set_optab_libfunc (sdiv_optab, SImode, CSKY_GCC_SYM (divsi3));
  set_optab_libfunc (sdiv_optab, DImode, CSKY_GCC_SYM (divdi3));
  set_optab_libfunc (lshr_optab, DImode, CSKY_GCC_SYM (lshrdi3));
  set_optab_libfunc (smod_optab, SImode, CSKY_GCC_SYM (modsi3));
  set_optab_libfunc (smod_optab, DImode, CSKY_GCC_SYM (moddi3));
  set_optab_libfunc (smul_optab, DImode, CSKY_GCC_SYM (muldi3));
  set_optab_libfunc (neg_optab, DImode, CSKY_GCC_SYM (negdi2));
  set_optab_libfunc (udiv_optab, SImode, CSKY_GCC_SYM (udivsi3));
  set_optab_libfunc (udiv_optab, DImode, CSKY_GCC_SYM (udivdi3));
  set_optab_libfunc (udivmod_optab, DImode, CSKY_GCC_SYM (udivmoddi4));
  set_optab_libfunc (umod_optab, SImode, CSKY_GCC_SYM (umodsi3));
  set_optab_libfunc (umod_optab, DImode, CSKY_GCC_SYM (umoddi3));

  /* Comparison functions.  */
  set_optab_libfunc (cmp_optab, DImode, CSKY_GCC_SYM (cmpdi2));
  set_optab_libfunc (ucmp_optab, DImode, CSKY_GCC_SYM (ucmpdi2));

  /* Trapping arithmetic functions.  */
  set_optab_libfunc (absv_optab, SImode, CSKY_GCC_SYM (absvsi2));
  set_optab_libfunc (absv_optab, DImode, CSKY_GCC_SYM (absvdi2));
  set_optab_libfunc (addv_optab, SImode, CSKY_GCC_SYM (addvsi3));
  set_optab_libfunc (addv_optab, DImode, CSKY_GCC_SYM (addvdi3));
  set_optab_libfunc (smulv_optab, SImode, CSKY_GCC_SYM (mulvsi3));
  set_optab_libfunc (smulv_optab, DImode, CSKY_GCC_SYM (mulvdi3));
  set_optab_libfunc (negv_optab, SImode, CSKY_GCC_SYM (negvsi2));
  set_optab_libfunc (negv_optab, DImode, CSKY_GCC_SYM (negvdi2));
  set_optab_libfunc (subv_optab, SImode, CSKY_GCC_SYM (subvsi3));
  set_optab_libfunc (subv_optab, DImode, CSKY_GCC_SYM (subvdi3));

  /* Bit operations.  */
  set_optab_libfunc (clz_optab, SImode, CSKY_GCC_SYM (clzsi2));
  set_optab_libfunc (clz_optab, DImode, CSKY_GCC_SYM (clzdi2));
  set_optab_libfunc (ctz_optab, SImode, CSKY_GCC_SYM (ctzsi2));
  set_optab_libfunc (ctz_optab, DImode, CSKY_GCC_SYM (ctzdi2));
  set_optab_libfunc (ffs_optab, DImode, CSKY_GCC_SYM (ffsdi2));
  set_optab_libfunc (parity_optab, SImode, CSKY_GCC_SYM (paritysi2));
  set_optab_libfunc (parity_optab, DImode, CSKY_GCC_SYM (paritydi2));
  set_optab_libfunc (popcount_optab,SImode, CSKY_GCC_SYM (popcountsi2));
  set_optab_libfunc (popcount_optab,DImode, CSKY_GCC_SYM (popcountdi2));
  set_optab_libfunc (bswap_optab, SImode, CSKY_GCC_SYM (bswapsi2));
  set_optab_libfunc (bswap_optab, DImode, CSKY_GCC_SYM (bswapdi2));

  /* Floating-point routines.  */

  /* Arithmetic functions.  */
  set_optab_libfunc (add_optab, SFmode, CSKY_GCC_SYM (addsf3));
  set_optab_libfunc (add_optab, DFmode, CSKY_GCC_SYM (adddf3));
  set_optab_libfunc (sub_optab, SFmode, CSKY_GCC_SYM (subsf3));
  set_optab_libfunc (sub_optab, DFmode, CSKY_GCC_SYM (subdf3));
  set_optab_libfunc (smul_optab, SFmode, CSKY_GCC_SYM (mulsf3));
  set_optab_libfunc (smul_optab, DFmode, CSKY_GCC_SYM (muldf3));
  set_optab_libfunc (sdiv_optab, SFmode, CSKY_GCC_SYM (divsf3));
  set_optab_libfunc (sdiv_optab, DFmode, CSKY_GCC_SYM (divdf3));
  set_optab_libfunc (neg_optab, SFmode, CSKY_GCC_SYM (negsf2));
  set_optab_libfunc (neg_optab, DFmode, CSKY_GCC_SYM (negdf2));

  /* Conversion functions.  */
  set_conv_libfunc (sext_optab, DFmode, SFmode, CSKY_GCC_SYM (extendsfdf2));
  set_conv_libfunc (trunc_optab, SFmode, DFmode, CSKY_GCC_SYM (truncdfsf2));
  set_conv_libfunc (sfix_optab, SImode, SFmode, CSKY_GCC_SYM (fixsfsi));
  set_conv_libfunc (sfix_optab, SImode, DFmode, CSKY_GCC_SYM (fixdfsi));
  set_conv_libfunc (sfix_optab, DImode, SFmode, CSKY_GCC_SYM (fixsfdi));
  set_conv_libfunc (sfix_optab, DImode, DFmode, CSKY_GCC_SYM (fixdfdi));
  set_conv_libfunc (ufix_optab, SImode, SFmode, CSKY_GCC_SYM (fixunssfsi));
  set_conv_libfunc (ufix_optab, SImode, DFmode, CSKY_GCC_SYM (fixunsdfsi));
  set_conv_libfunc (ufix_optab, DImode, SFmode, CSKY_GCC_SYM (fixunssfdi));
  set_conv_libfunc (ufix_optab, DImode, DFmode, CSKY_GCC_SYM (fixunsdfdi));
  set_conv_libfunc (sfloat_optab, SFmode, SImode, CSKY_GCC_SYM (floatsisf));
  set_conv_libfunc (sfloat_optab, DFmode, SImode, CSKY_GCC_SYM (floatsidf));
  set_conv_libfunc (sfloat_optab, SFmode, DImode, CSKY_GCC_SYM (floatdisf));
  set_conv_libfunc (sfloat_optab, DFmode, DImode, CSKY_GCC_SYM (floatdidf));
  set_conv_libfunc (ufloat_optab, SFmode, SImode, CSKY_GCC_SYM (floatunsisf));
  set_conv_libfunc (ufloat_optab, DFmode, SImode, CSKY_GCC_SYM (floatunsidf));
  set_conv_libfunc (ufloat_optab, SFmode, DImode, CSKY_GCC_SYM (floatundisf));
  set_conv_libfunc (ufloat_optab, DFmode, DImode, CSKY_GCC_SYM (floatundidf));

  /* Comparison functions.  */
  set_optab_libfunc (cmp_optab, SFmode, CSKY_GCC_SYM (cmpsf2));
  set_optab_libfunc (cmp_optab, DFmode, CSKY_GCC_SYM (cmpdf2));
  set_optab_libfunc (unord_optab, SFmode, CSKY_GCC_SYM (unordsf2));
  set_optab_libfunc (unord_optab, DFmode, CSKY_GCC_SYM (unorddf2));
  set_optab_libfunc (eq_optab, SFmode, CSKY_GCC_SYM (eqsf2));
  set_optab_libfunc (eq_optab, DFmode, CSKY_GCC_SYM (eqdf2));
  set_optab_libfunc (ne_optab, SFmode, CSKY_GCC_SYM (nesf2));
  set_optab_libfunc (ne_optab, DFmode, CSKY_GCC_SYM (nedf2));
  set_optab_libfunc (ge_optab, SFmode, CSKY_GCC_SYM (gesf2));
  set_optab_libfunc (ge_optab, DFmode, CSKY_GCC_SYM (gedf2));
  set_optab_libfunc (lt_optab, SFmode, CSKY_GCC_SYM (ltsf2));
  set_optab_libfunc (lt_optab, DFmode, CSKY_GCC_SYM (ltdf2));
  set_optab_libfunc (le_optab, SFmode, CSKY_GCC_SYM (lesf2));
  set_optab_libfunc (le_optab, DFmode, CSKY_GCC_SYM (ledf2));
  set_optab_libfunc (gt_optab, SFmode, CSKY_GCC_SYM (gtsf2));
  set_optab_libfunc (gt_optab, DFmode, CSKY_GCC_SYM (gtdf2));
}
6806
6807
6808 /* Implement TARGET_ADDRESS_COST to estimate cost of the memory address X.
6809 For C-SKY, (register) and (register + offset) have the same cost.
6810 Other situations cost more. */
6811
6812 static int
csky_address_cost(rtx x,machine_mode mode ATTRIBUTE_UNUSED,addr_space_t as ATTRIBUTE_UNUSED,bool speed ATTRIBUTE_UNUSED)6813 csky_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
6814 addr_space_t as ATTRIBUTE_UNUSED,
6815 bool speed ATTRIBUTE_UNUSED)
6816 {
6817 enum rtx_code code = GET_CODE (x);
6818
6819 if (code == REG)
6820 return COSTS_N_INSNS (1);
6821 if (code == PLUS
6822 && REG_P (XEXP (x, 0))
6823 && CONST_INT_P (XEXP (x, 1)))
6824 return COSTS_N_INSNS (1);
6825
6826 return COSTS_N_INSNS (3);
6827 }
6828
6829
/* Implement TARGET_FIXED_CONDITION_CODE_REGS.
   Report the single condition-code register to the optimizers.  */

static bool
csky_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  /* The C-SKY condition-code register; there is no second CC reg.  */
  *p1 = CSKY_CC_REGNUM;
  *p2 = INVALID_REGNUM;
  return true;
}
6839
6840 void
csky_init_cumulative_args(CUMULATIVE_ARGS * pcum,tree fntype,rtx libname ATTRIBUTE_UNUSED,tree fndecl ATTRIBUTE_UNUSED)6841 csky_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
6842 rtx libname ATTRIBUTE_UNUSED,
6843 tree fndecl ATTRIBUTE_UNUSED)
6844 {
6845 memset(pcum, 0, sizeof(*pcum));
6846 if (stdarg_p (fntype))
6847 pcum->is_stdarg = true;
6848 }
6849
/* The target hook vector for the C-SKY back end.  */
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-csky.h"
6853