1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Michael Tiemann (tiemann@cygnus.com)
6 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
7 at Cygnus Support.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
15
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "insn-codes.h"
37 #include "conditions.h"
38 #include "output.h"
39 #include "insn-attr.h"
40 #include "flags.h"
41 #include "function.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "recog.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "tm_p.h"
48 #include "debug.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "cfglayout.h"
52 #include "tree-gimple.h"
53 #include "langhooks.h"
54
55 /* Processor costs */
56 static const
57 struct processor_costs cypress_costs = {
58 COSTS_N_INSNS (2), /* int load */
59 COSTS_N_INSNS (2), /* int signed load */
60 COSTS_N_INSNS (2), /* int zeroed load */
61 COSTS_N_INSNS (2), /* float load */
62 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
63 COSTS_N_INSNS (5), /* fadd, fsub */
64 COSTS_N_INSNS (1), /* fcmp */
65 COSTS_N_INSNS (1), /* fmov, fmovr */
66 COSTS_N_INSNS (7), /* fmul */
67 COSTS_N_INSNS (37), /* fdivs */
68 COSTS_N_INSNS (37), /* fdivd */
69 COSTS_N_INSNS (63), /* fsqrts */
70 COSTS_N_INSNS (63), /* fsqrtd */
71 COSTS_N_INSNS (1), /* imul */
72 COSTS_N_INSNS (1), /* imulX */
73 0, /* imul bit factor */
74 COSTS_N_INSNS (1), /* idiv */
75 COSTS_N_INSNS (1), /* idivX */
76 COSTS_N_INSNS (1), /* movcc/movr */
77 0, /* shift penalty */
78 };
79
80 static const
81 struct processor_costs supersparc_costs = {
82 COSTS_N_INSNS (1), /* int load */
83 COSTS_N_INSNS (1), /* int signed load */
84 COSTS_N_INSNS (1), /* int zeroed load */
85 COSTS_N_INSNS (0), /* float load */
86 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
87 COSTS_N_INSNS (3), /* fadd, fsub */
88 COSTS_N_INSNS (3), /* fcmp */
89 COSTS_N_INSNS (1), /* fmov, fmovr */
90 COSTS_N_INSNS (3), /* fmul */
91 COSTS_N_INSNS (6), /* fdivs */
92 COSTS_N_INSNS (9), /* fdivd */
93 COSTS_N_INSNS (12), /* fsqrts */
94 COSTS_N_INSNS (12), /* fsqrtd */
95 COSTS_N_INSNS (4), /* imul */
96 COSTS_N_INSNS (4), /* imulX */
97 0, /* imul bit factor */
98 COSTS_N_INSNS (4), /* idiv */
99 COSTS_N_INSNS (4), /* idivX */
100 COSTS_N_INSNS (1), /* movcc/movr */
101 1, /* shift penalty */
102 };
103
104 static const
105 struct processor_costs hypersparc_costs = {
106 COSTS_N_INSNS (1), /* int load */
107 COSTS_N_INSNS (1), /* int signed load */
108 COSTS_N_INSNS (1), /* int zeroed load */
109 COSTS_N_INSNS (1), /* float load */
110 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
111 COSTS_N_INSNS (1), /* fadd, fsub */
112 COSTS_N_INSNS (1), /* fcmp */
113 COSTS_N_INSNS (1), /* fmov, fmovr */
114 COSTS_N_INSNS (1), /* fmul */
115 COSTS_N_INSNS (8), /* fdivs */
116 COSTS_N_INSNS (12), /* fdivd */
117 COSTS_N_INSNS (17), /* fsqrts */
118 COSTS_N_INSNS (17), /* fsqrtd */
119 COSTS_N_INSNS (17), /* imul */
120 COSTS_N_INSNS (17), /* imulX */
121 0, /* imul bit factor */
122 COSTS_N_INSNS (17), /* idiv */
123 COSTS_N_INSNS (17), /* idivX */
124 COSTS_N_INSNS (1), /* movcc/movr */
125 0, /* shift penalty */
126 };
127
128 static const
129 struct processor_costs sparclet_costs = {
130 COSTS_N_INSNS (3), /* int load */
131 COSTS_N_INSNS (3), /* int signed load */
132 COSTS_N_INSNS (1), /* int zeroed load */
133 COSTS_N_INSNS (1), /* float load */
134 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
135 COSTS_N_INSNS (1), /* fadd, fsub */
136 COSTS_N_INSNS (1), /* fcmp */
137 COSTS_N_INSNS (1), /* fmov, fmovr */
138 COSTS_N_INSNS (1), /* fmul */
139 COSTS_N_INSNS (1), /* fdivs */
140 COSTS_N_INSNS (1), /* fdivd */
141 COSTS_N_INSNS (1), /* fsqrts */
142 COSTS_N_INSNS (1), /* fsqrtd */
143 COSTS_N_INSNS (5), /* imul */
144 COSTS_N_INSNS (5), /* imulX */
145 0, /* imul bit factor */
146 COSTS_N_INSNS (5), /* idiv */
147 COSTS_N_INSNS (5), /* idivX */
148 COSTS_N_INSNS (1), /* movcc/movr */
149 0, /* shift penalty */
150 };
151
152 static const
153 struct processor_costs ultrasparc_costs = {
154 COSTS_N_INSNS (2), /* int load */
155 COSTS_N_INSNS (3), /* int signed load */
156 COSTS_N_INSNS (2), /* int zeroed load */
157 COSTS_N_INSNS (2), /* float load */
158 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
159 COSTS_N_INSNS (4), /* fadd, fsub */
160 COSTS_N_INSNS (1), /* fcmp */
161 COSTS_N_INSNS (2), /* fmov, fmovr */
162 COSTS_N_INSNS (4), /* fmul */
163 COSTS_N_INSNS (13), /* fdivs */
164 COSTS_N_INSNS (23), /* fdivd */
165 COSTS_N_INSNS (13), /* fsqrts */
166 COSTS_N_INSNS (23), /* fsqrtd */
167 COSTS_N_INSNS (4), /* imul */
168 COSTS_N_INSNS (4), /* imulX */
169 2, /* imul bit factor */
170 COSTS_N_INSNS (37), /* idiv */
171 COSTS_N_INSNS (68), /* idivX */
172 COSTS_N_INSNS (2), /* movcc/movr */
173 2, /* shift penalty */
174 };
175
176 static const
177 struct processor_costs ultrasparc3_costs = {
178 COSTS_N_INSNS (2), /* int load */
179 COSTS_N_INSNS (3), /* int signed load */
180 COSTS_N_INSNS (3), /* int zeroed load */
181 COSTS_N_INSNS (2), /* float load */
182 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
183 COSTS_N_INSNS (4), /* fadd, fsub */
184 COSTS_N_INSNS (5), /* fcmp */
185 COSTS_N_INSNS (3), /* fmov, fmovr */
186 COSTS_N_INSNS (4), /* fmul */
187 COSTS_N_INSNS (17), /* fdivs */
188 COSTS_N_INSNS (20), /* fdivd */
189 COSTS_N_INSNS (20), /* fsqrts */
190 COSTS_N_INSNS (29), /* fsqrtd */
191 COSTS_N_INSNS (6), /* imul */
192 COSTS_N_INSNS (6), /* imulX */
193 0, /* imul bit factor */
194 COSTS_N_INSNS (40), /* idiv */
195 COSTS_N_INSNS (71), /* idivX */
196 COSTS_N_INSNS (2), /* movcc/movr */
197 0, /* shift penalty */
198 };
199
200 static const
201 struct processor_costs niagara_costs = {
202 COSTS_N_INSNS (3), /* int load */
203 COSTS_N_INSNS (3), /* int signed load */
204 COSTS_N_INSNS (3), /* int zeroed load */
205 COSTS_N_INSNS (9), /* float load */
206 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
207 COSTS_N_INSNS (8), /* fadd, fsub */
208 COSTS_N_INSNS (26), /* fcmp */
209 COSTS_N_INSNS (8), /* fmov, fmovr */
210 COSTS_N_INSNS (29), /* fmul */
211 COSTS_N_INSNS (54), /* fdivs */
212 COSTS_N_INSNS (83), /* fdivd */
213 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
214 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
215 COSTS_N_INSNS (11), /* imul */
216 COSTS_N_INSNS (11), /* imulX */
217 0, /* imul bit factor */
218 COSTS_N_INSNS (72), /* idiv */
219 COSTS_N_INSNS (72), /* idivX */
220 COSTS_N_INSNS (1), /* movcc/movr */
221 0, /* shift penalty */
222 };
223
224 const struct processor_costs *sparc_costs = &cypress_costs;
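/* The selected table is consumed by sparc_rtx_costs (installed as the
   TARGET_RTX_COSTS hook below).  As a rough, illustrative reading of
   the numbers: once -mcpu=ultrasparc has pointed sparc_costs at
   ultrasparc_costs, an integer multiply is costed at about
   COSTS_N_INSNS (4) plus an operand-dependent term scaled by the imul
   bit factor, while an fdivd is costed at COSTS_N_INSNS (23).  */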
225
226 #ifdef HAVE_AS_RELAX_OPTION
227 /* If 'as' and 'ld' are relaxing tail call insns into branch always, always
228    use "or %o7,%g0,X; call Y; or X,%g0,%o7" so that it can be optimized.
229    With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
230    whether somebody branches between the sethi and jmp.  */
231 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
232 #else
233 #define LEAF_SIBCALL_SLOT_RESERVED_P \
234 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
235 #endif
236
237 /* Global variables for machine-dependent things. */
238
239 /* Size of frame. Need to know this to emit return insns from leaf procedures.
240 ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
241 reload pass. This is important as the value is later used for scheduling
242 (to see what can go in a delay slot).
243 APPARENT_FSIZE is the size of the stack less the register save area and less
244 the outgoing argument area. It is used when saving call preserved regs. */
245 static HOST_WIDE_INT apparent_fsize;
246 static HOST_WIDE_INT actual_fsize;
247
248 /* Number of live general or floating point registers needed to be
249 saved (as 4-byte quantities). */
250 static int num_gfregs;
251
252 /* The alias set for prologue/epilogue register save/restore. */
253 static GTY(()) int sparc_sr_alias_set;
254
255 /* The alias set for the structure return value. */
256 static GTY(()) int struct_value_alias_set;
257
258 /* Save the operands last given to a compare for use when we
259 generate a scc or bcc insn. */
260 rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;
261
262 /* Vector to say how input registers are mapped to output registers.
263    HARD_FRAME_POINTER_REGNUM cannot be remapped by this table to
264 eliminate it. You must use -fomit-frame-pointer to get that. */
265 char leaf_reg_remap[] =
266 { 0, 1, 2, 3, 4, 5, 6, 7,
267 -1, -1, -1, -1, -1, -1, 14, -1,
268 -1, -1, -1, -1, -1, -1, -1, -1,
269 8, 9, 10, 11, 12, 13, -1, 15,
270
271 32, 33, 34, 35, 36, 37, 38, 39,
272 40, 41, 42, 43, 44, 45, 46, 47,
273 48, 49, 50, 51, 52, 53, 54, 55,
274 56, 57, 58, 59, 60, 61, 62, 63,
275 64, 65, 66, 67, 68, 69, 70, 71,
276 72, 73, 74, 75, 76, 77, 78, 79,
277 80, 81, 82, 83, 84, 85, 86, 87,
278 88, 89, 90, 91, 92, 93, 94, 95,
279 96, 97, 98, 99, 100};
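/* So, for example, in a function given leaf treatment the incoming
   registers %i0-%i5 (hard regs 24-29) are renamed onto the outgoing
   registers %o0-%o5 (hard regs 8-13), %i7 (31) onto %o7 (15), %sp (14)
   stays put, and the entries of -1 (including %fp, reg 30) mark
   registers that must not appear at all.  */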
280
281 /* Vector, indexed by hard register number, which contains 1
282 for a register that is allowable in a candidate for leaf
283 function treatment. */
284 char sparc_leaf_regs[] =
285 { 1, 1, 1, 1, 1, 1, 1, 1,
286 0, 0, 0, 0, 0, 0, 1, 0,
287 0, 0, 0, 0, 0, 0, 0, 0,
288 1, 1, 1, 1, 1, 1, 0, 1,
289 1, 1, 1, 1, 1, 1, 1, 1,
290 1, 1, 1, 1, 1, 1, 1, 1,
291 1, 1, 1, 1, 1, 1, 1, 1,
292 1, 1, 1, 1, 1, 1, 1, 1,
293 1, 1, 1, 1, 1, 1, 1, 1,
294 1, 1, 1, 1, 1, 1, 1, 1,
295 1, 1, 1, 1, 1, 1, 1, 1,
296 1, 1, 1, 1, 1, 1, 1, 1,
297 1, 1, 1, 1, 1};
298
299 struct machine_function GTY(())
300 {
301 /* Some local-dynamic TLS symbol name. */
302 const char *some_ld_name;
303
304 /* True if the current function is leaf and uses only leaf regs,
305 so that the SPARC leaf function optimization can be applied.
306 Private version of current_function_uses_only_leaf_regs, see
307 sparc_expand_prologue for the rationale. */
308 int leaf_function_p;
309
310 /* True if the data calculated by sparc_expand_prologue are valid. */
311 bool prologue_data_valid_p;
312 };
313
314 #define sparc_leaf_function_p cfun->machine->leaf_function_p
315 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
316
317 /* Register we pretend to think the frame pointer is allocated to.
318 Normally, this is %fp, but if we are in a leaf procedure, this
319 is %sp+"something". We record "something" separately as it may
320 be too big for reg+constant addressing. */
321 static rtx frame_base_reg;
322 static HOST_WIDE_INT frame_base_offset;
323
324 /* 1 if the next opcode is to be specially indented. */
325 int sparc_indent_opcode = 0;
326
327 static bool sparc_handle_option (size_t, const char *, int);
328 static void sparc_init_modes (void);
329 static void scan_record_type (tree, int *, int *, int *);
330 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
331 tree, int, int, int *, int *);
332
333 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
334 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
335
336 static void sparc_output_addr_vec (rtx);
337 static void sparc_output_addr_diff_vec (rtx);
338 static void sparc_output_deferred_case_vectors (void);
339 static rtx sparc_builtin_saveregs (void);
340 static int epilogue_renumber (rtx *, int);
341 static bool sparc_assemble_integer (rtx, unsigned int, int);
342 static int set_extends (rtx);
343 static void emit_pic_helper (void);
344 static void load_pic_register (bool);
345 static int save_or_restore_regs (int, int, rtx, int, int);
346 static void emit_save_or_restore_regs (int);
347 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
348 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
349 #ifdef OBJECT_FORMAT_ELF
350 static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
351 #endif
352
353 static int sparc_adjust_cost (rtx, rtx, rtx, int);
354 static int sparc_issue_rate (void);
355 static void sparc_sched_init (FILE *, int, int);
356 static int sparc_use_sched_lookahead (void);
357
358 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
359 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
360 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
361 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
362 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
363
364 static bool sparc_function_ok_for_sibcall (tree, tree);
365 static void sparc_init_libfuncs (void);
366 static void sparc_init_builtins (void);
367 static void sparc_vis_init_builtins (void);
368 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
369 static tree sparc_fold_builtin (tree, tree, bool);
370 static int sparc_vis_mul8x16 (int, int);
371 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
372 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
373 HOST_WIDE_INT, tree);
374 static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
375 HOST_WIDE_INT, tree);
376 static struct machine_function * sparc_init_machine_status (void);
377 static bool sparc_cannot_force_const_mem (rtx);
378 static rtx sparc_tls_get_addr (void);
379 static rtx sparc_tls_got (void);
380 static const char *get_some_local_dynamic_name (void);
381 static int get_some_local_dynamic_name_1 (rtx *, void *);
382 static bool sparc_rtx_costs (rtx, int, int, int *);
383 static bool sparc_promote_prototypes (tree);
384 static rtx sparc_struct_value_rtx (tree, int);
385 static bool sparc_return_in_memory (tree, tree);
386 static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
387 static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
388 static bool sparc_vector_mode_supported_p (enum machine_mode);
389 static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
390 enum machine_mode, tree, bool);
391 static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
392 enum machine_mode, tree, bool);
393 static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
394 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
395 static void sparc_file_end (void);
396 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
397 static const char *sparc_mangle_fundamental_type (tree);
398 #endif
399 #ifdef SUBTARGET_ATTRIBUTE_TABLE
400 const struct attribute_spec sparc_attribute_table[];
401 #endif
402
403 /* Option handling. */
404
405 /* Parsed value. */
406 enum cmodel sparc_cmodel;
407
408 char sparc_hard_reg_printed[8];
409
410 struct sparc_cpu_select sparc_select[] =
411 {
412 /* switch name, tune arch */
413 { (char *)0, "default", 1, 1 },
414 { (char *)0, "-mcpu=", 1, 1 },
415 { (char *)0, "-mtune=", 1, 0 },
416 { 0, 0, 0, 0 }
417 };
418
419 /* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
420 enum processor_type sparc_cpu;
421
422 /* Whether an FPU option was specified. */
423 static bool fpu_option_set = false;
424
425 /* Initialize the GCC target structure. */
426
427 /* The sparc default is to use .half rather than .short for aligned
428 HI objects. Use .word instead of .long on non-ELF systems. */
429 #undef TARGET_ASM_ALIGNED_HI_OP
430 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
431 #ifndef OBJECT_FORMAT_ELF
432 #undef TARGET_ASM_ALIGNED_SI_OP
433 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
434 #endif
435
436 #undef TARGET_ASM_UNALIGNED_HI_OP
437 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
438 #undef TARGET_ASM_UNALIGNED_SI_OP
439 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
440 #undef TARGET_ASM_UNALIGNED_DI_OP
441 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
442
443 /* The target hook has to handle DI-mode values. */
444 #undef TARGET_ASM_INTEGER
445 #define TARGET_ASM_INTEGER sparc_assemble_integer
446
447 #undef TARGET_ASM_FUNCTION_PROLOGUE
448 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
449 #undef TARGET_ASM_FUNCTION_EPILOGUE
450 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
451
452 #undef TARGET_SCHED_ADJUST_COST
453 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
454 #undef TARGET_SCHED_ISSUE_RATE
455 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
456 #undef TARGET_SCHED_INIT
457 #define TARGET_SCHED_INIT sparc_sched_init
458 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
459 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
460
461 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
462 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
463
464 #undef TARGET_INIT_LIBFUNCS
465 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
466 #undef TARGET_INIT_BUILTINS
467 #define TARGET_INIT_BUILTINS sparc_init_builtins
468
469 #undef TARGET_EXPAND_BUILTIN
470 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
471 #undef TARGET_FOLD_BUILTIN
472 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
473
474 #if TARGET_TLS
475 #undef TARGET_HAVE_TLS
476 #define TARGET_HAVE_TLS true
477 #endif
478
479 #undef TARGET_CANNOT_FORCE_CONST_MEM
480 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
481
482 #undef TARGET_ASM_OUTPUT_MI_THUNK
483 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
484 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
485 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
486
487 #undef TARGET_RTX_COSTS
488 #define TARGET_RTX_COSTS sparc_rtx_costs
489 #undef TARGET_ADDRESS_COST
490 #define TARGET_ADDRESS_COST hook_int_rtx_0
491
492 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
493 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
494 test for this value. */
495 #undef TARGET_PROMOTE_FUNCTION_ARGS
496 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
497
498 /* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
499 no-op for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime
500 test for this value. */
501 #undef TARGET_PROMOTE_FUNCTION_RETURN
502 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
503
504 #undef TARGET_PROMOTE_PROTOTYPES
505 #define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes
506
507 #undef TARGET_STRUCT_VALUE_RTX
508 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
509 #undef TARGET_RETURN_IN_MEMORY
510 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
511 #undef TARGET_MUST_PASS_IN_STACK
512 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
513 #undef TARGET_PASS_BY_REFERENCE
514 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
515 #undef TARGET_ARG_PARTIAL_BYTES
516 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
517
518 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
519 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
520 #undef TARGET_STRICT_ARGUMENT_NAMING
521 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
522
523 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
524 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
525
526 #undef TARGET_VECTOR_MODE_SUPPORTED_P
527 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
528
529 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
530 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec
531
532 #ifdef SUBTARGET_INSERT_ATTRIBUTES
533 #undef TARGET_INSERT_ATTRIBUTES
534 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
535 #endif
536
537 #ifdef SUBTARGET_ATTRIBUTE_TABLE
538 #undef TARGET_ATTRIBUTE_TABLE
539 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
540 #endif
541
542 #undef TARGET_RELAXED_ORDERING
543 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
544
545 #undef TARGET_DEFAULT_TARGET_FLAGS
546 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
547 #undef TARGET_HANDLE_OPTION
548 #define TARGET_HANDLE_OPTION sparc_handle_option
549
550 #if TARGET_GNU_TLS
551 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
552 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
553 #endif
554
555 #undef TARGET_ASM_FILE_END
556 #define TARGET_ASM_FILE_END sparc_file_end
557
558 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
559 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
560 #define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
561 #endif
562
563 struct gcc_target targetm = TARGET_INITIALIZER;
564
565 /* Implement TARGET_HANDLE_OPTION. */
566
567 static bool
568 sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
569 {
570 switch (code)
571 {
572 case OPT_mfpu:
573 case OPT_mhard_float:
574 case OPT_msoft_float:
575 fpu_option_set = true;
576 break;
577
578 case OPT_mcpu_:
579 sparc_select[1].string = arg;
580 break;
581
582 case OPT_mtune_:
583 sparc_select[2].string = arg;
584 break;
585 }
586
587 return true;
588 }
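/* For illustration: "-mcpu=ultrasparc" reaches this hook with code ==
   OPT_mcpu_ and arg == "ultrasparc".  Nothing is validated here; the
   string is only recorded in sparc_select[1] and interpreted later by
   sparc_override_options.  */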
589
590 /* Validate and override various options, and do some machine dependent
591 initialization. */
592
593 void
594 sparc_override_options (void)
595 {
596 static struct code_model {
597 const char *const name;
598 const int value;
599 } const cmodels[] = {
600 { "32", CM_32 },
601 { "medlow", CM_MEDLOW },
602 { "medmid", CM_MEDMID },
603 { "medany", CM_MEDANY },
604 { "embmedany", CM_EMBMEDANY },
605 { 0, 0 }
606 };
607 const struct code_model *cmodel;
608 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
609 static struct cpu_default {
610 const int cpu;
611 const char *const name;
612 } const cpu_default[] = {
613 /* There must be one entry here for each TARGET_CPU value. */
614 { TARGET_CPU_sparc, "cypress" },
615 { TARGET_CPU_sparclet, "tsc701" },
616 { TARGET_CPU_sparclite, "f930" },
617 { TARGET_CPU_v8, "v8" },
618 { TARGET_CPU_hypersparc, "hypersparc" },
619 { TARGET_CPU_sparclite86x, "sparclite86x" },
620 { TARGET_CPU_supersparc, "supersparc" },
621 { TARGET_CPU_v9, "v9" },
622 { TARGET_CPU_ultrasparc, "ultrasparc" },
623 { TARGET_CPU_ultrasparc3, "ultrasparc3" },
624 { TARGET_CPU_niagara, "niagara" },
625 { 0, 0 }
626 };
627 const struct cpu_default *def;
628 /* Table of values for -m{cpu,tune}=. */
629 static struct cpu_table {
630 const char *const name;
631 const enum processor_type processor;
632 const int disable;
633 const int enable;
634 } const cpu_table[] = {
635 { "v7", PROCESSOR_V7, MASK_ISA, 0 },
636 { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
637 { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
638 /* TI TMS390Z55 supersparc */
639 { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
640 { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
641 /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
642 The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
643 { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
644 { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
645 { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
646 { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
647 MASK_SPARCLITE },
648 { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
649 /* TEMIC sparclet */
650 { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
651 { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
652 /* TI ultrasparc I, II, IIi */
653 { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
654 /* Although insns using %y are deprecated, it is a clear win on current
655 ultrasparcs. */
656 |MASK_DEPRECATED_V8_INSNS},
657 /* TI ultrasparc III */
658 /* ??? Check if %y issue still holds true in ultra3. */
659 { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
660 /* UltraSPARC T1 */
661 { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
662 { 0, 0, 0, 0 }
663 };
664 const struct cpu_table *cpu;
665 const struct sparc_cpu_select *sel;
666 int fpu;
667
668 #ifndef SPARC_BI_ARCH
669 /* Check for unsupported architecture size. */
670 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
671 error ("%s is not supported by this configuration",
672 DEFAULT_ARCH32_P ? "-m64" : "-m32");
673 #endif
674
675 /* We force all 64-bit archs to use 128-bit long double. */
676 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
677 {
678 error ("-mlong-double-64 not allowed with -m64");
679 target_flags |= MASK_LONG_DOUBLE_128;
680 }
681
682 /* Code model selection. */
683 sparc_cmodel = SPARC_DEFAULT_CMODEL;
684
685 #ifdef SPARC_BI_ARCH
686 if (TARGET_ARCH32)
687 sparc_cmodel = CM_32;
688 #endif
689
690 if (sparc_cmodel_string != NULL)
691 {
692 if (TARGET_ARCH64)
693 {
694 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
695 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
696 break;
697 if (cmodel->name == NULL)
698 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
699 else
700 sparc_cmodel = cmodel->value;
701 }
702 else
703 error ("-mcmodel= is not supported on 32 bit systems");
704 }
705
706 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
707
708 /* Set the default CPU. */
709 for (def = &cpu_default[0]; def->name; ++def)
710 if (def->cpu == TARGET_CPU_DEFAULT)
711 break;
712 gcc_assert (def->name);
713 sparc_select[0].string = def->name;
714
715 for (sel = &sparc_select[0]; sel->name; ++sel)
716 {
717 if (sel->string)
718 {
719 for (cpu = &cpu_table[0]; cpu->name; ++cpu)
720 if (! strcmp (sel->string, cpu->name))
721 {
722 if (sel->set_tune_p)
723 sparc_cpu = cpu->processor;
724
725 if (sel->set_arch_p)
726 {
727 target_flags &= ~cpu->disable;
728 target_flags |= cpu->enable;
729 }
730 break;
731 }
732
733 if (! cpu->name)
734 error ("bad value (%s) for %s switch", sel->string, sel->name);
735 }
736 }
737
738 /* If -mfpu or -mno-fpu was explicitly used, don't override with
739 the processor default. */
740 if (fpu_option_set)
741 target_flags = (target_flags & ~MASK_FPU) | fpu;
742
743 /* Don't allow -mvis if FPU is disabled. */
744 if (! TARGET_FPU)
745 target_flags &= ~MASK_VIS;
746
747 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
748 are available.
749 -m64 also implies v9. */
750 if (TARGET_VIS || TARGET_ARCH64)
751 {
752 target_flags |= MASK_V9;
753 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
754 }
755
756 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
757 if (TARGET_V9 && TARGET_ARCH32)
758 target_flags |= MASK_DEPRECATED_V8_INSNS;
759
760 /* V8PLUS requires V9, makes no sense in 64 bit mode. */
761 if (! TARGET_V9 || TARGET_ARCH64)
762 target_flags &= ~MASK_V8PLUS;
763
764 /* Don't use stack biasing in 32 bit mode. */
765 if (TARGET_ARCH32)
766 target_flags &= ~MASK_STACK_BIAS;
767
768 /* Supply a default value for align_functions. */
769 if (align_functions == 0
770 && (sparc_cpu == PROCESSOR_ULTRASPARC
771 || sparc_cpu == PROCESSOR_ULTRASPARC3
772 || sparc_cpu == PROCESSOR_NIAGARA))
773 align_functions = 32;
774
775 /* Validate PCC_STRUCT_RETURN. */
776 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
777 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
778
779 /* Only use .uaxword when compiling for a 64-bit target. */
780 if (!TARGET_ARCH64)
781 targetm.asm_out.unaligned_op.di = NULL;
782
783 /* Do various machine dependent initializations. */
784 sparc_init_modes ();
785
786 /* Acquire unique alias sets for our private stuff. */
787 sparc_sr_alias_set = new_alias_set ();
788 struct_value_alias_set = new_alias_set ();
789
790 /* Set up function hooks. */
791 init_machine_status = sparc_init_machine_status;
792
793 switch (sparc_cpu)
794 {
795 case PROCESSOR_V7:
796 case PROCESSOR_CYPRESS:
797 sparc_costs = &cypress_costs;
798 break;
799 case PROCESSOR_V8:
800 case PROCESSOR_SPARCLITE:
801 case PROCESSOR_SUPERSPARC:
802 sparc_costs = &supersparc_costs;
803 break;
804 case PROCESSOR_F930:
805 case PROCESSOR_F934:
806 case PROCESSOR_HYPERSPARC:
807 case PROCESSOR_SPARCLITE86X:
808 sparc_costs = &hypersparc_costs;
809 break;
810 case PROCESSOR_SPARCLET:
811 case PROCESSOR_TSC701:
812 sparc_costs = &sparclet_costs;
813 break;
814 case PROCESSOR_V9:
815 case PROCESSOR_ULTRASPARC:
816 sparc_costs = &ultrasparc_costs;
817 break;
818 case PROCESSOR_ULTRASPARC3:
819 sparc_costs = &ultrasparc3_costs;
820 break;
821 case PROCESSOR_NIAGARA:
822 sparc_costs = &niagara_costs;
823 break;
824 };
825
826 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
827 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
828 target_flags |= MASK_LONG_DOUBLE_128;
829 #endif
830 }
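/* To make the above concrete: -mcpu=ultrasparc on a 32-bit
   configuration matches the "ultrasparc" cpu_table entry, which clears
   MASK_ISA and sets MASK_V9|MASK_DEPRECATED_V8_INSNS; the TARGET_ARCH32
   checks then strip MASK_STACK_BIAS, align_functions defaults to 32,
   and sparc_costs ends up pointing at ultrasparc_costs.  */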
831
832 #ifdef SUBTARGET_ATTRIBUTE_TABLE
833 /* Table of valid machine attributes. */
834 const struct attribute_spec sparc_attribute_table[] =
835 {
836 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
837 SUBTARGET_ATTRIBUTE_TABLE,
838 { NULL, 0, 0, false, false, false, NULL }
839 };
840 #endif
841
842 /* Miscellaneous utilities. */
843
844 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
845 or branch on register contents instructions. */
846
847 int
848 v9_regcmp_p (enum rtx_code code)
849 {
850 return (code == EQ || code == NE || code == GE || code == LT
851 || code == LE || code == GT);
852 }
853
854 /* Nonzero if OP is a floating point constant which can
855 be loaded into an integer register using a single
856 sethi instruction. */
857
858 int
859 fp_sethi_p (rtx op)
860 {
861 if (GET_CODE (op) == CONST_DOUBLE)
862 {
863 REAL_VALUE_TYPE r;
864 long i;
865
866 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
867 REAL_VALUE_TO_TARGET_SINGLE (r, i);
868 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
869 }
870
871 return 0;
872 }
873
874 /* Nonzero if OP is a floating point constant which can
875 be loaded into an integer register using a single
876 mov instruction. */
877
878 int
879 fp_mov_p (rtx op)
880 {
881 if (GET_CODE (op) == CONST_DOUBLE)
882 {
883 REAL_VALUE_TYPE r;
884 long i;
885
886 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
887 REAL_VALUE_TO_TARGET_SINGLE (r, i);
888 return SPARC_SIMM13_P (i);
889 }
890
891 return 0;
892 }
893
894 /* Nonzero if OP is a floating point constant which can
895 be loaded into an integer register using a high/losum
896 instruction sequence. */
897
898 int
899 fp_high_losum_p (rtx op)
900 {
901 /* The constraints calling this should only be in
902 SFmode move insns, so any constant which cannot
903 be moved using a single insn will do. */
904 if (GET_CODE (op) == CONST_DOUBLE)
905 {
906 REAL_VALUE_TYPE r;
907 long i;
908
909 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
910 REAL_VALUE_TO_TARGET_SINGLE (r, i);
911 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
912 }
913
914 return 0;
915 }
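/* A worked example of the three predicates above, assuming the usual
   definitions of SPARC_SIMM13_P and SPARC_SETHI_P: the SFmode image of
   1.0f is 0x3f800000, whose low 10 bits are clear, so fp_sethi_p
   accepts it; a small image like 0x00000123 fits in a simm13 and
   satisfies fp_mov_p instead; anything needing both halves, e.g.
   0x3f800123, falls through to fp_high_losum_p.  */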
916
917 /* Expand a move instruction. Return true if all work is done. */
918
919 bool
920 sparc_expand_move (enum machine_mode mode, rtx *operands)
921 {
922 /* Handle sets of MEM first. */
923 if (GET_CODE (operands[0]) == MEM)
924 {
925 /* 0 is a register (or a pair of registers) on SPARC. */
926 if (register_or_zero_operand (operands[1], mode))
927 return false;
928
929 if (!reload_in_progress)
930 {
931 operands[0] = validize_mem (operands[0]);
932 operands[1] = force_reg (mode, operands[1]);
933 }
934 }
935
936 /* Fixup TLS cases. */
937 if (TARGET_HAVE_TLS
938 && CONSTANT_P (operands[1])
939 && GET_CODE (operands[1]) != HIGH
940 && sparc_tls_referenced_p (operands [1]))
941 {
942 rtx sym = operands[1];
943 rtx addend = NULL;
944
945 if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
946 {
947 addend = XEXP (XEXP (sym, 0), 1);
948 sym = XEXP (XEXP (sym, 0), 0);
949 }
950
951 gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));
952
953 sym = legitimize_tls_address (sym);
954 if (addend)
955 {
956 sym = gen_rtx_PLUS (mode, sym, addend);
957 sym = force_operand (sym, operands[0]);
958 }
959 operands[1] = sym;
960 }
961
962 /* Fixup PIC cases. */
963 if (flag_pic && CONSTANT_P (operands[1]))
964 {
965 if (pic_address_needs_scratch (operands[1]))
966 operands[1] = legitimize_pic_address (operands[1], mode, 0);
967
968 if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
969 {
970 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
971 return true;
972 }
973
974 if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
975 {
976 gcc_assert (TARGET_ARCH64);
977 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
978 return true;
979 }
980
981 if (symbolic_operand (operands[1], mode))
982 {
983 operands[1] = legitimize_pic_address (operands[1],
984 mode,
985 (reload_in_progress ?
986 operands[0] :
987 NULL_RTX));
988 return false;
989 }
990 }
991
992 /* If we are trying to toss an integer constant into FP registers,
993 or loading a FP or vector constant, force it into memory. */
994 if (CONSTANT_P (operands[1])
995 && REG_P (operands[0])
996 && (SPARC_FP_REG_P (REGNO (operands[0]))
997 || SCALAR_FLOAT_MODE_P (mode)
998 || VECTOR_MODE_P (mode)))
999 {
1000 /* emit_group_store will send such bogosity to us when it is
1001 not storing directly into memory. So fix this up to avoid
1002 crashes in output_constant_pool. */
1003 if (operands [1] == const0_rtx)
1004 operands[1] = CONST0_RTX (mode);
1005
1006 /* We can clear FP registers if TARGET_VIS, and always other regs. */
1007 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1008 && const_zero_operand (operands[1], mode))
1009 return false;
1010
1011 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1012 /* We are able to build any SF constant in integer registers
1013 with at most 2 instructions. */
1014 && (mode == SFmode
1015 /* And any DF constant in integer registers. */
1016 || (mode == DFmode
1017 && (reload_completed || reload_in_progress))))
1018 return false;
1019
1020 operands[1] = force_const_mem (mode, operands[1]);
1021 if (!reload_in_progress)
1022 operands[1] = validize_mem (operands[1]);
1023 return false;
1024 }
1025
1026 /* Accept non-constants and valid constants unmodified. */
1027 if (!CONSTANT_P (operands[1])
1028 || GET_CODE (operands[1]) == HIGH
1029 || input_operand (operands[1], mode))
1030 return false;
1031
1032 switch (mode)
1033 {
1034 case QImode:
1035 /* All QImode constants require only one insn, so proceed. */
1036 break;
1037
1038 case HImode:
1039 case SImode:
1040 sparc_emit_set_const32 (operands[0], operands[1]);
1041 return true;
1042
1043 case DImode:
1044 /* input_operand should have filtered out 32-bit mode. */
1045 sparc_emit_set_const64 (operands[0], operands[1]);
1046 return true;
1047
1048 default:
1049 gcc_unreachable ();
1050 }
1051
1052 return false;
1053 }
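/* A move expander would typically use this as

     if (sparc_expand_move (SImode, operands))
       DONE;

   and fall through to the ordinary move patterns when it returns
   false.  */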
1054
1055 /* Load OP1, a 32-bit constant, into OP0, a register.
1056 We know it can't be done in one insn when we get
1057 here, the move expander guarantees this. */
1058
1059 void
1060 sparc_emit_set_const32 (rtx op0, rtx op1)
1061 {
1062 enum machine_mode mode = GET_MODE (op0);
1063 rtx temp;
1064
1065 if (reload_in_progress || reload_completed)
1066 temp = op0;
1067 else
1068 temp = gen_reg_rtx (mode);
1069
1070 if (GET_CODE (op1) == CONST_INT)
1071 {
1072 gcc_assert (!small_int_operand (op1, mode)
1073 && !const_high_operand (op1, mode));
1074
1075 /* Emit them as real moves instead of a HIGH/LO_SUM,
1076 this way CSE can see everything and reuse intermediate
1077 values if it wants. */
1078 emit_insn (gen_rtx_SET (VOIDmode, temp,
1079 GEN_INT (INTVAL (op1)
1080 & ~(HOST_WIDE_INT)0x3ff)));
1081
1082 emit_insn (gen_rtx_SET (VOIDmode,
1083 op0,
1084 gen_rtx_IOR (mode, temp,
1085 GEN_INT (INTVAL (op1) & 0x3ff))));
1086 }
1087 else
1088 {
1089 /* A symbol, emit in the traditional way. */
1090 emit_insn (gen_rtx_SET (VOIDmode, temp,
1091 gen_rtx_HIGH (mode, op1)));
1092 emit_insn (gen_rtx_SET (VOIDmode,
1093 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1094 }
1095 }
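/* For example, op1 == 0x12345678 produces

     (set (reg:SI temp) (const_int 0x12345400))
     (set (reg:SI op0)  (ior:SI (reg:SI temp) (const_int 0x278)))

   which assembles to the usual sethi/or pair.  */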
1096
1097 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1098 If TEMP is nonzero, we are forbidden to use any other scratch
1099 registers. Otherwise, we are allowed to generate them as needed.
1100
1101 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1102 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1103
1104 void
1105 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1106 {
1107 rtx temp1, temp2, temp3, temp4, temp5;
1108 rtx ti_temp = 0;
1109
1110 if (temp && GET_MODE (temp) == TImode)
1111 {
1112 ti_temp = temp;
1113 temp = gen_rtx_REG (DImode, REGNO (temp));
1114 }
1115
1116 /* SPARC-V9 code-model support. */
1117 switch (sparc_cmodel)
1118 {
1119 case CM_MEDLOW:
1120 /* The range spanned by all instructions in the object is less
1121 than 2^31 bytes (2GB) and the distance from any instruction
1122 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1123 than 2^31 bytes (2GB).
1124
1125 The executable must be in the low 4TB of the virtual address
1126 space.
1127
1128 sethi %hi(symbol), %temp1
1129 or %temp1, %lo(symbol), %reg */
1130 if (temp)
1131 temp1 = temp; /* op0 is allowed. */
1132 else
1133 temp1 = gen_reg_rtx (DImode);
1134
1135 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1136 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1137 break;
1138
1139 case CM_MEDMID:
1140 /* The range spanned by all instructions in the object is less
1141 than 2^31 bytes (2GB) and the distance from any instruction
1142 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1143 than 2^31 bytes (2GB).
1144
1145 The executable must be in the low 16TB of the virtual address
1146 space.
1147
1148 sethi %h44(symbol), %temp1
1149 or %temp1, %m44(symbol), %temp2
1150 sllx %temp2, 12, %temp3
1151 or %temp3, %l44(symbol), %reg */
1152 if (temp)
1153 {
1154 temp1 = op0;
1155 temp2 = op0;
1156 temp3 = temp; /* op0 is allowed. */
1157 }
1158 else
1159 {
1160 temp1 = gen_reg_rtx (DImode);
1161 temp2 = gen_reg_rtx (DImode);
1162 temp3 = gen_reg_rtx (DImode);
1163 }
1164
1165 emit_insn (gen_seth44 (temp1, op1));
1166 emit_insn (gen_setm44 (temp2, temp1, op1));
1167 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1168 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1169 emit_insn (gen_setl44 (op0, temp3, op1));
1170 break;
1171
1172 case CM_MEDANY:
1173 /* The range spanned by all instructions in the object is less
1174 than 2^31 bytes (2GB) and the distance from any instruction
1175 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1176 than 2^31 bytes (2GB).
1177
1178 The executable can be placed anywhere in the virtual address
1179 space.
1180
1181 sethi %hh(symbol), %temp1
1182 sethi %lm(symbol), %temp2
1183 or %temp1, %hm(symbol), %temp3
1184 sllx %temp3, 32, %temp4
1185 or %temp4, %temp2, %temp5
1186 or %temp5, %lo(symbol), %reg */
1187 if (temp)
1188 {
1189 /* It is possible that one of the registers we got for operands[2]
1190 might coincide with that of operands[0] (which is why we made
1191 it TImode). Pick the other one to use as our scratch. */
1192 if (rtx_equal_p (temp, op0))
1193 {
1194 gcc_assert (ti_temp);
1195 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1196 }
1197 temp1 = op0;
1198 temp2 = temp; /* op0 is _not_ allowed, see above. */
1199 temp3 = op0;
1200 temp4 = op0;
1201 temp5 = op0;
1202 }
1203 else
1204 {
1205 temp1 = gen_reg_rtx (DImode);
1206 temp2 = gen_reg_rtx (DImode);
1207 temp3 = gen_reg_rtx (DImode);
1208 temp4 = gen_reg_rtx (DImode);
1209 temp5 = gen_reg_rtx (DImode);
1210 }
1211
1212 emit_insn (gen_sethh (temp1, op1));
1213 emit_insn (gen_setlm (temp2, op1));
1214 emit_insn (gen_sethm (temp3, temp1, op1));
1215 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1216 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1217 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1218 gen_rtx_PLUS (DImode, temp4, temp2)));
1219 emit_insn (gen_setlo (op0, temp5, op1));
1220 break;
1221
1222 case CM_EMBMEDANY:
1223 /* Old old old backwards compatibility kruft here.
1224 Essentially it is MEDLOW with a fixed 64-bit
1225 virtual base added to all data segment addresses.
1226 Text-segment stuff is computed like MEDANY, we can't
1227 reuse the code above because the relocation knobs
1228 look different.
1229
1230 Data segment: sethi %hi(symbol), %temp1
1231 add %temp1, EMBMEDANY_BASE_REG, %temp2
1232 or %temp2, %lo(symbol), %reg */
1233 if (data_segment_operand (op1, GET_MODE (op1)))
1234 {
1235 if (temp)
1236 {
1237 temp1 = temp; /* op0 is allowed. */
1238 temp2 = op0;
1239 }
1240 else
1241 {
1242 temp1 = gen_reg_rtx (DImode);
1243 temp2 = gen_reg_rtx (DImode);
1244 }
1245
1246 emit_insn (gen_embmedany_sethi (temp1, op1));
1247 emit_insn (gen_embmedany_brsum (temp2, temp1));
1248 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1249 }
1250
1251 /* Text segment: sethi %uhi(symbol), %temp1
1252 sethi %hi(symbol), %temp2
1253 or %temp1, %ulo(symbol), %temp3
1254 sllx %temp3, 32, %temp4
1255 or %temp4, %temp2, %temp5
1256 or %temp5, %lo(symbol), %reg */
1257 else
1258 {
1259 if (temp)
1260 {
1261 /* It is possible that one of the registers we got for operands[2]
1262 might coincide with that of operands[0] (which is why we made
1263 it TImode). Pick the other one to use as our scratch. */
1264 if (rtx_equal_p (temp, op0))
1265 {
1266 gcc_assert (ti_temp);
1267 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1268 }
1269 temp1 = op0;
1270 temp2 = temp; /* op0 is _not_ allowed, see above. */
1271 temp3 = op0;
1272 temp4 = op0;
1273 temp5 = op0;
1274 }
1275 else
1276 {
1277 temp1 = gen_reg_rtx (DImode);
1278 temp2 = gen_reg_rtx (DImode);
1279 temp3 = gen_reg_rtx (DImode);
1280 temp4 = gen_reg_rtx (DImode);
1281 temp5 = gen_reg_rtx (DImode);
1282 }
1283
1284 emit_insn (gen_embmedany_textuhi (temp1, op1));
1285 emit_insn (gen_embmedany_texthi (temp2, op1));
1286 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1287 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1288 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1289 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1290 gen_rtx_PLUS (DImode, temp4, temp2)));
1291 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1292 }
1293 break;
1294
1295 default:
1296 gcc_unreachable ();
1297 }
1298 }
1299
1300 #if HOST_BITS_PER_WIDE_INT == 32
1301 void
1302 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1303 {
1304 gcc_unreachable ();
1305 }
1306 #else
1307 /* These avoid problems when cross compiling. If we do not
1308 go through all this hair then the optimizer will see
1309 invalid REG_EQUAL notes or in some cases none at all. */
1310 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1311 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1312 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1313 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1314
1315 /* The optimizer is not allowed to assume anything about exactly
1316    which bits are set for a HIGH; they are unspecified.
1317    Unfortunately this leads to many missed optimizations
1318    during CSE.  We mask out the non-HIGH bits so that the result
1319    matches a plain movdi, to alleviate this problem.  */
1320 static rtx
1321 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1322 {
1323 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1324 }
1325
1326 static rtx
1327 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1328 {
1329 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1330 }
1331
1332 static rtx
1333 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1334 {
1335 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1336 }
1337
1338 static rtx
1339 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1340 {
1341 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1342 }
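/* E.g. gen_safe_HIGH64 (dest, 0x12345678) yields
   (set dest (const_int 0x12345400)), exactly the bits a sethi would
   deposit, so CSE can reason about the destination's full value.  */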
1343
1344 /* Worker routines for 64-bit constant formation on arch64.
1345    One of the key things to do in these emissions is
1346    to create as many temp REGs as possible.  This makes it
1347    possible for half-built constants to be reused later when
1348    similar values are required again.
1349    Without doing this, the optimizer cannot see such
1350    opportunities.  */
1351
1352 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1353 unsigned HOST_WIDE_INT, int);
1354
1355 static void
1356 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1357 unsigned HOST_WIDE_INT low_bits, int is_neg)
1358 {
1359 unsigned HOST_WIDE_INT high_bits;
1360
1361 if (is_neg)
1362 high_bits = (~low_bits) & 0xffffffff;
1363 else
1364 high_bits = low_bits;
1365
1366 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1367 if (!is_neg)
1368 {
1369 emit_insn (gen_rtx_SET (VOIDmode, op0,
1370 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1371 }
1372 else
1373 {
1374 /* If we are XOR'ing with -1, then we should emit a one's complement
1375 instead. This way the combiner will notice logical operations
1376 such as ANDN later on and substitute. */
1377 if ((low_bits & 0x3ff) == 0x3ff)
1378 {
1379 emit_insn (gen_rtx_SET (VOIDmode, op0,
1380 gen_rtx_NOT (DImode, temp)));
1381 }
1382 else
1383 {
1384 emit_insn (gen_rtx_SET (VOIDmode, op0,
1385 gen_safe_XOR64 (temp,
1386 (-(HOST_WIDE_INT)0x400
1387 | (low_bits & 0x3ff)))));
1388 }
1389 }
1390 }
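/* For instance, low_bits == 0x12345678 with is_neg == 0 becomes the
   HIGH of 0x12345400 into TEMP followed by an OR with 0x278.  With
   is_neg set, the complemented upper bits go into TEMP and the result
   is finished with a XOR (or a plain NOT when the low 10 bits are all
   ones).  */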
1391
1392 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1393 unsigned HOST_WIDE_INT, int);
1394
1395 static void
1396 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1397 unsigned HOST_WIDE_INT high_bits,
1398 unsigned HOST_WIDE_INT low_immediate,
1399 int shift_count)
1400 {
1401 rtx temp2 = op0;
1402
1403 if ((high_bits & 0xfffffc00) != 0)
1404 {
1405 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1406 if ((high_bits & ~0xfffffc00) != 0)
1407 emit_insn (gen_rtx_SET (VOIDmode, op0,
1408 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1409 else
1410 temp2 = temp;
1411 }
1412 else
1413 {
1414 emit_insn (gen_safe_SET64 (temp, high_bits));
1415 temp2 = temp;
1416 }
1417
1418 /* Now shift it up into place. */
1419 emit_insn (gen_rtx_SET (VOIDmode, op0,
1420 gen_rtx_ASHIFT (DImode, temp2,
1421 GEN_INT (shift_count))));
1422
1423 /* If there is a low immediate part piece, finish up by
1424 putting that in as well. */
1425 if (low_immediate != 0)
1426 emit_insn (gen_rtx_SET (VOIDmode, op0,
1427 gen_safe_OR64 (op0, low_immediate)));
1428 }
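/* E.g. high_bits == 0x12345678, low_immediate == 0x123 and
   shift_count == 32 yields, roughly, the classic four-instruction
   sequence

     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0
     sllx  %op0, 32, %op0
     or    %op0, 0x123, %op0  */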
1429
1430 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1431 unsigned HOST_WIDE_INT);
1432
1433 /* Full 64-bit constant decomposition. Even though this is the
1434 'worst' case, we still optimize a few things away. */
1435 static void
1436 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1437 unsigned HOST_WIDE_INT high_bits,
1438 unsigned HOST_WIDE_INT low_bits)
1439 {
1440 rtx sub_temp;
1441
1442 if (reload_in_progress || reload_completed)
1443 sub_temp = op0;
1444 else
1445 sub_temp = gen_reg_rtx (DImode);
1446
1447 if ((high_bits & 0xfffffc00) != 0)
1448 {
1449 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1450 if ((high_bits & ~0xfffffc00) != 0)
1451 emit_insn (gen_rtx_SET (VOIDmode,
1452 sub_temp,
1453 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1454 else
1455 sub_temp = temp;
1456 }
1457 else
1458 {
1459 emit_insn (gen_safe_SET64 (temp, high_bits));
1460 sub_temp = temp;
1461 }
1462
1463 if (!reload_in_progress && !reload_completed)
1464 {
1465 rtx temp2 = gen_reg_rtx (DImode);
1466 rtx temp3 = gen_reg_rtx (DImode);
1467 rtx temp4 = gen_reg_rtx (DImode);
1468
1469 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1470 gen_rtx_ASHIFT (DImode, sub_temp,
1471 GEN_INT (32))));
1472
1473 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1474 if ((low_bits & ~0xfffffc00) != 0)
1475 {
1476 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1477 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1478 emit_insn (gen_rtx_SET (VOIDmode, op0,
1479 gen_rtx_PLUS (DImode, temp4, temp3)));
1480 }
1481 else
1482 {
1483 emit_insn (gen_rtx_SET (VOIDmode, op0,
1484 gen_rtx_PLUS (DImode, temp4, temp2)));
1485 }
1486 }
1487 else
1488 {
1489 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1490 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1491 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1492 int to_shift = 12;
1493
1494 /* We are in the middle of reload, so this is really
1495 painful. However we do still make an attempt to
1496 avoid emitting truly stupid code. */
1497 if (low1 != const0_rtx)
1498 {
1499 emit_insn (gen_rtx_SET (VOIDmode, op0,
1500 gen_rtx_ASHIFT (DImode, sub_temp,
1501 GEN_INT (to_shift))));
1502 emit_insn (gen_rtx_SET (VOIDmode, op0,
1503 gen_rtx_IOR (DImode, op0, low1)));
1504 sub_temp = op0;
1505 to_shift = 12;
1506 }
1507 else
1508 {
1509 to_shift += 12;
1510 }
1511 if (low2 != const0_rtx)
1512 {
1513 emit_insn (gen_rtx_SET (VOIDmode, op0,
1514 gen_rtx_ASHIFT (DImode, sub_temp,
1515 GEN_INT (to_shift))));
1516 emit_insn (gen_rtx_SET (VOIDmode, op0,
1517 gen_rtx_IOR (DImode, op0, low2)));
1518 sub_temp = op0;
1519 to_shift = 8;
1520 }
1521 else
1522 {
1523 to_shift += 8;
1524 }
1525 emit_insn (gen_rtx_SET (VOIDmode, op0,
1526 gen_rtx_ASHIFT (DImode, sub_temp,
1527 GEN_INT (to_shift))));
1528 if (low3 != const0_rtx)
1529 emit_insn (gen_rtx_SET (VOIDmode, op0,
1530 gen_rtx_IOR (DImode, op0, low3)));
1531 /* phew... */
1532 }
1533 }
1534
1535 /* Analyze a 64-bit constant for certain properties. */
1536 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1537 unsigned HOST_WIDE_INT,
1538 int *, int *, int *);
1539
1540 static void
1541 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1542 unsigned HOST_WIDE_INT low_bits,
1543 int *hbsp, int *lbsp, int *abbasp)
1544 {
1545 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1546 int i;
1547
1548 lowest_bit_set = highest_bit_set = -1;
1549 i = 0;
1550 do
1551 {
1552 if ((lowest_bit_set == -1)
1553 && ((low_bits >> i) & 1))
1554 lowest_bit_set = i;
1555 if ((highest_bit_set == -1)
1556 && ((high_bits >> (32 - i - 1)) & 1))
1557 highest_bit_set = (64 - i - 1);
1558 }
1559 while (++i < 32
1560 && ((highest_bit_set == -1)
1561 || (lowest_bit_set == -1)));
1562 if (i == 32)
1563 {
1564 i = 0;
1565 do
1566 {
1567 if ((lowest_bit_set == -1)
1568 && ((high_bits >> i) & 1))
1569 lowest_bit_set = i + 32;
1570 if ((highest_bit_set == -1)
1571 && ((low_bits >> (32 - i - 1)) & 1))
1572 highest_bit_set = 32 - i - 1;
1573 }
1574 while (++i < 32
1575 && ((highest_bit_set == -1)
1576 || (lowest_bit_set == -1)));
1577 }
1578 /* If there are no bits set this should have gone out
1579 as one instruction! */
1580 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1581 all_bits_between_are_set = 1;
1582 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1583 {
1584 if (i < 32)
1585 {
1586 if ((low_bits & (1 << i)) != 0)
1587 continue;
1588 }
1589 else
1590 {
1591 if ((high_bits & (1 << (i - 32))) != 0)
1592 continue;
1593 }
1594 all_bits_between_are_set = 0;
1595 break;
1596 }
1597 *hbsp = highest_bit_set;
1598 *lbsp = lowest_bit_set;
1599 *abbasp = all_bits_between_are_set;
1600 }
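/* For example, 0x0000003fc0000000 (bits 30..37 set) comes back with
   *lbsp == 30, *hbsp == 37 and *abbasp == 1, which the callers below
   recognize as a candidate for a two-instruction set/shift sequence.  */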
1601
1602 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1603
1604 static int
1605 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1606 unsigned HOST_WIDE_INT low_bits)
1607 {
1608 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1609
1610 if (high_bits == 0
1611 || high_bits == 0xffffffff)
1612 return 1;
1613
1614 analyze_64bit_constant (high_bits, low_bits,
1615 &highest_bit_set, &lowest_bit_set,
1616 &all_bits_between_are_set);
1617
1618 if ((highest_bit_set == 63
1619 || lowest_bit_set == 0)
1620 && all_bits_between_are_set != 0)
1621 return 1;
1622
1623 if ((highest_bit_set - lowest_bit_set) < 21)
1624 return 1;
1625
1626 return 0;
1627 }
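/* Thus 0x00000000deadbeef (high_bits == 0) qualifies, as does
   0x0000003fc0000000 (an 8-bit run), while something like
   0x123456789abcdef0 does not.  */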
1628
1629 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1630 unsigned HOST_WIDE_INT,
1631 int, int);
1632
1633 static unsigned HOST_WIDE_INT
1634 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1635 unsigned HOST_WIDE_INT low_bits,
1636 int lowest_bit_set, int shift)
1637 {
1638 HOST_WIDE_INT hi, lo;
1639
1640 if (lowest_bit_set < 32)
1641 {
1642 lo = (low_bits >> lowest_bit_set) << shift;
1643 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1644 }
1645 else
1646 {
1647 lo = 0;
1648 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1649 }
1650 gcc_assert (! (hi & lo));
1651 return (hi | lo);
1652 }
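/* Continuing the example: high_bits == 0x3f, low_bits == 0xc0000000,
   lowest_bit_set == 30 and shift == 0 gives
   (0xc0000000 >> 30) | (0x3f << 2) == 0xff, the original run of bits
   refocused down at bit 0.  */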
1653
1654 /* Here we are sure to be arch64 and this is an integer constant
1655 being loaded into a register. Emit the most efficient
1656 insn sequence possible. Detection of all the 1-insn cases
1657 has been done already. */
1658 void
1659 sparc_emit_set_const64 (rtx op0, rtx op1)
1660 {
1661 unsigned HOST_WIDE_INT high_bits, low_bits;
1662 int lowest_bit_set, highest_bit_set;
1663 int all_bits_between_are_set;
1664 rtx temp = 0;
1665
1666 /* Sanity check that we know what we are working with. */
1667 gcc_assert (TARGET_ARCH64
1668 && (GET_CODE (op0) == SUBREG
1669 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1670
1671 if (reload_in_progress || reload_completed)
1672 temp = op0;
1673
1674 if (GET_CODE (op1) != CONST_INT)
1675 {
1676 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1677 return;
1678 }
1679
1680 if (! temp)
1681 temp = gen_reg_rtx (DImode);
1682
1683 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1684 low_bits = (INTVAL (op1) & 0xffffffff);
1685
1686 /* low_bits bits 0 --> 31
1687 high_bits bits 32 --> 63 */
1688
1689 analyze_64bit_constant (high_bits, low_bits,
1690 &highest_bit_set, &lowest_bit_set,
1691 &all_bits_between_are_set);
1692
1693 /* First try for a 2-insn sequence. */
1694
1695 /* These situations are preferred because the optimizer can
1696 * do more things with them:
1697 * 1) mov -1, %reg
1698 * sllx %reg, shift, %reg
1699 * 2) mov -1, %reg
1700 * srlx %reg, shift, %reg
1701 * 3) mov some_small_const, %reg
1702 * sllx %reg, shift, %reg
1703 */
1704 if (((highest_bit_set == 63
1705 || lowest_bit_set == 0)
1706 && all_bits_between_are_set != 0)
1707 || ((highest_bit_set - lowest_bit_set) < 12))
1708 {
1709 HOST_WIDE_INT the_const = -1;
1710 int shift = lowest_bit_set;
1711
1712 if ((highest_bit_set != 63
1713 && lowest_bit_set != 0)
1714 || all_bits_between_are_set == 0)
1715 {
1716 the_const =
1717 create_simple_focus_bits (high_bits, low_bits,
1718 lowest_bit_set, 0);
1719 }
1720 else if (lowest_bit_set == 0)
1721 shift = -(63 - highest_bit_set);
1722
1723 gcc_assert (SPARC_SIMM13_P (the_const));
1724 gcc_assert (shift != 0);
1725
1726 emit_insn (gen_safe_SET64 (temp, the_const));
1727 if (shift > 0)
1728 emit_insn (gen_rtx_SET (VOIDmode,
1729 op0,
1730 gen_rtx_ASHIFT (DImode,
1731 temp,
1732 GEN_INT (shift))));
1733 else if (shift < 0)
1734 emit_insn (gen_rtx_SET (VOIDmode,
1735 op0,
1736 gen_rtx_LSHIFTRT (DImode,
1737 temp,
1738 GEN_INT (-shift))));
1739 return;
1740 }
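/* So 0x0000003fc0000000 from the example above boils down to

     mov  0xff, %temp
     sllx %temp, 30, %op0  */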
1741
1742 /* Now a range of 22 or fewer bits set somewhere.
1743 * 1) sethi %hi(focus_bits), %reg
1744 * sllx %reg, shift, %reg
1745 * 2) sethi %hi(focus_bits), %reg
1746 * srlx %reg, shift, %reg
1747 */
1748 if ((highest_bit_set - lowest_bit_set) < 21)
1749 {
1750 unsigned HOST_WIDE_INT focus_bits =
1751 create_simple_focus_bits (high_bits, low_bits,
1752 lowest_bit_set, 10);
1753
1754 gcc_assert (SPARC_SETHI_P (focus_bits));
1755 gcc_assert (lowest_bit_set != 10);
1756
1757 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1758
1759 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1760 if (lowest_bit_set < 10)
1761 emit_insn (gen_rtx_SET (VOIDmode,
1762 op0,
1763 gen_rtx_LSHIFTRT (DImode, temp,
1764 GEN_INT (10 - lowest_bit_set))));
1765 else if (lowest_bit_set > 10)
1766 emit_insn (gen_rtx_SET (VOIDmode,
1767 op0,
1768 gen_rtx_ASHIFT (DImode, temp,
1769 GEN_INT (lowest_bit_set - 10))));
1770 return;
1771 }
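/* Example (illustrative): 0x000000fffff00000 has bits 20..39 set, so
   focus_bits is 0x3ffffc00; sethi loads that into bits 10..31 and,
   since lowest_bit_set (20) is greater than 10, an
     sllx %tmp, 10, %dst
   moves the field into place.  */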
1772
1773 /* 1) sethi %hi(low_bits), %reg
1774 * or %reg, %lo(low_bits), %reg
1775 * 2) sethi %hi(~low_bits), %reg
1776 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1777 */
1778 if (high_bits == 0
1779 || high_bits == 0xffffffff)
1780 {
1781 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1782 (high_bits == 0xffffffff));
1783 return;
1784 }
1785
1786 /* Now, try 3-insn sequences. */
1787
1788 /* 1) sethi %hi(high_bits), %reg
1789 * or %reg, %lo(high_bits), %reg
1790 * sllx %reg, 32, %reg
1791 */
1792 if (low_bits == 0)
1793 {
1794 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1795 return;
1796 }
1797
1798 /* We may be able to do something quick
1799 when the constant is negated, so try that. */
1800 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1801 (~low_bits) & 0xfffffc00))
1802 {
1803 /* NOTE: The trailing bits get XOR'd so we need the
1804 non-negated bits, not the negated ones. */
1805 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1806
1807 if ((((~high_bits) & 0xffffffff) == 0
1808 && ((~low_bits) & 0x80000000) == 0)
1809 || (((~high_bits) & 0xffffffff) == 0xffffffff
1810 && ((~low_bits) & 0x80000000) != 0))
1811 {
1812 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1813
1814 if ((SPARC_SETHI_P (fast_int)
1815 && (~high_bits & 0xffffffff) == 0)
1816 || SPARC_SIMM13_P (fast_int))
1817 emit_insn (gen_safe_SET64 (temp, fast_int));
1818 else
1819 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1820 }
1821 else
1822 {
1823 rtx negated_const;
1824 negated_const = GEN_INT (((~low_bits) & 0xfffffc00)
1825 | (((HOST_WIDE_INT) ((~high_bits) & 0xffffffff)) << 32));
1826 sparc_emit_set_const64 (temp, negated_const);
1827 }
1828
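/* The XOR trick: if TEMP holds ~C with its low 10 bits zero, then
   TEMP ^ (-0x400 | trailing_bits) flips bits 10..63 back to C's
   values and writes C's low 10 bits (TRAILING_BITS) directly,
   since the sign-extended simm13 has bits 10..63 all set.  */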
1829 /* If we are XOR'ing with -1, then we should emit a one's complement
1830 instead. This way the combiner will notice logical operations
1831 such as ANDN later on and substitute. */
1832 if (trailing_bits == 0x3ff)
1833 {
1834 emit_insn (gen_rtx_SET (VOIDmode, op0,
1835 gen_rtx_NOT (DImode, temp)));
1836 }
1837 else
1838 {
1839 emit_insn (gen_rtx_SET (VOIDmode,
1840 op0,
1841 gen_safe_XOR64 (temp,
1842 (-0x400 | trailing_bits))));
1843 }
1844 return;
1845 }
1846
1847 /* 1) sethi %hi(xxx), %reg
1848 * or %reg, %lo(xxx), %reg
1849 * sllx %reg, yyy, %reg
1850 *
1851 * ??? This is just a generalized version of the low_bits==0
1852 * thing above, FIXME...
1853 */
1854 if ((highest_bit_set - lowest_bit_set) < 32)
1855 {
1856 unsigned HOST_WIDE_INT focus_bits =
1857 create_simple_focus_bits (high_bits, low_bits,
1858 lowest_bit_set, 0);
1859
1860 /* We can't get here in this state. */
1861 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1862
1863 /* So what we know is that the set bits straddle the
1864 middle of the 64-bit word. */
1865 sparc_emit_set_const64_quick2 (op0, temp,
1866 focus_bits, 0,
1867 lowest_bit_set);
1868 return;
1869 }
1870
1871 /* 1) sethi %hi(high_bits), %reg
1872 * or %reg, %lo(high_bits), %reg
1873 * sllx %reg, 32, %reg
1874 * or %reg, low_bits, %reg
1875 */
1876 if (SPARC_SIMM13_P (low_bits)
1877 && ((int) low_bits > 0))
1878 {
1879 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1880 return;
1881 }
1882
1883 /* The easiest way when all else fails, is full decomposition. */
1884 #if 0
1885 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1886 high_bits, low_bits, ~high_bits, ~low_bits);
1887 #endif
1888 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1889 }
1890 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1891
1892 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1893 return the mode to be used for the comparison. For floating-point,
1894 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1895 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1896 processing is needed. */
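/* Roughly speaking, the _NOOV modes record that the condition codes
   will be set by the arithmetic insn itself (an addcc, subcc, etc.),
   whose overflow bit is not meaningful for a comparison against zero,
   so only conditions that ignore V may be tested in those modes.  */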
1897
1898 enum machine_mode
1899 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1900 {
1901 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1902 {
1903 switch (op)
1904 {
1905 case EQ:
1906 case NE:
1907 case UNORDERED:
1908 case ORDERED:
1909 case UNLT:
1910 case UNLE:
1911 case UNGT:
1912 case UNGE:
1913 case UNEQ:
1914 case LTGT:
1915 return CCFPmode;
1916
1917 case LT:
1918 case LE:
1919 case GT:
1920 case GE:
1921 return CCFPEmode;
1922
1923 default:
1924 gcc_unreachable ();
1925 }
1926 }
1927 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1928 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1929 {
1930 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1931 return CCX_NOOVmode;
1932 else
1933 return CC_NOOVmode;
1934 }
1935 else
1936 {
1937 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1938 return CCXmode;
1939 else
1940 return CCmode;
1941 }
1942 }
1943
1944 /* X and Y are two things to compare using CODE. Emit the compare insn and
1945 return the rtx for the cc reg in the proper mode. */
1946
1947 rtx
1948 gen_compare_reg (enum rtx_code code)
1949 {
1950 rtx x = sparc_compare_op0;
1951 rtx y = sparc_compare_op1;
1952 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1953 rtx cc_reg;
1954
1955 if (sparc_compare_emitted != NULL_RTX)
1956 {
1957 cc_reg = sparc_compare_emitted;
1958 sparc_compare_emitted = NULL_RTX;
1959 return cc_reg;
1960 }
1961
1962 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1963 fcc regs (cse can't tell they're really call clobbered regs and will
1964 remove a duplicate comparison even if there is an intervening function
1965 call - it will then try to reload the cc reg via an int reg which is why
1966 we need the movcc patterns). It is possible to provide the movcc
1967 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1968 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1969 to tell cse that CCFPE mode registers (even pseudos) are call
1970 clobbered. */
1971
1972 /* ??? This is an experiment. Rather than making changes to cse which may
1973 or may not be easy/clean, we do our own cse. This is possible because
1974 we will generate hard registers. Cse knows they're call clobbered (it
1975 doesn't know the same thing about pseudos). If we guess wrong, no big
1976 deal, but if we win, great! */
1977
1978 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1979 #if 1 /* experiment */
1980 {
1981 int reg;
1982 /* We cycle through the registers to ensure they're all exercised. */
1983 static int next_fcc_reg = 0;
1984 /* Previous x,y for each fcc reg. */
1985 static rtx prev_args[4][2];
1986
1987 /* Scan prev_args for x,y. */
1988 for (reg = 0; reg < 4; reg++)
1989 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1990 break;
1991 if (reg == 4)
1992 {
1993 reg = next_fcc_reg;
1994 prev_args[reg][0] = x;
1995 prev_args[reg][1] = y;
1996 next_fcc_reg = (next_fcc_reg + 1) & 3;
1997 }
1998 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
1999 }
2000 #else
2001 cc_reg = gen_reg_rtx (mode);
2002 #endif /* ! experiment */
2003 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2004 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2005 else
2006 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2007
2008 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2009 gen_rtx_COMPARE (mode, x, y)));
2010
2011 return cc_reg;
2012 }
2013
2014 /* This function is used for v9 only.
2015 CODE is the code for an Scc's comparison.
2016 OPERANDS[0] is the target of the Scc insn.
2017 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2018 been generated yet).
2019
2020 This function is needed to turn
2021
2022 (set (reg:SI 110)
2023 (gt (reg:CCX 100 %icc)
2024 (const_int 0)))
2025 into
2026 (set (reg:SI 110)
2027 (gt:DI (reg:CCX 100 %icc)
2028 (const_int 0)))
2029
2030 I.e. the instruction recognizer needs to see the mode of the comparison to
2031 find the right instruction. We could use "gt:DI" right in the
2032 define_expand, but leaving it out allows us to handle DI, SI, etc.
2033
2034 We refer to the global sparc compare operands sparc_compare_op0 and
2035 sparc_compare_op1. */
2036
2037 int
2038 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2039 {
2040 if (! TARGET_ARCH64
2041 && (GET_MODE (sparc_compare_op0) == DImode
2042 || GET_MODE (operands[0]) == DImode))
2043 return 0;
2044
2045 /* Try to use the movrCC insns. */
2046 if (TARGET_ARCH64
2047 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2048 && sparc_compare_op1 == const0_rtx
2049 && v9_regcmp_p (compare_code))
2050 {
2051 rtx op0 = sparc_compare_op0;
2052 rtx temp;
2053
2054 /* Special case for op0 != 0. This can be done with one instruction if
2055 operands[0] == sparc_compare_op0. */
2056
2057 if (compare_code == NE
2058 && GET_MODE (operands[0]) == DImode
2059 && rtx_equal_p (op0, operands[0]))
2060 {
2061 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2062 gen_rtx_IF_THEN_ELSE (DImode,
2063 gen_rtx_fmt_ee (compare_code, DImode,
2064 op0, const0_rtx),
2065 const1_rtx,
2066 operands[0])));
2067 return 1;
2068 }
2069
2070 if (reg_overlap_mentioned_p (operands[0], op0))
2071 {
2072 /* Handle the case where operands[0] == sparc_compare_op0.
2073 We "early clobber" the result. */
2074 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2075 emit_move_insn (op0, sparc_compare_op0);
2076 }
2077
2078 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2079 if (GET_MODE (op0) != DImode)
2080 {
2081 temp = gen_reg_rtx (DImode);
2082 convert_move (temp, op0, 0);
2083 }
2084 else
2085 temp = op0;
2086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2087 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2088 gen_rtx_fmt_ee (compare_code, DImode,
2089 temp, const0_rtx),
2090 const1_rtx,
2091 operands[0])));
2092 return 1;
2093 }
2094 else
2095 {
2096 operands[1] = gen_compare_reg (compare_code);
2097
2098 switch (GET_MODE (operands[1]))
2099 {
2100 case CCmode :
2101 case CCXmode :
2102 case CCFPEmode :
2103 case CCFPmode :
2104 break;
2105 default :
2106 gcc_unreachable ();
2107 }
2108 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2109 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2110 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2111 gen_rtx_fmt_ee (compare_code,
2112 GET_MODE (operands[1]),
2113 operands[1], const0_rtx),
2114 const1_rtx, operands[0])));
2115 return 1;
2116 }
2117 }
2118
2119 /* Emit a conditional jump insn for the v9 architecture using comparison code
2120 CODE and jump target LABEL.
2121 This function exists to take advantage of the v9 brxx insns. */
2122
2123 void
2124 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2125 {
2126 gcc_assert (sparc_compare_emitted == NULL_RTX);
2127 emit_jump_insn (gen_rtx_SET (VOIDmode,
2128 pc_rtx,
2129 gen_rtx_IF_THEN_ELSE (VOIDmode,
2130 gen_rtx_fmt_ee (code, GET_MODE (op0),
2131 op0, const0_rtx),
2132 gen_rtx_LABEL_REF (VOIDmode, label),
2133 pc_rtx)));
2134 }
2135
2136 /* Generate a DFmode part of a hard TFmode register.
2137 REG is the TFmode hard register, LOW is 1 for the
2138 low 64 bits of the register and 0 otherwise. */
2140 rtx
2141 gen_df_reg (rtx reg, int low)
2142 {
2143 int regno = REGNO (reg);
2144
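/* A DFmode half of a TFmode value starts two 32-bit FP registers up
   from the first, but only one integer register up when the TFmode
   value lives in ARCH64 integer registers; hence the step below.  */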
2145 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2146 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2147 return gen_rtx_REG (DFmode, regno);
2148 }
2149
2150 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2151 Unlike normal calls, TFmode operands are passed by reference. It is
2152 assumed that no more than 3 operands are required. */
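/* For example, a TFmode addition becomes (per the SPARC V9 quad-float
   ABI) a call equivalent to _Qp_add (&result, &x, &y), with all three
   quads passed by reference.  */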
2153
2154 static void
2155 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2156 {
2157 rtx ret_slot = NULL, arg[3], func_sym;
2158 int i;
2159
2160 /* We only expect to be called for conversions, unary, and binary ops. */
2161 gcc_assert (nargs == 2 || nargs == 3);
2162
2163 for (i = 0; i < nargs; ++i)
2164 {
2165 rtx this_arg = operands[i];
2166 rtx this_slot;
2167
2168 /* TFmode arguments and return values are passed by reference. */
2169 if (GET_MODE (this_arg) == TFmode)
2170 {
2171 int force_stack_temp;
2172
2173 force_stack_temp = 0;
2174 if (TARGET_BUGGY_QP_LIB && i == 0)
2175 force_stack_temp = 1;
2176
2177 if (GET_CODE (this_arg) == MEM
2178 && ! force_stack_temp)
2179 this_arg = XEXP (this_arg, 0);
2180 else if (CONSTANT_P (this_arg)
2181 && ! force_stack_temp)
2182 {
2183 this_slot = force_const_mem (TFmode, this_arg);
2184 this_arg = XEXP (this_slot, 0);
2185 }
2186 else
2187 {
2188 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2189
2190 /* Operand 0 is the return value. We'll copy it out later. */
2191 if (i > 0)
2192 emit_move_insn (this_slot, this_arg);
2193 else
2194 ret_slot = this_slot;
2195
2196 this_arg = XEXP (this_slot, 0);
2197 }
2198 }
2199
2200 arg[i] = this_arg;
2201 }
2202
2203 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2204
2205 if (GET_MODE (operands[0]) == TFmode)
2206 {
2207 if (nargs == 2)
2208 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2209 arg[0], GET_MODE (arg[0]),
2210 arg[1], GET_MODE (arg[1]));
2211 else
2212 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2213 arg[0], GET_MODE (arg[0]),
2214 arg[1], GET_MODE (arg[1]),
2215 arg[2], GET_MODE (arg[2]));
2216
2217 if (ret_slot)
2218 emit_move_insn (operands[0], ret_slot);
2219 }
2220 else
2221 {
2222 rtx ret;
2223
2224 gcc_assert (nargs == 2);
2225
2226 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2227 GET_MODE (operands[0]), 1,
2228 arg[1], GET_MODE (arg[1]));
2229
2230 if (ret != operands[0])
2231 emit_move_insn (operands[0], ret);
2232 }
2233 }
2234
2235 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2236
2237 static void
2238 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2239 {
2240 const char *func;
2241
2242 switch (code)
2243 {
2244 case PLUS:
2245 func = "_Qp_add";
2246 break;
2247 case MINUS:
2248 func = "_Qp_sub";
2249 break;
2250 case MULT:
2251 func = "_Qp_mul";
2252 break;
2253 case DIV:
2254 func = "_Qp_div";
2255 break;
2256 default:
2257 gcc_unreachable ();
2258 }
2259
2260 emit_soft_tfmode_libcall (func, 3, operands);
2261 }
2262
2263 static void
2264 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2265 {
2266 const char *func;
2267
2268 gcc_assert (code == SQRT);
2269 func = "_Qp_sqrt";
2270
2271 emit_soft_tfmode_libcall (func, 2, operands);
2272 }
2273
2274 static void
2275 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2276 {
2277 const char *func;
2278
2279 switch (code)
2280 {
2281 case FLOAT_EXTEND:
2282 switch (GET_MODE (operands[1]))
2283 {
2284 case SFmode:
2285 func = "_Qp_stoq";
2286 break;
2287 case DFmode:
2288 func = "_Qp_dtoq";
2289 break;
2290 default:
2291 gcc_unreachable ();
2292 }
2293 break;
2294
2295 case FLOAT_TRUNCATE:
2296 switch (GET_MODE (operands[0]))
2297 {
2298 case SFmode:
2299 func = "_Qp_qtos";
2300 break;
2301 case DFmode:
2302 func = "_Qp_qtod";
2303 break;
2304 default:
2305 gcc_unreachable ();
2306 }
2307 break;
2308
2309 case FLOAT:
2310 switch (GET_MODE (operands[1]))
2311 {
2312 case SImode:
2313 func = "_Qp_itoq";
2314 break;
2315 case DImode:
2316 func = "_Qp_xtoq";
2317 break;
2318 default:
2319 gcc_unreachable ();
2320 }
2321 break;
2322
2323 case UNSIGNED_FLOAT:
2324 switch (GET_MODE (operands[1]))
2325 {
2326 case SImode:
2327 func = "_Qp_uitoq";
2328 break;
2329 case DImode:
2330 func = "_Qp_uxtoq";
2331 break;
2332 default:
2333 gcc_unreachable ();
2334 }
2335 break;
2336
2337 case FIX:
2338 switch (GET_MODE (operands[0]))
2339 {
2340 case SImode:
2341 func = "_Qp_qtoi";
2342 break;
2343 case DImode:
2344 func = "_Qp_qtox";
2345 break;
2346 default:
2347 gcc_unreachable ();
2348 }
2349 break;
2350
2351 case UNSIGNED_FIX:
2352 switch (GET_MODE (operands[0]))
2353 {
2354 case SImode:
2355 func = "_Qp_qtoui";
2356 break;
2357 case DImode:
2358 func = "_Qp_qtoux";
2359 break;
2360 default:
2361 gcc_unreachable ();
2362 }
2363 break;
2364
2365 default:
2366 gcc_unreachable ();
2367 }
2368
2369 emit_soft_tfmode_libcall (func, 2, operands);
2370 }
2371
2372 /* Expand a hard-float tfmode operation. All arguments must be in
2373 registers. */
2374
2375 static void
2376 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2377 {
2378 rtx op, dest;
2379
2380 if (GET_RTX_CLASS (code) == RTX_UNARY)
2381 {
2382 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2383 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2384 }
2385 else
2386 {
2387 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2388 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2389 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2390 operands[1], operands[2]);
2391 }
2392
2393 if (register_operand (operands[0], VOIDmode))
2394 dest = operands[0];
2395 else
2396 dest = gen_reg_rtx (GET_MODE (operands[0]));
2397
2398 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2399
2400 if (dest != operands[0])
2401 emit_move_insn (operands[0], dest);
2402 }
2403
2404 void
2405 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2406 {
2407 if (TARGET_HARD_QUAD)
2408 emit_hard_tfmode_operation (code, operands);
2409 else
2410 emit_soft_tfmode_binop (code, operands);
2411 }
2412
2413 void
2414 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2415 {
2416 if (TARGET_HARD_QUAD)
2417 emit_hard_tfmode_operation (code, operands);
2418 else
2419 emit_soft_tfmode_unop (code, operands);
2420 }
2421
2422 void
2423 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2424 {
2425 if (TARGET_HARD_QUAD)
2426 emit_hard_tfmode_operation (code, operands);
2427 else
2428 emit_soft_tfmode_cvt (code, operands);
2429 }
2430
2431 /* Return nonzero if a branch/jump/call instruction will be emitting
2432 a nop into its delay slot. */
2433
2434 int
2435 empty_delay_slot (rtx insn)
2436 {
2437 rtx seq;
2438
2439 /* If no previous instruction (should not happen), return true. */
2440 if (PREV_INSN (insn) == NULL)
2441 return 1;
2442
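/* When the delay slot has been filled, dbr_schedule splices the
   branch and its slot insn into a SEQUENCE; an insn still standing
   alone will therefore get a nop in its slot.  */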
2443 seq = NEXT_INSN (PREV_INSN (insn));
2444 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2445 return 0;
2446
2447 return 1;
2448 }
2449
2450 /* Return nonzero if TRIAL can go into the call delay slot. */
2451
2452 int
2453 tls_call_delay (rtx trial)
2454 {
2455 rtx pat;
2456
2457 /* Binutils allows
2458 call __tls_get_addr, %tgd_call (foo)
2459 add %l7, %o0, %o0, %tgd_add (foo)
2460 while Sun as/ld does not. */
2461 if (TARGET_GNU_TLS || !TARGET_TLS)
2462 return 1;
2463
2464 pat = PATTERN (trial);
2465
2466 /* We must reject tgd_add{32|64}, i.e.
2467 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2468 and tldm_add{32|64}, i.e.
2469 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2470 for Sun as/ld. */
2471 if (GET_CODE (pat) == SET
2472 && GET_CODE (SET_SRC (pat)) == PLUS)
2473 {
2474 rtx unspec = XEXP (SET_SRC (pat), 1);
2475
2476 if (GET_CODE (unspec) == UNSPEC
2477 && (XINT (unspec, 1) == UNSPEC_TLSGD
2478 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2479 return 0;
2480 }
2481
2482 return 1;
2483 }
2484
2485 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2486 instruction. RETURN_P is true if the v9 variant 'return' is to be
2487 considered in the test too.
2488
2489 TRIAL must be a SET whose destination is a REG appropriate for the
2490 'restore' instruction or, if RETURN_P is true, for the 'return'
2491 instruction. */
2492
2493 static int
2494 eligible_for_restore_insn (rtx trial, bool return_p)
2495 {
2496 rtx pat = PATTERN (trial);
2497 rtx src = SET_SRC (pat);
2498
2499 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2500 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2501 && arith_operand (src, GET_MODE (src)))
2502 {
2503 if (TARGET_ARCH64)
2504 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2505 else
2506 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2507 }
2508
2509 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2510 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2511 && arith_double_operand (src, GET_MODE (src)))
2512 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2513
2514 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2515 else if (! TARGET_FPU && register_operand (src, SFmode))
2516 return 1;
2517
2518 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2519 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2520 return 1;
2521
2522 /* If we have the 'return' instruction, anything that does not use
2523 local or output registers and can go into a delay slot wins. */
2524 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2525 && (get_attr_in_uncond_branch_delay (trial)
2526 == IN_UNCOND_BRANCH_DELAY_TRUE))
2527 return 1;
2528
2529 /* The 'restore src1,src2,dest' pattern for SImode. */
2530 else if (GET_CODE (src) == PLUS
2531 && register_operand (XEXP (src, 0), SImode)
2532 && arith_operand (XEXP (src, 1), SImode))
2533 return 1;
2534
2535 /* The 'restore src1,src2,dest' pattern for DImode. */
2536 else if (GET_CODE (src) == PLUS
2537 && register_operand (XEXP (src, 0), DImode)
2538 && arith_double_operand (XEXP (src, 1), DImode))
2539 return 1;
2540
2541 /* The 'restore src1,%lo(src2),dest' pattern. */
2542 else if (GET_CODE (src) == LO_SUM
2543 && ! TARGET_CM_MEDMID
2544 && ((register_operand (XEXP (src, 0), SImode)
2545 && immediate_operand (XEXP (src, 1), SImode))
2546 || (TARGET_ARCH64
2547 && register_operand (XEXP (src, 0), DImode)
2548 && immediate_operand (XEXP (src, 1), DImode))))
2549 return 1;
2550
2551 /* The 'restore src,src,dest' pattern. */
2552 else if (GET_CODE (src) == ASHIFT
2553 && (register_operand (XEXP (src, 0), SImode)
2554 || register_operand (XEXP (src, 0), DImode))
2555 && XEXP (src, 1) == const1_rtx)
2556 return 1;
2557
2558 return 0;
2559 }
2560
2561 /* Return nonzero if TRIAL can go into the function return's
2562 delay slot. */
2563
2564 int
2565 eligible_for_return_delay (rtx trial)
2566 {
2567 rtx pat;
2568
2569 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2570 return 0;
2571
2572 if (get_attr_length (trial) != 1)
2573 return 0;
2574
2575 /* If there are any call-saved registers, we would have to scan TRIAL
2576 to check that it does not reference them. For now just make it easy. */
2577 if (num_gfregs)
2578 return 0;
2579
2580 /* If the function uses __builtin_eh_return, the eh_return machinery
2581 occupies the delay slot. */
2582 if (current_function_calls_eh_return)
2583 return 0;
2584
2585 /* In the case of a true leaf function, anything can go into the slot. */
2586 if (sparc_leaf_function_p)
2587 return get_attr_in_uncond_branch_delay (trial)
2588 == IN_UNCOND_BRANCH_DELAY_TRUE;
2589
2590 pat = PATTERN (trial);
2591
2592 /* Otherwise, only operations which can be done in tandem with
2593 a `restore' or `return' insn can go into the delay slot. */
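/* Registers 8..23 are this window's %o and %l registers, which are no
   longer visible once the register window has been popped.  */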
2594 if (GET_CODE (SET_DEST (pat)) != REG
2595 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2596 return 0;
2597
2598 /* If this instruction sets up a floating-point register and we have a
2599 return instruction, it can probably go in. But restore will not work
2600 with FP_REGS. */
2601 if (REGNO (SET_DEST (pat)) >= 32)
2602 return (TARGET_V9
2603 && ! epilogue_renumber (&pat, 1)
2604 && (get_attr_in_uncond_branch_delay (trial)
2605 == IN_UNCOND_BRANCH_DELAY_TRUE));
2606
2607 return eligible_for_restore_insn (trial, true);
2608 }
2609
2610 /* Return nonzero if TRIAL can go into the sibling call's
2611 delay slot. */
2612
2613 int
2614 eligible_for_sibcall_delay (rtx trial)
2615 {
2616 rtx pat;
2617
2618 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2619 return 0;
2620
2621 if (get_attr_length (trial) != 1)
2622 return 0;
2623
2624 pat = PATTERN (trial);
2625
2626 if (sparc_leaf_function_p)
2627 {
2628 /* If the tail call is done using the call instruction,
2629 we have to restore %o7 in the delay slot. */
2630 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2631 return 0;
2632
2633 /* %g1 is used to build the function address */
2634 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2635 return 0;
2636
2637 return 1;
2638 }
2639
2640 /* Otherwise, only operations which can be done in tandem with
2641 a `restore' insn can go into the delay slot. */
2642 if (GET_CODE (SET_DEST (pat)) != REG
2643 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2644 || REGNO (SET_DEST (pat)) >= 32)
2645 return 0;
2646
2647 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2648 in most cases. */
2649 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2650 return 0;
2651
2652 return eligible_for_restore_insn (trial, false);
2653 }
2654
2655 int
2656 short_branch (int uid1, int uid2)
2657 {
2658 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2659
2660 /* Leave a few words of "slop". */
2661 if (delta >= -1023 && delta <= 1022)
2662 return 1;
2663
2664 return 0;
2665 }
2666
2667 /* Return nonzero if REG is not used after INSN.
2668 We assume REG is a reload reg, and therefore does
2669 not live past labels or calls or jumps. */
2670 int
2671 reg_unused_after (rtx reg, rtx insn)
2672 {
2673 enum rtx_code code, prev_code = UNKNOWN;
2674
2675 while ((insn = NEXT_INSN (insn)))
2676 {
2677 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2678 return 1;
2679
2680 code = GET_CODE (insn);
2681 if (code == CODE_LABEL)
2682 return 1;
2683
2684 if (INSN_P (insn))
2685 {
2686 rtx set = single_set (insn);
2687 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2688 if (set && in_src)
2689 return 0;
2690 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2691 return 1;
2692 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2693 return 0;
2694 }
2695 prev_code = code;
2696 }
2697 return 1;
2698 }
2699
2700 /* Determine if it's legal to put X into the constant pool. This
2701 is not possible if X contains the address of a symbol that is
2702 not constant (TLS) or not known at final link time (PIC). */
2703
2704 static bool
2705 sparc_cannot_force_const_mem (rtx x)
2706 {
2707 switch (GET_CODE (x))
2708 {
2709 case CONST_INT:
2710 case CONST_DOUBLE:
2711 case CONST_VECTOR:
2712 /* Accept all non-symbolic constants. */
2713 return false;
2714
2715 case LABEL_REF:
2716 /* Labels are OK iff we are non-PIC. */
2717 return flag_pic != 0;
2718
2719 case SYMBOL_REF:
2720 /* 'Naked' TLS symbol references are never OK,
2721 non-TLS symbols are OK iff we are non-PIC. */
2722 if (SYMBOL_REF_TLS_MODEL (x))
2723 return true;
2724 else
2725 return flag_pic != 0;
2726
2727 case CONST:
2728 return sparc_cannot_force_const_mem (XEXP (x, 0));
2729 case PLUS:
2730 case MINUS:
2731 return sparc_cannot_force_const_mem (XEXP (x, 0))
2732 || sparc_cannot_force_const_mem (XEXP (x, 1));
2733 case UNSPEC:
2734 return true;
2735 default:
2736 gcc_unreachable ();
2737 }
2738 }
2739
2740 /* PIC support. */
2741 static GTY(()) char pic_helper_symbol_name[256];
2742 static GTY(()) rtx pic_helper_symbol;
2743 static GTY(()) bool pic_helper_emitted_p = false;
2744 static GTY(()) rtx global_offset_table;
2745
2746 /* Ensure that we are not using patterns that are not OK with PIC. */
2747
2748 int
2749 check_pic (int i)
2750 {
2751 switch (flag_pic)
2752 {
2753 case 1:
2754 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2755 && (GET_CODE (recog_data.operand[i]) != CONST
2756 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2757 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2758 == global_offset_table)
2759 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2760 == CONST))));
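/* Fall through.  */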
2761 case 2:
2762 default:
2763 return 1;
2764 }
2765 }
2766
2767 /* Return true if X is an address which needs a temporary register when
2768 reloaded while generating PIC code. */
2769
2770 int
2771 pic_address_needs_scratch (rtx x)
2772 {
2773 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2774 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2775 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2776 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2777 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2778 return 1;
2779
2780 return 0;
2781 }
2782
2783 /* Determine if a given RTX is a valid constant. We already know this
2784 satisfies CONSTANT_P. */
2785
2786 bool
2787 legitimate_constant_p (rtx x)
2788 {
2789 rtx inner;
2790
2791 switch (GET_CODE (x))
2792 {
2793 case SYMBOL_REF:
2794 /* TLS symbols are not constant. */
2795 if (SYMBOL_REF_TLS_MODEL (x))
2796 return false;
2797 break;
2798
2799 case CONST:
2800 inner = XEXP (x, 0);
2801
2802 /* Offsets of TLS symbols are never valid.
2803 Discourage CSE from creating them. */
2804 if (GET_CODE (inner) == PLUS
2805 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2806 return false;
2807 break;
2808
2809 case CONST_DOUBLE:
2810 if (GET_MODE (x) == VOIDmode)
2811 return true;
2812
2813 /* Floating point constants are generally not ok.
2814 The only exception is 0.0 in VIS. */
2815 if (TARGET_VIS
2816 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2817 && const_zero_operand (x, GET_MODE (x)))
2818 return true;
2819
2820 return false;
2821
2822 case CONST_VECTOR:
2823 /* Vector constants are generally not ok.
2824 The only exception is 0 in VIS. */
2825 if (TARGET_VIS
2826 && const_zero_operand (x, GET_MODE (x)))
2827 return true;
2828
2829 return false;
2830
2831 default:
2832 break;
2833 }
2834
2835 return true;
2836 }
2837
2838 /* Determine if a given RTX is a valid constant address. */
2839
2840 bool
2841 constant_address_p (rtx x)
2842 {
2843 switch (GET_CODE (x))
2844 {
2845 case LABEL_REF:
2846 case CONST_INT:
2847 case HIGH:
2848 return true;
2849
2850 case CONST:
2851 if (flag_pic && pic_address_needs_scratch (x))
2852 return false;
2853 return legitimate_constant_p (x);
2854
2855 case SYMBOL_REF:
2856 return !flag_pic && legitimate_constant_p (x);
2857
2858 default:
2859 return false;
2860 }
2861 }
2862
2863 /* Nonzero if the constant value X is a legitimate general operand
2864 when generating PIC code. It is given that flag_pic is on and
2865 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2866
2867 bool
2868 legitimate_pic_operand_p (rtx x)
2869 {
2870 if (pic_address_needs_scratch (x))
2871 return false;
2872 if (SPARC_SYMBOL_REF_TLS_P (x)
2873 || (GET_CODE (x) == CONST
2874 && GET_CODE (XEXP (x, 0)) == PLUS
2875 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2876 return false;
2877 return true;
2878 }
2879
2880 /* Return nonzero if ADDR is a valid memory address.
2881 STRICT specifies whether strict register checking applies. */
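/* The forms accepted here are the SPARC addressing modes: REG,
   REG+REG, REG+SIMM13, LO_SUM (and a bare SMALL_INT constant), as in
   "ld [%l1+%l2]" or "ld [%l1+0x20]"; the code below pattern-matches
   PLUS and LO_SUM trees against those shapes.  */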
2882
2883 int
2884 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2885 {
2886 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2887
2888 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2889 rs1 = addr;
2890 else if (GET_CODE (addr) == PLUS)
2891 {
2892 rs1 = XEXP (addr, 0);
2893 rs2 = XEXP (addr, 1);
2894
2895 /* Canonicalize. REG comes first, if there are no regs,
2896 LO_SUM comes first. */
2897 if (!REG_P (rs1)
2898 && GET_CODE (rs1) != SUBREG
2899 && (REG_P (rs2)
2900 || GET_CODE (rs2) == SUBREG
2901 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2902 {
2903 rs1 = XEXP (addr, 1);
2904 rs2 = XEXP (addr, 0);
2905 }
2906
2907 if ((flag_pic == 1
2908 && rs1 == pic_offset_table_rtx
2909 && !REG_P (rs2)
2910 && GET_CODE (rs2) != SUBREG
2911 && GET_CODE (rs2) != LO_SUM
2912 && GET_CODE (rs2) != MEM
2913 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2914 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2915 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2916 || ((REG_P (rs1)
2917 || GET_CODE (rs1) == SUBREG)
2918 && RTX_OK_FOR_OFFSET_P (rs2)))
2919 {
2920 imm1 = rs2;
2921 rs2 = NULL;
2922 }
2923 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2924 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2925 {
2926 /* We prohibit REG + REG for TFmode when there are no quad move insns
2927 and we consequently need to split. We do this because REG+REG
2928 is not an offsettable address. If we get the situation in reload
2929 where source and destination of a movtf pattern are both MEMs with
2930 REG+REG address, then only one of them gets converted to an
2931 offsettable address. */
2932 if (mode == TFmode
2933 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2934 return 0;
2935
2936 /* We prohibit REG + REG on ARCH32 if not optimizing for
2937 DFmode/DImode because then mem_min_alignment is likely to be zero
2938 after reload and the forced split would lack a matching splitter
2939 pattern. */
2940 if (TARGET_ARCH32 && !optimize
2941 && (mode == DFmode || mode == DImode))
2942 return 0;
2943 }
2944 else if (USE_AS_OFFSETABLE_LO10
2945 && GET_CODE (rs1) == LO_SUM
2946 && TARGET_ARCH64
2947 && ! TARGET_CM_MEDMID
2948 && RTX_OK_FOR_OLO10_P (rs2))
2949 {
2950 rs2 = NULL;
2951 imm1 = XEXP (rs1, 1);
2952 rs1 = XEXP (rs1, 0);
2953 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2954 return 0;
2955 }
2956 }
2957 else if (GET_CODE (addr) == LO_SUM)
2958 {
2959 rs1 = XEXP (addr, 0);
2960 imm1 = XEXP (addr, 1);
2961
2962 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2963 return 0;
2964
2965 /* We can't allow TFmode in 32-bit mode, because an offset greater
2966 than the alignment (8) may cause the LO_SUM to overflow. */
2967 if (mode == TFmode && TARGET_ARCH32)
2968 return 0;
2969 }
2970 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2971 return 1;
2972 else
2973 return 0;
2974
2975 if (GET_CODE (rs1) == SUBREG)
2976 rs1 = SUBREG_REG (rs1);
2977 if (!REG_P (rs1))
2978 return 0;
2979
2980 if (rs2)
2981 {
2982 if (GET_CODE (rs2) == SUBREG)
2983 rs2 = SUBREG_REG (rs2);
2984 if (!REG_P (rs2))
2985 return 0;
2986 }
2987
2988 if (strict)
2989 {
2990 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2991 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2992 return 0;
2993 }
2994 else
2995 {
2996 if ((REGNO (rs1) >= 32
2997 && REGNO (rs1) != FRAME_POINTER_REGNUM
2998 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2999 || (rs2
3000 && (REGNO (rs2) >= 32
3001 && REGNO (rs2) != FRAME_POINTER_REGNUM
3002 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3003 return 0;
3004 }
3005 return 1;
3006 }
3007
3008 /* Construct the SYMBOL_REF for the tls_get_offset function. */
3009
3010 static GTY(()) rtx sparc_tls_symbol;
3011
3012 static rtx
3013 sparc_tls_get_addr (void)
3014 {
3015 if (!sparc_tls_symbol)
3016 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3017
3018 return sparc_tls_symbol;
3019 }
3020
3021 static rtx
3022 sparc_tls_got (void)
3023 {
3024 rtx temp;
3025 if (flag_pic)
3026 {
3027 current_function_uses_pic_offset_table = 1;
3028 return pic_offset_table_rtx;
3029 }
3030
3031 if (!global_offset_table)
3032 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3033 temp = gen_reg_rtx (Pmode);
3034 emit_move_insn (temp, global_offset_table);
3035 return temp;
3036 }
3037
3038 /* Return 1 if *X is a thread-local symbol. */
3039
3040 static int
3041 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3042 {
3043 return SPARC_SYMBOL_REF_TLS_P (*x);
3044 }
3045
3046 /* Return 1 if X contains a thread-local symbol. */
3047
3048 bool
3049 sparc_tls_referenced_p (rtx x)
3050 {
3051 if (!TARGET_HAVE_TLS)
3052 return false;
3053
3054 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3055 }
3056
3057 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3058 this (thread-local) address. */
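/* As an illustration, the global-dynamic case below corresponds to an
   assembly sequence along the lines of (using the relocation syntax
   shown in tls_call_delay above):
     sethi %tgd_hi22(sym), %tmp
     add   %tmp, %tgd_lo10(sym), %tmp
     add   %l7, %tmp, %o0, %tgd_add(sym)
     call  __tls_get_addr, %tgd_call(sym)  */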
3059
3060 rtx
3061 legitimize_tls_address (rtx addr)
3062 {
3063 rtx temp1, temp2, temp3, ret, o0, got, insn;
3064
3065 gcc_assert (! no_new_pseudos);
3066
3067 if (GET_CODE (addr) == SYMBOL_REF)
3068 switch (SYMBOL_REF_TLS_MODEL (addr))
3069 {
3070 case TLS_MODEL_GLOBAL_DYNAMIC:
3071 start_sequence ();
3072 temp1 = gen_reg_rtx (SImode);
3073 temp2 = gen_reg_rtx (SImode);
3074 ret = gen_reg_rtx (Pmode);
3075 o0 = gen_rtx_REG (Pmode, 8);
3076 got = sparc_tls_got ();
3077 emit_insn (gen_tgd_hi22 (temp1, addr));
3078 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3079 if (TARGET_ARCH32)
3080 {
3081 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3082 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3083 addr, const1_rtx));
3084 }
3085 else
3086 {
3087 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3088 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3089 addr, const1_rtx));
3090 }
3091 CALL_INSN_FUNCTION_USAGE (insn)
3092 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3093 CALL_INSN_FUNCTION_USAGE (insn));
3094 insn = get_insns ();
3095 end_sequence ();
3096 emit_libcall_block (insn, ret, o0, addr);
3097 break;
3098
3099 case TLS_MODEL_LOCAL_DYNAMIC:
3100 start_sequence ();
3101 temp1 = gen_reg_rtx (SImode);
3102 temp2 = gen_reg_rtx (SImode);
3103 temp3 = gen_reg_rtx (Pmode);
3104 ret = gen_reg_rtx (Pmode);
3105 o0 = gen_rtx_REG (Pmode, 8);
3106 got = sparc_tls_got ();
3107 emit_insn (gen_tldm_hi22 (temp1));
3108 emit_insn (gen_tldm_lo10 (temp2, temp1));
3109 if (TARGET_ARCH32)
3110 {
3111 emit_insn (gen_tldm_add32 (o0, got, temp2));
3112 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3113 const1_rtx));
3114 }
3115 else
3116 {
3117 emit_insn (gen_tldm_add64 (o0, got, temp2));
3118 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3119 const1_rtx));
3120 }
3121 CALL_INSN_FUNCTION_USAGE (insn)
3122 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3123 CALL_INSN_FUNCTION_USAGE (insn));
3124 insn = get_insns ();
3125 end_sequence ();
3126 emit_libcall_block (insn, temp3, o0,
3127 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3128 UNSPEC_TLSLD_BASE));
3129 temp1 = gen_reg_rtx (SImode);
3130 temp2 = gen_reg_rtx (SImode);
3131 emit_insn (gen_tldo_hix22 (temp1, addr));
3132 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3133 if (TARGET_ARCH32)
3134 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3135 else
3136 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3137 break;
3138
3139 case TLS_MODEL_INITIAL_EXEC:
3140 temp1 = gen_reg_rtx (SImode);
3141 temp2 = gen_reg_rtx (SImode);
3142 temp3 = gen_reg_rtx (Pmode);
3143 got = sparc_tls_got ();
3144 emit_insn (gen_tie_hi22 (temp1, addr));
3145 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3146 if (TARGET_ARCH32)
3147 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3148 else
3149 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3150 if (TARGET_SUN_TLS)
3151 {
3152 ret = gen_reg_rtx (Pmode);
3153 if (TARGET_ARCH32)
3154 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3155 temp3, addr));
3156 else
3157 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3158 temp3, addr));
3159 }
3160 else
3161 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3162 break;
3163
3164 case TLS_MODEL_LOCAL_EXEC:
3165 temp1 = gen_reg_rtx (Pmode);
3166 temp2 = gen_reg_rtx (Pmode);
3167 if (TARGET_ARCH32)
3168 {
3169 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3170 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3171 }
3172 else
3173 {
3174 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3175 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3176 }
3177 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3178 break;
3179
3180 default:
3181 gcc_unreachable ();
3182 }
3183
3184 else
3185 gcc_unreachable (); /* for now ... */
3186
3187 return ret;
3188 }
3189
3190
3191 /* Legitimize PIC addresses. If the address is already position-independent,
3192 we return ORIG. Newly generated position-independent addresses go into a
3193 reg. This is REG if nonzero, otherwise we allocate register(s) as
3194 necessary. */
3195
3196 rtx
3197 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3198 rtx reg)
3199 {
3200 if (GET_CODE (orig) == SYMBOL_REF)
3201 {
3202 rtx pic_ref, address;
3203 rtx insn;
3204
3205 if (reg == 0)
3206 {
3207 gcc_assert (! reload_in_progress && ! reload_completed);
3208 reg = gen_reg_rtx (Pmode);
3209 }
3210
3211 if (flag_pic == 2)
3212 {
3213 /* If not during reload, allocate another temp reg here for loading
3214 in the address, so that these instructions can be optimized
3215 properly. */
3216 rtx temp_reg = ((reload_in_progress || reload_completed)
3217 ? reg : gen_reg_rtx (Pmode));
3218
3219 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3220 won't get confused into thinking that these two instructions
3221 are loading in the true address of the symbol. If in the
3222 future a PIC rtx exists, that should be used instead. */
3223 if (TARGET_ARCH64)
3224 {
3225 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3226 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3227 }
3228 else
3229 {
3230 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3231 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3232 }
3233 address = temp_reg;
3234 }
3235 else
3236 address = orig;
3237
3238 pic_ref = gen_const_mem (Pmode,
3239 gen_rtx_PLUS (Pmode,
3240 pic_offset_table_rtx, address));
3241 current_function_uses_pic_offset_table = 1;
3242 insn = emit_move_insn (reg, pic_ref);
3243 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3244 by loop. */
3245 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3246 REG_NOTES (insn));
3247 return reg;
3248 }
3249 else if (GET_CODE (orig) == CONST)
3250 {
3251 rtx base, offset;
3252
3253 if (GET_CODE (XEXP (orig, 0)) == PLUS
3254 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3255 return orig;
3256
3257 if (reg == 0)
3258 {
3259 gcc_assert (! reload_in_progress && ! reload_completed);
3260 reg = gen_reg_rtx (Pmode);
3261 }
3262
3263 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3264 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3265 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3266 base == reg ? 0 : reg);
3267
3268 if (GET_CODE (offset) == CONST_INT)
3269 {
3270 if (SMALL_INT (offset))
3271 return plus_constant (base, INTVAL (offset));
3272 else if (! reload_in_progress && ! reload_completed)
3273 offset = force_reg (Pmode, offset);
3274 else
3275 /* If we reach here, then something is seriously wrong. */
3276 gcc_unreachable ();
3277 }
3278 return gen_rtx_PLUS (Pmode, base, offset);
3279 }
3280 else if (GET_CODE (orig) == LABEL_REF)
3281 /* ??? Why do we do this? */
3282 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3283 the register is live instead, in case it is eliminated. */
3284 current_function_uses_pic_offset_table = 1;
3285
3286 return orig;
3287 }
3288
3289 /* Try machine-dependent ways of modifying an illegitimate address X
3290 to be legitimate. If we find one, return the new, valid address.
3291
3292 OLDX is the address as it was before break_out_memory_refs was called.
3293 In some cases it is useful to look at this to decide what needs to be done.
3294
3295 MODE is the mode of the operand pointed to by X. */
3296
3297 rtx
3298 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3299 {
3300 rtx orig_x = x;
3301
3302 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3303 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3304 force_operand (XEXP (x, 0), NULL_RTX));
3305 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3306 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3307 force_operand (XEXP (x, 1), NULL_RTX));
3308 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3309 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3310 XEXP (x, 1));
3311 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3312 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3313 force_operand (XEXP (x, 1), NULL_RTX));
3314
3315 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3316 return x;
3317
3318 if (SPARC_SYMBOL_REF_TLS_P (x))
3319 x = legitimize_tls_address (x);
3320 else if (flag_pic)
3321 x = legitimize_pic_address (x, mode, 0);
3322 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3323 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3324 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3325 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3326 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3327 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3328 else if (GET_CODE (x) == SYMBOL_REF
3329 || GET_CODE (x) == CONST
3330 || GET_CODE (x) == LABEL_REF)
3331 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3332 return x;
3333 }
3334
3335 /* Emit the special PIC helper function. */
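/* The helper returns while adding the caller's %o7 (the address of
   the call that reached it) into the PIC register; paired with the
   load_pcrel_sym pattern emitted in load_pic_register below, this
   materializes _GLOBAL_OFFSET_TABLE_ in a PC-relative way.  */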
3336
3337 static void
3338 emit_pic_helper (void)
3339 {
3340 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3341 int align;
3342
3343 switch_to_section (text_section);
3344
3345 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3346 if (align > 0)
3347 ASM_OUTPUT_ALIGN (asm_out_file, align);
3348 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3349 if (flag_delayed_branch)
3350 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3351 pic_name, pic_name);
3352 else
3353 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3354 pic_name, pic_name);
3355
3356 pic_helper_emitted_p = true;
3357 }
3358
3359 /* Emit code to load the PIC register. */
3360
3361 static void
3362 load_pic_register (bool delay_pic_helper)
3363 {
3364 int orig_flag_pic = flag_pic;
3365
3366 /* If we haven't initialized the special PIC symbols, do so now. */
3367 if (!pic_helper_symbol_name[0])
3368 {
3369 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3370 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3371 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3372 }
3373
3374 /* If we haven't emitted the special PIC helper function, do so now unless
3375 we are requested to delay it. */
3376 if (!delay_pic_helper && !pic_helper_emitted_p)
3377 emit_pic_helper ();
3378
3379 flag_pic = 0;
3380 if (TARGET_ARCH64)
3381 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3382 pic_helper_symbol));
3383 else
3384 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3385 pic_helper_symbol));
3386 flag_pic = orig_flag_pic;
3387
3388 /* Need to emit this whether or not we obey regdecls,
3389 since setjmp/longjmp can cause life info to screw up.
3390 ??? In the case where we don't obey regdecls, this is not sufficient
3391 since we may not fall out the bottom. */
3392 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3393 }
3394
3395 /* Return 1 if RTX is a MEM which is known to be aligned to at
3396 least a DESIRED byte boundary. */
3397
3398 int
3399 mem_min_alignment (rtx mem, int desired)
3400 {
3401 rtx addr, base, offset;
3402
3403 /* If it's not a MEM we can't accept it. */
3404 if (GET_CODE (mem) != MEM)
3405 return 0;
3406
3407 /* Obviously... */
3408 if (!TARGET_UNALIGNED_DOUBLES
3409 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3410 return 1;
3411
3412 /* ??? The rest of the function predates MEM_ALIGN so
3413 there is probably a bit of redundancy. */
3414 addr = XEXP (mem, 0);
3415 base = offset = NULL_RTX;
3416 if (GET_CODE (addr) == PLUS)
3417 {
3418 if (GET_CODE (XEXP (addr, 0)) == REG)
3419 {
3420 base = XEXP (addr, 0);
3421
3422 /* What we are saying here is that if the base
3423 REG is aligned properly, the compiler will make
3424 sure any REG based index upon it will be so
3425 as well. */
3426 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3427 offset = XEXP (addr, 1);
3428 else
3429 offset = const0_rtx;
3430 }
3431 }
3432 else if (GET_CODE (addr) == REG)
3433 {
3434 base = addr;
3435 offset = const0_rtx;
3436 }
3437
3438 if (base != NULL_RTX)
3439 {
3440 int regno = REGNO (base);
3441
3442 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3443 {
3444 /* Check if the compiler has recorded some information
3445 about the alignment of the base REG. If reload has
3446 completed, we already matched with proper alignments.
3447 If not running global_alloc, reload might give us an
3448 unaligned pointer to the local stack, though. */
3449 if (((cfun != 0
3450 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3451 || (optimize && reload_completed))
3452 && (INTVAL (offset) & (desired - 1)) == 0)
3453 return 1;
3454 }
3455 else
3456 {
3457 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3458 return 1;
3459 }
3460 }
3461 else if (! TARGET_UNALIGNED_DOUBLES
3462 || CONSTANT_P (addr)
3463 || GET_CODE (addr) == LO_SUM)
3464 {
3465 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3466 is true, in which case we can only assume that an access is aligned if
3467 it is to a constant address, or the address involves a LO_SUM. */
3468 return 1;
3469 }
3470
3471 /* An obviously unaligned address. */
3472 return 0;
3473 }
3474
3475
3476 /* Vectors to keep interesting information about registers where it can easily
3477 be got. We used to use the actual mode value as the bit number, but there
3478 are more than 32 modes now. Instead we use two tables: one indexed by
3479 hard register number, and one indexed by mode. */
3480
3481 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3482 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3483 mapped into one sparc_mode_class mode. */
3484
3485 enum sparc_mode_class {
3486 S_MODE, D_MODE, T_MODE, O_MODE,
3487 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3488 CC_MODE, CCFP_MODE
3489 };
3490
3491 /* Modes for single-word and smaller quantities. */
3492 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3493
3494 /* Modes for double-word and smaller quantities. */
3495 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3496
3497 /* Modes for quad-word and smaller quantities. */
3498 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3499
3500 /* Modes for 8-word and smaller quantities. */
3501 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3502
3503 /* Modes for single-float quantities. We must allow any single word or
3504 smaller quantity. This is because the fix/float conversion instructions
3505 take integer inputs/outputs from the float registers. */
3506 #define SF_MODES (S_MODES)
3507
3508 /* Modes for double-float and smaller quantities. */
3509 #define DF_MODES (S_MODES | D_MODES)
3510
3511 /* Modes for double-float only quantities. */
3512 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3513
3514 /* Modes for quad-float only quantities. */
3515 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3516
3517 /* Modes for quad-float and smaller quantities. */
3518 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3519
3520 /* Modes for quad-float and double-float quantities. */
3521 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3522
3523 /* Modes for quad-float pair only quantities. */
3524 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3525
3526 /* Modes for quad-float pairs and smaller quantities. */
3527 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3528
3529 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3530
3531 /* Modes for condition codes. */
3532 #define CC_MODES (1 << (int) CC_MODE)
3533 #define CCFP_MODES (1 << (int) CCFP_MODE)
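/* For example, DImode is mapped to D_MODE by sparc_init_modes below,
   so a register whose class mask includes D_MODES can hold a DImode
   value.  */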
3534
3535 /* Value is 1 if register/mode pair is acceptable on sparc.
3536 The funny mixture of D and T modes is because integer operations
3537 do not specially operate on tetra quantities, so non-quad-aligned
3538 registers can hold quadword quantities (except %o4 and %i4 because
3539 they cross fixed registers). */
3540
3541 /* This points to either the 32 bit or the 64 bit version. */
3542 const int *hard_regno_mode_classes;
3543
3544 static const int hard_32bit_mode_classes[] = {
3545 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3546 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3547 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3548 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3549
3550 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3551 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3552 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3553 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3554
3555 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3556 and none can hold SFmode/SImode values. */
3557 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3558 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3559 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3560 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3561
3562 /* %fcc[0123] */
3563 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3564
3565 /* %icc */
3566 CC_MODES
3567 };
3568
3569 static const int hard_64bit_mode_classes[] = {
3570 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3571 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3572 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3573 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3574
3575 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3576 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3577 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3578 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3579
3580 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3581 and none can hold SFmode/SImode values. */
3582 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3583 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3584 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3585 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3586
3587 /* %fcc[0123] */
3588 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3589
3590 /* %icc */
3591 CC_MODES
3592 };
3593
3594 int sparc_mode_class [NUM_MACHINE_MODES];
3595
3596 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3597
3598 static void
3599 sparc_init_modes (void)
3600 {
3601 int i;
3602
3603 for (i = 0; i < NUM_MACHINE_MODES; i++)
3604 {
3605 switch (GET_MODE_CLASS (i))
3606 {
3607 case MODE_INT:
3608 case MODE_PARTIAL_INT:
3609 case MODE_COMPLEX_INT:
3610 if (GET_MODE_SIZE (i) <= 4)
3611 sparc_mode_class[i] = 1 << (int) S_MODE;
3612 else if (GET_MODE_SIZE (i) == 8)
3613 sparc_mode_class[i] = 1 << (int) D_MODE;
3614 else if (GET_MODE_SIZE (i) == 16)
3615 sparc_mode_class[i] = 1 << (int) T_MODE;
3616 else if (GET_MODE_SIZE (i) == 32)
3617 sparc_mode_class[i] = 1 << (int) O_MODE;
3618 else
3619 sparc_mode_class[i] = 0;
3620 break;
3621 case MODE_VECTOR_INT:
3622 if (GET_MODE_SIZE (i) <= 4)
3623 sparc_mode_class[i] = 1 << (int)SF_MODE;
3624 else if (GET_MODE_SIZE (i) == 8)
3625 sparc_mode_class[i] = 1 << (int)DF_MODE;
3626 break;
3627 case MODE_FLOAT:
3628 case MODE_COMPLEX_FLOAT:
3629 if (GET_MODE_SIZE (i) <= 4)
3630 sparc_mode_class[i] = 1 << (int) SF_MODE;
3631 else if (GET_MODE_SIZE (i) == 8)
3632 sparc_mode_class[i] = 1 << (int) DF_MODE;
3633 else if (GET_MODE_SIZE (i) == 16)
3634 sparc_mode_class[i] = 1 << (int) TF_MODE;
3635 else if (GET_MODE_SIZE (i) == 32)
3636 sparc_mode_class[i] = 1 << (int) OF_MODE;
3637 else
3638 sparc_mode_class[i] = 0;
3639 break;
3640 case MODE_CC:
3641 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3642 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3643 else
3644 sparc_mode_class[i] = 1 << (int) CC_MODE;
3645 break;
3646 default:
3647 sparc_mode_class[i] = 0;
3648 break;
3649 }
3650 }
3651
3652 if (TARGET_ARCH64)
3653 hard_regno_mode_classes = hard_64bit_mode_classes;
3654 else
3655 hard_regno_mode_classes = hard_32bit_mode_classes;
3656
3657 /* Initialize the array used by REGNO_REG_CLASS. */
3658 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3659 {
3660 if (i < 16 && TARGET_V8PLUS)
3661 sparc_regno_reg_class[i] = I64_REGS;
3662 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3663 sparc_regno_reg_class[i] = GENERAL_REGS;
3664 else if (i < 64)
3665 sparc_regno_reg_class[i] = FP_REGS;
3666 else if (i < 96)
3667 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3668 else if (i < 100)
3669 sparc_regno_reg_class[i] = FPCC_REGS;
3670 else
3671 sparc_regno_reg_class[i] = NO_REGS;
3672 }
3673 }
3674
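/* Illustrative sketch (not part of the build): with the tables above,
   asking whether hard register REGNO can hold a given mode reduces to
   a single bit test.  E.g. DImode, an 8-byte MODE_INT mode that
   sparc_init_modes classifies as 1 << D_MODE, is acceptable in REGNO
   if and only if

     (hard_regno_mode_classes[REGNO] & sparc_mode_class[DImode]) != 0

   that is, if and only if the register's entry above includes D_MODES.  */
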
3675 /* Compute the frame size required by the function. This function is called
3676 during the reload pass and also by sparc_expand_prologue. */
3677
3678 HOST_WIDE_INT
3679 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3680 {
3681 int outgoing_args_size = (current_function_outgoing_args_size
3682 + REG_PARM_STACK_SPACE (current_function_decl));
3683 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3684 int i;
3685
3686 if (TARGET_ARCH64)
3687 {
3688 for (i = 0; i < 8; i++)
3689 if (regs_ever_live[i] && ! call_used_regs[i])
3690 n_regs += 2;
3691 }
3692 else
3693 {
3694 for (i = 0; i < 8; i += 2)
3695 if ((regs_ever_live[i] && ! call_used_regs[i])
3696 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3697 n_regs += 2;
3698 }
3699
3700 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3701 if ((regs_ever_live[i] && ! call_used_regs[i])
3702 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3703 n_regs += 2;
3704
3705 /* Set up values for use in prologue and epilogue. */
3706 num_gfregs = n_regs;
3707
3708 if (leaf_function_p
3709 && n_regs == 0
3710 && size == 0
3711 && current_function_outgoing_args_size == 0)
3712 actual_fsize = apparent_fsize = 0;
3713 else
3714 {
3715 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3716 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3717 apparent_fsize += n_regs * 4;
3718 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3719 }
3720
3721 /* Make sure nothing can clobber our register windows.
3722 If a SAVE must be done, or there is a stack-local variable,
3723 the register window area must be allocated. */
3724 if (! leaf_function_p || size > 0)
3725 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3726
3727 return SPARC_STACK_ALIGN (actual_fsize);
3728 }
3729
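/* A worked example of the arithmetic above (numbers assumed): for a
   32-bit non-leaf function with 20 bytes of locals, no call-saved
   registers to spill and a STARTING_FRAME_OFFSET of 0,

     apparent_fsize = (20 + 7) & -8 = 24

   rounds the locals up to a double word.  Each call-saved register
   pair then contributes 8 bytes via N_REGS, the outgoing args area is
   added after its own rounding, and the non-leaf case also reserves
   the register window save area via FIRST_PARM_OFFSET before the
   final SPARC_STACK_ALIGN rounding.  */
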
3730 /* Output any necessary .register pseudo-ops. */
3731
3732 void
3733 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3734 {
3735 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3736 int i;
3737
3738 if (TARGET_ARCH32)
3739 return;
3740
3741 /* Check if %g[2367] were used without
3742 .register being printed for them already. */
3743 for (i = 2; i < 8; i++)
3744 {
3745 if (regs_ever_live [i]
3746 && ! sparc_hard_reg_printed [i])
3747 {
3748 sparc_hard_reg_printed [i] = 1;
3749 /* %g7 is used as TLS base register, use #ignore
3750 for it instead of #scratch. */
3751 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3752 i == 7 ? "ignore" : "scratch");
3753 }
3754 if (i == 3) i = 5;
3755 }
3756 #endif
3757 }
3758
3759 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3760 as needed. LOW should be double-word aligned for 32-bit registers.
3761 Return the new OFFSET. */
3762
3763 #define SORR_SAVE 0
3764 #define SORR_RESTORE 1
3765
3766 static int
3767 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3768 {
3769 rtx mem, insn;
3770 int i;
3771
3772 if (TARGET_ARCH64 && high <= 32)
3773 {
3774 for (i = low; i < high; i++)
3775 {
3776 if (regs_ever_live[i] && ! call_used_regs[i])
3777 {
3778 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3779 set_mem_alias_set (mem, sparc_sr_alias_set);
3780 if (action == SORR_SAVE)
3781 {
3782 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3783 RTX_FRAME_RELATED_P (insn) = 1;
3784 }
3785 else /* action == SORR_RESTORE */
3786 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3787 offset += 8;
3788 }
3789 }
3790 }
3791 else
3792 {
3793 for (i = low; i < high; i += 2)
3794 {
3795 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
3796 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
3797 enum machine_mode mode;
3798 int regno;
3799
3800 if (reg0 && reg1)
3801 {
3802 mode = i < 32 ? DImode : DFmode;
3803 regno = i;
3804 }
3805 else if (reg0)
3806 {
3807 mode = i < 32 ? SImode : SFmode;
3808 regno = i;
3809 }
3810 else if (reg1)
3811 {
3812 mode = i < 32 ? SImode : SFmode;
3813 regno = i + 1;
3814 offset += 4;
3815 }
3816 else
3817 continue;
3818
3819 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3820 set_mem_alias_set (mem, sparc_sr_alias_set);
3821 if (action == SORR_SAVE)
3822 {
3823 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3824 RTX_FRAME_RELATED_P (insn) = 1;
3825 }
3826 else /* action == SORR_RESTORE */
3827 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3828
3829 /* Advance past the slot just written, preserving double-word alignment. */
3830 offset = (offset + 8) & -8;
3831 }
3832 }
3833
3834 return offset;
3835 }
3836
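/* A sketch of the pairing logic above: if both %f2 and %f3 (regs 34
   and 35) are live and call-saved, one DFmode move covers the pair;
   if only %f3 is, an SFmode move goes to OFFSET+4 instead.  In all
   three cases the "(offset + 8) & -8" at the bottom of the loop then
   advances OFFSET to the next double word.  */
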
3837 /* Emit code to save or restore call-saved registers. */
3838
3839 static void
3840 emit_save_or_restore_regs (int action)
3841 {
3842 HOST_WIDE_INT offset;
3843 rtx base;
3844
3845 offset = frame_base_offset - apparent_fsize;
3846
3847 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3848 {
3849 /* ??? This might be optimized a little as %g1 might already have a
3850 value close enough that a single add insn will do. */
3851 /* ??? Although, all of this is probably only a temporary fix
3852 because if %g1 can hold a function result, then
3853 sparc_expand_epilogue will lose (the result will be
3854 clobbered). */
3855 base = gen_rtx_REG (Pmode, 1);
3856 emit_move_insn (base, GEN_INT (offset));
3857 emit_insn (gen_rtx_SET (VOIDmode,
3858 base,
3859 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3860 offset = 0;
3861 }
3862 else
3863 base = frame_base_reg;
3864
3865 offset = save_or_restore_regs (0, 8, base, offset, action);
3866 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3867 }
3868
3869 /* Generate a save_register_window insn. */
3870
3871 static rtx
3872 gen_save_register_window (rtx increment)
3873 {
3874 if (TARGET_ARCH64)
3875 return gen_save_register_windowdi (increment);
3876 else
3877 return gen_save_register_windowsi (increment);
3878 }
3879
3880 /* Generate an increment for the stack pointer. */
3881
3882 static rtx
3883 gen_stack_pointer_inc (rtx increment)
3884 {
3885 return gen_rtx_SET (VOIDmode,
3886 stack_pointer_rtx,
3887 gen_rtx_PLUS (Pmode,
3888 stack_pointer_rtx,
3889 increment));
3890 }
3891
3892 /* Generate a decrement for the stack pointer. */
3893
3894 static rtx
3895 gen_stack_pointer_dec (rtx decrement)
3896 {
3897 return gen_rtx_SET (VOIDmode,
3898 stack_pointer_rtx,
3899 gen_rtx_MINUS (Pmode,
3900 stack_pointer_rtx,
3901 decrement));
3902 }
3903
3904 /* Expand the function prologue. The prologue is responsible for reserving
3905 storage for the frame, saving the call-saved registers and loading the
3906 PIC register if needed. */
3907
3908 void
3909 sparc_expand_prologue (void)
3910 {
3911 rtx insn;
3912 int i;
3913
3914 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3915 on the final value of the flag means deferring the prologue/epilogue
3916 expansion until just before the second scheduling pass, which is too
3917 late to emit multiple epilogues or return insns.
3918
3919 Of course we are making the assumption that the value of the flag
3920 will not change between now and its final value. Of the three parts
3921 of the formula, only the last one can reasonably vary. Let's take a
3922 closer look, after assuming that the first two are set to true
3923 (otherwise the last value is effectively silenced).
3924
3925 If only_leaf_regs_used returns false, the global predicate will also
3926 be false so the actual frame size calculated below will be positive.
3927 As a consequence, the save_register_window insn will be emitted in
3928 the instruction stream; now this insn explicitly references %fp
3929 which is not a leaf register so only_leaf_regs_used will always
3930 return false subsequently.
3931
3932 If only_leaf_regs_used returns true, we hope that the subsequent
3933 optimization passes won't cause non-leaf registers to pop up. For
3934 example, the regrename pass has special provisions to not rename to
3935 non-leaf registers in a leaf function. */
3936 sparc_leaf_function_p
3937 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3938
3939 /* Need to use actual_fsize, since we are also allocating
3940 space for our callee (and our own register save area). */
3941 actual_fsize
3942 = sparc_compute_frame_size (get_frame_size (), sparc_leaf_function_p);
3943
3944 if (warn_stack_larger_than && actual_fsize > stack_larger_than_size)
3945 warning (0, "stack usage is %wd bytes", actual_fsize);
3946
3947 /* Advertise that the data calculated just above are now valid. */
3948 sparc_prologue_data_valid_p = true;
3949
3950 if (sparc_leaf_function_p)
3951 {
3952 frame_base_reg = stack_pointer_rtx;
3953 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3954 }
3955 else
3956 {
3957 frame_base_reg = hard_frame_pointer_rtx;
3958 frame_base_offset = SPARC_STACK_BIAS;
3959 }
3960
3961 if (actual_fsize == 0)
3962 /* do nothing. */ ;
3963 else if (sparc_leaf_function_p)
3964 {
3965 if (actual_fsize <= 4096)
3966 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
3967 else if (actual_fsize <= 8192)
3968 {
3969 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
3970 /* %sp is still the CFA register. */
3971 RTX_FRAME_RELATED_P (insn) = 1;
3972 insn
3973 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3974 }
3975 else
3976 {
3977 rtx reg = gen_rtx_REG (Pmode, 1);
3978 emit_move_insn (reg, GEN_INT (-actual_fsize));
3979 insn = emit_insn (gen_stack_pointer_inc (reg));
3980 REG_NOTES (insn) =
3981 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3982 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
3983 REG_NOTES (insn));
3984 }
3985
3986 RTX_FRAME_RELATED_P (insn) = 1;
3987 }
3988 else
3989 {
3990 if (actual_fsize <= 4096)
3991 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
3992 else if (actual_fsize <= 8192)
3993 {
3994 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
3995 /* %sp is not the CFA register anymore. */
3996 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3997 }
3998 else
3999 {
4000 rtx reg = gen_rtx_REG (Pmode, 1);
4001 emit_move_insn (reg, GEN_INT (-actual_fsize));
4002 insn = emit_insn (gen_save_register_window (reg));
4003 }
4004
4005 RTX_FRAME_RELATED_P (insn) = 1;
4006 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
4007 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4008 }
4009
4010 if (num_gfregs)
4011 emit_save_or_restore_regs (SORR_SAVE);
4012
4013 /* Load the PIC register if needed. */
4014 if (flag_pic && current_function_uses_pic_offset_table)
4015 load_pic_register (false);
4016 }
4017
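/* For illustration, the three size regimes above mirror the 13-bit
   signed immediate range of SPARC arithmetic instructions, i.e.
   [-4096, 4095].  Roughly (a sketch, not generated verbatim):

     actual_fsize <= 4096: save %sp, -SIZE, %sp
     actual_fsize <= 8192: save %sp, -4096, %sp
                           add %sp, 4096-SIZE, %sp
     otherwise:            sethi/or of -SIZE into %g1
                           save %sp, %g1, %sp

   with a plain add instead of save in the leaf case.  */
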
4018 /* This function generates the assembly code for function entry, which boils
4019 down to emitting the necessary .register directives. */
4020
4021 static void
4022 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4023 {
4024 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4025 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4026
4027 sparc_output_scratch_registers (file);
4028 }
4029
4030 /* Expand the function epilogue, either normal or part of a sibcall.
4031 We emit all the instructions except the return or the call. */
4032
4033 void
4034 sparc_expand_epilogue (void)
4035 {
4036 if (num_gfregs)
4037 emit_save_or_restore_regs (SORR_RESTORE);
4038
4039 if (actual_fsize == 0)
4040 /* do nothing. */ ;
4041 else if (sparc_leaf_function_p)
4042 {
4043 if (actual_fsize <= 4096)
4044 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4045 else if (actual_fsize <= 8192)
4046 {
4047 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4048 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4049 }
4050 else
4051 {
4052 rtx reg = gen_rtx_REG (Pmode, 1);
4053 emit_move_insn (reg, GEN_INT (-actual_fsize));
4054 emit_insn (gen_stack_pointer_dec (reg));
4055 }
4056 }
4057 }
4058
4059 /* Return true if it is appropriate to emit `return' instructions in the
4060 body of a function. */
4061
4062 bool
4063 sparc_can_use_return_insn_p (void)
4064 {
4065 return sparc_prologue_data_valid_p
4066 && (actual_fsize == 0 || !sparc_leaf_function_p);
4067 }
4068
4069 /* This function generates the assembly code for function exit. */
4070
4071 static void
4072 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4073 {
4074 /* If code does not drop into the epilogue, we still have to output
4075 a dummy nop for the sake of sane backtraces. Otherwise, if the
4076 last two instructions of a function were "call foo; dslot;" this
4077 can make the return PC of foo (i.e. address of call instruction
4078 plus 8) point to the first instruction in the next function. */
4079
4080 rtx insn, last_real_insn;
4081
4082 insn = get_last_insn ();
4083
4084 last_real_insn = prev_real_insn (insn);
4085 if (last_real_insn
4086 && GET_CODE (last_real_insn) == INSN
4087 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4088 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4089
4090 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4091 fputs("\tnop\n", file);
4092
4093 sparc_output_deferred_case_vectors ();
4094 }
4095
4096 /* Output a 'restore' instruction. */
4097
4098 static void
4099 output_restore (rtx pat)
4100 {
4101 rtx operands[3];
4102
4103 if (! pat)
4104 {
4105 fputs ("\t restore\n", asm_out_file);
4106 return;
4107 }
4108
4109 gcc_assert (GET_CODE (pat) == SET);
4110
4111 operands[0] = SET_DEST (pat);
4112 pat = SET_SRC (pat);
4113
4114 switch (GET_CODE (pat))
4115 {
4116 case PLUS:
4117 operands[1] = XEXP (pat, 0);
4118 operands[2] = XEXP (pat, 1);
4119 output_asm_insn (" restore %r1, %2, %Y0", operands);
4120 break;
4121 case LO_SUM:
4122 operands[1] = XEXP (pat, 0);
4123 operands[2] = XEXP (pat, 1);
4124 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4125 break;
4126 case ASHIFT:
4127 operands[1] = XEXP (pat, 0);
4128 gcc_assert (XEXP (pat, 1) == const1_rtx);
4129 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4130 break;
4131 default:
4132 operands[1] = pat;
4133 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4134 break;
4135 }
4136 }
4137
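/* For example (a sketch): if the epilogue's delay-slot insn is
   (set %i0 (plus %i0 %i1)), the PLUS case above folds it into

     restore %i0, %i1, %o0

   doing the addition and the window switch at once: the callee's %i0
   becomes the caller's %o0, which is the renaming the %Y0 output
   modifier performs.  */
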
4138 /* Output a return. */
4139
4140 const char *
4141 output_return (rtx insn)
4142 {
4143 if (sparc_leaf_function_p)
4144 {
4145 /* This is a leaf function so we don't have to bother restoring the
4146 register window, which frees us from dealing with the convoluted
4147 semantics of restore/return. We simply output the jump to the
4148 return address and the insn in the delay slot (if any). */
4149
4150 gcc_assert (! current_function_calls_eh_return);
4151
4152 return "jmp\t%%o7+%)%#";
4153 }
4154 else
4155 {
4156 /* This is a regular function so we have to restore the register window.
4157 We may have a pending insn for the delay slot, which will be either
4158 combined with the 'restore' instruction or put in the delay slot of
4159 the 'return' instruction. */
4160
4161 if (current_function_calls_eh_return)
4162 {
4163 /* If the function uses __builtin_eh_return, the eh_return
4164 machinery occupies the delay slot. */
4165 gcc_assert (! final_sequence);
4166
4167 if (! flag_delayed_branch)
4168 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4169
4170 if (TARGET_V9)
4171 fputs ("\treturn\t%i7+8\n", asm_out_file);
4172 else
4173 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4174
4175 if (flag_delayed_branch)
4176 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4177 else
4178 fputs ("\t nop\n", asm_out_file);
4179 }
4180 else if (final_sequence)
4181 {
4182 rtx delay, pat;
4183
4184 delay = NEXT_INSN (insn);
4185 gcc_assert (delay);
4186
4187 pat = PATTERN (delay);
4188
4189 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4190 {
4191 epilogue_renumber (&pat, 0);
4192 return "return\t%%i7+%)%#";
4193 }
4194 else
4195 {
4196 output_asm_insn ("jmp\t%%i7+%)", NULL);
4197 output_restore (pat);
4198 PATTERN (delay) = gen_blockage ();
4199 INSN_CODE (delay) = -1;
4200 }
4201 }
4202 else
4203 {
4204 /* The delay slot is empty. */
4205 if (TARGET_V9)
4206 return "return\t%%i7+%)\n\t nop";
4207 else if (flag_delayed_branch)
4208 return "jmp\t%%i7+%)\n\t restore";
4209 else
4210 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4211 }
4212 }
4213
4214 return "";
4215 }
4216
4217 /* Output a sibling call. */
4218
4219 const char *
4220 output_sibcall (rtx insn, rtx call_operand)
4221 {
4222 rtx operands[1];
4223
4224 gcc_assert (flag_delayed_branch);
4225
4226 operands[0] = call_operand;
4227
4228 if (sparc_leaf_function_p)
4229 {
4230 /* This is a leaf function so we don't have to bother restoring the
4231 register window. We simply output the jump to the function and
4232 the insn in the delay slot (if any). */
4233
4234 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4235
4236 if (final_sequence)
4237 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4238 operands);
4239 else
4240 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4241 it into a branch if possible. */
4242 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4243 operands);
4244 }
4245 else
4246 {
4247 /* This is a regular function so we have to restore the register window.
4248 We may have a pending insn for the delay slot, which will be combined
4249 with the 'restore' instruction. */
4250
4251 output_asm_insn ("call\t%a0, 0", operands);
4252
4253 if (final_sequence)
4254 {
4255 rtx delay = NEXT_INSN (insn);
4256 gcc_assert (delay);
4257
4258 output_restore (PATTERN (delay));
4259
4260 PATTERN (delay) = gen_blockage ();
4261 INSN_CODE (delay) = -1;
4262 }
4263 else
4264 output_restore (NULL_RTX);
4265 }
4266
4267 return "";
4268 }
4269
4270 /* Functions for handling argument passing.
4271
4272 For 32-bit, the first 6 args are normally in registers and the rest are
4273 pushed. Any arg that starts within the first 6 words is at least
4274 partially passed in a register unless its data type forbids.
4275
4276 For 64-bit, the argument registers are laid out as an array of 16 elements
4277 and arguments are added sequentially. The first 6 int args and up to the
4278 first 16 fp args (depending on size) are passed in regs.
4279
4280 Slot Stack Integral Float Float in structure Double Long Double
4281 ---- ----- -------- ----- ------------------ ------ -----------
4282 15 [SP+248] %f31 %f30,%f31 %d30
4283 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4284 13 [SP+232] %f27 %f26,%f27 %d26
4285 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4286 11 [SP+216] %f23 %f22,%f23 %d22
4287 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4288 9 [SP+200] %f19 %f18,%f19 %d18
4289 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4290 7 [SP+184] %f15 %f14,%f15 %d14
4291 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4292 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4293 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4294 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4295 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4296 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4297 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4298
4299 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4300
4301 Integral arguments are always passed as 64-bit quantities appropriately
4302 extended.
4303
4304 Passing of floating point values is handled as follows.
4305 If a prototype is in scope:
4306 If the value is in a named argument (i.e. not a stdarg function or a
4307 value not part of the `...') then the value is passed in the appropriate
4308 fp reg.
4309 If the value is part of the `...' and is passed in one of the first 6
4310 slots then the value is passed in the appropriate int reg.
4311 If the value is part of the `...' and is not passed in one of the first 6
4312 slots then the value is passed in memory.
4313 If a prototype is not in scope:
4314 If the value is one of the first 6 arguments the value is passed in the
4315 appropriate integer reg and the appropriate fp reg.
4316 If the value is not one of the first 6 arguments the value is passed in
4317 the appropriate fp reg and in memory.
4318
4319
4320 Summary of the calling conventions implemented by GCC on SPARC:
4321
4322 32-bit ABI:
4323 size argument return value
4324
4325 small integer <4 int. reg. int. reg.
4326 word 4 int. reg. int. reg.
4327 double word 8 int. reg. int. reg.
4328
4329 _Complex small integer <8 int. reg. int. reg.
4330 _Complex word 8 int. reg. int. reg.
4331 _Complex double word 16 memory int. reg.
4332
4333 vector integer <=8 int. reg. FP reg.
4334 vector integer >8 memory memory
4335
4336 float 4 int. reg. FP reg.
4337 double 8 int. reg. FP reg.
4338 long double 16 memory memory
4339
4340 _Complex float 8 memory FP reg.
4341 _Complex double 16 memory FP reg.
4342 _Complex long double 32 memory FP reg.
4343
4344 vector float any memory memory
4345
4346 aggregate any memory memory
4347
4348
4349
4350 64-bit ABI:
4351 size argument return value
4352
4353 small integer <8 int. reg. int. reg.
4354 word 8 int. reg. int. reg.
4355 double word 16 int. reg. int. reg.
4356
4357 _Complex small integer <16 int. reg. int. reg.
4358 _Complex word 16 int. reg. int. reg.
4359 _Complex double word 32 memory int. reg.
4360
4361 vector integer <=16 FP reg. FP reg.
4362 vector integer 16<s<=32 memory FP reg.
4363 vector integer >32 memory memory
4364
4365 float 4 FP reg. FP reg.
4366 double 8 FP reg. FP reg.
4367 long double 16 FP reg. FP reg.
4368
4369 _Complex float 8 FP reg. FP reg.
4370 _Complex double 16 FP reg. FP reg.
4371 _Complex long double 32 memory FP reg.
4372
4373 vector float <=16 FP reg. FP reg.
4374 vector float 16<s<=32 memory FP reg.
4375 vector float >32 memory memory
4376
4377 aggregate <=16 reg. reg.
4378 aggregate 16<s<=32 memory reg.
4379 aggregate >32 memory memory
4380
4381
4382
4383 Note #1: complex floating-point types follow the extended SPARC ABIs as
4384 implemented by the Sun compiler.
4385
4386 Note #2: integral vector types follow the scalar floating-point types
4387 conventions to match what is implemented by the Sun VIS SDK.
4388
4389 Note #3: floating-point vector types follow the aggregate types
4390 conventions. */
4391
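/* A worked example of the 64-bit conventions above (a sketch):

     double f (int a, double b, struct { float x; float y; } s);

   'a' travels in %o0 (slot 0), 'b' in %d2 (slot 1), and the 8-byte
   record 's' is promoted to FP registers, its two floats going to
   %f4/%f5 (slot 2); the result comes back in %d0.  */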
4392
4393 /* Maximum number of int regs for args. */
4394 #define SPARC_INT_ARG_MAX 6
4395 /* Maximum number of fp regs for args. */
4396 #define SPARC_FP_ARG_MAX 16
4397
4398 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
4399
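/* E.g. with UNITS_PER_WORD of 8, ROUND_ADVANCE (13) is (13 + 7) / 8 = 2:
   a 13-byte object consumes two argument slots.  */
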
4400 /* Handle the INIT_CUMULATIVE_ARGS macro.
4401 Initialize a variable CUM of type CUMULATIVE_ARGS
4402 for a call to a function whose data type is FNTYPE.
4403 For a library call, FNTYPE is 0. */
4404
4405 void
4406 init_cumulative_args (struct sparc_args *cum, tree fntype,
4407 rtx libname ATTRIBUTE_UNUSED,
4408 tree fndecl ATTRIBUTE_UNUSED)
4409 {
4410 cum->words = 0;
4411 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4412 cum->libcall_p = fntype == 0;
4413 }
4414
4415 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4416 When a prototype says `char' or `short', really pass an `int'. */
4417
4418 static bool
4419 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4420 {
4421 return TARGET_ARCH32 ? true : false;
4422 }
4423
4424 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4425
4426 static bool
4427 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4428 {
4429 return TARGET_ARCH64 ? true : false;
4430 }
4431
4432 /* Scan the record type TYPE and return the following predicates:
4433 - INTREGS_P: the record contains at least one field or sub-field
4434 that is eligible for promotion in integer registers.
4435 - FPREGS_P: the record contains at least one field or sub-field
4436 that is eligible for promotion in floating-point registers.
4437 - PACKED_P: the record contains at least one field that is packed.
4438
4439 Sub-fields are not taken into account for the PACKED_P predicate. */
4440
4441 static void
4442 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4443 {
4444 tree field;
4445
4446 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4447 {
4448 if (TREE_CODE (field) == FIELD_DECL)
4449 {
4450 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4451 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4452 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4453 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4454 && TARGET_FPU)
4455 *fpregs_p = 1;
4456 else
4457 *intregs_p = 1;
4458
4459 if (packed_p && DECL_PACKED (field))
4460 *packed_p = 1;
4461 }
4462 }
4463 }
4464
4465 /* Compute the slot number to pass an argument in.
4466 Return the slot number or -1 if passing on the stack.
4467
4468 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4469 the preceding args and about the function being called.
4470 MODE is the argument's machine mode.
4471 TYPE is the data type of the argument (as a tree).
4472 This is null for libcalls where that information may
4473 not be available.
4474 NAMED is nonzero if this argument is a named parameter
4475 (otherwise it is an extra parameter matching an ellipsis).
4476 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4477 *PREGNO records the register number to use if scalar type.
4478 *PPADDING records the amount of padding needed in words. */
4479
4480 static int
4481 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4482 tree type, int named, int incoming_p,
4483 int *pregno, int *ppadding)
4484 {
4485 int regbase = (incoming_p
4486 ? SPARC_INCOMING_INT_ARG_FIRST
4487 : SPARC_OUTGOING_INT_ARG_FIRST);
4488 int slotno = cum->words;
4489 enum mode_class mclass;
4490 int regno;
4491
4492 *ppadding = 0;
4493
4494 if (type && TREE_ADDRESSABLE (type))
4495 return -1;
4496
4497 if (TARGET_ARCH32
4498 && mode == BLKmode
4499 && type
4500 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4501 return -1;
4502
4503 /* For SPARC64, objects requiring 16-byte alignment get it. */
4504 if (TARGET_ARCH64
4505 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4506 && (slotno & 1) != 0)
4507 slotno++, *ppadding = 1;
4508
4509 mclass = GET_MODE_CLASS (mode);
4510 if (type && TREE_CODE (type) == VECTOR_TYPE)
4511 {
4512 /* Vector types deserve special treatment because they are
4513 polymorphic wrt their mode, depending upon whether VIS
4514 instructions are enabled. */
4515 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4516 {
4517 /* The SPARC port defines no floating-point vector modes. */
4518 gcc_assert (mode == BLKmode);
4519 }
4520 else
4521 {
4522 /* Integral vector types should either have a vector
4523 mode or an integral mode, because we are guaranteed
4524 by pass_by_reference that their size is not greater
4525 than 16 bytes and TImode is 16-byte wide. */
4526 gcc_assert (mode != BLKmode);
4527
4528 /* Vector integers are handled like floats according to
4529 the Sun VIS SDK. */
4530 mclass = MODE_FLOAT;
4531 }
4532 }
4533
4534 switch (mclass)
4535 {
4536 case MODE_FLOAT:
4537 case MODE_COMPLEX_FLOAT:
4538 if (TARGET_ARCH64 && TARGET_FPU && named)
4539 {
4540 if (slotno >= SPARC_FP_ARG_MAX)
4541 return -1;
4542 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4543 /* Arguments filling only a single FP register are
4544 right-justified in the outer double FP register. */
4545 if (GET_MODE_SIZE (mode) <= 4)
4546 regno++;
4547 break;
4548 }
4549 /* fallthrough */
4550
4551 case MODE_INT:
4552 case MODE_COMPLEX_INT:
4553 if (slotno >= SPARC_INT_ARG_MAX)
4554 return -1;
4555 regno = regbase + slotno;
4556 break;
4557
4558 case MODE_RANDOM:
4559 if (mode == VOIDmode)
4560 /* MODE is VOIDmode when generating the actual call. */
4561 return -1;
4562
4563 gcc_assert (mode == BLKmode);
4564
4565 if (TARGET_ARCH32
4566 || !type
4567 || (TREE_CODE (type) != VECTOR_TYPE
4568 && TREE_CODE (type) != RECORD_TYPE))
4569 {
4570 if (slotno >= SPARC_INT_ARG_MAX)
4571 return -1;
4572 regno = regbase + slotno;
4573 }
4574 else /* TARGET_ARCH64 && type */
4575 {
4576 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4577
4578 /* First see what kinds of registers we would need. */
4579 if (TREE_CODE (type) == VECTOR_TYPE)
4580 fpregs_p = 1;
4581 else
4582 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4583
4584 /* The ABI obviously doesn't specify how packed structures
4585 are passed. These are defined to be passed in int regs
4586 if possible, otherwise memory. */
4587 if (packed_p || !named)
4588 fpregs_p = 0, intregs_p = 1;
4589
4590 /* If all arg slots are filled, then must pass on stack. */
4591 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4592 return -1;
4593
4594 /* If there are only int args and all int arg slots are filled,
4595 then must pass on stack. */
4596 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4597 return -1;
4598
4599 /* Note that even if all int arg slots are filled, fp members may
4600 still be passed in regs if such regs are available.
4601 *PREGNO isn't set because there may be more than one, it's up
4602 to the caller to compute them. */
4603 return slotno;
4604 }
4605 break;
4606
4607 default :
4608 gcc_unreachable ();
4609 }
4610
4611 *pregno = regno;
4612 return slotno;
4613 }
4614
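/* For illustration (a sketch): a named SFmode argument assigned to
   slot 3 on 64-bit with TARGET_FPU gets

     regno = SPARC_FP_ARG_FIRST + 3 * 2 + 1

   i.e. the odd half of the slot's FP register pair, per the
   right-justification rule in the MODE_FLOAT case above.  */
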
4615 /* Handle recursive register counting for structure field layout. */
4616
4617 struct function_arg_record_value_parms
4618 {
4619 rtx ret; /* return expression being built. */
4620 int slotno; /* slot number of the argument. */
4621 int named; /* whether the argument is named. */
4622 int regbase; /* regno of the base register. */
4623 int stack; /* 1 if part of the argument is on the stack. */
4624 int intoffset; /* offset of the first pending integer field. */
4625 unsigned int nregs; /* number of words passed in registers. */
4626 };
4627
4628 static void function_arg_record_value_3
4629 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4630 static void function_arg_record_value_2
4631 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4632 static void function_arg_record_value_1
4633 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4634 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4635 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4636
4637 /* A subroutine of function_arg_record_value. Traverse the structure
4638 recursively and determine how many registers will be required. */
4639
4640 static void
4641 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4642 struct function_arg_record_value_parms *parms,
4643 bool packed_p)
4644 {
4645 tree field;
4646
4647 /* We need to compute how many registers are needed so we can
4648 allocate the PARALLEL but before we can do that we need to know
4649 whether there are any packed fields. The ABI obviously doesn't
4650 specify how structures are passed in this case, so they are
4651 defined to be passed in int regs if possible, otherwise memory,
4652 regardless of whether there are fp values present. */
4653
4654 if (! packed_p)
4655 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4656 {
4657 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4658 {
4659 packed_p = true;
4660 break;
4661 }
4662 }
4663
4664 /* Compute how many registers we need. */
4665 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4666 {
4667 if (TREE_CODE (field) == FIELD_DECL)
4668 {
4669 HOST_WIDE_INT bitpos = startbitpos;
4670
4671 if (DECL_SIZE (field) != 0)
4672 {
4673 if (integer_zerop (DECL_SIZE (field)))
4674 continue;
4675
4676 if (host_integerp (bit_position (field), 1))
4677 bitpos += int_bit_position (field);
4678 }
4679
4680 /* ??? FIXME: else assume zero offset. */
4681
4682 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4683 function_arg_record_value_1 (TREE_TYPE (field),
4684 bitpos,
4685 parms,
4686 packed_p);
4687 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4688 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4689 && TARGET_FPU
4690 && parms->named
4691 && ! packed_p)
4692 {
4693 if (parms->intoffset != -1)
4694 {
4695 unsigned int startbit, endbit;
4696 int intslots, this_slotno;
4697
4698 startbit = parms->intoffset & -BITS_PER_WORD;
4699 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4700
4701 intslots = (endbit - startbit) / BITS_PER_WORD;
4702 this_slotno = parms->slotno + parms->intoffset
4703 / BITS_PER_WORD;
4704
4705 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4706 {
4707 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4708 /* We need to pass this field on the stack. */
4709 parms->stack = 1;
4710 }
4711
4712 parms->nregs += intslots;
4713 parms->intoffset = -1;
4714 }
4715
4716 There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4717 If it weren't true we wouldn't be here. */
4718 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4719 && DECL_MODE (field) == BLKmode)
4720 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4721 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4722 parms->nregs += 2;
4723 else
4724 parms->nregs += 1;
4725 }
4726 else
4727 {
4728 if (parms->intoffset == -1)
4729 parms->intoffset = bitpos;
4730 }
4731 }
4732 }
4733 }
4734
4735 /* A subroutine of function_arg_record_value. Assign the bits of the
4736 structure between parms->intoffset and bitpos to integer registers. */
4737
4738 static void
4739 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4740 struct function_arg_record_value_parms *parms)
4741 {
4742 enum machine_mode mode;
4743 unsigned int regno;
4744 unsigned int startbit, endbit;
4745 int this_slotno, intslots, intoffset;
4746 rtx reg;
4747
4748 if (parms->intoffset == -1)
4749 return;
4750
4751 intoffset = parms->intoffset;
4752 parms->intoffset = -1;
4753
4754 startbit = intoffset & -BITS_PER_WORD;
4755 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4756 intslots = (endbit - startbit) / BITS_PER_WORD;
4757 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4758
4759 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4760 if (intslots <= 0)
4761 return;
4762
4763 /* If this is the trailing part of a word, only load that much into
4764 the register. Otherwise load the whole register. Note that in
4765 the latter case we may pick up unwanted bits. It's not a problem
4766 at the moment, but we may wish to revisit this. */
4767
4768 if (intoffset % BITS_PER_WORD != 0)
4769 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4770 MODE_INT);
4771 else
4772 mode = word_mode;
4773
4774 intoffset /= BITS_PER_UNIT;
4775 do
4776 {
4777 regno = parms->regbase + this_slotno;
4778 reg = gen_rtx_REG (mode, regno);
4779 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4780 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4781
4782 this_slotno += 1;
4783 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4784 mode = word_mode;
4785 parms->nregs += 1;
4786 intslots -= 1;
4787 }
4788 while (intslots > 0);
4789 }
4790
4791 /* A subroutine of function_arg_record_value. Traverse the structure
4792 recursively and assign bits to floating point registers. Track which
4793 bits in between need integer registers; invoke function_arg_record_value_3
4794 to make that happen. */
4795
4796 static void
4797 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4798 struct function_arg_record_value_parms *parms,
4799 bool packed_p)
4800 {
4801 tree field;
4802
4803 if (! packed_p)
4804 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4805 {
4806 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4807 {
4808 packed_p = true;
4809 break;
4810 }
4811 }
4812
4813 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4814 {
4815 if (TREE_CODE (field) == FIELD_DECL)
4816 {
4817 HOST_WIDE_INT bitpos = startbitpos;
4818
4819 if (DECL_SIZE (field) != 0)
4820 {
4821 if (integer_zerop (DECL_SIZE (field)))
4822 continue;
4823
4824 if (host_integerp (bit_position (field), 1))
4825 bitpos += int_bit_position (field);
4826 }
4827
4828 /* ??? FIXME: else assume zero offset. */
4829
4830 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4831 function_arg_record_value_2 (TREE_TYPE (field),
4832 bitpos,
4833 parms,
4834 packed_p);
4835 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4836 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4837 && TARGET_FPU
4838 && parms->named
4839 && ! packed_p)
4840 {
4841 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4842 int regno, nregs, pos;
4843 enum machine_mode mode = DECL_MODE (field);
4844 rtx reg;
4845
4846 function_arg_record_value_3 (bitpos, parms);
4847
4848 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4849 && mode == BLKmode)
4850 {
4851 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4852 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4853 }
4854 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4855 {
4856 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4857 nregs = 2;
4858 }
4859 else
4860 nregs = 1;
4861
4862 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4863 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4864 regno++;
4865 reg = gen_rtx_REG (mode, regno);
4866 pos = bitpos / BITS_PER_UNIT;
4867 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4868 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4869 parms->nregs += 1;
4870 while (--nregs > 0)
4871 {
4872 regno += GET_MODE_SIZE (mode) / 4;
4873 reg = gen_rtx_REG (mode, regno);
4874 pos += GET_MODE_SIZE (mode);
4875 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4876 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4877 parms->nregs += 1;
4878 }
4879 }
4880 else
4881 {
4882 if (parms->intoffset == -1)
4883 parms->intoffset = bitpos;
4884 }
4885 }
4886 }
4887 }
4888
4889 /* Used by function_arg and function_value to implement the complex
4890 conventions of the 64-bit ABI for passing and returning structures.
4891 Return an expression valid as a return value for the two macros
4892 FUNCTION_ARG and FUNCTION_VALUE.
4893
4894 TYPE is the data type of the argument (as a tree).
4895 This is null for libcalls where that information may
4896 not be available.
4897 MODE is the argument's machine mode.
4898 SLOTNO is the index number of the argument's slot in the parameter array.
4899 NAMED is nonzero if this argument is a named parameter
4900 (otherwise it is an extra parameter matching an ellipsis).
4901 REGBASE is the regno of the base register for the parameter array. */
4902
4903 static rtx
4904 function_arg_record_value (tree type, enum machine_mode mode,
4905 int slotno, int named, int regbase)
4906 {
4907 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4908 struct function_arg_record_value_parms parms;
4909 unsigned int nregs;
4910
4911 parms.ret = NULL_RTX;
4912 parms.slotno = slotno;
4913 parms.named = named;
4914 parms.regbase = regbase;
4915 parms.stack = 0;
4916
4917 /* Compute how many registers we need. */
4918 parms.nregs = 0;
4919 parms.intoffset = 0;
4920 function_arg_record_value_1 (type, 0, &parms, false);
4921
4922 /* Take into account pending integer fields. */
4923 if (parms.intoffset != -1)
4924 {
4925 unsigned int startbit, endbit;
4926 int intslots, this_slotno;
4927
4928 startbit = parms.intoffset & -BITS_PER_WORD;
4929 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4930 intslots = (endbit - startbit) / BITS_PER_WORD;
4931 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4932
4933 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4934 {
4935 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4936 /* We need to pass this field on the stack. */
4937 parms.stack = 1;
4938 }
4939
4940 parms.nregs += intslots;
4941 }
4942 nregs = parms.nregs;
4943
4944 /* Allocate the vector and handle some annoying special cases. */
4945 if (nregs == 0)
4946 {
4947 /* ??? Empty structure has no value? Duh? */
4948 if (typesize <= 0)
4949 {
4950 /* Though there's nothing really to store, return a word register
4951 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4952 leads to breakage due to the fact that there are zero bytes to
4953 load. */
4954 return gen_rtx_REG (mode, regbase);
4955 }
4956 else
4957 {
4958 /* ??? C++ has structures with no fields, and yet a size. Give up
4959 for now and pass everything back in integer registers. */
4960 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4961 }
4962 if (nregs + slotno > SPARC_INT_ARG_MAX)
4963 nregs = SPARC_INT_ARG_MAX - slotno;
4964 }
4965 gcc_assert (nregs != 0);
4966
4967 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4968
4969 /* If at least one field must be passed on the stack, generate
4970 (parallel [(expr_list (nil) ...) ...]) so that all fields will
4971 also be passed on the stack. We can't do much better because the
4972 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
4973 of structures for which the fields passed exclusively in registers
4974 are not at the beginning of the structure. */
4975 if (parms.stack)
4976 XVECEXP (parms.ret, 0, 0)
4977 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4978
4979 /* Fill in the entries. */
4980 parms.nregs = 0;
4981 parms.intoffset = 0;
4982 function_arg_record_value_2 (type, 0, &parms, false);
4983 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4984
4985 gcc_assert (parms.nregs == nregs);
4986
4987 return parms.ret;
4988 }
4989
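/* A sketch of the result for 'struct { int i; double d; }' passed in
   slot 0 of an outgoing 64-bit argument list: one integer word and
   one FP register are counted, and the PARALLEL built above is
   roughly

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DF %f2) (const_int 8))])

   so the integer field travels in %o0 and the double in the FP
   register pair of the following slot (%f2/%f3, i.e. %d2).  */
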
4990 /* Used by function_arg and function_value to implement the conventions
4991 of the 64-bit ABI for passing and returning unions.
4992 Return an expression valid as a return value for the two macros
4993 FUNCTION_ARG and FUNCTION_VALUE.
4994
4995 SIZE is the size in bytes of the union.
4996 MODE is the argument's machine mode.
4997 REGNO is the hard register the union will be passed in. */
4998
4999 static rtx
5000 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5001 int regno)
5002 {
5003 int nwords = ROUND_ADVANCE (size), i;
5004 rtx regs;
5005
5006 /* See comment in previous function for empty structures. */
5007 if (nwords == 0)
5008 return gen_rtx_REG (mode, regno);
5009
5010 if (slotno == SPARC_INT_ARG_MAX - 1)
5011 nwords = 1;
5012
5013 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5014
5015 for (i = 0; i < nwords; i++)
5016 {
5017 /* Unions are passed left-justified. */
5018 XVECEXP (regs, 0, i)
5019 = gen_rtx_EXPR_LIST (VOIDmode,
5020 gen_rtx_REG (word_mode, regno),
5021 GEN_INT (UNITS_PER_WORD * i));
5022 regno++;
5023 }
5024
5025 return regs;
5026 }
5027
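/* E.g. (a sketch) a 12-byte union passed in slot 0 spans two words:

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   left-justified as noted in the loop; starting in the last slot
   (slot 5) it would be truncated to the single word in %o5, the
   remainder going to memory.  */
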
5028 /* Used by function_arg and function_value to implement the conventions
5029 for passing and returning large (BLKmode) vectors.
5030 Return an expression valid as a return value for the two macros
5031 FUNCTION_ARG and FUNCTION_VALUE.
5032
5033 SIZE is the size in bytes of the vector.
5034 BASE_MODE is the argument's base machine mode.
5035 REGNO is the FP hard register the vector will be passed in. */
5036
5037 static rtx
5038 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5039 {
5040 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5041 int nregs = size / base_mode_size, i;
5042 rtx regs;
5043
5044 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5045
5046 for (i = 0; i < nregs; i++)
5047 {
5048 XVECEXP (regs, 0, i)
5049 = gen_rtx_EXPR_LIST (VOIDmode,
5050 gen_rtx_REG (base_mode, regno),
5051 GEN_INT (base_mode_size * i));
5052 regno += base_mode_size / 4;
5053 }
5054
5055 return regs;
5056 }
5057
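/* E.g. (a sketch) a 16-byte vector with SFmode elements starting at
   SPARC_FP_ARG_FIRST yields four SFmode registers %f0/%f1/%f2/%f3 at
   byte offsets 0, 4, 8 and 12: base_mode_size is 4, so nregs is 4
   and REGNO advances by one FP register per element.  */
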
5058 /* Handle the FUNCTION_ARG macro.
5059 Determine where to put an argument to a function.
5060 Value is zero to push the argument on the stack,
5061 or a hard register in which to store the argument.
5062
5063 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5064 the preceding args and about the function being called.
5065 MODE is the argument's machine mode.
5066 TYPE is the data type of the argument (as a tree).
5067 This is null for libcalls where that information may
5068 not be available.
5069 NAMED is nonzero if this argument is a named parameter
5070 (otherwise it is an extra parameter matching an ellipsis).
5071 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5072
5073 rtx
5074 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5075 tree type, int named, int incoming_p)
5076 {
5077 int regbase = (incoming_p
5078 ? SPARC_INCOMING_INT_ARG_FIRST
5079 : SPARC_OUTGOING_INT_ARG_FIRST);
5080 int slotno, regno, padding;
5081 enum mode_class mclass = GET_MODE_CLASS (mode);
5082
5083 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5084 &regno, &padding);
5085 if (slotno == -1)
5086 return 0;
5087
5088 /* Vector types deserve special treatment because they are polymorphic wrt
5089 their mode, depending upon whether VIS instructions are enabled. */
5090 if (type && TREE_CODE (type) == VECTOR_TYPE)
5091 {
5092 HOST_WIDE_INT size = int_size_in_bytes (type);
5093 gcc_assert ((TARGET_ARCH32 && size <= 8)
5094 || (TARGET_ARCH64 && size <= 16));
5095
5096 if (mode == BLKmode)
5097 return function_arg_vector_value (size,
5098 TYPE_MODE (TREE_TYPE (type)),
5099 SPARC_FP_ARG_FIRST + 2*slotno);
5100 else
5101 mclass = MODE_FLOAT;
5102 }
5103
5104 if (TARGET_ARCH32)
5105 return gen_rtx_REG (mode, regno);
5106
5107 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5108 and are promoted to registers if possible. */
5109 if (type && TREE_CODE (type) == RECORD_TYPE)
5110 {
5111 HOST_WIDE_INT size = int_size_in_bytes (type);
5112 gcc_assert (size <= 16);
5113
5114 return function_arg_record_value (type, mode, slotno, named, regbase);
5115 }
5116
5117 /* Unions up to 16 bytes in size are passed in integer registers. */
5118 else if (type && TREE_CODE (type) == UNION_TYPE)
5119 {
5120 HOST_WIDE_INT size = int_size_in_bytes (type);
5121 gcc_assert (size <= 16);
5122
5123 return function_arg_union_value (size, mode, slotno, regno);
5124 }
5125
5126 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5127 but also have the slot allocated for them.
5128 If no prototype is in scope fp values in register slots get passed
5129 in two places, either fp regs and int regs or fp regs and memory. */
5130 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5131 && SPARC_FP_REG_P (regno))
5132 {
5133 rtx reg = gen_rtx_REG (mode, regno);
5134 if (cum->prototype_p || cum->libcall_p)
5135 {
5136 /* "* 2" because fp reg numbers are recorded in 4 byte
5137 quantities. */
5138 #if 0
5139 /* ??? This will cause the value to be passed in the fp reg and
5140 in the stack. When a prototype exists we want to pass the
5141 value in the reg but reserve space on the stack. That's an
5142 optimization, and is deferred [for a bit]. */
5143 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5144 return gen_rtx_PARALLEL (mode,
5145 gen_rtvec (2,
5146 gen_rtx_EXPR_LIST (VOIDmode,
5147 NULL_RTX, const0_rtx),
5148 gen_rtx_EXPR_LIST (VOIDmode,
5149 reg, const0_rtx)));
5150 else
5151 #else
5152 /* ??? It seems that passing back a register even when past
5153 the area declared by REG_PARM_STACK_SPACE will allocate
5154 space appropriately, and will not copy the data onto the
5155 stack, exactly as we desire.
5156
5157 This is due to locate_and_pad_parm being called in
5158 expand_call whenever reg_parm_stack_space > 0, which
5159 while beneficial to our example here, would seem to be
5160 in error from what had been intended. Ho hum... -- r~ */
5161 #endif
5162 return reg;
5163 }
5164 else
5165 {
5166 rtx v0, v1;
5167
5168 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5169 {
5170 int intreg;
5171
5172 /* On incoming, we don't need to know that the value
5173 is passed in %f0 and %i0, and it confuses other parts
5174 causing needless spillage even on the simplest cases. */
5175 if (incoming_p)
5176 return reg;
5177
5178 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5179 + (regno - SPARC_FP_ARG_FIRST) / 2);
5180
5181 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5182 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5183 const0_rtx);
5184 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5185 }
5186 else
5187 {
5188 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5189 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5190 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5191 }
5192 }
5193 }
5194
5195 /* All other aggregate types are passed in an integer register in a mode
5196 corresponding to the size of the type. */
5197 else if (type && AGGREGATE_TYPE_P (type))
5198 {
5199 HOST_WIDE_INT size = int_size_in_bytes (type);
5200 gcc_assert (size <= 16);
5201
5202 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5203 }
5204
5205 return gen_rtx_REG (mode, regno);
5206 }
5207
5208 /* For an arg passed partly in registers and partly in memory,
5209 this is the number of bytes of registers used.
5210 For args passed entirely in registers or entirely in memory, zero.
5211
5212 Any arg that starts in the first 6 regs but won't entirely fit in them
5213 needs partial registers on v8. On v9, structures with integer
5214 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5215 values that begin in the last fp reg [where "last fp reg" varies with the
5216 mode] will be split between that reg and memory. */
5217
5218 static int
5219 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5220 tree type, bool named)
5221 {
5222 int slotno, regno, padding;
5223
5224 /* We pass 0 for incoming_p here, it doesn't matter. */
5225 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5226
5227 if (slotno == -1)
5228 return 0;
5229
5230 if (TARGET_ARCH32)
5231 {
5232 if ((slotno + (mode == BLKmode
5233 ? ROUND_ADVANCE (int_size_in_bytes (type))
5234 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5235 > SPARC_INT_ARG_MAX)
5236 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5237 }
5238 else
5239 {
5240 /* We are guaranteed by pass_by_reference that the size of the
5241 argument is not greater than 16 bytes, so we only need to return
5242 one word if the argument is partially passed in registers. */
5243
5244 if (type && AGGREGATE_TYPE_P (type))
5245 {
5246 int size = int_size_in_bytes (type);
5247
5248 if (size > UNITS_PER_WORD
5249 && slotno == SPARC_INT_ARG_MAX - 1)
5250 return UNITS_PER_WORD;
5251 }
5252 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5253 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5254 && ! (TARGET_FPU && named)))
5255 {
5256 /* The complex types are passed as packed types. */
5257 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5258 && slotno == SPARC_INT_ARG_MAX - 1)
5259 return UNITS_PER_WORD;
5260 }
5261 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5262 {
5263 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5264 > SPARC_FP_ARG_MAX)
5265 return UNITS_PER_WORD;
5266 }
5267 }
5268
5269 return 0;
5270 }
5271
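/* For illustration (64-bit, values assumed): a 16-byte aggregate
   whose slot number is 5 -- the last integer slot -- has its first
   word passed in %o5 and its second in memory, so the hook reports
   UNITS_PER_WORD (8) partial bytes; in any earlier slot it fits
   entirely in registers and the hook reports 0.  */
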
5272 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5273 Specify whether to pass the argument by reference. */
5274
5275 static bool
5276 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5277 enum machine_mode mode, tree type,
5278 bool named ATTRIBUTE_UNUSED)
5279 {
5280 if (TARGET_ARCH32)
5281 /* Original SPARC 32-bit ABI says that structures and unions,
5282 and quad-precision floats are passed by reference. For Pascal,
5283 also pass arrays by reference. All other base types are passed
5284 in registers.
5285
5286 Extended ABI (as implemented by the Sun compiler) says that all
5287 complex floats are passed by reference. Pass complex integers
5288 in registers up to 8 bytes. More generally, enforce the 2-word
5289 cap for passing arguments in registers.
5290
5291 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5292 integers are passed like floats of the same size, that is in
5293 registers up to 8 bytes. Pass all vector floats by reference
5294 like structure and unions. */
5295 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5296 || mode == SCmode
5297 /* Catch CDImode, TFmode, DCmode and TCmode. */
5298 || GET_MODE_SIZE (mode) > 8
5299 || (type
5300 && TREE_CODE (type) == VECTOR_TYPE
5301 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5302 else
5303 /* Original SPARC 64-bit ABI says that structures and unions
5304 smaller than 16 bytes are passed in registers, as well as
5305 all other base types.
5306
5307 Extended ABI (as implemented by the Sun compiler) says that
5308 complex floats are passed in registers up to 16 bytes. Pass
5309 all complex integers in registers up to 16 bytes. More generally,
5310 enforce the 2-word cap for passing arguments in registers.
5311
5312 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5313 integers are passed like floats of the same size, that is in
5314 registers (up to 16 bytes). Pass all vector floats like structure
5315 and unions. */
5316 return ((type
5317 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5318 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5319 /* Catch CTImode and TCmode. */
5320 || GET_MODE_SIZE (mode) > 16);
5321 }
5322
5323 /* Handle the FUNCTION_ARG_ADVANCE macro.
5324 Update the data in CUM to advance over an argument
5325 of mode MODE and data type TYPE.
5326 TYPE is null for libcalls where that information may not be available. */
5327
5328 void
5329 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5330 tree type, int named)
5331 {
5332 int slotno, regno, padding;
5333
5334 /* We pass 0 for incoming_p here, it doesn't matter. */
5335 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5336
5337 /* If register required leading padding, add it. */
5338 if (slotno != -1)
5339 cum->words += padding;
5340
5341 if (TARGET_ARCH32)
5342 {
5343 cum->words += (mode != BLKmode
5344 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5345 : ROUND_ADVANCE (int_size_in_bytes (type)));
5346 }
5347 else
5348 {
5349 if (type && AGGREGATE_TYPE_P (type))
5350 {
5351 int size = int_size_in_bytes (type);
5352
5353 if (size <= 8)
5354 ++cum->words;
5355 else if (size <= 16)
5356 cum->words += 2;
5357 else /* passed by reference */
5358 ++cum->words;
5359 }
5360 else
5361 {
5362 cum->words += (mode != BLKmode
5363 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5364 : ROUND_ADVANCE (int_size_in_bytes (type)));
5365 }
5366 }
5367 }
5368
5369 /* Handle the FUNCTION_ARG_PADDING macro.
5370 For the 64-bit ABI, structs are always stored left-justified in their
5371 argument slot. */
5372
5373 enum direction
5374 function_arg_padding (enum machine_mode mode, tree type)
5375 {
5376 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5377 return upward;
5378
5379 /* Fall back to the default. */
5380 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5381 }
5382
5383 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5384 Specify whether to return the return value in memory. */
5385
5386 static bool
5387 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5388 {
5389 if (TARGET_ARCH32)
5390 /* Original SPARC 32-bit ABI says that structures and unions,
5391 and quad-precision floats are returned in memory. All other
5392 base types are returned in registers.
5393
5394 Extended ABI (as implemented by the Sun compiler) says that
5395 all complex floats are returned in registers (8 FP registers
5396 at most for '_Complex long double'). Return all complex integers
5397 in registers (4 at most for '_Complex long long').
5398
5399 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5400 integers are returned like floats of the same size, that is in
5401 registers up to 8 bytes and in memory otherwise. Return all
5402 vector floats in memory like structure and unions; note that
5403 they always have BLKmode like the latter. */
5404 return (TYPE_MODE (type) == BLKmode
5405 || TYPE_MODE (type) == TFmode
5406 || (TREE_CODE (type) == VECTOR_TYPE
5407 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5408 else
5409 /* Original SPARC 64-bit ABI says that structures and unions
5410 of 32 bytes or less are returned in registers, as well as
5411 all other base types.
5412
5413 Extended ABI (as implemented by the Sun compiler) says that all
5414 complex floats are returned in registers (8 FP registers at most
5415 for '_Complex long double'). Return all complex integers in
5416 registers (4 at most for '_Complex TItype').
5417
5418 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5419 integers are returned like floats of the same size, that is in
5420 registers. Return all vector floats like structure and unions;
5421 note that they always have BLKmode like the latter. */
5422 return ((TYPE_MODE (type) == BLKmode
5423 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5424 }
5425
5426 /* Handle the TARGET_STRUCT_VALUE target hook.
5427 Return where to find the structure return value address. */
5428
5429 static rtx
5430 sparc_struct_value_rtx (tree fndecl, int incoming)
5431 {
5432 if (TARGET_ARCH64)
5433 return 0;
5434 else
5435 {
5436 rtx mem;
5437
5438 if (incoming)
5439 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5440 STRUCT_VALUE_OFFSET));
5441 else
5442 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5443 STRUCT_VALUE_OFFSET));
5444
5445 /* Only follow the SPARC ABI for fixed-size structure returns.
5446 Variable size structure returns are handled per the normal
5447 procedures in GCC. This is enabled by -mstd-struct-return. */
5448 if (incoming == 2
5449 && sparc_std_struct_return
5450 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5451 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5452 {
5453 /* We must check and adjust the return address, as it is
5454 optional whether the caller really provides the return
5455 object. */
5456 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5457 rtx scratch = gen_reg_rtx (SImode);
5458 rtx endlab = gen_label_rtx ();
5459
5460 /* Calculate the return object size */
5461 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5462 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5463 /* Construct a temporary return value */
5464 rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5465
5466 /* Implement the SPARC 32-bit psABI callee struct-return checking
5467 requirements:
5468
5469 Fetch the instruction where we will return to and see if
5470 it's an unimp instruction (the most significant 10 bits
5471 will be zero). */
5472 emit_move_insn (scratch, gen_rtx_MEM (SImode,
5473 plus_constant (ret_rtx, 8)));
5474 /* Assume the size is valid and pre-adjust */
5475 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5476 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5477 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5478 /* Assign stack temp:
5479 Write the address of the memory pointed to by temp_val into
5480 the memory pointed to by mem */
5481 emit_move_insn (mem, XEXP (temp_val, 0));
5482 emit_label (endlab);
5483 }
5484
5485 set_mem_alias_set (mem, struct_value_alias_set);
5486 return mem;
5487 }
5488 }
5489
5490 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5491 For v9, function return values are subject to the same rules as arguments,
5492 except that up to 32 bytes may be returned in registers. */
5493
5494 rtx
5495 function_value (tree type, enum machine_mode mode, int incoming_p)
5496 {
5497 /* Beware that the two values are swapped here wrt function_arg. */
5498 int regbase = (incoming_p
5499 ? SPARC_OUTGOING_INT_ARG_FIRST
5500 : SPARC_INCOMING_INT_ARG_FIRST);
5501 enum mode_class mclass = GET_MODE_CLASS (mode);
5502 int regno;
5503
5504 /* Vector types deserve special treatment because they are polymorphic wrt
5505 their mode, depending upon whether VIS instructions are enabled. */
5506 if (type && TREE_CODE (type) == VECTOR_TYPE)
5507 {
5508 HOST_WIDE_INT size = int_size_in_bytes (type);
5509 gcc_assert ((TARGET_ARCH32 && size <= 8)
5510 || (TARGET_ARCH64 && size <= 32));
5511
5512 if (mode == BLKmode)
5513 return function_arg_vector_value (size,
5514 TYPE_MODE (TREE_TYPE (type)),
5515 SPARC_FP_ARG_FIRST);
5516 else
5517 mclass = MODE_FLOAT;
5518 }
5519
5520 if (TARGET_ARCH64 && type)
5521 {
5522 /* Structures up to 32 bytes in size are returned in registers. */
5523 if (TREE_CODE (type) == RECORD_TYPE)
5524 {
5525 HOST_WIDE_INT size = int_size_in_bytes (type);
5526 gcc_assert (size <= 32);
5527
5528 return function_arg_record_value (type, mode, 0, 1, regbase);
5529 }
5530
5531 /* Unions up to 32 bytes in size are returned in integer registers. */
5532 else if (TREE_CODE (type) == UNION_TYPE)
5533 {
5534 HOST_WIDE_INT size = int_size_in_bytes (type);
5535 gcc_assert (size <= 32);
5536
5537 return function_arg_union_value (size, mode, 0, regbase);
5538 }
5539
5540 /* Objects that require it are returned in FP registers. */
5541 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5542 ;
5543
5544 /* All other aggregate types are returned in an integer register in a
5545 mode corresponding to the size of the type. */
5546 else if (AGGREGATE_TYPE_P (type))
5547 {
5548 /* All other aggregate types are passed in an integer register
5549 in a mode corresponding to the size of the type. */
5550 HOST_WIDE_INT size = int_size_in_bytes (type);
5551 gcc_assert (size <= 32);
5552
5553 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5554
5555 /* ??? We probably should have made the same ABI change in
5556 3.4.0 as the one we made for unions. The latter was
5557 required by the SCD though, while the former is not
5558 specified, so we favored compatibility and efficiency.
5559
5560 Now we're stuck for aggregates larger than 16 bytes,
5561 because OImode vanished in the meantime. Let's not
5562 try to be unduly clever, and simply follow the ABI
5563 for unions in that case. */
5564 if (mode == BLKmode)
5565 return function_arg_union_value (size, mode, 0, regbase);
5566 else
5567 mclass = MODE_INT;
5568 }
5569
5570 /* This must match PROMOTE_FUNCTION_MODE. */
5571 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5572 mode = word_mode;
5573 }
5574
5575 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5576 regno = SPARC_FP_ARG_FIRST;
5577 else
5578 regno = regbase;
5579
5580 return gen_rtx_REG (mode, regno);
5581 }
5582
5583 /* Do what is necessary for `va_start'. We look at the current function
5584 to determine if stdarg or varargs is used and return the address of
5585 the first unnamed parameter. */
5586
5587 static rtx
5588 sparc_builtin_saveregs (void)
5589 {
5590 int first_reg = current_function_args_info.words;
5591 rtx address;
5592 int regno;
5593
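/* For illustration: if two argument words were named, first_reg is 2
   and the loop below stores %i2 through %i5 into their reserved slots
   in the parameter area above the frame pointer; the returned address
   then points at the %i2 slot, i.e. the first unnamed argument. */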
5594 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5595 emit_move_insn (gen_rtx_MEM (word_mode,
5596 gen_rtx_PLUS (Pmode,
5597 frame_pointer_rtx,
5598 GEN_INT (FIRST_PARM_OFFSET (0)
5599 + (UNITS_PER_WORD
5600 * regno)))),
5601 gen_rtx_REG (word_mode,
5602 SPARC_INCOMING_INT_ARG_FIRST + regno));
5603
5604 address = gen_rtx_PLUS (Pmode,
5605 frame_pointer_rtx,
5606 GEN_INT (FIRST_PARM_OFFSET (0)
5607 + UNITS_PER_WORD * first_reg));
5608
5609 return address;
5610 }
5611
5612 /* Implement `va_start' for stdarg. */
5613
5614 void
5615 sparc_va_start (tree valist, rtx nextarg)
5616 {
5617 nextarg = expand_builtin_saveregs ();
5618 std_expand_builtin_va_start (valist, nextarg);
5619 }
5620
5621 /* Implement `va_arg' for stdarg. */
5622
5623 static tree
5624 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5625 {
5626 HOST_WIDE_INT size, rsize, align;
5627 tree addr, incr;
5628 bool indirect;
5629 tree ptrtype = build_pointer_type (type);
5630
5631 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5632 {
5633 indirect = true;
5634 size = rsize = UNITS_PER_WORD;
5635 align = 0;
5636 }
5637 else
5638 {
5639 indirect = false;
5640 size = int_size_in_bytes (type);
5641 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5642 align = 0;
5643
5644 if (TARGET_ARCH64)
5645 {
5646 /* For SPARC64, objects requiring 16-byte alignment get it. */
5647 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5648 align = 2 * UNITS_PER_WORD;
5649
5650 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5651 are left-justified in their slots. */
5652 if (AGGREGATE_TYPE_P (type))
5653 {
5654 if (size == 0)
5655 size = rsize = UNITS_PER_WORD;
5656 else
5657 size = rsize;
5658 }
5659 }
5660 }
5661
5662 incr = valist;
5663 if (align)
5664 {
5665 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5666 ssize_int (align - 1)));
5667 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5668 ssize_int (-align)));
5669 }
5670
5671 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5672 addr = incr;
5673
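/* Illustration: SPARC is big-endian, so a 4-byte value in an 8-byte
   slot on the 64-bit ABI occupies the high-addressed half; the
   adjustment below bumps the fetch address by rsize - size = 4. */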
5674 if (BYTES_BIG_ENDIAN && size < rsize)
5675 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5676 ssize_int (rsize - size)));
5677
5678 if (indirect)
5679 {
5680 addr = fold_convert (build_pointer_type (ptrtype), addr);
5681 addr = build_va_arg_indirect_ref (addr);
5682 }
5683 /* If the address isn't aligned properly for the type,
5684 we may need to copy to a temporary.
5685 FIXME: This is inefficient. Usually we can do this
5686 in registers. */
5687 else if (align == 0
5688 && TYPE_ALIGN (type) > BITS_PER_WORD)
5689 {
5690 tree tmp = create_tmp_var (type, "va_arg_tmp");
5691 tree dest_addr = build_fold_addr_expr (tmp);
5692
5693 tree copy = build_function_call_expr
5694 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5695 tree_cons (NULL_TREE, dest_addr,
5696 tree_cons (NULL_TREE, addr,
5697 tree_cons (NULL_TREE, size_int (rsize),
5698 NULL_TREE))));
5699
5700 gimplify_and_add (copy, pre_p);
5701 addr = dest_addr;
5702 }
5703 else
5704 addr = fold_convert (ptrtype, addr);
5705
5706 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5707 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5708 gimplify_and_add (incr, post_p);
5709
5710 return build_va_arg_indirect_ref (addr);
5711 }
5712
5713 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5714 Specify whether the vector mode is supported by the hardware. */
5715
5716 static bool
5717 sparc_vector_mode_supported_p (enum machine_mode mode)
5718 {
5719 return TARGET_VIS && VECTOR_MODE_P (mode);
5720 }
5721
5722 /* Return the string to output an unconditional branch to LABEL, which is
5723 the operand number of the label.
5724
5725 DEST is the destination insn (i.e. the label), INSN is the source. */
5726
5727 const char *
5728 output_ubranch (rtx dest, int label, rtx insn)
5729 {
5730 static char string[64];
5731 bool v9_form = false;
5732 char *p;
5733
5734 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5735 {
5736 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5737 - INSN_ADDRESSES (INSN_UID (insn)));
5738 /* Leave some instructions for "slop". */
5739 if (delta >= -260000 && delta < 260000)
5740 v9_form = true;
5741 }
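/* The template assembled below is "b%*\t%l0%(" or, in the V9 form,
   "ba%*,pt\t%%xcc, %l0%(" when the label is operand 0; the '%*' and
   '%(' escapes are handled by print_operand. */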
5742
5743 if (v9_form)
5744 strcpy (string, "ba%*,pt\t%%xcc, ");
5745 else
5746 strcpy (string, "b%*\t");
5747
5748 p = strchr (string, '\0');
5749 *p++ = '%';
5750 *p++ = 'l';
5751 *p++ = '0' + label;
5752 *p++ = '%';
5753 *p++ = '(';
5754 *p = '\0';
5755
5756 return string;
5757 }
5758
5759 /* Return the string to output a conditional branch to LABEL, which is
5760 the operand number of the label. OP is the conditional expression.
5761 XEXP (OP, 0) is assumed to be a condition code register (integer or
5762 floating point) and its mode specifies what kind of comparison we made.
5763
5764 DEST is the destination insn (i.e. the label), INSN is the source.
5765
5766 REVERSED is nonzero if we should reverse the sense of the comparison.
5767
5768 ANNUL is nonzero if we should generate an annulling branch. */
5769
5770 const char *
5771 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5772 rtx insn)
5773 {
5774 static char string[64];
5775 enum rtx_code code = GET_CODE (op);
5776 rtx cc_reg = XEXP (op, 0);
5777 enum machine_mode mode = GET_MODE (cc_reg);
5778 const char *labelno, *branch;
5779 int spaces = 8, far;
5780 char *p;
5781
5782 /* V9 branches are limited to +-1MB. If the target is too far away,
5783 change
5784
5785 bne,pt %xcc, .LC30
5786
5787 to
5788
5789 be,pn %xcc, .+12
5790 nop
5791 ba .LC30
5792
5793 and
5794
5795 fbne,a,pn %fcc2, .LC29
5796
5797 to
5798
5799 fbe,pt %fcc2, .+16
5800 nop
5801 ba .LC29 */
5802
5803 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5804 if (reversed ^ far)
5805 {
5806 /* Reversing an FP compare takes care: an ordered compare
5807 becomes an unordered compare and vice versa. */
5808 if (mode == CCFPmode || mode == CCFPEmode)
5809 code = reverse_condition_maybe_unordered (code);
5810 else
5811 code = reverse_condition (code);
5812 }
5813
5814 /* Start by writing the branch condition. */
5815 if (mode == CCFPmode || mode == CCFPEmode)
5816 {
5817 switch (code)
5818 {
5819 case NE:
5820 branch = "fbne";
5821 break;
5822 case EQ:
5823 branch = "fbe";
5824 break;
5825 case GE:
5826 branch = "fbge";
5827 break;
5828 case GT:
5829 branch = "fbg";
5830 break;
5831 case LE:
5832 branch = "fble";
5833 break;
5834 case LT:
5835 branch = "fbl";
5836 break;
5837 case UNORDERED:
5838 branch = "fbu";
5839 break;
5840 case ORDERED:
5841 branch = "fbo";
5842 break;
5843 case UNGT:
5844 branch = "fbug";
5845 break;
5846 case UNLT:
5847 branch = "fbul";
5848 break;
5849 case UNEQ:
5850 branch = "fbue";
5851 break;
5852 case UNGE:
5853 branch = "fbuge";
5854 break;
5855 case UNLE:
5856 branch = "fbule";
5857 break;
5858 case LTGT:
5859 branch = "fblg";
5860 break;
5861
5862 default:
5863 gcc_unreachable ();
5864 }
5865
5866 /* ??? !v9: FP branches cannot be preceded by another floating point
5867 insn. Because there is currently no concept of pre-delay slots,
5868 we can fix this only by always emitting a nop before a floating
5869 point branch. */
5870
5871 string[0] = '\0';
5872 if (! TARGET_V9)
5873 strcpy (string, "nop\n\t");
5874 strcat (string, branch);
5875 }
5876 else
5877 {
5878 switch (code)
5879 {
5880 case NE:
5881 branch = "bne";
5882 break;
5883 case EQ:
5884 branch = "be";
5885 break;
5886 case GE:
5887 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5888 branch = "bpos";
5889 else
5890 branch = "bge";
5891 break;
5892 case GT:
5893 branch = "bg";
5894 break;
5895 case LE:
5896 branch = "ble";
5897 break;
5898 case LT:
5899 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5900 branch = "bneg";
5901 else
5902 branch = "bl";
5903 break;
5904 case GEU:
5905 branch = "bgeu";
5906 break;
5907 case GTU:
5908 branch = "bgu";
5909 break;
5910 case LEU:
5911 branch = "bleu";
5912 break;
5913 case LTU:
5914 branch = "blu";
5915 break;
5916
5917 default:
5918 gcc_unreachable ();
5919 }
5920 strcpy (string, branch);
5921 }
5922 spaces -= strlen (branch);
5923 p = strchr (string, '\0');
5924
5925 /* Now add the annulling, the label, and a possible noop. */
5926 if (annul && ! far)
5927 {
5928 strcpy (p, ",a");
5929 p += 2;
5930 spaces -= 2;
5931 }
5932
5933 if (TARGET_V9)
5934 {
5935 rtx note;
5936 int v8 = 0;
5937
5938 if (! far && insn && INSN_ADDRESSES_SET_P ())
5939 {
5940 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5941 - INSN_ADDRESSES (INSN_UID (insn)));
5942 /* Leave some instructions for "slop". */
5943 if (delta < -260000 || delta >= 260000)
5944 v8 = 1;
5945 }
5946
5947 if (mode == CCFPmode || mode == CCFPEmode)
5948 {
5949 static char v9_fcc_labelno[] = "%%fccX, ";
5950 /* Set the char indicating the number of the fcc reg to use. */
5951 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5952 labelno = v9_fcc_labelno;
5953 if (v8)
5954 {
5955 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5956 labelno = "";
5957 }
5958 }
5959 else if (mode == CCXmode || mode == CCX_NOOVmode)
5960 {
5961 labelno = "%%xcc, ";
5962 gcc_assert (! v8);
5963 }
5964 else
5965 {
5966 labelno = "%%icc, ";
5967 if (v8)
5968 labelno = "";
5969 }
5970
5971 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5972 {
5973 strcpy (p,
5974 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5975 ? ",pt" : ",pn");
5976 p += 3;
5977 spaces -= 3;
5978 }
5979 }
5980 else
5981 labelno = "";
5982
5983 if (spaces > 0)
5984 *p++ = '\t';
5985 else
5986 *p++ = ' ';
5987 strcpy (p, labelno);
5988 p = strchr (p, '\0');
5989 if (far)
5990 {
5991 strcpy (p, ".+12\n\t nop\n\tb\t");
5992 /* Skip the next insn if requested or
5993 if we know that it will be a nop. */
5994 if (annul || ! final_sequence)
5995 p[3] = '6';
5996 p += 14;
5997 }
5998 *p++ = '%';
5999 *p++ = 'l';
6000 *p++ = label + '0';
6001 *p++ = '%';
6002 *p++ = '#';
6003 *p = '\0';
6004
6005 return string;
6006 }
6007
6008 /* Emit a library call comparison between floating point X and Y.
6009 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6010 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6011 values as arguments instead of the TFmode registers themselves,
6012 that's why we cannot call emit_float_lib_cmp. */
6013 void
6014 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6015 {
6016 const char *qpfunc;
6017 rtx slot0, slot1, result, tem, tem2;
6018 enum machine_mode mode;
6019
6020 switch (comparison)
6021 {
6022 case EQ:
6023 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6024 break;
6025
6026 case NE:
6027 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6028 break;
6029
6030 case GT:
6031 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6032 break;
6033
6034 case GE:
6035 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6036 break;
6037
6038 case LT:
6039 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6040 break;
6041
6042 case LE:
6043 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6044 break;
6045
6046 case ORDERED:
6047 case UNORDERED:
6048 case UNGT:
6049 case UNLT:
6050 case UNEQ:
6051 case UNGE:
6052 case UNLE:
6053 case LTGT:
6054 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6055 break;
6056
6057 default:
6058 gcc_unreachable ();
6059 }
6060
6061 if (TARGET_ARCH64)
6062 {
6063 if (GET_CODE (x) != MEM)
6064 {
6065 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6066 emit_move_insn (slot0, x);
6067 }
6068 else
6069 slot0 = x;
6070
6071 if (GET_CODE (y) != MEM)
6072 {
6073 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6074 emit_move_insn (slot1, y);
6075 }
6076 else
6077 slot1 = y;
6078
6079 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6080 DImode, 2,
6081 XEXP (slot0, 0), Pmode,
6082 XEXP (slot1, 0), Pmode);
6083
6084 mode = DImode;
6085 }
6086 else
6087 {
6088 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6089 SImode, 2,
6090 x, TFmode, y, TFmode);
6091
6092 mode = SImode;
6093 }
6094
6095
6096 /* Immediately move the result of the libcall into a pseudo
6097 register so reload doesn't clobber the value if it needs
6098 the return register for a spill reg. */
6099 result = gen_reg_rtx (mode);
6100 emit_move_insn (result, hard_libcall_value (mode));
6101
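/* The decoding below implies the usual quad-float library encoding of
   the comparison result: 0 = equal, 1 = less, 2 = greater,
   3 = unordered. E.g. UNLT tests (result & 1) != 0, which holds
   exactly for the "less" and "unordered" outcomes. */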
6102 switch (comparison)
6103 {
6104 default:
6105 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6106 break;
6107 case ORDERED:
6108 case UNORDERED:
6109 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6110 NULL_RTX, mode, 0);
6111 break;
6112 case UNGT:
6113 case UNGE:
6114 emit_cmp_insn (result, const1_rtx,
6115 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6116 break;
6117 case UNLE:
6118 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6119 break;
6120 case UNLT:
6121 tem = gen_reg_rtx (mode);
6122 if (TARGET_ARCH32)
6123 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6124 else
6125 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6126 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6127 break;
6128 case UNEQ:
6129 case LTGT:
6130 tem = gen_reg_rtx (mode);
6131 if (TARGET_ARCH32)
6132 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6133 else
6134 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6135 tem2 = gen_reg_rtx (mode);
6136 if (TARGET_ARCH32)
6137 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6138 else
6139 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6140 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6141 NULL_RTX, mode, 0);
6142 break;
6143 }
6144 }
6145
6146 /* Generate an unsigned DImode to FP conversion. This is the same code
6147 optabs would emit if we didn't have TFmode patterns. */
6148
6149 void
6150 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6151 {
6152 rtx neglab, donelab, i0, i1, f0, in, out;
6153
6154 out = operands[0];
6155 in = force_reg (DImode, operands[1]);
6156 neglab = gen_label_rtx ();
6157 donelab = gen_label_rtx ();
6158 i0 = gen_reg_rtx (DImode);
6159 i1 = gen_reg_rtx (DImode);
6160 f0 = gen_reg_rtx (mode);
6161
6162 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6163
6164 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6165 emit_jump_insn (gen_jump (donelab));
6166 emit_barrier ();
6167
6168 emit_label (neglab);
6169
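/* IN has its MSB set, so converting it directly as a signed value
   would be wrong. Halve it while folding the shifted-out bit back in
   as a sticky bit (round-to-odd, which avoids double rounding),
   convert, and double the result: e.g. 2^64-1 becomes
   0x7fffffffffffffff, converts to 2^63, and doubles to 2^64, the
   correctly rounded value. */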
6170 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6171 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6172 emit_insn (gen_iordi3 (i0, i0, i1));
6173 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6174 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6175
6176 emit_label (donelab);
6177 }
6178
6179 /* Generate an FP to unsigned DImode conversion. This is the same code
6180 optabs would emit if we didn't have TFmode patterns. */
6181
6182 void
6183 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6184 {
6185 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6186
6187 out = operands[0];
6188 in = force_reg (mode, operands[1]);
6189 neglab = gen_label_rtx ();
6190 donelab = gen_label_rtx ();
6191 i0 = gen_reg_rtx (DImode);
6192 i1 = gen_reg_rtx (DImode);
6193 limit = gen_reg_rtx (mode);
6194 f0 = gen_reg_rtx (mode);
6195
6196 emit_move_insn (limit,
6197 CONST_DOUBLE_FROM_REAL_VALUE (
6198 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6199 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6200
6201 emit_insn (gen_rtx_SET (VOIDmode,
6202 out,
6203 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6204 emit_jump_insn (gen_jump (donelab));
6205 emit_barrier ();
6206
6207 emit_label (neglab);
6208
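/* IN >= 2^63, which does not fit a signed fixed conversion. Subtract
   2^63, convert, then flip the sign bit of the integer result: e.g.
   IN = 2^63 yields fix(0.0) = 0, and the XOR with 1 << 63 restores
   0x8000000000000000. */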
6209 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6210 emit_insn (gen_rtx_SET (VOIDmode,
6211 i0,
6212 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6213 emit_insn (gen_movdi (i1, const1_rtx));
6214 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6215 emit_insn (gen_xordi3 (out, i0, i1));
6216
6217 emit_label (donelab);
6218 }
6219
6220 /* Return the string to output a conditional branch to LABEL, testing
6221 register REG. LABEL is the operand number of the label; REG is the
6222 operand number of the reg. OP is the conditional expression. The mode
6223 of REG says what kind of comparison we made.
6224
6225 DEST is the destination insn (i.e. the label), INSN is the source.
6226
6227 REVERSED is nonzero if we should reverse the sense of the comparison.
6228
6229 ANNUL is nonzero if we should generate an annulling branch. */
6230
6231 const char *
6232 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6233 int annul, rtx insn)
6234 {
6235 static char string[64];
6236 enum rtx_code code = GET_CODE (op);
6237 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6238 rtx note;
6239 int far;
6240 char *p;
6241
6242 /* Branch-on-register instructions are limited to +-128KB. If the
6243 target is too far away, change
6244
6245 brnz,pt %g1, .LC30
6246
6247 to
6248
6249 brz,pn %g1, .+12
6250 nop
6251 ba,pt %xcc, .LC30
6252
6253 and
6254
6255 brgez,a,pn %o1, .LC29
6256
6257 to
6258
6259 brlz,pt %o1, .+16
6260 nop
6261 ba,pt %xcc, .LC29 */
6262
6263 far = get_attr_length (insn) >= 3;
6264
6265 /* These are integer branches on registers, not FP compares, so we can just reverse the code. */
6266 if (reversed ^ far)
6267 code = reverse_condition (code);
6268
6269 /* Only 64 bit versions of these instructions exist. */
6270 gcc_assert (mode == DImode);
6271
6272 /* Start by writing the branch condition. */
6273
6274 switch (code)
6275 {
6276 case NE:
6277 strcpy (string, "brnz");
6278 break;
6279
6280 case EQ:
6281 strcpy (string, "brz");
6282 break;
6283
6284 case GE:
6285 strcpy (string, "brgez");
6286 break;
6287
6288 case LT:
6289 strcpy (string, "brlz");
6290 break;
6291
6292 case LE:
6293 strcpy (string, "brlez");
6294 break;
6295
6296 case GT:
6297 strcpy (string, "brgz");
6298 break;
6299
6300 default:
6301 gcc_unreachable ();
6302 }
6303
6304 p = strchr (string, '\0');
6305
6306 /* Now add the annulling, reg, label, and nop. */
6307 if (annul && ! far)
6308 {
6309 strcpy (p, ",a");
6310 p += 2;
6311 }
6312
6313 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6314 {
6315 strcpy (p,
6316 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6317 ? ",pt" : ",pn");
6318 p += 3;
6319 }
6320
6321 *p = p < string + 8 ? '\t' : ' ';
6322 p++;
6323 *p++ = '%';
6324 *p++ = '0' + reg;
6325 *p++ = ',';
6326 *p++ = ' ';
6327 if (far)
6328 {
6329 int veryfar = 1, delta;
6330
6331 if (INSN_ADDRESSES_SET_P ())
6332 {
6333 delta = (INSN_ADDRESSES (INSN_UID (dest))
6334 - INSN_ADDRESSES (INSN_UID (insn)));
6335 /* Leave some instructions for "slop". */
6336 if (delta >= -260000 && delta < 260000)
6337 veryfar = 0;
6338 }
6339
6340 strcpy (p, ".+12\n\t nop\n\t");
6341 /* Skip the next insn if requested or
6342 if we know that it will be a nop. */
6343 if (annul || ! final_sequence)
6344 p[3] = '6';
6345 p += 12;
6346 if (veryfar)
6347 {
6348 strcpy (p, "b\t");
6349 p += 2;
6350 }
6351 else
6352 {
6353 strcpy (p, "ba,pt\t%%xcc, ");
6354 p += 13;
6355 }
6356 }
6357 *p++ = '%';
6358 *p++ = 'l';
6359 *p++ = '0' + label;
6360 *p++ = '%';
6361 *p++ = '#';
6362 *p = '\0';
6363
6364 return string;
6365 }
6366
6367 /* Return 1 if any of the registers of the instruction are %l[0-7] or
6368 %o[0-7]. Such instructions cannot be used in the delay slot of a
6369 return insn on V9. If TEST is 0, also rename all %i[0-7] registers
6370 to their %o[0-7] counterparts. */
6371
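/* For instance, (reg:SI 24 %i0) is rewritten to (reg:SI 8 %o0):
   OUTGOING_REGNO maps each %i register to the %o register it becomes
   once the return's RESTORE pops the register window. */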
6372 static int
6373 epilogue_renumber (register rtx *where, int test)
6374 {
6375 register const char *fmt;
6376 register int i;
6377 register enum rtx_code code;
6378
6379 if (*where == 0)
6380 return 0;
6381
6382 code = GET_CODE (*where);
6383
6384 switch (code)
6385 {
6386 case REG:
6387 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6388 return 1;
6389 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6390 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
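/* Fall through: the (possibly rewritten) REG needs no further scanning. */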
6391 case SCRATCH:
6392 case CC0:
6393 case PC:
6394 case CONST_INT:
6395 case CONST_DOUBLE:
6396 return 0;
6397
6398 /* Do not replace the frame pointer with the stack pointer because
6399 it can cause the delayed instruction to load below the stack.
6400 This occurs when instructions like:
6401
6402 (set (reg/i:SI 24 %i0)
6403 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6404 (const_int -20 [0xffffffec])) 0))
6405
6406 are in the return delay slot. */
6407 case PLUS:
6408 if (GET_CODE (XEXP (*where, 0)) == REG
6409 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6410 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6411 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6412 return 1;
6413 break;
6414
6415 case MEM:
6416 if (SPARC_STACK_BIAS
6417 && GET_CODE (XEXP (*where, 0)) == REG
6418 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6419 return 1;
6420 break;
6421
6422 default:
6423 break;
6424 }
6425
6426 fmt = GET_RTX_FORMAT (code);
6427
6428 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6429 {
6430 if (fmt[i] == 'E')
6431 {
6432 register int j;
6433 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6434 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6435 return 1;
6436 }
6437 else if (fmt[i] == 'e'
6438 && epilogue_renumber (&(XEXP (*where, i)), test))
6439 return 1;
6440 }
6441 return 0;
6442 }
6443
6444 /* Leaf functions and non-leaf functions have different needs. */
6445
6446 static const int
6447 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6448
6449 static const int
6450 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6451
6452 static const int *const reg_alloc_orders[] = {
6453 reg_leaf_alloc_order,
6454 reg_nonleaf_alloc_order};
6455
6456 void
6457 order_regs_for_local_alloc (void)
6458 {
6459 static int last_order_nonleaf = 1;
6460
6461 if (regs_ever_live[15] != last_order_nonleaf)
6462 {
6463 last_order_nonleaf = !last_order_nonleaf;
6464 memcpy ((char *) reg_alloc_order,
6465 (const char *) reg_alloc_orders[last_order_nonleaf],
6466 FIRST_PSEUDO_REGISTER * sizeof (int));
6467 }
6468 }
6469
6470 /* Return 1 if REG and MEM are legitimate enough to allow the various
6471 mem<-->reg splits to be run. */
6472
6473 int
6474 sparc_splitdi_legitimate (rtx reg, rtx mem)
6475 {
6476 /* Punt if we are here by mistake. */
6477 gcc_assert (reload_completed);
6478
6479 /* We must have an offsettable memory reference. */
6480 if (! offsettable_memref_p (mem))
6481 return 0;
6482
6483 /* If we have legitimate args for ldd/std, we do not want
6484 the split to happen. */
6485 if ((REGNO (reg) % 2) == 0
6486 && mem_min_alignment (mem, 8))
6487 return 0;
6488
6489 /* Success. */
6490 return 1;
6491 }
6492
6493 /* Return 1 if x and y are some kind of REG and they refer to
6494 different hard registers. This test is guaranteed to be
6495 run after reload. */
6496
6497 int
6498 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6499 {
6500 if (GET_CODE (x) != REG)
6501 return 0;
6502 if (GET_CODE (y) != REG)
6503 return 0;
6504 if (REGNO (x) == REGNO (y))
6505 return 0;
6506 return 1;
6507 }
6508
6509 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6510 This makes them candidates for using ldd and std insns.
6511
6512 Note reg1 and reg2 *must* be hard registers. */
6513
6514 int
6515 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6516 {
6517 /* We might have been passed a SUBREG. */
6518 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6519 return 0;
6520
6521 if (REGNO (reg1) % 2 != 0)
6522 return 0;
6523
6524 /* Integer ldd is deprecated in SPARC V9. */
6525 if (TARGET_V9 && REGNO (reg1) < 32)
6526 return 0;
6527
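/* At this point, e.g. (%o0, %o1), regnos 8 and 9, can qualify on V8,
   while on V9 only FP pairs such as (%f0, %f1), regnos 32 and 33,
   remain candidates. */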
6528 return (REGNO (reg1) == REGNO (reg2) - 1);
6529 }
6530
6531 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6532 an ldd or std insn.
6533
6534 This can only happen when addr1 and addr2, the addresses in mem1
6535 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6536 addr1 must also be aligned on a 64-bit boundary.
6537
6538 Also, if dependent_reg_rtx is not null, it must not be used to
6539 compute the address for mem1, i.e. we cannot optimize a sequence
6540 like:
6541 ld [%o0], %o0
6542 ld [%o0 + 4], %o1
6543 to
6544 ldd [%o0], %o0
6545 nor:
6546 ld [%g3 + 4], %g3
6547 ld [%g3], %g2
6548 to
6549 ldd [%g3], %g2
6550
6551 But, note that the transformation from:
6552 ld [%g2 + 4], %g3
6553 ld [%g2], %g2
6554 to
6555 ldd [%g2], %g2
6556 is perfectly fine. Thus, the peephole2 patterns always pass us
6557 the destination register of the first load, never the second one.
6558
6559 For stores we don't have a similar problem, so dependent_reg_rtx is
6560 NULL_RTX. */
6561
6562 int
6563 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6564 {
6565 rtx addr1, addr2;
6566 unsigned int reg1;
6567 HOST_WIDE_INT offset1;
6568
6569 /* The mems cannot be volatile. */
6570 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6571 return 0;
6572
6573 /* MEM1 should be aligned on a 64-bit boundary. */
6574 if (MEM_ALIGN (mem1) < 64)
6575 return 0;
6576
6577 addr1 = XEXP (mem1, 0);
6578 addr2 = XEXP (mem2, 0);
6579
6580 /* Extract a register number and offset (if used) from the first addr. */
6581 if (GET_CODE (addr1) == PLUS)
6582 {
6583 /* If not a REG, return zero. */
6584 if (GET_CODE (XEXP (addr1, 0)) != REG)
6585 return 0;
6586 else
6587 {
6588 reg1 = REGNO (XEXP (addr1, 0));
6589 /* The offset must be constant! */
6590 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6591 return 0;
6592 offset1 = INTVAL (XEXP (addr1, 1));
6593 }
6594 }
6595 else if (GET_CODE (addr1) != REG)
6596 return 0;
6597 else
6598 {
6599 reg1 = REGNO (addr1);
6600 /* This was a simple (mem (reg)) expression. Offset is 0. */
6601 offset1 = 0;
6602 }
6603
6604 /* Make sure the second address is of the form (plus (reg) (const_int)). */
6605 if (GET_CODE (addr2) != PLUS)
6606 return 0;
6607
6608 if (GET_CODE (XEXP (addr2, 0)) != REG
6609 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6610 return 0;
6611
6612 if (reg1 != REGNO (XEXP (addr2, 0)))
6613 return 0;
6614
6615 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6616 return 0;
6617
6618 /* The first offset must be evenly divisible by 8 to ensure the
6619 address is 64 bit aligned. */
6620 if (offset1 % 8 != 0)
6621 return 0;
6622
6623 /* The offset for the second addr must be 4 more than the first addr. */
6624 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6625 return 0;
6626
6627 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6628 instructions. */
6629 return 1;
6630 }
6631
6632 /* Return 1 if reg is a pseudo, or is the first register in
6633 a hard register pair. This makes it a candidate for use in
6634 ldd and std insns. */
6635
6636 int
6637 register_ok_for_ldd (rtx reg)
6638 {
6639 /* We might have been passed a SUBREG. */
6640 if (GET_CODE (reg) != REG)
6641 return 0;
6642
6643 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6644 return (REGNO (reg) % 2 == 0);
6645 else
6646 return 1;
6647 }
6648
6649 /* Print operand X (an rtx) in assembler syntax to file FILE.
6650 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6651 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6652
6653 void
6654 print_operand (FILE *file, rtx x, int code)
6655 {
6656 switch (code)
6657 {
6658 case '#':
6659 /* Output an insn in a delay slot. */
6660 if (final_sequence)
6661 sparc_indent_opcode = 1;
6662 else
6663 fputs ("\n\t nop", file);
6664 return;
6665 case '*':
6666 /* Output an annul flag if there's nothing for the delay slot and we
6667 are optimizing. This is always used with '(' below.
6668 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6669 this is a dbx bug. So, we only do this when optimizing.
6670 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6671 Always emit a nop in case the next instruction is a branch. */
6672 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6673 fputs (",a", file);
6674 return;
6675 case '(':
6676 /* Output a 'nop' if there's nothing for the delay slot and we are
6677 not optimizing. This is always used with '*' above. */
6678 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6679 fputs ("\n\t nop", file);
6680 else if (final_sequence)
6681 sparc_indent_opcode = 1;
6682 return;
6683 case ')':
6684 /* Output the right displacement from the saved PC on function return.
6685 The caller may have placed an "unimp" insn immediately after the call
6686 so we have to account for it. This insn is used in the 32-bit ABI
6687 when calling a function that returns a non zero-sized structure. The
6688 64-bit ABI doesn't have it. Be careful to have this test be the same
6689 as that used on the call. The exception here is that when
6690 sparc_std_struct_return is enabled, the psABI is followed exactly
6691 and the adjustment is made by the code in sparc_struct_value_rtx.
6692 The call emitted is the same when sparc_std_struct_return is
6693 present. */
6694 if (! TARGET_ARCH64
6695 && current_function_returns_struct
6696 && ! sparc_std_struct_return
6697 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6698 == INTEGER_CST)
6699 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6700 fputs ("12", file);
6701 else
6702 fputc ('8', file);
6703 return;
6704 case '_':
6705 /* Output the Embedded Medium/Anywhere code model base register. */
6706 fputs (EMBMEDANY_BASE_REG, file);
6707 return;
6708 case '&':
6709 /* Print some local dynamic TLS name. */
6710 assemble_name (file, get_some_local_dynamic_name ());
6711 return;
6712
6713 case 'Y':
6714 /* Adjust the operand to take into account a RESTORE operation. */
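/* E.g. %i0 (regno 24) is printed as %o0 (regno 8), where its value
   lives after the RESTORE. */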
6715 if (GET_CODE (x) == CONST_INT)
6716 break;
6717 else if (GET_CODE (x) != REG)
6718 output_operand_lossage ("invalid %%Y operand");
6719 else if (REGNO (x) < 8)
6720 fputs (reg_names[REGNO (x)], file);
6721 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6722 fputs (reg_names[REGNO (x)-16], file);
6723 else
6724 output_operand_lossage ("invalid %%Y operand");
6725 return;
6726 case 'L':
6727 /* Print out the low order register name of a register pair. */
6728 if (WORDS_BIG_ENDIAN)
6729 fputs (reg_names[REGNO (x)+1], file);
6730 else
6731 fputs (reg_names[REGNO (x)], file);
6732 return;
6733 case 'H':
6734 /* Print out the high order register name of a register pair. */
6735 if (WORDS_BIG_ENDIAN)
6736 fputs (reg_names[REGNO (x)], file);
6737 else
6738 fputs (reg_names[REGNO (x)+1], file);
6739 return;
6740 case 'R':
6741 /* Print out the second register name of a register pair or quad.
6742 I.e., R (%o0) => %o1. */
6743 fputs (reg_names[REGNO (x)+1], file);
6744 return;
6745 case 'S':
6746 /* Print out the third register name of a register quad.
6747 I.e., S (%o0) => %o2. */
6748 fputs (reg_names[REGNO (x)+2], file);
6749 return;
6750 case 'T':
6751 /* Print out the fourth register name of a register quad.
6752 I.e., T (%o0) => %o3. */
6753 fputs (reg_names[REGNO (x)+3], file);
6754 return;
6755 case 'x':
6756 /* Print a condition code register. */
6757 if (REGNO (x) == SPARC_ICC_REG)
6758 {
6759 /* We don't handle CC[X]_NOOVmode because they're not supposed
6760 to occur here. */
6761 if (GET_MODE (x) == CCmode)
6762 fputs ("%icc", file);
6763 else if (GET_MODE (x) == CCXmode)
6764 fputs ("%xcc", file);
6765 else
6766 gcc_unreachable ();
6767 }
6768 else
6769 /* %fccN register */
6770 fputs (reg_names[REGNO (x)], file);
6771 return;
6772 case 'm':
6773 /* Print the operand's address only. */
6774 output_address (XEXP (x, 0));
6775 return;
6776 case 'r':
6777 /* In this case we need a register. Use %g0 if the
6778 operand is const0_rtx. */
6779 if (x == const0_rtx
6780 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6781 {
6782 fputs ("%g0", file);
6783 return;
6784 }
6785 else
6786 break;
6787
6788 case 'A':
6789 switch (GET_CODE (x))
6790 {
6791 case IOR: fputs ("or", file); break;
6792 case AND: fputs ("and", file); break;
6793 case XOR: fputs ("xor", file); break;
6794 default: output_operand_lossage ("invalid %%A operand");
6795 }
6796 return;
6797
6798 case 'B':
6799 switch (GET_CODE (x))
6800 {
6801 case IOR: fputs ("orn", file); break;
6802 case AND: fputs ("andn", file); break;
6803 case XOR: fputs ("xnor", file); break;
6804 default: output_operand_lossage ("invalid %%B operand");
6805 }
6806 return;
6807
6808 /* These are used by the conditional move instructions. */
6809 case 'c' :
6810 case 'C':
6811 {
6812 enum rtx_code rc = GET_CODE (x);
6813
6814 if (code == 'c')
6815 {
6816 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6817 if (mode == CCFPmode || mode == CCFPEmode)
6818 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6819 else
6820 rc = reverse_condition (GET_CODE (x));
6821 }
6822 switch (rc)
6823 {
6824 case NE: fputs ("ne", file); break;
6825 case EQ: fputs ("e", file); break;
6826 case GE: fputs ("ge", file); break;
6827 case GT: fputs ("g", file); break;
6828 case LE: fputs ("le", file); break;
6829 case LT: fputs ("l", file); break;
6830 case GEU: fputs ("geu", file); break;
6831 case GTU: fputs ("gu", file); break;
6832 case LEU: fputs ("leu", file); break;
6833 case LTU: fputs ("lu", file); break;
6834 case LTGT: fputs ("lg", file); break;
6835 case UNORDERED: fputs ("u", file); break;
6836 case ORDERED: fputs ("o", file); break;
6837 case UNLT: fputs ("ul", file); break;
6838 case UNLE: fputs ("ule", file); break;
6839 case UNGT: fputs ("ug", file); break;
6840 case UNGE: fputs ("uge", file); break;
6841 case UNEQ: fputs ("ue", file); break;
6842 default: output_operand_lossage (code == 'c'
6843 ? "invalid %%c operand"
6844 : "invalid %%C operand");
6845 }
6846 return;
6847 }
6848
6849 /* These are used by the movr instruction pattern. */
6850 case 'd':
6851 case 'D':
6852 {
6853 enum rtx_code rc = (code == 'd'
6854 ? reverse_condition (GET_CODE (x))
6855 : GET_CODE (x));
6856 switch (rc)
6857 {
6858 case NE: fputs ("ne", file); break;
6859 case EQ: fputs ("e", file); break;
6860 case GE: fputs ("gez", file); break;
6861 case LT: fputs ("lz", file); break;
6862 case LE: fputs ("lez", file); break;
6863 case GT: fputs ("gz", file); break;
6864 default: output_operand_lossage (code == 'd'
6865 ? "invalid %%d operand"
6866 : "invalid %%D operand");
6867 }
6868 return;
6869 }
6870
6871 case 'b':
6872 {
6873 /* Print a sign-extended character. */
6874 int i = trunc_int_for_mode (INTVAL (x), QImode);
6875 fprintf (file, "%d", i);
6876 return;
6877 }
6878
6879 case 'f':
6880 /* Operand must be a MEM; write its address. */
6881 if (GET_CODE (x) != MEM)
6882 output_operand_lossage ("invalid %%f operand");
6883 output_address (XEXP (x, 0));
6884 return;
6885
6886 case 's':
6887 {
6888 /* Print a sign-extended 32-bit value. */
6889 HOST_WIDE_INT i;
6890 if (GET_CODE(x) == CONST_INT)
6891 i = INTVAL (x);
6892 else if (GET_CODE(x) == CONST_DOUBLE)
6893 i = CONST_DOUBLE_LOW (x);
6894 else
6895 {
6896 output_operand_lossage ("invalid %%s operand");
6897 return;
6898 }
6899 i = trunc_int_for_mode (i, SImode);
6900 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6901 return;
6902 }
6903
6904 case 0:
6905 /* Do nothing special. */
6906 break;
6907
6908 default:
6909 /* Undocumented flag. */
6910 output_operand_lossage ("invalid operand output code");
6911 }
6912
6913 if (GET_CODE (x) == REG)
6914 fputs (reg_names[REGNO (x)], file);
6915 else if (GET_CODE (x) == MEM)
6916 {
6917 fputc ('[', file);
6918 /* Poor Sun assembler doesn't understand absolute addressing. */
6919 if (CONSTANT_P (XEXP (x, 0)))
6920 fputs ("%g0+", file);
6921 output_address (XEXP (x, 0));
6922 fputc (']', file);
6923 }
6924 else if (GET_CODE (x) == HIGH)
6925 {
6926 fputs ("%hi(", file);
6927 output_addr_const (file, XEXP (x, 0));
6928 fputc (')', file);
6929 }
6930 else if (GET_CODE (x) == LO_SUM)
6931 {
6932 print_operand (file, XEXP (x, 0), 0);
6933 if (TARGET_CM_MEDMID)
6934 fputs ("+%l44(", file);
6935 else
6936 fputs ("+%lo(", file);
6937 output_addr_const (file, XEXP (x, 1));
6938 fputc (')', file);
6939 }
6940 else if (GET_CODE (x) == CONST_DOUBLE
6941 && (GET_MODE (x) == VOIDmode
6942 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6943 {
6944 if (CONST_DOUBLE_HIGH (x) == 0)
6945 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6946 else if (CONST_DOUBLE_HIGH (x) == -1
6947 && CONST_DOUBLE_LOW (x) < 0)
6948 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
6949 else
6950 output_operand_lossage ("long long constant not a valid immediate operand");
6951 }
6952 else if (GET_CODE (x) == CONST_DOUBLE)
6953 output_operand_lossage ("floating point constant not a valid immediate operand");
6954 else { output_addr_const (file, x); }
6955 }
6956
6957 /* Target hook for assembling integer objects. The sparc version has
6958 special handling for aligned DI-mode objects. */
6959
6960 static bool
6961 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
6962 {
6963 /* ??? We only output .xword's for symbols and only then in environments
6964 where the assembler can handle them. */
6965 if (aligned_p && size == 8
6966 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
6967 {
6968 if (TARGET_V9)
6969 {
6970 assemble_integer_with_op ("\t.xword\t", x);
6971 return true;
6972 }
6973 else
6974 {
6975 assemble_aligned_integer (4, const0_rtx);
6976 assemble_aligned_integer (4, x);
6977 return true;
6978 }
6979 }
6980 return default_assemble_integer (x, size, aligned_p);
6981 }
6982
6983 /* Return the value of a code used in the .proc pseudo-op that says
6984 what kind of result this function returns. For non-C types, we pick
6985 the closest C type. */
6986
6987 #ifndef SHORT_TYPE_SIZE
6988 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
6989 #endif
6990
6991 #ifndef INT_TYPE_SIZE
6992 #define INT_TYPE_SIZE BITS_PER_WORD
6993 #endif
6994
6995 #ifndef LONG_TYPE_SIZE
6996 #define LONG_TYPE_SIZE BITS_PER_WORD
6997 #endif
6998
6999 #ifndef LONG_LONG_TYPE_SIZE
7000 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7001 #endif
7002
7003 #ifndef FLOAT_TYPE_SIZE
7004 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7005 #endif
7006
7007 #ifndef DOUBLE_TYPE_SIZE
7008 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7009 #endif
7010
7011 #ifndef LONG_DOUBLE_TYPE_SIZE
7012 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7013 #endif
7014
7015 unsigned long
7016 sparc_type_code (register tree type)
7017 {
7018 register unsigned long qualifiers = 0;
7019 register unsigned shift;
7020
7021 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7022 setting more, since some assemblers will give an error for this. Also,
7023 we must be careful to avoid shifts of 32 bits or more to avoid getting
7024 unpredictable results. */
7025
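/* Worked example: for "int **" the two POINTER_TYPE levels contribute
   (1 << 6) | (1 << 8) and the INTEGER_TYPE base returns code 4, for a
   value of 0x144 (assuming the usual 32-bit "int"). */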
7026 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7027 {
7028 switch (TREE_CODE (type))
7029 {
7030 case ERROR_MARK:
7031 return qualifiers;
7032
7033 case ARRAY_TYPE:
7034 qualifiers |= (3 << shift);
7035 break;
7036
7037 case FUNCTION_TYPE:
7038 case METHOD_TYPE:
7039 qualifiers |= (2 << shift);
7040 break;
7041
7042 case POINTER_TYPE:
7043 case REFERENCE_TYPE:
7044 case OFFSET_TYPE:
7045 qualifiers |= (1 << shift);
7046 break;
7047
7048 case RECORD_TYPE:
7049 return (qualifiers | 8);
7050
7051 case UNION_TYPE:
7052 case QUAL_UNION_TYPE:
7053 return (qualifiers | 9);
7054
7055 case ENUMERAL_TYPE:
7056 return (qualifiers | 10);
7057
7058 case VOID_TYPE:
7059 return (qualifiers | 16);
7060
7061 case INTEGER_TYPE:
7062 /* If this is a range type, consider it to be the underlying
7063 type. */
7064 if (TREE_TYPE (type) != 0)
7065 break;
7066
7067 /* Carefully distinguish all the standard types of C,
7068 without messing up if the language is not C. We do this by
7069 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7070 look at both the names and the above fields, but that's redundant.
7071 Any type whose size is between two C types will be considered
7072 to be the wider of the two types. Also, we do not have a
7073 special code to use for "long long", so anything wider than
7074 long is treated the same. Note that we can't distinguish
7075 between "int" and "long" in this code if they are the same
7076 size, but that's fine, since neither can the assembler. */
7077
7078 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7079 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7080
7081 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7082 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7083
7084 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7085 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7086
7087 else
7088 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7089
7090 case REAL_TYPE:
7091 /* If this is a range type, consider it to be the underlying
7092 type. */
7093 if (TREE_TYPE (type) != 0)
7094 break;
7095
7096 /* Carefully distinguish all the standard types of C,
7097 without messing up if the language is not C. */
7098
7099 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7100 return (qualifiers | 6);
7101
7102 else
7103 return (qualifiers | 7);
7104
7105 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7106 /* ??? We need to distinguish between double and float complex types,
7107 but I don't know how yet because I can't reach this code from
7108 existing front-ends. */
7109 return (qualifiers | 7); /* Who knows? */
7110
7111 case VECTOR_TYPE:
7112 case BOOLEAN_TYPE: /* Boolean truth value type. */
7113 case LANG_TYPE: /* ? */
7114 return qualifiers;
7115
7116 default:
7117 gcc_unreachable (); /* Not a type! */
7118 }
7119 }
7120
7121 return qualifiers;
7122 }
7123
7124 /* Nested function support. */
7125
7126 /* Emit RTL insns to initialize the variable parts of a trampoline.
7127 FNADDR is an RTX for the address of the function's pure code.
7128 CXT is an RTX for the static chain value for the function.
7129
7130 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7131 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7132 (to store insns). This is a bit excessive. Perhaps a different
7133 mechanism would be better here.
7134
7135 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7136
7137 void
7138 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7139 {
7140 /* SPARC 32-bit trampoline:
7141
7142 sethi %hi(fn), %g1
7143 sethi %hi(static), %g2
7144 jmp %g1+%lo(fn)
7145 or %g2, %lo(static), %g2
7146
7147 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7148 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7149 */
7150
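/* Each word below ORs the fixed opcode bits with an address piece.
   For the first one: 0x03000000 is "sethi 0, %g1" and fnaddr >> 10
   supplies the 22-bit immediate, producing "sethi %hi(fn), %g1". */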
7151 emit_move_insn
7152 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7153 expand_binop (SImode, ior_optab,
7154 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7155 size_int (10), 0, 1),
7156 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7157 NULL_RTX, 1, OPTAB_DIRECT));
7158
7159 emit_move_insn
7160 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7161 expand_binop (SImode, ior_optab,
7162 expand_shift (RSHIFT_EXPR, SImode, cxt,
7163 size_int (10), 0, 1),
7164 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7165 NULL_RTX, 1, OPTAB_DIRECT));
7166
7167 emit_move_insn
7168 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7169 expand_binop (SImode, ior_optab,
7170 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7171 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7172 NULL_RTX, 1, OPTAB_DIRECT));
7173
7174 emit_move_insn
7175 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7176 expand_binop (SImode, ior_optab,
7177 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7178 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7179 NULL_RTX, 1, OPTAB_DIRECT));
7180
7181 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7182 aligned on a 16 byte boundary so one flush clears it all. */
7183 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7184 if (sparc_cpu != PROCESSOR_ULTRASPARC
7185 && sparc_cpu != PROCESSOR_ULTRASPARC3
7186 && sparc_cpu != PROCESSOR_NIAGARA)
7187 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7188 plus_constant (tramp, 8)))));
7189
7190 /* Call __enable_execute_stack after writing onto the stack to make sure
7191 the stack address is accessible. */
7192 #ifdef ENABLE_EXECUTE_STACK
7193 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7194 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7195 #endif
7196
7197 }
7198
7199 /* The 64-bit version is simpler because it makes more sense to load the
7200 values as "immediate" data out of the trampoline. It's also easier since
7201 we can read the PC without clobbering a register. */
7202
7203 void
7204 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7205 {
7206 /* SPARC 64-bit trampoline:
7207
7208 rd %pc, %g1
7209 ldx [%g1+24], %g5
7210 jmp %g5
7211 ldx [%g1+16], %g5
7212 +16 bytes data
7213 */
7214
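/* The first four words are the fixed instructions shown above, e.g.
   0x83414000 encodes "rd %pc, %g1" and 0xca586018 encodes
   "ldx [%g1+24], %g5"; only the data words at offsets 16 and 24 vary
   per instantiation. */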
7215 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7216 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7217 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7218 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7219 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7220 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7221 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7222 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7223 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7224 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7225 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7226
7227 if (sparc_cpu != PROCESSOR_ULTRASPARC
7228 && sparc_cpu != PROCESSOR_ULTRASPARC3
7229 && sparc_cpu != PROCESSOR_NIAGARA)
7230 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7231
7232 /* Call __enable_execute_stack after writing onto the stack to make sure
7233 the stack address is accessible. */
7234 #ifdef ENABLE_EXECUTE_STACK
7235 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7236 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7237 #endif
7238 }
7239
7240 /* Adjust the cost of a scheduling dependency. Return the new cost of
7241 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7242
7243 static int
7244 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7245 {
7246 enum attr_type insn_type;
7247
7248 if (! recog_memoized (insn))
7249 return 0;
7250
7251 insn_type = get_attr_type (insn);
7252
7253 if (REG_NOTE_KIND (link) == 0)
7254 {
7255 /* Data dependency; DEP_INSN writes a register that INSN reads some
7256 cycles later. */
7257
7258 /* If a load, then the dependence must be on the memory address;
7259 add an extra "cycle". Note that the cost could be two cycles
7260 if the reg was written late in an instruction group; we cannot tell
7261 here. */
7262 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7263 return cost + 3;
7264
7265 /* Get the delay only if the address of the store is the dependence. */
7266 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7267 {
7268 rtx pat = PATTERN (insn);
7269 rtx dep_pat = PATTERN (dep_insn);
7270
7271 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7272 return cost; /* This should not happen! */
7273
7274 /* The dependency between the two instructions was on the data that
7275 is being stored. Assume that this implies that the address of the
7276 store is not dependent. */
7277 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7278 return cost;
7279
7280 return cost + 3; /* An approximation. */
7281 }
7282
7283 /* A shift instruction cannot receive its data from an instruction
7284 in the same cycle; add a three cycle penalty. */
7285 if (insn_type == TYPE_SHIFT)
7286 return cost + 3; /* Split before cascade into shift. */
7287 }
7288 else
7289 {
7290 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7291 INSN writes some cycles later. */
7292
7293 /* These are only significant for the fpu unit; writing a fp reg before
7294 the fpu has finished with it stalls the processor. */
7295
7296 /* Reusing an integer register causes no problems. */
7297 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7298 return 0;
7299 }
7300
7301 return cost;
7302 }
7303
7304 static int
7305 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7306 {
7307 enum attr_type insn_type, dep_type;
7308 rtx pat = PATTERN (insn);
7309 rtx dep_pat = PATTERN (dep_insn);
7310
7311 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7312 return cost;
7313
7314 insn_type = get_attr_type (insn);
7315 dep_type = get_attr_type (dep_insn);
7316
7317 switch (REG_NOTE_KIND (link))
7318 {
7319 case 0:
7320 /* Data dependency; DEP_INSN writes a register that INSN reads some
7321 cycles later. */
7322
7323 switch (insn_type)
7324 {
7325 case TYPE_STORE:
7326 case TYPE_FPSTORE:
7327 /* Get the delay iff the address of the store is the dependence. */
7328 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7329 return cost;
7330
7331 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7332 return cost;
7333 return cost + 3;
7334
7335 case TYPE_LOAD:
7336 case TYPE_SLOAD:
7337 case TYPE_FPLOAD:
7338 /* If a load, then the dependence must be on the memory address. If
7339 the addresses aren't equal, then it might be a false dependency. */
7340 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7341 {
7342 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7343 || GET_CODE (SET_DEST (dep_pat)) != MEM
7344 || GET_CODE (SET_SRC (pat)) != MEM
7345 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7346 XEXP (SET_SRC (pat), 0)))
7347 return cost + 2;
7348
7349 return cost + 8;
7350 }
7351 break;
7352
7353 case TYPE_BRANCH:
7354 /* Compare to branch latency is 0. There is no benefit from
7355 separating compare and branch. */
7356 if (dep_type == TYPE_COMPARE)
7357 return 0;
7358 /* Floating point compare to branch latency is less than
7359 compare to conditional move. */
7360 if (dep_type == TYPE_FPCMP)
7361 return cost - 1;
7362 break;
7363 default:
7364 break;
7365 }
7366 break;
7367
7368 case REG_DEP_ANTI:
7369 /* Anti-dependencies only penalize the fpu unit. */
7370 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7371 return 0;
7372 break;
7373
7374 default:
7375 break;
7376 }
7377
7378 return cost;
7379 }
7380
7381 static int
7382 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7383 {
7384 switch (sparc_cpu)
7385 {
7386 case PROCESSOR_SUPERSPARC:
7387 cost = supersparc_adjust_cost (insn, link, dep, cost);
7388 break;
7389 case PROCESSOR_HYPERSPARC:
7390 case PROCESSOR_SPARCLITE86X:
7391 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7392 break;
7393 default:
7394 break;
7395 }
7396 return cost;
7397 }
7398
7399 static void
7400 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7401 int sched_verbose ATTRIBUTE_UNUSED,
7402 int max_ready ATTRIBUTE_UNUSED)
7403 {
7404 }
7405
7406 static int
7407 sparc_use_sched_lookahead (void)
7408 {
7409 if (sparc_cpu == PROCESSOR_NIAGARA)
7410 return 0;
7411 if (sparc_cpu == PROCESSOR_ULTRASPARC
7412 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7413 return 4;
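  /* Membership test written as a bitmask: equivalent to comparing
     sparc_cpu against each of the three enumerators in turn.  */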
7414 if ((1 << sparc_cpu) &
7415 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7416 (1 << PROCESSOR_SPARCLITE86X)))
7417 return 3;
7418 return 0;
7419 }
7420
7421 static int
7422 sparc_issue_rate (void)
7423 {
7424 switch (sparc_cpu)
7425 {
7426 case PROCESSOR_NIAGARA:
7427 default:
7428 return 1;
7429 case PROCESSOR_V9:
7430 /* Assume V9 processors are capable of at least dual-issue. */
7431 return 2;
7432 case PROCESSOR_SUPERSPARC:
7433 return 3;
7434 case PROCESSOR_HYPERSPARC:
7435 case PROCESSOR_SPARCLITE86X:
7436 return 2;
7437 case PROCESSOR_ULTRASPARC:
7438 case PROCESSOR_ULTRASPARC3:
7439 return 4;
7440 }
7441 }
7442
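/* Helper for sparc_check_64: classify the SET performed by INSN.
   Following the return convention documented at sparc_check_64 (our
   reading of the code, not an upstream comment): return 1 if the
   destination's high 32 bits are known zero, -1 if it is known to be
   sign extended, and 0 if nothing is known.  */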
7443 static int
7444 set_extends (rtx insn)
7445 {
7446 register rtx pat = PATTERN (insn);
7447
7448 switch (GET_CODE (SET_SRC (pat)))
7449 {
7450 /* Load and some shift instructions zero extend. */
7451 case MEM:
7452 case ZERO_EXTEND:
7453 /* sethi clears the high bits */
7454 case HIGH:
7455 /* LO_SUM is used with sethi. sethi cleared the high
7456 bits and the values used with lo_sum are positive */
7457 case LO_SUM:
7458 /* Store flag stores 0 or 1 */
7459 case LT: case LTU:
7460 case GT: case GTU:
7461 case LE: case LEU:
7462 case GE: case GEU:
7463 case EQ:
7464 case NE:
7465 return 1;
7466 case AND:
7467 {
7468 rtx op0 = XEXP (SET_SRC (pat), 0);
7469 rtx op1 = XEXP (SET_SRC (pat), 1);
7470 if (GET_CODE (op1) == CONST_INT)
7471 return INTVAL (op1) >= 0;
7472 if (GET_CODE (op0) != REG)
7473 return 0;
7474 if (sparc_check_64 (op0, insn) == 1)
7475 return 1;
7476 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7477 }
7478 case IOR:
7479 case XOR:
7480 {
7481 rtx op0 = XEXP (SET_SRC (pat), 0);
7482 rtx op1 = XEXP (SET_SRC (pat), 1);
7483 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7484 return 0;
7485 if (GET_CODE (op1) == CONST_INT)
7486 return INTVAL (op1) >= 0;
7487 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7488 }
7489 case LSHIFTRT:
7490 return GET_MODE (SET_SRC (pat)) == SImode;
7491 /* Positive integers leave the high bits zero. */
7492 case CONST_DOUBLE:
7493 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7494 case CONST_INT:
7495 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7496 case ASHIFTRT:
7497 case SIGN_EXTEND:
7498 return - (GET_MODE (SET_SRC (pat)) == SImode);
7499 case REG:
7500 return sparc_check_64 (SET_SRC (pat), insn);
7501 default:
7502 return 0;
7503 }
7504 }
7505
7506 /* We _ought_ to have only one kind per function, but... */
7507 static GTY(()) rtx sparc_addr_diff_list;
7508 static GTY(()) rtx sparc_addr_list;
7509
7510 void
7511 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7512 {
7513 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7514 if (diff)
7515 sparc_addr_diff_list
7516 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7517 else
7518 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7519 }
7520
7521 static void
7522 sparc_output_addr_vec (rtx vec)
7523 {
7524 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7525 int idx, vlen = XVECLEN (body, 0);
7526
7527 #ifdef ASM_OUTPUT_ADDR_VEC_START
7528 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7529 #endif
7530
7531 #ifdef ASM_OUTPUT_CASE_LABEL
7532 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7533 NEXT_INSN (lab));
7534 #else
7535 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7536 #endif
7537
7538 for (idx = 0; idx < vlen; idx++)
7539 {
7540 ASM_OUTPUT_ADDR_VEC_ELT
7541 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7542 }
7543
7544 #ifdef ASM_OUTPUT_ADDR_VEC_END
7545 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7546 #endif
7547 }
7548
7549 static void
7550 sparc_output_addr_diff_vec (rtx vec)
7551 {
7552 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7553 rtx base = XEXP (XEXP (body, 0), 0);
7554 int idx, vlen = XVECLEN (body, 1);
7555
7556 #ifdef ASM_OUTPUT_ADDR_VEC_START
7557 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7558 #endif
7559
7560 #ifdef ASM_OUTPUT_CASE_LABEL
7561 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7562 NEXT_INSN (lab));
7563 #else
7564 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7565 #endif
7566
7567 for (idx = 0; idx < vlen; idx++)
7568 {
7569 ASM_OUTPUT_ADDR_DIFF_ELT
7570 (asm_out_file,
7571 body,
7572 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7573 CODE_LABEL_NUMBER (base));
7574 }
7575
7576 #ifdef ASM_OUTPUT_ADDR_VEC_END
7577 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7578 #endif
7579 }
7580
7581 static void
7582 sparc_output_deferred_case_vectors (void)
7583 {
7584 rtx t;
7585 int align;
7586
7587 if (sparc_addr_list == NULL_RTX
7588 && sparc_addr_diff_list == NULL_RTX)
7589 return;
7590
7591 /* Align to cache line in the function's code section. */
7592 switch_to_section (current_function_section ());
7593
7594 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7595 if (align > 0)
7596 ASM_OUTPUT_ALIGN (asm_out_file, align);
7597
7598 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7599 sparc_output_addr_vec (XEXP (t, 0));
7600 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7601 sparc_output_addr_diff_vec (XEXP (t, 0));
7602
7603 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7604 }
7605
7606 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7607 unknown. Return 1 if the high bits are zero, -1 if the register is
7608 sign extended. */
7609 int
7610 sparc_check_64 (rtx x, rtx insn)
7611 {
7612 /* If a register is set only once it is safe to ignore insns this
7613 code does not know how to handle. The loop will either recognize
7614 the single set and return the correct value or fail to recognize
7615 it and return 0. */
7616 int set_once = 0;
7617 rtx y = x;
7618
7619 gcc_assert (GET_CODE (x) == REG);
7620
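  /* For a DImode register, inspect its SImode low word; on big-endian
     SPARC that is the subregister numbered REGNO + 1, hence the
     WORDS_BIG_ENDIAN adjustment below.  */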
7621 if (GET_MODE (x) == DImode)
7622 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7623
7624 if (flag_expensive_optimizations
7625 && REG_N_SETS (REGNO (y)) == 1)
7626 set_once = 1;
7627
7628 if (insn == 0)
7629 {
7630 if (set_once)
7631 insn = get_last_insn_anywhere ();
7632 else
7633 return 0;
7634 }
7635
7636 while ((insn = PREV_INSN (insn)))
7637 {
7638 switch (GET_CODE (insn))
7639 {
7640 case JUMP_INSN:
7641 case NOTE:
7642 break;
7643 case CODE_LABEL:
7644 case CALL_INSN:
7645 default:
7646 if (! set_once)
7647 return 0;
7648 break;
7649 case INSN:
7650 {
7651 rtx pat = PATTERN (insn);
7652 if (GET_CODE (pat) != SET)
7653 return 0;
7654 if (rtx_equal_p (x, SET_DEST (pat)))
7655 return set_extends (insn);
7656 if (y && rtx_equal_p (y, SET_DEST (pat)))
7657 return set_extends (insn);
7658 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7659 return 0;
7660 }
7661 }
7662 }
7663 return 0;
7664 }
7665
7666 /* Returns assembly code to perform a DImode shift using
7667 a 64-bit global or out register on SPARC-V8+. */
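/* As an illustration (assuming register alternative 0 and a source
   not known to be zero-extended), OPCODE "sllx" would produce
   something like:
       sllx  %H1, 32, %0
       srl   %L1, 0, %L1
       or    %L1, %0, %0
       sllx  %0, %2, %L0
       srlx  %L0, 32, %H0  */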
7668 const char *
7669 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7670 {
7671 static char asm_code[60];
7672
7673 /* The scratch register is only required when the destination
7674 register is not a 64-bit global or out register. */
7675 if (which_alternative != 2)
7676 operands[3] = operands[0];
7677
7678 /* We can only shift by constants <= 63. */
7679 if (GET_CODE (operands[2]) == CONST_INT)
7680 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7681
7682 if (GET_CODE (operands[1]) == CONST_INT)
7683 {
7684 output_asm_insn ("mov\t%1, %3", operands);
7685 }
7686 else
7687 {
7688 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7689 if (sparc_check_64 (operands[1], insn) <= 0)
7690 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7691 output_asm_insn ("or\t%L1, %3, %3", operands);
7692 }
7693
7694 strcpy (asm_code, opcode);
7695
7696 if (which_alternative != 2)
7697 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7698 else
7699 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7700 }
7701
7702 /* Output rtl to increment the profiler label LABELNO
7703 for profiling a function entry. */
7704
7705 void
7706 sparc_profile_hook (int labelno)
7707 {
7708 char buf[32];
7709 rtx lab, fun;
7710
7711 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7712 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7713 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7714
7715 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7716 }
7717
7718 #ifdef OBJECT_FORMAT_ELF
7719 static void
7720 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7721 tree decl)
7722 {
7723 if (flags & SECTION_MERGE)
7724 {
7725 /* entsize cannot be expressed in this section attributes
7726 encoding style. */
7727 default_elf_asm_named_section (name, flags, decl);
7728 return;
7729 }
7730
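  /* The code below emits Sun assembler section syntax; for a typical
     writable data section the output would look like (illustrative):
         .section "mysect",#alloc,#write  */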
7731 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7732
7733 if (!(flags & SECTION_DEBUG))
7734 fputs (",#alloc", asm_out_file);
7735 if (flags & SECTION_WRITE)
7736 fputs (",#write", asm_out_file);
7737 if (flags & SECTION_TLS)
7738 fputs (",#tls", asm_out_file);
7739 if (flags & SECTION_CODE)
7740 fputs (",#execinstr", asm_out_file);
7741
7742 /* ??? Handle SECTION_BSS. */
7743
7744 fputc ('\n', asm_out_file);
7745 }
7746 #endif /* OBJECT_FORMAT_ELF */
7747
7748 /* We do not allow indirect calls to be optimized into sibling calls.
7749
7750 We cannot use sibling calls when delayed branches are disabled
7751 because they will likely require the call delay slot to be filled.
7752
7753 Also, on SPARC 32-bit we cannot emit a sibling call when the
7754 current function returns a structure. This is because the "unimp
7755 after call" convention would cause the callee to return to the
7756 wrong place. The generic code already disallows cases where the
7757 function being called returns a structure.
7758
7759 It may seem strange how this last case could occur. Usually there
7760 is code after the call which jumps to epilogue code which dumps the
7761 return value into the struct return area. That ought to invalidate
7762 the sibling call right? Well, in the C++ case we can end up passing
7763 the pointer to the struct return area to a constructor (which returns
7764 void) and then nothing else happens. Such a sibling call would look
7765 valid without the added check here. */
7766 static bool
7767 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7768 {
7769 return (decl
7770 && flag_delayed_branch
7771 && (TARGET_ARCH64 || ! current_function_returns_struct));
7772 }
7773
7774 /* libfunc renaming. */
7775 #include "config/gofast.h"
7776
7777 static void
7778 sparc_init_libfuncs (void)
7779 {
7780 if (TARGET_ARCH32)
7781 {
7782 /* Use the subroutines that Sun's library provides for integer
7783 multiply and divide. The `*' prevents an underscore from
7784 being prepended by the compiler. .umul is a little faster
7785 than .mul. */
7786 set_optab_libfunc (smul_optab, SImode, "*.umul");
7787 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7788 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7789 set_optab_libfunc (smod_optab, SImode, "*.rem");
7790 set_optab_libfunc (umod_optab, SImode, "*.urem");
7791
7792 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
7793 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7794 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7795 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7796 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7797 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7798
7799 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
7800 is because with soft-float, the SFmode and DFmode sqrt
7801 instructions will be absent, and the compiler will notice and
7802 try to use the TFmode sqrt instruction for calls to the
7803 builtin function sqrt, but this fails. */
7804 if (TARGET_FPU)
7805 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7806
7807 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7808 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7809 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7810 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7811 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7812 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7813
7814 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7815 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7816 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7817 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7818
7819 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7820 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7821 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7822 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7823
7824 if (DITF_CONVERSION_LIBFUNCS)
7825 {
7826 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7827 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7828 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7829 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7830 }
7831
7832 if (SUN_CONVERSION_LIBFUNCS)
7833 {
7834 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7835 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7836 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7837 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7838 }
7839 }
7840 if (TARGET_ARCH64)
7841 {
7842 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7843 do not exist in the library. Make sure the compiler does not
7844 emit calls to them by accident. (It should always use the
7845 hardware instructions.) */
7846 set_optab_libfunc (smul_optab, SImode, 0);
7847 set_optab_libfunc (sdiv_optab, SImode, 0);
7848 set_optab_libfunc (udiv_optab, SImode, 0);
7849 set_optab_libfunc (smod_optab, SImode, 0);
7850 set_optab_libfunc (umod_optab, SImode, 0);
7851
7852 if (SUN_INTEGER_MULTIPLY_64)
7853 {
7854 set_optab_libfunc (smul_optab, DImode, "__mul64");
7855 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7856 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7857 set_optab_libfunc (smod_optab, DImode, "__rem64");
7858 set_optab_libfunc (umod_optab, DImode, "__urem64");
7859 }
7860
7861 if (SUN_CONVERSION_LIBFUNCS)
7862 {
7863 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7864 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7865 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7866 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7867 }
7868 }
7869
7870 gofast_maybe_init_libfuncs ();
7871 }
7872
7873 #define def_builtin(NAME, CODE, TYPE) \
7874 lang_hooks.builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7875 NULL_TREE)
7876
7877 /* Implement the TARGET_INIT_BUILTINS target hook.
7878 Create builtin functions for special SPARC instructions. */
7879
7880 static void
7881 sparc_init_builtins (void)
7882 {
7883 if (TARGET_VIS)
7884 sparc_vis_init_builtins ();
7885 }
7886
7887 /* Create builtin functions for VIS 1.0 instructions. */
7888
7889 static void
7890 sparc_vis_init_builtins (void)
7891 {
7892 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7893 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7894 tree v4hi = build_vector_type (intHI_type_node, 4);
7895 tree v2hi = build_vector_type (intHI_type_node, 2);
7896 tree v2si = build_vector_type (intSI_type_node, 2);
7897
7898 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7899 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7900 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7901 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7902 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7903 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7904 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7905 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7906 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7907 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7908 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7909 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7910 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7911 v8qi, v8qi,
7912 intDI_type_node, 0);
7913 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7914 intDI_type_node,
7915 intDI_type_node, 0);
7916 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7917 ptr_type_node,
7918 intSI_type_node, 0);
7919 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7920 ptr_type_node,
7921 intDI_type_node, 0);
7922
7923 /* Packing and expanding vectors. */
7924 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7925 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7926 v8qi_ftype_v2si_v8qi);
7927 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7928 v2hi_ftype_v2si);
7929 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7930 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7931 v8qi_ftype_v4qi_v4qi);
7932
7933 /* Multiplications. */
7934 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7935 v4hi_ftype_v4qi_v4hi);
7936 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7937 v4hi_ftype_v4qi_v2hi);
7938 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7939 v4hi_ftype_v4qi_v2hi);
7940 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7941 v4hi_ftype_v8qi_v4hi);
7942 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
7943 v4hi_ftype_v8qi_v4hi);
7944 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
7945 v2si_ftype_v4qi_v2hi);
7946 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
7947 v2si_ftype_v4qi_v2hi);
7948
7949 /* Data aligning. */
7950 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
7951 v4hi_ftype_v4hi_v4hi);
7952 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
7953 v8qi_ftype_v8qi_v8qi);
7954 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
7955 v2si_ftype_v2si_v2si);
7956 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
7957 di_ftype_di_di);
7958 if (TARGET_ARCH64)
7959 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
7960 ptr_ftype_ptr_di);
7961 else
7962 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
7963 ptr_ftype_ptr_si);
7964
7965 /* Pixel distance. */
7966 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
7967 di_ftype_v8qi_v8qi_di);
7968 }
7969
7970 /* Handle TARGET_EXPAND_BUILTIN target hook.
7971 Expand builtin functions for sparc intrinsics. */
7972
7973 static rtx
7974 sparc_expand_builtin (tree exp, rtx target,
7975 rtx subtarget ATTRIBUTE_UNUSED,
7976 enum machine_mode tmode ATTRIBUTE_UNUSED,
7977 int ignore ATTRIBUTE_UNUSED)
7978 {
7979 tree arglist;
7980 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7981 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
7982 rtx pat, op[4];
7983 enum machine_mode mode[4];
7984 int arg_count = 0;
7985
7986 mode[0] = insn_data[icode].operand[0].mode;
7987 if (!target
7988 || GET_MODE (target) != mode[0]
7989 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
7990 op[0] = gen_reg_rtx (mode[0]);
7991 else
7992 op[0] = target;
7993
7994 for (arglist = TREE_OPERAND (exp, 1); arglist;
7995 arglist = TREE_CHAIN (arglist))
7996 {
7997 tree arg = TREE_VALUE (arglist);
7998
7999 arg_count++;
8000 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8001 op[arg_count] = expand_normal (arg);
8002
8003 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8004 mode[arg_count]))
8005 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8006 }
8007
8008 switch (arg_count)
8009 {
8010 case 1:
8011 pat = GEN_FCN (icode) (op[0], op[1]);
8012 break;
8013 case 2:
8014 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8015 break;
8016 case 3:
8017 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8018 break;
8019 default:
8020 gcc_unreachable ();
8021 }
8022
8023 if (!pat)
8024 return NULL_RTX;
8025
8026 emit_insn (pat);
8027
8028 return op[0];
8029 }
8030
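/* Round the product of an unsigned 8-bit element and a signed 16-bit
   element the way the VIS fmul8x16 family does: the 24-bit product is
   reduced to its upper 16 bits with round-to-nearest, which is what
   (e8 * e16 + 128) / 256 computes.  For example, e8 = 3 and e16 = 100
   give (300 + 128) / 256 = 1, the nearest integer to 300/256.  */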
8031 static int
8032 sparc_vis_mul8x16 (int e8, int e16)
8033 {
8034 return (e8 * e16 + 128) / 256;
8035 }
8036
8037 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8038 by FNCODE. All of the elements in the ELTS0 and ELTS1 lists must be integer
8039 constants. A tree list with the results of the multiplications is returned,
8040 and each element in the list is of INNER_TYPE. */
8041
8042 static tree
8043 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8044 {
8045 tree n_elts = NULL_TREE;
8046 int scale;
8047
8048 switch (fncode)
8049 {
8050 case CODE_FOR_fmul8x16_vis:
8051 for (; elts0 && elts1;
8052 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8053 {
8054 int val
8055 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8056 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8057 n_elts = tree_cons (NULL_TREE,
8058 build_int_cst (inner_type, val),
8059 n_elts);
8060 }
8061 break;
8062
8063 case CODE_FOR_fmul8x16au_vis:
8064 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8065
8066 for (; elts0; elts0 = TREE_CHAIN (elts0))
8067 {
8068 int val
8069 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8070 scale);
8071 n_elts = tree_cons (NULL_TREE,
8072 build_int_cst (inner_type, val),
8073 n_elts);
8074 }
8075 break;
8076
8077 case CODE_FOR_fmul8x16al_vis:
8078 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8079
8080 for (; elts0; elts0 = TREE_CHAIN (elts0))
8081 {
8082 int val
8083 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8084 scale);
8085 n_elts = tree_cons (NULL_TREE,
8086 build_int_cst (inner_type, val),
8087 n_elts);
8088 }
8089 break;
8090
8091 default:
8092 gcc_unreachable ();
8093 }
8094
8095 return nreverse (n_elts);
8096 }
8097
8098 /* Handle TARGET_FOLD_BUILTIN target hook.
8099 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8100 result of the function call is ignored. NULL_TREE is returned if the
8101 function could not be folded. */
8102
8103 static tree
8104 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8105 {
8106 tree arg0, arg1, arg2;
8107 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8108
8109 if (ignore
8110 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8111 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8112 return fold_convert (rtype, integer_zero_node);
8113
8114 switch (DECL_FUNCTION_CODE (fndecl))
8115 {
8116 case CODE_FOR_fexpand_vis:
8117 arg0 = TREE_VALUE (arglist);
8118 STRIP_NOPS (arg0);
8119
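      /* fexpand widens each unsigned byte into a 16-bit fixed-point
	 value by shifting it left four bits; the constant fold below
	 mirrors that with "val << 4".  */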
8120 if (TREE_CODE (arg0) == VECTOR_CST)
8121 {
8122 tree inner_type = TREE_TYPE (rtype);
8123 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8124 tree n_elts = NULL_TREE;
8125
8126 for (; elts; elts = TREE_CHAIN (elts))
8127 {
8128 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8129 n_elts = tree_cons (NULL_TREE,
8130 build_int_cst (inner_type, val),
8131 n_elts);
8132 }
8133 return build_vector (rtype, nreverse (n_elts));
8134 }
8135 break;
8136
8137 case CODE_FOR_fmul8x16_vis:
8138 case CODE_FOR_fmul8x16au_vis:
8139 case CODE_FOR_fmul8x16al_vis:
8140 arg0 = TREE_VALUE (arglist);
8141 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8142 STRIP_NOPS (arg0);
8143 STRIP_NOPS (arg1);
8144
8145 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8146 {
8147 tree inner_type = TREE_TYPE (rtype);
8148 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8149 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8150 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8151 inner_type, elts0, elts1);
8152
8153 return build_vector (rtype, n_elts);
8154 }
8155 break;
8156
8157 case CODE_FOR_fpmerge_vis:
8158 arg0 = TREE_VALUE (arglist);
8159 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8160 STRIP_NOPS (arg0);
8161 STRIP_NOPS (arg1);
8162
8163 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8164 {
8165 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8166 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8167 tree n_elts = NULL_TREE;
8168
8169 for (; elts0 && elts1;
8170 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8171 {
8172 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8173 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8174 }
8175
8176 return build_vector (rtype, nreverse (n_elts));
8177 }
8178 break;
8179
8180 case CODE_FOR_pdist_vis:
8181 arg0 = TREE_VALUE (arglist);
8182 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8183 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8184 STRIP_NOPS (arg0);
8185 STRIP_NOPS (arg1);
8186 STRIP_NOPS (arg2);
8187
8188 if (TREE_CODE (arg0) == VECTOR_CST
8189 && TREE_CODE (arg1) == VECTOR_CST
8190 && TREE_CODE (arg2) == INTEGER_CST)
8191 {
8192 int overflow = 0;
8193 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8194 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8195 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8196 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8197
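	  /* Accumulate |elt0 - elt1| into the double-word (LOW, HIGH)
	     accumulator: negate elt1, add elt0, take the absolute
	     value by re-negating when the result is negative, then
	     add it into the running sum.  */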
8198 for (; elts0 && elts1;
8199 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8200 {
8201 unsigned HOST_WIDE_INT
8202 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8203 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8204 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8205 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8206
8207 unsigned HOST_WIDE_INT l;
8208 HOST_WIDE_INT h;
8209
8210 overflow |= neg_double (low1, high1, &l, &h);
8211 overflow |= add_double (low0, high0, l, h, &l, &h);
8212 if (h < 0)
8213 overflow |= neg_double (l, h, &l, &h);
8214
8215 overflow |= add_double (low, high, l, h, &low, &high);
8216 }
8217
8218 gcc_assert (overflow == 0);
8219
8220 return build_int_cst_wide (rtype, low, high);
8221 }
8222
8223 default:
8224 break;
8225 }
8226
8227 return NULL_TREE;
8228 }
8229
8230 int
8231 sparc_extra_constraint_check (rtx op, int c, int strict)
8232 {
8233 int reload_ok_mem;
8234
8235 if (TARGET_ARCH64
8236 && (c == 'T' || c == 'U'))
8237 return 0;
8238
8239 switch (c)
8240 {
8241 case 'Q':
8242 return fp_sethi_p (op);
8243
8244 case 'R':
8245 return fp_mov_p (op);
8246
8247 case 'S':
8248 return fp_high_losum_p (op);
8249
8250 case 'U':
8251 if (! strict
8252 || (GET_CODE (op) == REG
8253 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8254 || reg_renumber[REGNO (op)] >= 0)))
8255 return register_ok_for_ldd (op);
8256
8257 return 0;
8258
8259 case 'W':
8260 case 'T':
8261 break;
8262
8263 case 'Y':
8264 return const_zero_operand (op, GET_MODE (op));
8265
8266 default:
8267 return 0;
8268 }
8269
8270 /* Our memory extra constraints have to emulate the
8271 behavior of 'm' and 'o' in order for reload to work
8272 correctly. */
8273 if (GET_CODE (op) == MEM)
8274 {
8275 reload_ok_mem = 0;
8276 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8277 && (! strict
8278 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8279 reload_ok_mem = 1;
8280 }
8281 else
8282 {
8283 reload_ok_mem = (reload_in_progress
8284 && GET_CODE (op) == REG
8285 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8286 && reg_renumber [REGNO (op)] < 0);
8287 }
8288
8289 return reload_ok_mem;
8290 }
8291
8292 /* ??? This duplicates information provided to the compiler by the
8293 ??? scheduler description. Some day, teach genautomata to output
8294 ??? the latencies and then CSE will just use that. */
8295
8296 static bool
8297 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8298 {
8299 enum machine_mode mode = GET_MODE (x);
8300 bool float_mode_p = FLOAT_MODE_P (mode);
8301
8302 switch (code)
8303 {
8304 case CONST_INT:
8305 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8306 {
8307 *total = 0;
8308 return true;
8309 }
8310 /* FALLTHRU */
8311
8312 case HIGH:
8313 *total = 2;
8314 return true;
8315
8316 case CONST:
8317 case LABEL_REF:
8318 case SYMBOL_REF:
8319 *total = 4;
8320 return true;
8321
8322 case CONST_DOUBLE:
8323 if (GET_MODE (x) == VOIDmode
8324 && ((CONST_DOUBLE_HIGH (x) == 0
8325 && CONST_DOUBLE_LOW (x) < 0x1000)
8326 || (CONST_DOUBLE_HIGH (x) == -1
8327 && CONST_DOUBLE_LOW (x) < 0
8328 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8329 *total = 0;
8330 else
8331 *total = 8;
8332 return true;
8333
8334 case MEM:
8335 /* If outer-code was a sign or zero extension, a cost
8336 of COSTS_N_INSNS (1) was already added in. This is
8337 why we are subtracting it back out. */
8338 if (outer_code == ZERO_EXTEND)
8339 {
8340 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8341 }
8342 else if (outer_code == SIGN_EXTEND)
8343 {
8344 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8345 }
8346 else if (float_mode_p)
8347 {
8348 *total = sparc_costs->float_load;
8349 }
8350 else
8351 {
8352 *total = sparc_costs->int_load;
8353 }
8354
8355 return true;
8356
8357 case PLUS:
8358 case MINUS:
8359 if (float_mode_p)
8360 *total = sparc_costs->float_plusminus;
8361 else
8362 *total = COSTS_N_INSNS (1);
8363 return false;
8364
8365 case MULT:
8366 if (float_mode_p)
8367 *total = sparc_costs->float_mul;
8368 else if (! TARGET_HARD_MUL)
8369 *total = COSTS_N_INSNS (25);
8370 else
8371 {
8372 int bit_cost;
8373
8374 bit_cost = 0;
8375 if (sparc_costs->int_mul_bit_factor)
8376 {
8377 int nbits;
8378
8379 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8380 {
8381 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
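		  /* Count the set bits: value &= value - 1 clears the
		     lowest set bit on each iteration.  */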
8382 for (nbits = 0; value != 0; value &= value - 1)
8383 nbits++;
8384 }
8385 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8386 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8387 {
8388 rtx x1 = XEXP (x, 1);
8389 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8390 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8391
8392 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8393 nbits++;
8394 for (; value2 != 0; value2 &= value2 - 1)
8395 nbits++;
8396 }
8397 else
8398 nbits = 7;
8399
8400 if (nbits < 3)
8401 nbits = 3;
8402 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8403 bit_cost = COSTS_N_INSNS (bit_cost);
8404 }
8405
8406 if (mode == DImode)
8407 *total = sparc_costs->int_mulX + bit_cost;
8408 else
8409 *total = sparc_costs->int_mul + bit_cost;
8410 }
8411 return false;
8412
8413 case ASHIFT:
8414 case ASHIFTRT:
8415 case LSHIFTRT:
8416 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8417 return false;
8418
8419 case DIV:
8420 case UDIV:
8421 case MOD:
8422 case UMOD:
8423 if (float_mode_p)
8424 {
8425 if (mode == DFmode)
8426 *total = sparc_costs->float_div_df;
8427 else
8428 *total = sparc_costs->float_div_sf;
8429 }
8430 else
8431 {
8432 if (mode == DImode)
8433 *total = sparc_costs->int_divX;
8434 else
8435 *total = sparc_costs->int_div;
8436 }
8437 return false;
8438
8439 case NEG:
8440 if (! float_mode_p)
8441 {
8442 *total = COSTS_N_INSNS (1);
8443 return false;
8444 }
8445 /* FALLTHRU */
8446
8447 case ABS:
8448 case FLOAT:
8449 case UNSIGNED_FLOAT:
8450 case FIX:
8451 case UNSIGNED_FIX:
8452 case FLOAT_EXTEND:
8453 case FLOAT_TRUNCATE:
8454 *total = sparc_costs->float_move;
8455 return false;
8456
8457 case SQRT:
8458 if (mode == DFmode)
8459 *total = sparc_costs->float_sqrt_df;
8460 else
8461 *total = sparc_costs->float_sqrt_sf;
8462 return false;
8463
8464 case COMPARE:
8465 if (float_mode_p)
8466 *total = sparc_costs->float_cmp;
8467 else
8468 *total = COSTS_N_INSNS (1);
8469 return false;
8470
8471 case IF_THEN_ELSE:
8472 if (float_mode_p)
8473 *total = sparc_costs->float_cmove;
8474 else
8475 *total = sparc_costs->int_cmove;
8476 return false;
8477
8478 case IOR:
8479 /* Handle the NAND vector patterns. */
8480 if (sparc_vector_mode_supported_p (GET_MODE (x))
8481 && GET_CODE (XEXP (x, 0)) == NOT
8482 && GET_CODE (XEXP (x, 1)) == NOT)
8483 {
8484 *total = COSTS_N_INSNS (1);
8485 return true;
8486 }
8487 else
8488 return false;
8489
8490 default:
8491 return false;
8492 }
8493 }
8494
8495 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
8496 This is achieved by means of a manual dynamic stack space allocation in
8497 the current frame. We make the assumption that SEQ doesn't contain any
8498 function calls, with the possible exception of calls to the PIC helper. */
8499
8500 static void
8501 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8502 {
8503 /* We must preserve the lowest 16 words for the register save area. */
8504 HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
8505 /* We really need only 2 words of fresh stack space. */
8506 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);
8507
8508 rtx slot
8509 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
8510 SPARC_STACK_BIAS + offset));
8511
8512 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
8513 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8514 if (reg2)
8515 emit_insn (gen_rtx_SET (VOIDmode,
8516 adjust_address (slot, word_mode, UNITS_PER_WORD),
8517 reg2));
8518 emit_insn (seq);
8519 if (reg2)
8520 emit_insn (gen_rtx_SET (VOIDmode,
8521 reg2,
8522 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8523 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8524 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
8525 }
8526
8527 /* Output the assembler code for a thunk function. THUNK_DECL is the
8528 declaration for the thunk function itself, FUNCTION is the decl for
8529 the target function. DELTA is an immediate constant offset to be
8530 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8531 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8532
8533 static void
8534 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8535 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8536 tree function)
8537 {
8538 rtx this, insn, funexp;
8539 unsigned int int_arg_first;
8540
8541 reload_completed = 1;
8542 epilogue_completed = 1;
8543 no_new_pseudos = 1;
8544 reset_block_changes ();
8545
8546 emit_note (NOTE_INSN_PROLOGUE_END);
8547
8548 if (flag_delayed_branch)
8549 {
8550 /* We will emit a regular sibcall below, so we need to instruct
8551 output_sibcall that we are in a leaf function. */
8552 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8553
8554 /* This will cause final.c to invoke leaf_renumber_regs so we
8555 must behave as if we were in a not-yet-leafified function. */
8556 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8557 }
8558 else
8559 {
8560 /* We will emit the sibcall manually below, so we will need to
8561 manually spill non-leaf registers. */
8562 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8563
8564 /* We really are in a leaf function. */
8565 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8566 }
8567
8568 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8569 returns a structure, the structure return pointer is there instead. */
8570 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8571 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8572 else
8573 this = gen_rtx_REG (Pmode, int_arg_first);
8574
8575 /* Add DELTA. When possible use a plain add, otherwise load it into
8576 a register first. */
8577 if (delta)
8578 {
8579 rtx delta_rtx = GEN_INT (delta);
8580
8581 if (! SPARC_SIMM13_P (delta))
8582 {
8583 rtx scratch = gen_rtx_REG (Pmode, 1);
8584 emit_move_insn (scratch, delta_rtx);
8585 delta_rtx = scratch;
8586 }
8587
8588 /* THIS += DELTA. */
8589 emit_insn (gen_add2_insn (this, delta_rtx));
8590 }
8591
8592 /* Add the word at address (*THIS + VCALL_OFFSET). */
8593 if (vcall_offset)
8594 {
8595 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8596 rtx scratch = gen_rtx_REG (Pmode, 1);
8597
8598 gcc_assert (vcall_offset < 0);
8599
8600 /* SCRATCH = *THIS. */
8601 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8602
8603 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8604 may not have any available scratch register at this point. */
8605 if (SPARC_SIMM13_P (vcall_offset))
8606 ;
8607 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8608 else if (! fixed_regs[5]
8609 /* The below sequence is made up of at least 2 insns,
8610 while the default method may need only one. */
8611 && vcall_offset < -8192)
8612 {
8613 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8614 emit_move_insn (scratch2, vcall_offset_rtx);
8615 vcall_offset_rtx = scratch2;
8616 }
8617 else
8618 {
8619 rtx increment = GEN_INT (-4096);
8620
8621 /* VCALL_OFFSET is a negative number whose typical range can be
8622 estimated as -32768..0 in 32-bit mode. In almost all cases
8623 it is therefore cheaper to emit multiple add insns than
8624 spilling and loading the constant into a register (at least
8625 6 insns). */
8626 while (! SPARC_SIMM13_P (vcall_offset))
8627 {
8628 emit_insn (gen_add2_insn (scratch, increment));
8629 vcall_offset += 4096;
8630 }
8631 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8632 }
8633
8634 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8635 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8636 gen_rtx_PLUS (Pmode,
8637 scratch,
8638 vcall_offset_rtx)));
8639
8640 /* THIS += *(*THIS + VCALL_OFFSET). */
8641 emit_insn (gen_add2_insn (this, scratch));
8642 }
8643
8644 /* Generate a tail call to the target function. */
8645 if (! TREE_USED (function))
8646 {
8647 assemble_external (function);
8648 TREE_USED (function) = 1;
8649 }
8650 funexp = XEXP (DECL_RTL (function), 0);
8651
8652 if (flag_delayed_branch)
8653 {
8654 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8655 insn = emit_call_insn (gen_sibcall (funexp));
8656 SIBLING_CALL_P (insn) = 1;
8657 }
8658 else
8659 {
8660 /* The hoops we have to jump through in order to generate a sibcall
8661 without using delay slots... */
8662 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8663
8664 if (flag_pic)
8665 {
8666 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8667 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8668 start_sequence ();
8669 /* Delay emitting the PIC helper function because it needs to
8670 change the section and we are emitting assembly code. */
8671 load_pic_register (true); /* clobbers %o7 */
8672 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8673 seq = get_insns ();
8674 end_sequence ();
8675 emit_and_preserve (seq, spill_reg, spill_reg2);
8676 }
8677 else if (TARGET_ARCH32)
8678 {
8679 emit_insn (gen_rtx_SET (VOIDmode,
8680 scratch,
8681 gen_rtx_HIGH (SImode, funexp)));
8682 emit_insn (gen_rtx_SET (VOIDmode,
8683 scratch,
8684 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8685 }
8686 else /* TARGET_ARCH64 */
8687 {
8688 switch (sparc_cmodel)
8689 {
8690 case CM_MEDLOW:
8691 case CM_MEDMID:
8692 /* The destination can serve as a temporary. */
8693 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8694 break;
8695
8696 case CM_MEDANY:
8697 case CM_EMBMEDANY:
8698 /* The destination cannot serve as a temporary. */
8699 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8700 start_sequence ();
8701 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8702 seq = get_insns ();
8703 end_sequence ();
8704 emit_and_preserve (seq, spill_reg, 0);
8705 break;
8706
8707 default:
8708 gcc_unreachable ();
8709 }
8710 }
8711
8712 emit_jump_insn (gen_indirect_jump (scratch));
8713 }
8714
8715 emit_barrier ();
8716
8717 /* Run just enough of rest_of_compilation to get the insns emitted.
8718 There's not really enough bulk here to make other passes such as
8719 instruction scheduling worth while. Note that use_thunk calls
8720 assemble_start_function and assemble_end_function. */
8721 insn = get_insns ();
8722 insn_locators_initialize ();
8723 shorten_branches (insn);
8724 final_start_function (insn, file, 1);
8725 final (insn, file, 1);
8726 final_end_function ();
8727
8728 reload_completed = 0;
8729 epilogue_completed = 0;
8730 no_new_pseudos = 0;
8731 }
8732
8733 /* Return true if sparc_output_mi_thunk would be able to output the
8734 assembler code for the thunk function specified by the arguments
8735 it is passed, and false otherwise. */
8736 static bool
8737 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8738 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8739 HOST_WIDE_INT vcall_offset,
8740 tree function ATTRIBUTE_UNUSED)
8741 {
8742 /* Bound the loop used in the default method above. */
8743 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8744 }
8745
8746 /* How to allocate a 'struct machine_function'. */
8747
8748 static struct machine_function *
8749 sparc_init_machine_status (void)
8750 {
8751 return ggc_alloc_cleared (sizeof (struct machine_function));
8752 }
8753
8754 /* Locate some local-dynamic symbol still in use by this function
8755 so that we can print its name in local-dynamic base patterns. */
8756
8757 static const char *
8758 get_some_local_dynamic_name (void)
8759 {
8760 rtx insn;
8761
8762 if (cfun->machine->some_ld_name)
8763 return cfun->machine->some_ld_name;
8764
8765 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8766 if (INSN_P (insn)
8767 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8768 return cfun->machine->some_ld_name;
8769
8770 gcc_unreachable ();
8771 }
8772
8773 static int
8774 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8775 {
8776 rtx x = *px;
8777
8778 if (x
8779 && GET_CODE (x) == SYMBOL_REF
8780 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8781 {
8782 cfun->machine->some_ld_name = XSTR (x, 0);
8783 return 1;
8784 }
8785
8786 return 0;
8787 }
8788
8789 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8790 This is called from dwarf2out.c to emit call frame instructions
8791 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8792 static void
8793 sparc_dwarf_handle_frame_unspec (const char *label,
8794 rtx pattern ATTRIBUTE_UNUSED,
8795 int index ATTRIBUTE_UNUSED)
8796 {
8797 gcc_assert (index == UNSPECV_SAVEW);
8798 dwarf2out_window_save (label);
8799 }
8800
8801 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8802 We need to emit DTP-relative relocations. */
8803
8804 static void
8805 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8806 {
8807 switch (size)
8808 {
8809 case 4:
8810 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8811 break;
8812 case 8:
8813 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8814 break;
8815 default:
8816 gcc_unreachable ();
8817 }
8818 output_addr_const (file, x);
8819 fputs (")", file);
8820 }
8821
8822 /* Do whatever processing is required at the end of a file. */
8823
8824 static void
8825 sparc_file_end (void)
8826 {
8827 /* If we haven't emitted the special PIC helper function, do so now. */
8828 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8829 emit_pic_helper ();
8830
8831 if (NEED_INDICATE_EXEC_STACK)
8832 file_end_indicate_exec_stack ();
8833 }
8834
8835 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8836 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
8837
8838 static const char *
8839 sparc_mangle_fundamental_type (tree type)
8840 {
8841 if (!TARGET_64BIT
8842 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8843 && TARGET_LONG_DOUBLE_128)
8844 return "g";
8845
8846 /* For all other types, use normal C++ mangling. */
8847 return NULL;
8848 }
8849 #endif
8850
8851 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing 32-bit
8852 compare and swap on the word containing the byte or half-word. */
8853
8854 void
8855 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8856 {
8857 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8858 rtx addr = gen_reg_rtx (Pmode);
8859 rtx off = gen_reg_rtx (SImode);
8860 rtx oldv = gen_reg_rtx (SImode);
8861 rtx newv = gen_reg_rtx (SImode);
8862 rtx oldvalue = gen_reg_rtx (SImode);
8863 rtx newvalue = gen_reg_rtx (SImode);
8864 rtx res = gen_reg_rtx (SImode);
8865 rtx resv = gen_reg_rtx (SImode);
8866 rtx memsi, val, mask, end_label, loop_label, cc;
8867
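  /* Strategy: operate on the aligned SImode word containing the
     sub-word.  Compute the sub-word's position, build a mask for it,
     and loop on a full-word compare-and-swap, retrying whenever the
     bytes outside the mask changed under us.  */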
8868 emit_insn (gen_rtx_SET (VOIDmode, addr,
8869 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8870
8871 if (Pmode != SImode)
8872 addr1 = gen_lowpart (SImode, addr1);
8873 emit_insn (gen_rtx_SET (VOIDmode, off,
8874 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8875
8876 memsi = gen_rtx_MEM (SImode, addr);
8877 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8878 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8879
8880 val = force_reg (SImode, memsi);
8881
8882 emit_insn (gen_rtx_SET (VOIDmode, off,
8883 gen_rtx_XOR (SImode, off,
8884 GEN_INT (GET_MODE (mem) == QImode
8885 ? 3 : 2))));
8886
8887 emit_insn (gen_rtx_SET (VOIDmode, off,
8888 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
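  /* OFF is now the bit offset of the sub-word within the SImode word;
     the XOR above converts the little-endian byte index into the
     big-endian position before scaling by 8 (shift left 3).  */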
8889
8890 if (GET_MODE (mem) == QImode)
8891 mask = force_reg (SImode, GEN_INT (0xff));
8892 else
8893 mask = force_reg (SImode, GEN_INT (0xffff));
8894
8895 emit_insn (gen_rtx_SET (VOIDmode, mask,
8896 gen_rtx_ASHIFT (SImode, mask, off)));
8897
8898 emit_insn (gen_rtx_SET (VOIDmode, val,
8899 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8900 val)));
8901
8902 oldval = gen_lowpart (SImode, oldval);
8903 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8904 gen_rtx_ASHIFT (SImode, oldval, off)));
8905
8906 newval = gen_lowpart_common (SImode, newval);
8907 emit_insn (gen_rtx_SET (VOIDmode, newv,
8908 gen_rtx_ASHIFT (SImode, newval, off)));
8909
8910 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8911 gen_rtx_AND (SImode, oldv, mask)));
8912
8913 emit_insn (gen_rtx_SET (VOIDmode, newv,
8914 gen_rtx_AND (SImode, newv, mask)));
8915
8916 end_label = gen_label_rtx ();
8917 loop_label = gen_label_rtx ();
8918 emit_label (loop_label);
8919
8920 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8921 gen_rtx_IOR (SImode, oldv, val)));
8922
8923 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8924 gen_rtx_IOR (SImode, newv, val)));
8925
8926 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8927
8928 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8929
8930 emit_insn (gen_rtx_SET (VOIDmode, resv,
8931 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8932 res)));
8933
8934 sparc_compare_op0 = resv;
8935 sparc_compare_op1 = val;
8936 cc = gen_compare_reg (NE);
8937
8938 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8939
8940 sparc_compare_emitted = cc;
8941 emit_jump_insn (gen_bne (loop_label));
8942
8943 emit_label (end_label);
8944
8945 emit_insn (gen_rtx_SET (VOIDmode, res,
8946 gen_rtx_AND (SImode, res, mask)));
8947
8948 emit_insn (gen_rtx_SET (VOIDmode, res,
8949 gen_rtx_LSHIFTRT (SImode, res, off)));
8950
8951 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
8952 }
8953
8954 #include "gt-sparc.h"
8955