1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
15
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
69 XVECEXP (X, 0, 0)
70
71 /* Extract the symbol type from UNSPEC wrapper X. */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
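
/* As an illustrative sketch (not verbatim compiler output): a page-relative
   GOT reference to symbol "foo" would be wrapped as

       (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GOTOFF_PAGE)

   so that UNSPEC_ADDRESS returns the inner symbol_ref and
   UNSPEC_ADDRESS_TYPE recovers SYMBOL_GOTOFF_PAGE.  */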
74
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save & restore registers.
77
78 Use a maximum gap of 0x100 in the mips16 case. We can then use
79 unextended instructions to save and restore registers, and to
80 allocate and deallocate the top part of the frame.
81
82 The value in the !mips16 case must be a SMALL_OPERAND and must
83 preserve the maximum stack alignment. */
84 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
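
/* For illustration (a sketch only; the prologue/epilogue code that uses this
   macro is defined later in this file): a !mips16 frame of 0x9000 bytes
   would be allocated with an initial $sp decrement of 0x7ff0, large enough
   to cover the register saves, followed by a second adjustment for the
   remaining 0x1010 bytes.  */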
85
86 /* True if INSN is a mips.md pattern or asm statement. */
87 #define USEFUL_INSN_P(INSN) \
88 (INSN_P (INSN) \
89 && GET_CODE (PATTERN (INSN)) != USE \
90 && GET_CODE (PATTERN (INSN)) != CLOBBER \
91 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
92 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93
94 /* If INSN is a delayed branch sequence, return the first instruction
95 in the sequence, otherwise return INSN itself. */
96 #define SEQ_BEGIN(INSN) \
97 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
98 ? XVECEXP (PATTERN (INSN), 0, 0) \
99 : (INSN))
100
101 /* Likewise for the last instruction in a delayed branch sequence. */
102 #define SEQ_END(INSN) \
103 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
104 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
105 : (INSN))
106
107 /* Execute the following loop body with SUBINSN set to each instruction
108 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
109 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
110 for ((SUBINSN) = SEQ_BEGIN (INSN); \
111 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
112 (SUBINSN) = NEXT_INSN (SUBINSN))
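
/* A hypothetical usage sketch (for illustration only): a pass that wants to
   visit every instruction, including those hidden inside delayed-branch
   SEQUENCEs, could be structured as:

       rtx insn, subinsn;

       for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
         FOR_EACH_SUBINSN (subinsn, insn)
           if (USEFUL_INSN_P (subinsn))
             ... process SUBINSN ...  */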
113
114 /* Classifies an address.
115
116 ADDRESS_REG
117 A natural register + offset address. The register satisfies
118 mips_valid_base_register_p and the offset is a const_arith_operand.
119
120 ADDRESS_LO_SUM
121 A LO_SUM rtx. The first operand is a valid base register and
122 the second operand is a symbolic address.
123
124 ADDRESS_CONST_INT
125 A signed 16-bit constant address.
126
127 ADDRESS_SYMBOLIC
128 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
129 enum mips_address_type {
130 ADDRESS_REG,
131 ADDRESS_LO_SUM,
132 ADDRESS_CONST_INT,
133 ADDRESS_SYMBOLIC
134 };
135
136 /* Classifies the prototype of a builtin function. */
137 enum mips_function_type
138 {
139 MIPS_V2SF_FTYPE_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
142 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
143 MIPS_V2SF_FTYPE_SF_SF,
144 MIPS_INT_FTYPE_V2SF_V2SF,
145 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
146 MIPS_INT_FTYPE_SF_SF,
147 MIPS_INT_FTYPE_DF_DF,
148 MIPS_SF_FTYPE_V2SF,
149 MIPS_SF_FTYPE_SF,
150 MIPS_SF_FTYPE_SF_SF,
151 MIPS_DF_FTYPE_DF,
152 MIPS_DF_FTYPE_DF_DF,
153
154 /* For MIPS DSP ASE */
155 MIPS_DI_FTYPE_DI_SI,
156 MIPS_DI_FTYPE_DI_SI_SI,
157 MIPS_DI_FTYPE_DI_V2HI_V2HI,
158 MIPS_DI_FTYPE_DI_V4QI_V4QI,
159 MIPS_SI_FTYPE_DI_SI,
160 MIPS_SI_FTYPE_PTR_SI,
161 MIPS_SI_FTYPE_SI,
162 MIPS_SI_FTYPE_SI_SI,
163 MIPS_SI_FTYPE_V2HI,
164 MIPS_SI_FTYPE_V2HI_V2HI,
165 MIPS_SI_FTYPE_V4QI,
166 MIPS_SI_FTYPE_V4QI_V4QI,
167 MIPS_SI_FTYPE_VOID,
168 MIPS_V2HI_FTYPE_SI,
169 MIPS_V2HI_FTYPE_SI_SI,
170 MIPS_V2HI_FTYPE_V2HI,
171 MIPS_V2HI_FTYPE_V2HI_SI,
172 MIPS_V2HI_FTYPE_V2HI_V2HI,
173 MIPS_V2HI_FTYPE_V4QI,
174 MIPS_V2HI_FTYPE_V4QI_V2HI,
175 MIPS_V4QI_FTYPE_SI,
176 MIPS_V4QI_FTYPE_V2HI_V2HI,
177 MIPS_V4QI_FTYPE_V4QI_SI,
178 MIPS_V4QI_FTYPE_V4QI_V4QI,
179 MIPS_VOID_FTYPE_SI_SI,
180 MIPS_VOID_FTYPE_V2HI_V2HI,
181 MIPS_VOID_FTYPE_V4QI_V4QI,
182
183 /* The last type. */
184 MIPS_MAX_FTYPE_MAX
185 };
186
187 /* Specifies how a builtin function should be converted into rtl. */
188 enum mips_builtin_type
189 {
190 /* The builtin corresponds directly to an .md pattern. The return
191 value is mapped to operand 0 and the arguments are mapped to
192 operands 1 and above. */
193 MIPS_BUILTIN_DIRECT,
194
195 /* The builtin corresponds directly to an .md pattern. There is no return
196 value and the arguments are mapped to operands 0 and above. */
197 MIPS_BUILTIN_DIRECT_NO_TARGET,
198
199 /* The builtin corresponds to a comparison instruction followed by
200 a mips_cond_move_tf_ps pattern. The first two arguments are the
201 values to compare and the second two arguments are the vector
202 operands for the movt.ps or movf.ps instruction (in assembly order). */
203 MIPS_BUILTIN_MOVF,
204 MIPS_BUILTIN_MOVT,
205
206 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
207 of this instruction is the result of the comparison, which has mode
208 CCV2 or CCV4. The function arguments are mapped to operands 1 and
209 above. The function's return value is an SImode boolean that is
210 true under the following conditions:
211
212 MIPS_BUILTIN_CMP_ANY: one of the registers is true
213 MIPS_BUILTIN_CMP_ALL: all of the registers are true
214 MIPS_BUILTIN_CMP_LOWER: the first register is true
215 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
216 MIPS_BUILTIN_CMP_ANY,
217 MIPS_BUILTIN_CMP_ALL,
218 MIPS_BUILTIN_CMP_UPPER,
219 MIPS_BUILTIN_CMP_LOWER,
220
221 /* As above, but the instruction only sets a single $fcc register. */
222 MIPS_BUILTIN_CMP_SINGLE,
223
224 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
225 MIPS_BUILTIN_BPOSGE32
226 };
227
228 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
229 #define MIPS_FP_CONDITIONS(MACRO) \
230 MACRO (f), \
231 MACRO (un), \
232 MACRO (eq), \
233 MACRO (ueq), \
234 MACRO (olt), \
235 MACRO (ult), \
236 MACRO (ole), \
237 MACRO (ule), \
238 MACRO (sf), \
239 MACRO (ngle), \
240 MACRO (seq), \
241 MACRO (ngl), \
242 MACRO (lt), \
243 MACRO (nge), \
244 MACRO (le), \
245 MACRO (ngt)
246
247 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
248 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
249 enum mips_fp_condition {
250 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
251 };
252
253 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
254 #define STRINGIFY(X) #X
255 static const char *const mips_fp_conditions[] = {
256 MIPS_FP_CONDITIONS (STRINGIFY)
257 };
258
259 /* A function to save or restore a register.  The first argument is the
260 register and the second is the stack slot. */
261 typedef void (*mips_save_restore_fn) (rtx, rtx);
262
263 struct mips16_constant;
264 struct mips_arg_info;
265 struct mips_address_info;
266 struct mips_integer_op;
267 struct mips_sim;
268
269 static enum mips_symbol_type mips_classify_symbol (rtx);
270 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
271 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
272 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
273 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
274 static bool mips_classify_address (struct mips_address_info *, rtx,
275 enum machine_mode, int);
276 static bool mips_cannot_force_const_mem (rtx);
277 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
278 static int mips_symbol_insns (enum mips_symbol_type);
279 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
280 static rtx mips_force_temporary (rtx, rtx);
281 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
282 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
283 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
284 static unsigned int mips_build_lower (struct mips_integer_op *,
285 unsigned HOST_WIDE_INT);
286 static unsigned int mips_build_integer (struct mips_integer_op *,
287 unsigned HOST_WIDE_INT);
288 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
289 static int m16_check_op (rtx, int, int, int);
290 static bool mips_rtx_costs (rtx, int, int, int *);
291 static int mips_address_cost (rtx);
292 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
293 static void mips_load_call_address (rtx, rtx, int);
294 static bool mips_function_ok_for_sibcall (tree, tree);
295 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
296 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
297 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
298 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
299 tree, int, struct mips_arg_info *);
300 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
301 static void mips_set_architecture (const struct mips_cpu_info *);
302 static void mips_set_tune (const struct mips_cpu_info *);
303 static bool mips_handle_option (size_t, const char *, int);
304 static struct machine_function *mips_init_machine_status (void);
305 static void print_operand_reloc (FILE *, rtx, const char **);
306 #if TARGET_IRIX
307 static void irix_output_external_libcall (rtx);
308 #endif
309 static void mips_file_start (void);
310 static void mips_file_end (void);
311 static bool mips_rewrite_small_data_p (rtx);
312 static int mips_small_data_pattern_1 (rtx *, void *);
313 static int mips_rewrite_small_data_1 (rtx *, void *);
314 static bool mips_function_has_gp_insn (void);
315 static unsigned int mips_global_pointer (void);
316 static bool mips_save_reg_p (unsigned int);
317 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
318 mips_save_restore_fn);
319 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
320 static void mips_output_cplocal (void);
321 static void mips_emit_loadgp (void);
322 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
323 static void mips_set_frame_expr (rtx);
324 static rtx mips_frame_set (rtx, rtx);
325 static void mips_save_reg (rtx, rtx);
326 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
327 static void mips_restore_reg (rtx, rtx);
328 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
329 HOST_WIDE_INT, tree);
330 static int symbolic_expression_p (rtx);
331 static section *mips_select_rtx_section (enum machine_mode, rtx,
332 unsigned HOST_WIDE_INT);
333 static section *mips_function_rodata_section (tree);
334 static bool mips_in_small_data_p (tree);
335 static bool mips_use_anchors_for_symbol_p (rtx);
336 static int mips_fpr_return_fields (tree, tree *);
337 static bool mips_return_in_msb (tree);
338 static rtx mips_return_fpr_pair (enum machine_mode mode,
339 enum machine_mode mode1, HOST_WIDE_INT,
340 enum machine_mode mode2, HOST_WIDE_INT);
341 static rtx mips16_gp_pseudo_reg (void);
342 static void mips16_fp_args (FILE *, int, int);
343 static void build_mips16_function_stub (FILE *);
344 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
345 static void dump_constants (struct mips16_constant *, rtx);
346 static int mips16_insn_length (rtx);
347 static int mips16_rewrite_pool_refs (rtx *, void *);
348 static void mips16_lay_out_constants (void);
349 static void mips_sim_reset (struct mips_sim *);
350 static void mips_sim_init (struct mips_sim *, state_t);
351 static void mips_sim_next_cycle (struct mips_sim *);
352 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
353 static int mips_sim_wait_regs_2 (rtx *, void *);
354 static void mips_sim_wait_regs_1 (rtx *, void *);
355 static void mips_sim_wait_regs (struct mips_sim *, rtx);
356 static void mips_sim_wait_units (struct mips_sim *, rtx);
357 static void mips_sim_wait_insn (struct mips_sim *, rtx);
358 static void mips_sim_record_set (rtx, rtx, void *);
359 static void mips_sim_issue_insn (struct mips_sim *, rtx);
360 static void mips_sim_issue_nop (struct mips_sim *);
361 static void mips_sim_finish_insn (struct mips_sim *, rtx);
362 static void vr4130_avoid_branch_rt_conflict (rtx);
363 static void vr4130_align_insns (void);
364 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
365 static void mips_avoid_hazards (void);
366 static void mips_reorg (void);
367 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
368 static bool mips_matching_cpu_name_p (const char *, const char *);
369 static const struct mips_cpu_info *mips_parse_cpu (const char *);
370 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
371 static bool mips_return_in_memory (tree, tree);
372 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
373 static void mips_macc_chains_record (rtx);
374 static void mips_macc_chains_reorder (rtx *, int);
375 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
376 static bool vr4130_true_reg_dependence_p (rtx);
377 static bool vr4130_swap_insns_p (rtx, rtx);
378 static void vr4130_reorder (rtx *, int);
379 static void mips_promote_ready (rtx *, int, int);
380 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
381 static int mips_variable_issue (FILE *, int, rtx, int);
382 static int mips_adjust_cost (rtx, rtx, rtx, int);
383 static int mips_issue_rate (void);
384 static int mips_multipass_dfa_lookahead (void);
385 static void mips_init_libfuncs (void);
386 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
387 tree, int *, int);
388 static tree mips_build_builtin_va_list (void);
389 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
390 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
391 tree, bool);
392 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
393 tree, bool);
394 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
395 tree, bool);
396 static bool mips_valid_pointer_mode (enum machine_mode);
397 static bool mips_vector_mode_supported_p (enum machine_mode);
398 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
399 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
400 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
401 static void mips_init_builtins (void);
402 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
403 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
404 enum insn_code, enum mips_fp_condition,
405 rtx, tree);
406 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
407 enum insn_code, enum mips_fp_condition,
408 rtx, tree);
409 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
410 static void mips_encode_section_info (tree, rtx, int);
411 static void mips_extra_live_on_entry (bitmap);
412 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
413
414 /* Structure to be filled in by compute_frame_size with register
415 save masks, and offsets for the current function. */
416
417 struct mips_frame_info GTY(())
418 {
419 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
420 HOST_WIDE_INT var_size; /* # bytes that variables take up */
421 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
422 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
423 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
424 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
425 unsigned int mask; /* mask of saved gp registers */
426 unsigned int fmask; /* mask of saved fp registers */
427 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
428 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
429 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
430 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
431 bool initialized; /* true if frame size already calculated */
432 int num_gp; /* number of gp registers saved */
433 int num_fp; /* number of fp registers saved */
434 };
435
436 struct machine_function GTY(()) {
437 /* Pseudo-reg holding the value of $28 in a mips16 function which
438 refers to GP relative global variables. */
439 rtx mips16_gp_pseudo_rtx;
440
441 /* The number of extra stack bytes taken up by register varargs.
442 This area is allocated by the callee at the very top of the frame. */
443 int varargs_size;
444
445 /* Current frame information, calculated by compute_frame_size. */
446 struct mips_frame_info frame;
447
448 /* The register to use as the global pointer within this function. */
449 unsigned int global_pointer;
450
451 /* True if mips_adjust_insn_length should ignore an instruction's
452 hazard attribute. */
453 bool ignore_hazard_length_p;
454
455 /* True if the whole function is suitable for .set noreorder and
456 .set nomacro. */
457 bool all_noreorder_p;
458
459 /* True if the function is known to have an instruction that needs $gp. */
460 bool has_gp_insn_p;
461 };
462
463 /* Information about a single argument. */
464 struct mips_arg_info
465 {
466 /* True if the argument is passed in a floating-point register, or
467 would have been if we hadn't run out of registers. */
468 bool fpr_p;
469
470 /* The number of words passed in registers, rounded up. */
471 unsigned int reg_words;
472
473 /* For EABI, the offset of the first register from GP_ARG_FIRST or
474 FP_ARG_FIRST. For other ABIs, the offset of the first register from
475 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
476 comment for details).
477
478 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
479 on the stack. */
480 unsigned int reg_offset;
481
482 /* The number of words that must be passed on the stack, rounded up. */
483 unsigned int stack_words;
484
485 /* The offset from the start of the stack overflow area of the argument's
486 first stack word. Only meaningful when STACK_WORDS is nonzero. */
487 unsigned int stack_offset;
488 };
489
490
491 /* Information about an address described by mips_address_type.
492
493 ADDRESS_CONST_INT
494 No fields are used.
495
496 ADDRESS_REG
497 REG is the base register and OFFSET is the constant offset.
498
499 ADDRESS_LO_SUM
500 REG is the register that contains the high part of the address,
501 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
502 is the type of OFFSET's symbol.
503
504 ADDRESS_SYMBOLIC
505 SYMBOL_TYPE is the type of symbol being referenced. */
506
507 struct mips_address_info
508 {
509 enum mips_address_type type;
510 rtx reg;
511 rtx offset;
512 enum mips_symbol_type symbol_type;
513 };
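
/* For example (illustrative only): (plus (reg $sp) (const_int 16)) would be
   classified as ADDRESS_REG with reg = $sp and offset = (const_int 16),
   whereas (lo_sum (reg $at) (symbol_ref "x")) would be ADDRESS_LO_SUM with
   symbol_type recording how "x" was classified.  */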
514
515
516 /* One stage in a constant building sequence. These sequences have
517 the form:
518
519 A = VALUE[0]
520 A = A CODE[1] VALUE[1]
521 A = A CODE[2] VALUE[2]
522 ...
523
524 where A is an accumulator, each CODE[i] is a binary rtl operation
525 and each VALUE[i] is a constant integer. */
526 struct mips_integer_op {
527 enum rtx_code code;
528 unsigned HOST_WIDE_INT value;
529 };
530
531
532 /* The largest number of operations needed to load an integer constant.
533 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
534 When the lowest bit is clear, we can try, but reject a sequence with
535 an extra SLL at the end. */
536 #define MIPS_MAX_INTEGER_OPS 7
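
/* As a worked illustration (a sketch; mips_build_integer need not emit
   exactly this sequence), the 64-bit constant 0x0123456789abcdef can be
   built with the worst-case six operations mentioned above:

       A = 0x01230000                               (lui)
       A = A | 0x4567    ->  0x01234567             (ori)
       A = A << 16       ->  0x0000012345670000     (dsll)
       A = A | 0x89ab    ->  0x00000123456789ab     (ori)
       A = A << 16       ->  0x0123456789ab0000     (dsll)
       A = A | 0xcdef    ->  0x0123456789abcdef     (ori)

   which matches the VALUE[0], CODE[i]/VALUE[i] form used by
   struct mips_integer_op above.  */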
537
538
539 /* Global variables for machine-dependent things. */
540
541 /* Threshold for data being put into the small data/bss area, instead
542 of the normal data area. */
543 int mips_section_threshold = -1;
544
545 /* Count the number of .file directives, so that .loc is up to date. */
546 int num_source_filenames = 0;
547
548 /* Count the number of sdb-related labels that are generated (to find
549 block start and end boundaries).  */
550 int sdb_label_count = 0;
551
552 /* Next label # for each statement for Silicon Graphics IRIS systems. */
553 int sym_lineno = 0;
554
555 /* Linked list of all externals that are to be emitted when optimizing
556 for the global pointer if they haven't been declared by the end of
557 the program with an appropriate .comm or initialization. */
558
559 struct extern_list GTY (())
560 {
561 struct extern_list *next; /* next external */
562 const char *name; /* name of the external */
563 int size; /* size in bytes */
564 };
565
566 static GTY (()) struct extern_list *extern_head = 0;
567
568 /* Name of the file containing the current function. */
569 const char *current_function_file = "";
570
571 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
572 int set_noreorder;
573 int set_noat;
574 int set_nomacro;
575 int set_volatile;
576
577 /* The next branch instruction is a branch likely, not branch normal. */
578 int mips_branch_likely;
579
580 /* The operands passed to the last cmpMM expander. */
581 rtx cmp_operands[2];
582
583 /* The target cpu for code generation. */
584 enum processor_type mips_arch;
585 const struct mips_cpu_info *mips_arch_info;
586
587 /* The target cpu for optimization and scheduling. */
588 enum processor_type mips_tune;
589 const struct mips_cpu_info *mips_tune_info;
590
591 /* Which instruction set architecture to use. */
592 int mips_isa;
593
594 /* Which ABI to use. */
595 int mips_abi = MIPS_ABI_DEFAULT;
596
597 /* Cost information to use. */
598 const struct mips_rtx_cost_data *mips_cost;
599
600 /* Whether we are generating mips16 hard float code. In mips16 mode
601 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
602 -msoft-float was not specified by the user, which means that we
603 should arrange to call mips32 hard floating point code. */
604 int mips16_hard_float;
605
606 /* The architecture selected by -mipsN. */
607 static const struct mips_cpu_info *mips_isa_info;
608
609 /* If TRUE, we split addresses into their high and low parts in the RTL. */
610 int mips_split_addresses;
611
612 /* Mode used for saving/restoring general purpose registers. */
613 static enum machine_mode gpr_mode;
614
615 /* Array saying whether or not a given hard register can support a
616 given mode.  */
617 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
618
619 /* List of all MIPS punctuation characters used by print_operand. */
620 char mips_print_operand_punct[256];
621
622 /* Map GCC register number to debugger register number. */
623 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
624
625 /* A copy of the original flag_delayed_branch: see override_options. */
626 static int mips_flag_delayed_branch;
627
628 static GTY (()) int mips_output_filename_first_time = 1;
629
630 /* mips_split_p[X] is true if symbols of type X can be split by
631 mips_split_symbol(). */
632 bool mips_split_p[NUM_SYMBOL_TYPES];
633
634 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
635 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
636 if they are matched by a special .md file pattern. */
637 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
638
639 /* Likewise for HIGHs. */
640 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
641
642 /* Map hard register number to register class */
643 const enum reg_class mips_regno_to_class[] =
644 {
645 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
646 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
647 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
648 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
649 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
650 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
651 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
652 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
653 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
654 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
655 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
656 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
661 HI_REG, LO_REG, NO_REGS, ST_REGS,
662 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
663 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
664 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
665 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
666 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
667 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
668 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
669 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
670 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
671 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
672 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
673 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
674 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
675 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
676 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
677 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
678 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
679 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
680 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
681 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
682 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
683 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
684 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
685 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
686 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
687 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
688 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
689 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
690 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
691 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
692 };
693
694 /* Table of machine dependent attributes. */
695 const struct attribute_spec mips_attribute_table[] =
696 {
697 { "long_call", 0, 0, false, true, true, NULL },
698 { NULL, 0, 0, false, false, false, NULL }
699 };
700
701 /* A table describing all the processors gcc knows about. Names are
702 matched in the order listed. The first mention of an ISA level is
703 taken as the canonical name for that ISA.
704
705 To ease comparison, please keep this table in the same order as
706 gas's mips_cpu_info_table[]. */
707 const struct mips_cpu_info mips_cpu_info_table[] = {
708 /* Entries for generic ISAs */
709 { "mips1", PROCESSOR_R3000, 1 },
710 { "mips2", PROCESSOR_R6000, 2 },
711 { "mips3", PROCESSOR_R4000, 3 },
712 { "mips4", PROCESSOR_R8000, 4 },
713 { "mips32", PROCESSOR_4KC, 32 },
714 { "mips32r2", PROCESSOR_M4K, 33 },
715 { "mips64", PROCESSOR_5KC, 64 },
716
717 /* MIPS I */
718 { "r3000", PROCESSOR_R3000, 1 },
719 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
720 { "r3900", PROCESSOR_R3900, 1 },
721
722 /* MIPS II */
723 { "r6000", PROCESSOR_R6000, 2 },
724
725 /* MIPS III */
726 { "r4000", PROCESSOR_R4000, 3 },
727 { "vr4100", PROCESSOR_R4100, 3 },
728 { "vr4111", PROCESSOR_R4111, 3 },
729 { "vr4120", PROCESSOR_R4120, 3 },
730 { "vr4130", PROCESSOR_R4130, 3 },
731 { "vr4300", PROCESSOR_R4300, 3 },
732 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
733 { "r4600", PROCESSOR_R4600, 3 },
734 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
735 { "r4650", PROCESSOR_R4650, 3 },
736
737 /* MIPS IV */
738 { "r8000", PROCESSOR_R8000, 4 },
739 { "vr5000", PROCESSOR_R5000, 4 },
740 { "vr5400", PROCESSOR_R5400, 4 },
741 { "vr5500", PROCESSOR_R5500, 4 },
742 { "rm7000", PROCESSOR_R7000, 4 },
743 { "rm9000", PROCESSOR_R9000, 4 },
744
745 /* MIPS32 */
746 { "4kc", PROCESSOR_4KC, 32 },
747 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
748 { "4kp", PROCESSOR_4KP, 32 },
749
750 /* MIPS32 Release 2 */
751 { "m4k", PROCESSOR_M4K, 33 },
752 { "24k", PROCESSOR_24K, 33 },
753 { "24kc", PROCESSOR_24K, 33 }, /* 24K no FPU */
754 { "24kf", PROCESSOR_24K, 33 }, /* 24K 1:2 FPU */
755 { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
756
757 /* MIPS64 */
758 { "5kc", PROCESSOR_5KC, 64 },
759 { "5kf", PROCESSOR_5KF, 64 },
760 { "20kc", PROCESSOR_20KC, 64 },
761 { "sb1", PROCESSOR_SB1, 64 },
762 { "sb1a", PROCESSOR_SB1A, 64 },
763 { "sr71000", PROCESSOR_SR71000, 64 },
764
765 /* End marker */
766 { 0, 0, 0 }
767 };
768
769 /* Default costs. If these are used for a processor we should look
770 up the actual costs. */
771 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
772 COSTS_N_INSNS (7), /* fp_mult_sf */ \
773 COSTS_N_INSNS (8), /* fp_mult_df */ \
774 COSTS_N_INSNS (23), /* fp_div_sf */ \
775 COSTS_N_INSNS (36), /* fp_div_df */ \
776 COSTS_N_INSNS (10), /* int_mult_si */ \
777 COSTS_N_INSNS (10), /* int_mult_di */ \
778 COSTS_N_INSNS (69), /* int_div_si */ \
779 COSTS_N_INSNS (69), /* int_div_di */ \
780 2, /* branch_cost */ \
781 4 /* memory_latency */
782
783 /* Need to replace these with the costs of calling the appropriate
784 libgcc routine. */
785 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
786 COSTS_N_INSNS (256), /* fp_mult_sf */ \
787 COSTS_N_INSNS (256), /* fp_mult_df */ \
788 COSTS_N_INSNS (256), /* fp_div_sf */ \
789 COSTS_N_INSNS (256) /* fp_div_df */
790
791 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
792 {
793 { /* R3000 */
794 COSTS_N_INSNS (2), /* fp_add */
795 COSTS_N_INSNS (4), /* fp_mult_sf */
796 COSTS_N_INSNS (5), /* fp_mult_df */
797 COSTS_N_INSNS (12), /* fp_div_sf */
798 COSTS_N_INSNS (19), /* fp_div_df */
799 COSTS_N_INSNS (12), /* int_mult_si */
800 COSTS_N_INSNS (12), /* int_mult_di */
801 COSTS_N_INSNS (35), /* int_div_si */
802 COSTS_N_INSNS (35), /* int_div_di */
803 1, /* branch_cost */
804 4 /* memory_latency */
805
806 },
807 { /* 4KC */
808 SOFT_FP_COSTS,
809 COSTS_N_INSNS (6), /* int_mult_si */
810 COSTS_N_INSNS (6), /* int_mult_di */
811 COSTS_N_INSNS (36), /* int_div_si */
812 COSTS_N_INSNS (36), /* int_div_di */
813 1, /* branch_cost */
814 4 /* memory_latency */
815 },
816 { /* 4KP */
817 SOFT_FP_COSTS,
818 COSTS_N_INSNS (36), /* int_mult_si */
819 COSTS_N_INSNS (36), /* int_mult_di */
820 COSTS_N_INSNS (37), /* int_div_si */
821 COSTS_N_INSNS (37), /* int_div_di */
822 1, /* branch_cost */
823 4 /* memory_latency */
824 },
825 { /* 5KC */
826 SOFT_FP_COSTS,
827 COSTS_N_INSNS (4), /* int_mult_si */
828 COSTS_N_INSNS (11), /* int_mult_di */
829 COSTS_N_INSNS (36), /* int_div_si */
830 COSTS_N_INSNS (68), /* int_div_di */
831 1, /* branch_cost */
832 4 /* memory_latency */
833 },
834 { /* 5KF */
835 COSTS_N_INSNS (4), /* fp_add */
836 COSTS_N_INSNS (4), /* fp_mult_sf */
837 COSTS_N_INSNS (5), /* fp_mult_df */
838 COSTS_N_INSNS (17), /* fp_div_sf */
839 COSTS_N_INSNS (32), /* fp_div_df */
840 COSTS_N_INSNS (4), /* int_mult_si */
841 COSTS_N_INSNS (11), /* int_mult_di */
842 COSTS_N_INSNS (36), /* int_div_si */
843 COSTS_N_INSNS (68), /* int_div_di */
844 1, /* branch_cost */
845 4 /* memory_latency */
846 },
847 { /* 20KC */
848 DEFAULT_COSTS
849 },
850 { /* 24k */
851 COSTS_N_INSNS (8), /* fp_add */
852 COSTS_N_INSNS (8), /* fp_mult_sf */
853 COSTS_N_INSNS (10), /* fp_mult_df */
854 COSTS_N_INSNS (34), /* fp_div_sf */
855 COSTS_N_INSNS (64), /* fp_div_df */
856 COSTS_N_INSNS (5), /* int_mult_si */
857 COSTS_N_INSNS (5), /* int_mult_di */
858 COSTS_N_INSNS (41), /* int_div_si */
859 COSTS_N_INSNS (41), /* int_div_di */
860 1, /* branch_cost */
861 4 /* memory_latency */
862 },
863 { /* 24kx */
864 COSTS_N_INSNS (4), /* fp_add */
865 COSTS_N_INSNS (4), /* fp_mult_sf */
866 COSTS_N_INSNS (5), /* fp_mult_df */
867 COSTS_N_INSNS (17), /* fp_div_sf */
868 COSTS_N_INSNS (32), /* fp_div_df */
869 COSTS_N_INSNS (5), /* int_mult_si */
870 COSTS_N_INSNS (5), /* int_mult_di */
871 COSTS_N_INSNS (41), /* int_div_si */
872 COSTS_N_INSNS (41), /* int_div_di */
873 1, /* branch_cost */
874 4 /* memory_latency */
875 },
876 { /* M4k */
877 DEFAULT_COSTS
878 },
879 { /* R3900 */
880 COSTS_N_INSNS (2), /* fp_add */
881 COSTS_N_INSNS (4), /* fp_mult_sf */
882 COSTS_N_INSNS (5), /* fp_mult_df */
883 COSTS_N_INSNS (12), /* fp_div_sf */
884 COSTS_N_INSNS (19), /* fp_div_df */
885 COSTS_N_INSNS (2), /* int_mult_si */
886 COSTS_N_INSNS (2), /* int_mult_di */
887 COSTS_N_INSNS (35), /* int_div_si */
888 COSTS_N_INSNS (35), /* int_div_di */
889 1, /* branch_cost */
890 4 /* memory_latency */
891 },
892 { /* R6000 */
893 COSTS_N_INSNS (3), /* fp_add */
894 COSTS_N_INSNS (5), /* fp_mult_sf */
895 COSTS_N_INSNS (6), /* fp_mult_df */
896 COSTS_N_INSNS (15), /* fp_div_sf */
897 COSTS_N_INSNS (16), /* fp_div_df */
898 COSTS_N_INSNS (17), /* int_mult_si */
899 COSTS_N_INSNS (17), /* int_mult_di */
900 COSTS_N_INSNS (38), /* int_div_si */
901 COSTS_N_INSNS (38), /* int_div_di */
902 2, /* branch_cost */
903 6 /* memory_latency */
904 },
905 { /* R4000 */
906 COSTS_N_INSNS (6), /* fp_add */
907 COSTS_N_INSNS (7), /* fp_mult_sf */
908 COSTS_N_INSNS (8), /* fp_mult_df */
909 COSTS_N_INSNS (23), /* fp_div_sf */
910 COSTS_N_INSNS (36), /* fp_div_df */
911 COSTS_N_INSNS (10), /* int_mult_si */
912 COSTS_N_INSNS (10), /* int_mult_di */
913 COSTS_N_INSNS (69), /* int_div_si */
914 COSTS_N_INSNS (69), /* int_div_di */
915 2, /* branch_cost */
916 6 /* memory_latency */
917 },
918 { /* R4100 */
919 DEFAULT_COSTS
920 },
921 { /* R4111 */
922 DEFAULT_COSTS
923 },
924 { /* R4120 */
925 DEFAULT_COSTS
926 },
927 { /* R4130 */
928 /* The only costs that appear to be updated here are those for
929 integer multiplication.  */
930 SOFT_FP_COSTS,
931 COSTS_N_INSNS (4), /* int_mult_si */
932 COSTS_N_INSNS (6), /* int_mult_di */
933 COSTS_N_INSNS (69), /* int_div_si */
934 COSTS_N_INSNS (69), /* int_div_di */
935 1, /* branch_cost */
936 4 /* memory_latency */
937 },
938 { /* R4300 */
939 DEFAULT_COSTS
940 },
941 { /* R4600 */
942 DEFAULT_COSTS
943 },
944 { /* R4650 */
945 DEFAULT_COSTS
946 },
947 { /* R5000 */
948 COSTS_N_INSNS (6), /* fp_add */
949 COSTS_N_INSNS (4), /* fp_mult_sf */
950 COSTS_N_INSNS (5), /* fp_mult_df */
951 COSTS_N_INSNS (23), /* fp_div_sf */
952 COSTS_N_INSNS (36), /* fp_div_df */
953 COSTS_N_INSNS (5), /* int_mult_si */
954 COSTS_N_INSNS (5), /* int_mult_di */
955 COSTS_N_INSNS (36), /* int_div_si */
956 COSTS_N_INSNS (36), /* int_div_di */
957 1, /* branch_cost */
958 4 /* memory_latency */
959 },
960 { /* R5400 */
961 COSTS_N_INSNS (6), /* fp_add */
962 COSTS_N_INSNS (5), /* fp_mult_sf */
963 COSTS_N_INSNS (6), /* fp_mult_df */
964 COSTS_N_INSNS (30), /* fp_div_sf */
965 COSTS_N_INSNS (59), /* fp_div_df */
966 COSTS_N_INSNS (3), /* int_mult_si */
967 COSTS_N_INSNS (4), /* int_mult_di */
968 COSTS_N_INSNS (42), /* int_div_si */
969 COSTS_N_INSNS (74), /* int_div_di */
970 1, /* branch_cost */
971 4 /* memory_latency */
972 },
973 { /* R5500 */
974 COSTS_N_INSNS (6), /* fp_add */
975 COSTS_N_INSNS (5), /* fp_mult_sf */
976 COSTS_N_INSNS (6), /* fp_mult_df */
977 COSTS_N_INSNS (30), /* fp_div_sf */
978 COSTS_N_INSNS (59), /* fp_div_df */
979 COSTS_N_INSNS (5), /* int_mult_si */
980 COSTS_N_INSNS (9), /* int_mult_di */
981 COSTS_N_INSNS (42), /* int_div_si */
982 COSTS_N_INSNS (74), /* int_div_di */
983 1, /* branch_cost */
984 4 /* memory_latency */
985 },
986 { /* R7000 */
987 /* The only costs that are changed here are those for
988 integer multiplication.  */
989 COSTS_N_INSNS (6), /* fp_add */
990 COSTS_N_INSNS (7), /* fp_mult_sf */
991 COSTS_N_INSNS (8), /* fp_mult_df */
992 COSTS_N_INSNS (23), /* fp_div_sf */
993 COSTS_N_INSNS (36), /* fp_div_df */
994 COSTS_N_INSNS (5), /* int_mult_si */
995 COSTS_N_INSNS (9), /* int_mult_di */
996 COSTS_N_INSNS (69), /* int_div_si */
997 COSTS_N_INSNS (69), /* int_div_di */
998 1, /* branch_cost */
999 4 /* memory_latency */
1000 },
1001 { /* R8000 */
1002 DEFAULT_COSTS
1003 },
1004 { /* R9000 */
1005 /* The only costs that are changed here are those for
1006 integer multiplication.  */
1007 COSTS_N_INSNS (6), /* fp_add */
1008 COSTS_N_INSNS (7), /* fp_mult_sf */
1009 COSTS_N_INSNS (8), /* fp_mult_df */
1010 COSTS_N_INSNS (23), /* fp_div_sf */
1011 COSTS_N_INSNS (36), /* fp_div_df */
1012 COSTS_N_INSNS (3), /* int_mult_si */
1013 COSTS_N_INSNS (8), /* int_mult_di */
1014 COSTS_N_INSNS (69), /* int_div_si */
1015 COSTS_N_INSNS (69), /* int_div_di */
1016 1, /* branch_cost */
1017 4 /* memory_latency */
1018 },
1019 { /* SB1 */
1020 /* These costs are the same as the SB-1A below. */
1021 COSTS_N_INSNS (4), /* fp_add */
1022 COSTS_N_INSNS (4), /* fp_mult_sf */
1023 COSTS_N_INSNS (4), /* fp_mult_df */
1024 COSTS_N_INSNS (24), /* fp_div_sf */
1025 COSTS_N_INSNS (32), /* fp_div_df */
1026 COSTS_N_INSNS (3), /* int_mult_si */
1027 COSTS_N_INSNS (4), /* int_mult_di */
1028 COSTS_N_INSNS (36), /* int_div_si */
1029 COSTS_N_INSNS (68), /* int_div_di */
1030 1, /* branch_cost */
1031 4 /* memory_latency */
1032 },
1033 { /* SB1-A */
1034 /* These costs are the same as the SB-1 above. */
1035 COSTS_N_INSNS (4), /* fp_add */
1036 COSTS_N_INSNS (4), /* fp_mult_sf */
1037 COSTS_N_INSNS (4), /* fp_mult_df */
1038 COSTS_N_INSNS (24), /* fp_div_sf */
1039 COSTS_N_INSNS (32), /* fp_div_df */
1040 COSTS_N_INSNS (3), /* int_mult_si */
1041 COSTS_N_INSNS (4), /* int_mult_di */
1042 COSTS_N_INSNS (36), /* int_div_si */
1043 COSTS_N_INSNS (68), /* int_div_di */
1044 1, /* branch_cost */
1045 4 /* memory_latency */
1046 },
1047 { /* SR71000 */
1048 DEFAULT_COSTS
1049 },
1050 };
1051
1052
1053 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1054 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1055 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1056 #endif
1057
1058 /* Initialize the GCC target structure. */
1059 #undef TARGET_ASM_ALIGNED_HI_OP
1060 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1061 #undef TARGET_ASM_ALIGNED_SI_OP
1062 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1063 #undef TARGET_ASM_ALIGNED_DI_OP
1064 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1065
1066 #undef TARGET_ASM_FUNCTION_PROLOGUE
1067 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1068 #undef TARGET_ASM_FUNCTION_EPILOGUE
1069 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1070 #undef TARGET_ASM_SELECT_RTX_SECTION
1071 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1072 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1073 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1074
1075 #undef TARGET_SCHED_REORDER
1076 #define TARGET_SCHED_REORDER mips_sched_reorder
1077 #undef TARGET_SCHED_VARIABLE_ISSUE
1078 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1079 #undef TARGET_SCHED_ADJUST_COST
1080 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1081 #undef TARGET_SCHED_ISSUE_RATE
1082 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1083 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1084 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1085 mips_multipass_dfa_lookahead
1086
1087 #undef TARGET_DEFAULT_TARGET_FLAGS
1088 #define TARGET_DEFAULT_TARGET_FLAGS \
1089 (TARGET_DEFAULT \
1090 | TARGET_CPU_DEFAULT \
1091 | TARGET_ENDIAN_DEFAULT \
1092 | TARGET_FP_EXCEPTIONS_DEFAULT \
1093 | MASK_CHECK_ZERO_DIV \
1094 | MASK_FUSED_MADD)
1095 #undef TARGET_HANDLE_OPTION
1096 #define TARGET_HANDLE_OPTION mips_handle_option
1097
1098 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1099 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1100
1101 #undef TARGET_VALID_POINTER_MODE
1102 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1103 #undef TARGET_RTX_COSTS
1104 #define TARGET_RTX_COSTS mips_rtx_costs
1105 #undef TARGET_ADDRESS_COST
1106 #define TARGET_ADDRESS_COST mips_address_cost
1107
1108 #undef TARGET_IN_SMALL_DATA_P
1109 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1110
1111 #undef TARGET_MACHINE_DEPENDENT_REORG
1112 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1113
1114 #undef TARGET_ASM_FILE_START
1115 #undef TARGET_ASM_FILE_END
1116 #define TARGET_ASM_FILE_START mips_file_start
1117 #define TARGET_ASM_FILE_END mips_file_end
1118 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1119 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1120
1121 #undef TARGET_INIT_LIBFUNCS
1122 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1123
1124 #undef TARGET_BUILD_BUILTIN_VA_LIST
1125 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1126 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1127 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1128
1129 #undef TARGET_PROMOTE_FUNCTION_ARGS
1130 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1131 #undef TARGET_PROMOTE_FUNCTION_RETURN
1132 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1133 #undef TARGET_PROMOTE_PROTOTYPES
1134 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1135
1136 #undef TARGET_RETURN_IN_MEMORY
1137 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1138 #undef TARGET_RETURN_IN_MSB
1139 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1140
1141 #undef TARGET_ASM_OUTPUT_MI_THUNK
1142 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1143 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1144 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1145
1146 #undef TARGET_SETUP_INCOMING_VARARGS
1147 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1148 #undef TARGET_STRICT_ARGUMENT_NAMING
1149 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1150 #undef TARGET_MUST_PASS_IN_STACK
1151 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1152 #undef TARGET_PASS_BY_REFERENCE
1153 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1154 #undef TARGET_CALLEE_COPIES
1155 #define TARGET_CALLEE_COPIES mips_callee_copies
1156 #undef TARGET_ARG_PARTIAL_BYTES
1157 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1158
1159 #undef TARGET_MODE_REP_EXTENDED
1160 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1161
1162 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1163 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1164
1165 #undef TARGET_INIT_BUILTINS
1166 #define TARGET_INIT_BUILTINS mips_init_builtins
1167 #undef TARGET_EXPAND_BUILTIN
1168 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1169
1170 #undef TARGET_HAVE_TLS
1171 #define TARGET_HAVE_TLS HAVE_AS_TLS
1172
1173 #undef TARGET_CANNOT_FORCE_CONST_MEM
1174 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1175
1176 #undef TARGET_ENCODE_SECTION_INFO
1177 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1178
1179 #undef TARGET_ATTRIBUTE_TABLE
1180 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1181
1182 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1183 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1184
1185 #undef TARGET_MIN_ANCHOR_OFFSET
1186 #define TARGET_MIN_ANCHOR_OFFSET -32768
1187 #undef TARGET_MAX_ANCHOR_OFFSET
1188 #define TARGET_MAX_ANCHOR_OFFSET 32767
1189 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1190 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1191 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1192 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1193
1194 struct gcc_target targetm = TARGET_INITIALIZER;
1195
1196 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1197
1198 static enum mips_symbol_type
1199 mips_classify_symbol (rtx x)
1200 {
1201 if (GET_CODE (x) == LABEL_REF)
1202 {
1203 if (TARGET_MIPS16)
1204 return SYMBOL_CONSTANT_POOL;
1205 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1206 return SYMBOL_GOT_LOCAL;
1207 return SYMBOL_GENERAL;
1208 }
1209
1210 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1211
1212 if (SYMBOL_REF_TLS_MODEL (x))
1213 return SYMBOL_TLS;
1214
1215 if (CONSTANT_POOL_ADDRESS_P (x))
1216 {
1217 if (TARGET_MIPS16)
1218 return SYMBOL_CONSTANT_POOL;
1219
1220 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1221 return SYMBOL_SMALL_DATA;
1222 }
1223
1224 /* Do not use small-data accesses for weak symbols; they may end up
1225 being zero. */
1226 if (SYMBOL_REF_SMALL_P (x)
1227 && !SYMBOL_REF_WEAK (x))
1228 return SYMBOL_SMALL_DATA;
1229
1230 if (TARGET_ABICALLS)
1231 {
1232 if (SYMBOL_REF_DECL (x) == 0)
1233 {
1234 if (!SYMBOL_REF_LOCAL_P (x))
1235 return SYMBOL_GOT_GLOBAL;
1236 }
1237 else
1238 {
1239 /* Don't use GOT accesses for locally-binding symbols if
1240 TARGET_ABSOLUTE_ABICALLS. Otherwise, there are three
1241 cases to consider:
1242
1243 - o32 PIC (either with or without explicit relocs)
1244 - n32/n64 PIC without explicit relocs
1245 - n32/n64 PIC with explicit relocs
1246
1247 In the first case, both local and global accesses will use an
1248 R_MIPS_GOT16 relocation. We must correctly predict which of
1249 the two semantics (local or global) the assembler and linker
1250 will apply. The choice doesn't depend on the symbol's
1251 visibility, so we deliberately ignore decl_visibility and
1252 binds_local_p here.
1253
1254 In the second case, the assembler will not use R_MIPS_GOT16
1255 relocations, but it chooses between local and global accesses
1256 in the same way as for o32 PIC.
1257
1258 In the third case we have more freedom since both forms of
1259 access will work for any kind of symbol. However, there seems
1260 little point in doing things differently. */
1261 if (DECL_P (SYMBOL_REF_DECL (x))
1262 && TREE_PUBLIC (SYMBOL_REF_DECL (x))
1263 && !(TARGET_ABSOLUTE_ABICALLS
1264 && targetm.binds_local_p (SYMBOL_REF_DECL (x))))
1265 return SYMBOL_GOT_GLOBAL;
1266 }
1267
1268 if (!TARGET_ABSOLUTE_ABICALLS)
1269 return SYMBOL_GOT_LOCAL;
1270 }
1271
1272 return SYMBOL_GENERAL;
1273 }
1274
1275
1276 /* Split X into a base and a constant offset, storing them in *BASE
1277 and *OFFSET respectively. */
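
/* For example, given X = (const (plus (symbol_ref "foo") (const_int 12))),
   mips_split_const sets *BASE to (symbol_ref "foo") and *OFFSET to 12; for
   a bare SYMBOL_REF, REG or CONST_INT, *BASE is X itself and *OFFSET is 0.  */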
1278
1279 static void
1280 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
1281 {
1282 *offset = 0;
1283
1284 if (GET_CODE (x) == CONST)
1285 {
1286 x = XEXP (x, 0);
1287 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1288 {
1289 *offset += INTVAL (XEXP (x, 1));
1290 x = XEXP (x, 0);
1291 }
1292 }
1293 *base = x;
1294 }
1295
1296
1297 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
1298 to the same object as SYMBOL, or to the same object_block. */
1299
1300 static bool
1301 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
1302 {
1303 if (GET_CODE (symbol) != SYMBOL_REF)
1304 return false;
1305
1306 if (CONSTANT_POOL_ADDRESS_P (symbol)
1307 && offset >= 0
1308 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
1309 return true;
1310
1311 if (SYMBOL_REF_DECL (symbol) != 0
1312 && offset >= 0
1313 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
1314 return true;
1315
1316 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
1317 && SYMBOL_REF_BLOCK (symbol)
1318 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
1319 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
1320 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
1321 return true;
1322
1323 return false;
1324 }
1325
1326
1327 /* Return true if X is a symbolic constant that can be calculated in
1328 the same way as a bare symbol. If it is, store the type of the
1329 symbol in *SYMBOL_TYPE. */
1330
1331 bool
1332 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1333 {
1334 HOST_WIDE_INT offset;
1335
1336 mips_split_const (x, &x, &offset);
1337 if (UNSPEC_ADDRESS_P (x))
1338 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1339 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1340 {
1341 *symbol_type = mips_classify_symbol (x);
1342 if (*symbol_type == SYMBOL_TLS)
1343 return false;
1344 }
1345 else
1346 return false;
1347
1348 if (offset == 0)
1349 return true;
1350
1351 /* Check whether a nonzero offset is valid for the underlying
1352 relocations. */
1353 switch (*symbol_type)
1354 {
1355 case SYMBOL_GENERAL:
1356 case SYMBOL_64_HIGH:
1357 case SYMBOL_64_MID:
1358 case SYMBOL_64_LOW:
1359 /* If the target has 64-bit pointers and the object file only
1360 supports 32-bit symbols, the values of those symbols will be
1361 sign-extended. In this case we can't allow an arbitrary offset
1362 in case the 32-bit value X + OFFSET has a different sign from X. */
1363 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1364 return mips_offset_within_object_p (x, offset);
1365
1366 /* In other cases the relocations can handle any offset. */
1367 return true;
1368
1369 case SYMBOL_CONSTANT_POOL:
1370 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1371 In this case, we no longer have access to the underlying constant,
1372 but the original symbol-based access was known to be valid. */
1373 if (GET_CODE (x) == LABEL_REF)
1374 return true;
1375
1376 /* Fall through. */
1377
1378 case SYMBOL_SMALL_DATA:
1379 /* Make sure that the offset refers to something within the
1380 underlying object. This should guarantee that the final
1381 PC- or GP-relative offset is within the 16-bit limit. */
1382 return mips_offset_within_object_p (x, offset);
1383
1384 case SYMBOL_GOT_LOCAL:
1385 case SYMBOL_GOTOFF_PAGE:
1386 /* The linker should provide enough local GOT entries for a
1387 16-bit offset. Larger offsets may lead to GOT overflow. */
1388 return SMALL_OPERAND (offset);
1389
1390 case SYMBOL_GOT_GLOBAL:
1391 case SYMBOL_GOTOFF_GLOBAL:
1392 case SYMBOL_GOTOFF_CALL:
1393 case SYMBOL_GOTOFF_LOADGP:
1394 case SYMBOL_TLSGD:
1395 case SYMBOL_TLSLDM:
1396 case SYMBOL_DTPREL:
1397 case SYMBOL_TPREL:
1398 case SYMBOL_GOTTPREL:
1399 case SYMBOL_TLS:
1400 return false;
1401 }
1402 gcc_unreachable ();
1403 }
1404
1405
1406 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1407
1408 int
1409 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1410 {
1411 if (regno >= FIRST_PSEUDO_REGISTER)
1412 {
1413 if (!strict)
1414 return true;
1415 regno = reg_renumber[regno];
1416 }
1417
1418 /* These fake registers will be eliminated to either the stack or
1419 hard frame pointer, both of which are usually valid base registers.
1420 Reload deals with the cases where the eliminated form isn't valid. */
1421 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1422 return true;
1423
1424 /* In mips16 mode, the stack pointer can only address word and doubleword
1425 values, nothing smaller. There are two problems here:
1426
1427 (a) Instantiating virtual registers can introduce new uses of the
1428 stack pointer. If these virtual registers are valid addresses,
1429 the stack pointer should be too.
1430
1431 (b) Most uses of the stack pointer are not made explicit until
1432 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1433 We don't know until that stage whether we'll be eliminating to the
1434 stack pointer (which needs the restriction) or the hard frame
1435 pointer (which doesn't).
1436
1437 All in all, it seems more consistent to only enforce this restriction
1438 during and after reload. */
1439 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1440 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1441
1442 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1443 }
1444
1445
1446 /* Return true if X is a valid base register for the given mode.
1447 Allow only hard registers if STRICT. */
1448
1449 static bool
1450 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1451 {
1452 if (!strict && GET_CODE (x) == SUBREG)
1453 x = SUBREG_REG (x);
1454
1455 return (REG_P (x)
1456 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1457 }
1458
1459
1460 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1461 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1462
1463 static bool
1464 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1465 enum machine_mode mode)
1466 {
1467 switch (symbol_type)
1468 {
1469 case SYMBOL_GENERAL:
1470 return !TARGET_MIPS16;
1471
1472 case SYMBOL_SMALL_DATA:
1473 return true;
1474
1475 case SYMBOL_CONSTANT_POOL:
1476 /* PC-relative addressing is only available for lw and ld. */
1477 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1478
1479 case SYMBOL_GOT_LOCAL:
1480 return true;
1481
1482 case SYMBOL_GOT_GLOBAL:
1483 /* The address will have to be loaded from the GOT first. */
1484 return false;
1485
1486 case SYMBOL_GOTOFF_PAGE:
1487 case SYMBOL_GOTOFF_GLOBAL:
1488 case SYMBOL_GOTOFF_CALL:
1489 case SYMBOL_GOTOFF_LOADGP:
1490 case SYMBOL_TLS:
1491 case SYMBOL_TLSGD:
1492 case SYMBOL_TLSLDM:
1493 case SYMBOL_DTPREL:
1494 case SYMBOL_GOTTPREL:
1495 case SYMBOL_TPREL:
1496 case SYMBOL_64_HIGH:
1497 case SYMBOL_64_MID:
1498 case SYMBOL_64_LOW:
1499 return true;
1500 }
1501 gcc_unreachable ();
1502 }
1503
1504
1505 /* Return true if X is a valid address for machine mode MODE. If it is,
1506 fill in INFO appropriately. STRICT is true if we should only accept
1507 hard base registers. */
1508
1509 static bool
1510 mips_classify_address (struct mips_address_info *info, rtx x,
1511 enum machine_mode mode, int strict)
1512 {
1513 switch (GET_CODE (x))
1514 {
1515 case REG:
1516 case SUBREG:
1517 info->type = ADDRESS_REG;
1518 info->reg = x;
1519 info->offset = const0_rtx;
1520 return mips_valid_base_register_p (info->reg, mode, strict);
1521
1522 case PLUS:
1523 info->type = ADDRESS_REG;
1524 info->reg = XEXP (x, 0);
1525 info->offset = XEXP (x, 1);
1526 return (mips_valid_base_register_p (info->reg, mode, strict)
1527 && const_arith_operand (info->offset, VOIDmode));
1528
1529 case LO_SUM:
1530 info->type = ADDRESS_LO_SUM;
1531 info->reg = XEXP (x, 0);
1532 info->offset = XEXP (x, 1);
1533 return (mips_valid_base_register_p (info->reg, mode, strict)
1534 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1535 && mips_symbolic_address_p (info->symbol_type, mode)
1536 && mips_lo_relocs[info->symbol_type] != 0);
1537
1538 case CONST_INT:
1539 /* Small-integer addresses don't occur very often, but they
1540 are legitimate if $0 is a valid base register. */
1541 info->type = ADDRESS_CONST_INT;
1542 return !TARGET_MIPS16 && SMALL_INT (x);
1543
1544 case CONST:
1545 case LABEL_REF:
1546 case SYMBOL_REF:
1547 info->type = ADDRESS_SYMBOLIC;
1548 return (mips_symbolic_constant_p (x, &info->symbol_type)
1549 && mips_symbolic_address_p (info->symbol_type, mode)
1550 && !mips_split_p[info->symbol_type]);
1551
1552 default:
1553 return false;
1554 }
1555 }
1556
1557 /* Return true if X is a thread-local symbol. */
1558
1559 static bool
1560 mips_tls_operand_p (rtx x)
1561 {
1562 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1563 }
1564
1565 /* A for_each_rtx callback: return true if *X is a thread-local symbol.  */
1566
1567 static int
1568 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1569 {
1570 return mips_tls_operand_p (*x);
1571 }
1572
1573 /* Return true if X cannot be forced into a constant pool.  */
1574
1575 static bool
1576 mips_cannot_force_const_mem (rtx x)
1577 {
1578 rtx base;
1579 HOST_WIDE_INT offset;
1580
1581 if (!TARGET_MIPS16)
1582 {
1583 /* As an optimization, reject constants that mips_legitimize_move
1584 can expand inline.
1585
1586 Suppose we have a multi-instruction sequence that loads constant C
1587 into register R. If R does not get allocated a hard register, and
1588 R is used in an operand that allows both registers and memory
1589 references, reload will consider forcing C into memory and using
1590 one of the instruction's memory alternatives. Returning false
1591 here will force it to use an input reload instead. */
1592 if (GET_CODE (x) == CONST_INT)
1593 return true;
1594
1595 mips_split_const (x, &base, &offset);
1596 if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
1597 return true;
1598 }
1599
1600 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1601 return true;
1602
1603 return false;
1604 }
1605
1606 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1607 constant pools, but normal-mode code doesn't need to. */
1608
1609 static bool
1610 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1611 rtx x ATTRIBUTE_UNUSED)
1612 {
1613 return !TARGET_MIPS16;
1614 }
1615
1616 /* Return the number of instructions needed to load a symbol of the
1617 given type into a register. If valid in an address, the same number
1618 of instructions are needed for loads and stores. Treat extended
1619 mips16 instructions as two instructions. */
1620
1621 static int
1622 mips_symbol_insns (enum mips_symbol_type type)
1623 {
1624 switch (type)
1625 {
1626 case SYMBOL_GENERAL:
1627 /* In mips16 code, general symbols must be fetched from the
1628 constant pool. */
1629 if (TARGET_MIPS16)
1630 return 0;
1631
1632 /* When using 64-bit symbols, we need 5 preparatory instructions,
1633 such as:
1634
1635 lui $at,%highest(symbol)
1636 daddiu $at,$at,%higher(symbol)
1637 dsll $at,$at,16
1638 daddiu $at,$at,%hi(symbol)
1639 dsll $at,$at,16
1640
1641 The final address is then $at + %lo(symbol). With 32-bit
1642 symbols we just need a preparatory lui. */
1643 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1644
1645 case SYMBOL_SMALL_DATA:
1646 return 1;
1647
1648 case SYMBOL_CONSTANT_POOL:
1649 /* This case is for mips16 only. Assume we'll need an
1650 extended instruction. */
1651 return 2;
1652
1653 case SYMBOL_GOT_LOCAL:
1654 case SYMBOL_GOT_GLOBAL:
1655 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1656 the local/global classification is accurate. See override_options
1657 for details.
1658
1659 The worst cases are:
1660
1661 (1) For local symbols when generating o32 or o64 code. The assembler
1662 will use:
1663
1664 lw $at,%got(symbol)
1665 nop
1666
1667 ...and the final address will be $at + %lo(symbol).
1668
1669 (2) For global symbols when -mxgot. The assembler will use:
1670
1671 lui $at,%got_hi(symbol)
1672 (d)addu $at,$at,$gp
1673
1674 ...and the final address will be $at + %got_lo(symbol). */
1675 return 3;
1676
1677 case SYMBOL_GOTOFF_PAGE:
1678 case SYMBOL_GOTOFF_GLOBAL:
1679 case SYMBOL_GOTOFF_CALL:
1680 case SYMBOL_GOTOFF_LOADGP:
1681 case SYMBOL_64_HIGH:
1682 case SYMBOL_64_MID:
1683 case SYMBOL_64_LOW:
1684 case SYMBOL_TLSGD:
1685 case SYMBOL_TLSLDM:
1686 case SYMBOL_DTPREL:
1687 case SYMBOL_GOTTPREL:
1688 case SYMBOL_TPREL:
1689 /* Check whether the offset is a 16- or 32-bit value. */
1690 return mips_split_p[type] ? 2 : 1;
1691
1692 case SYMBOL_TLS:
1693 /* We don't treat a bare TLS symbol as a constant. */
1694 return 0;
1695 }
1696 gcc_unreachable ();
1697 }
1698
1699 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1700
1701 bool
1702 mips_stack_address_p (rtx x, enum machine_mode mode)
1703 {
1704 struct mips_address_info addr;
1705
1706 return (mips_classify_address (&addr, x, mode, false)
1707 && addr.type == ADDRESS_REG
1708 && addr.reg == stack_pointer_rtx);
1709 }
1710
1711 /* Return true if a value at OFFSET bytes from BASE can be accessed
1712 using an unextended mips16 instruction. MODE is the mode of the
1713 value.
1714
1715 Usually the offset in an unextended instruction is a 5-bit field.
1716 The offset is unsigned and shifted left once for HIs, twice
1717 for SIs, and so on. An exception is SImode accesses off the
1718 stack pointer, which have an 8-bit immediate field. */
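/* For example (illustrative): an unextended SImode lw/sw with a general
   base register can reach byte offsets 0, 4, ..., 124 (32 words), while
   lw/sw relative to $sp can reach 0, 4, ..., 1020 (256 words), matching
   the 32 * size and 256 * size checks below. */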
1719
1720 static bool
1721 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1722 {
1723 if (TARGET_MIPS16
1724 && GET_CODE (offset) == CONST_INT
1725 && INTVAL (offset) >= 0
1726 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1727 {
1728 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1729 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1730 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1731 }
1732 return false;
1733 }
1734
1735
1736 /* Return the number of instructions needed to load or store a value
1737 of mode MODE at X. Return 0 if X isn't valid for MODE.
1738
1739 For mips16 code, count extended instructions as two instructions. */
1740
1741 int
1742 mips_address_insns (rtx x, enum machine_mode mode)
1743 {
1744 struct mips_address_info addr;
1745 int factor;
1746
1747 if (mode == BLKmode)
1748 /* BLKmode is used for single unaligned loads and stores. */
1749 factor = 1;
1750 else
1751 /* Each word of a multi-word value will be accessed individually. */
1752 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1753
1754 if (mips_classify_address (&addr, x, mode, false))
1755 switch (addr.type)
1756 {
1757 case ADDRESS_REG:
1758 if (TARGET_MIPS16
1759 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1760 return factor * 2;
1761 return factor;
1762
1763 case ADDRESS_LO_SUM:
1764 return (TARGET_MIPS16 ? factor * 2 : factor);
1765
1766 case ADDRESS_CONST_INT:
1767 return factor;
1768
1769 case ADDRESS_SYMBOLIC:
1770 return factor * mips_symbol_insns (addr.symbol_type);
1771 }
1772 return 0;
1773 }
1774
1775
1776 /* Likewise for constant X. */
1777
1778 int
1779 mips_const_insns (rtx x)
1780 {
1781 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1782 enum mips_symbol_type symbol_type;
1783 HOST_WIDE_INT offset;
1784
1785 switch (GET_CODE (x))
1786 {
1787 case HIGH:
1788 if (TARGET_MIPS16
1789 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1790 || !mips_split_p[symbol_type])
1791 return 0;
1792
1793 return 1;
1794
1795 case CONST_INT:
1796 if (TARGET_MIPS16)
1797 /* Unsigned 8-bit constants can be loaded using an unextended
1798 LI instruction. Unsigned 16-bit constants can be loaded
1799 using an extended LI. Negative constants must be loaded
1800 using LI and then negated. */
1801 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1802 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1803 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1804 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1805 : 0);
1806
1807 return mips_build_integer (codes, INTVAL (x));
1808
1809 case CONST_DOUBLE:
1810 case CONST_VECTOR:
1811 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1812
1813 case CONST:
1814 if (CONST_GP_P (x))
1815 return 1;
1816
1817 /* See if we can refer to X directly. */
1818 if (mips_symbolic_constant_p (x, &symbol_type))
1819 return mips_symbol_insns (symbol_type);
1820
1821 /* Otherwise try splitting the constant into a base and offset.
1822 16-bit offsets can be added using an extra addiu. Larger offsets
1823 must be calculated separately and then added to the base. */
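/* For example (illustrative): for (const (plus SYM 0x20000)) the offset
   is not a SMALL_OPERAND, so the count is the cost of SYM, plus one
   instruction for the final add, plus mips_build_integer's cost of
   loading 0x20000 into a register. */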
1824 mips_split_const (x, &x, &offset);
1825 if (offset != 0)
1826 {
1827 int n = mips_const_insns (x);
1828 if (n != 0)
1829 {
1830 if (SMALL_OPERAND (offset))
1831 return n + 1;
1832 else
1833 return n + 1 + mips_build_integer (codes, offset);
1834 }
1835 }
1836 return 0;
1837
1838 case SYMBOL_REF:
1839 case LABEL_REF:
1840 return mips_symbol_insns (mips_classify_symbol (x));
1841
1842 default:
1843 return 0;
1844 }
1845 }
1846
1847
1848 /* Return the number of instructions needed for memory reference X.
1849 Count extended mips16 instructions as two instructions. */
1850
1851 int
1852 mips_fetch_insns (rtx x)
1853 {
1854 gcc_assert (MEM_P (x));
1855 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1856 }
1857
1858
1859 /* Return the number of instructions needed for an integer division. */
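/* For example (illustrative): with -mcheck-zero-division on a target that
   supports trap instructions, the division plus a conditional trap such
   as teq counts as 2; targets without traps need a branch around a break,
   giving 3; an r4000/r4400 errata workaround adds one more. */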
1860
1861 int
1862 mips_idiv_insns (void)
1863 {
1864 int count;
1865
1866 count = 1;
1867 if (TARGET_CHECK_ZERO_DIV)
1868 {
1869 if (GENERATE_DIVIDE_TRAPS)
1870 count++;
1871 else
1872 count += 2;
1873 }
1874
1875 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1876 count++;
1877 return count;
1878 }
1879
1880 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1881 returns a nonzero value if X is a legitimate address for a memory
1882 operand of the indicated MODE. STRICT is nonzero if this function
1883 is called during reload. */
1884
1885 bool
1886 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1887 {
1888 struct mips_address_info addr;
1889
1890 return mips_classify_address (&addr, x, mode, strict);
1891 }
1892
1893
1894 /* Copy VALUE to a register and return that register. If new pseudos
1895 are allowed, copy it into a new register, otherwise use DEST. */
1896
1897 static rtx
1898 mips_force_temporary (rtx dest, rtx value)
1899 {
1900 if (!no_new_pseudos)
1901 return force_reg (Pmode, value);
1902 else
1903 {
1904 emit_move_insn (copy_rtx (dest), value);
1905 return dest;
1906 }
1907 }
1908
1909
1910 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1911 and is used to load the high part into a register. */
1912
1913 rtx
1914 mips_split_symbol (rtx temp, rtx addr)
1915 {
1916 rtx high;
1917
1918 if (TARGET_MIPS16)
1919 high = mips16_gp_pseudo_reg ();
1920 else
1921 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1922 return gen_rtx_LO_SUM (Pmode, high, addr);
1923 }
1924
1925
1926 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1927 type SYMBOL_TYPE. */
1928
1929 rtx
1930 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1931 {
1932 rtx base;
1933 HOST_WIDE_INT offset;
1934
1935 mips_split_const (address, &base, &offset);
1936 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1937 UNSPEC_ADDRESS_FIRST + symbol_type);
1938 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1939 }
1940
1941
1942 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1943 high part to BASE and return the result. Just return BASE otherwise.
1944 TEMP is available as a temporary register if needed.
1945
1946 The returned expression can be used as the first operand to a LO_SUM. */
1947
1948 static rtx
1949 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1950 enum mips_symbol_type symbol_type)
1951 {
1952 if (mips_split_p[symbol_type])
1953 {
1954 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1955 addr = mips_force_temporary (temp, addr);
1956 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1957 }
1958 return base;
1959 }
1960
1961
1962 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1963 mips_force_temporary; it is only needed when OFFSET is not a
1964 SMALL_OPERAND. */
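/* For example (illustrative), in non-mips16 code with OFFSET == 0x12345:
   the excess 0x10000 is loaded into TEMP (a single lui), added to REG,
   and the returned address is TEMP' + 0x2345, whose offset now fits in a
   signed 16-bit immediate. In mips16 code the whole offset is loaded
   into TEMP instead, so the returned address needs no offset at all. */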
1965
1966 static rtx
1967 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1968 {
1969 if (!SMALL_OPERAND (offset))
1970 {
1971 rtx high;
1972 if (TARGET_MIPS16)
1973 {
1974 /* Load the full offset into a register so that we can use
1975 an unextended instruction for the address itself. */
1976 high = GEN_INT (offset);
1977 offset = 0;
1978 }
1979 else
1980 {
1981 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1982 high = GEN_INT (CONST_HIGH_PART (offset));
1983 offset = CONST_LOW_PART (offset);
1984 }
1985 high = mips_force_temporary (temp, high);
1986 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1987 }
1988 return plus_constant (reg, offset);
1989 }
1990
1991 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1992 referencing, and TYPE is the symbol type to use (either global
1993 dynamic or local dynamic). V0 is an RTX for the return value
1994 location. The entire insn sequence is returned. */
1995
1996 static GTY(()) rtx mips_tls_symbol;
1997
1998 static rtx
1999 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2000 {
2001 rtx insn, loc, tga, a0;
2002
2003 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2004
2005 if (!mips_tls_symbol)
2006 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2007
2008 loc = mips_unspec_address (sym, type);
2009
2010 start_sequence ();
2011
2012 emit_insn (gen_rtx_SET (Pmode, a0,
2013 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2014 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2015 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2016 CONST_OR_PURE_CALL_P (insn) = 1;
2017 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2018 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2019 insn = get_insns ();
2020
2021 end_sequence ();
2022
2023 return insn;
2024 }
2025
2026 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2027 return value will be a valid address and move_operand (either a REG
2028 or a LO_SUM). */
2029
2030 static rtx
2031 mips_legitimize_tls_address (rtx loc)
2032 {
2033 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2034 enum tls_model model;
2035
2036 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2037 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2038
2039 model = SYMBOL_REF_TLS_MODEL (loc);
2040 /* Only TARGET_ABICALLS code can have more than one module; other
2041 code must be static and should not use a GOT. All TLS models
2042 reduce to local exec in this situation. */
2043 if (!TARGET_ABICALLS)
2044 model = TLS_MODEL_LOCAL_EXEC;
2045
2046 switch (model)
2047 {
2048 case TLS_MODEL_GLOBAL_DYNAMIC:
2049 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2050 dest = gen_reg_rtx (Pmode);
2051 emit_libcall_block (insn, dest, v0, loc);
2052 break;
2053
2054 case TLS_MODEL_LOCAL_DYNAMIC:
2055 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2056 tmp1 = gen_reg_rtx (Pmode);
2057
2058 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2059 share the LDM result with other LD model accesses. */
2060 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2061 UNSPEC_TLS_LDM);
2062 emit_libcall_block (insn, tmp1, v0, eqv);
2063
2064 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2065 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2066 mips_unspec_address (loc, SYMBOL_DTPREL));
2067 break;
2068
2069 case TLS_MODEL_INITIAL_EXEC:
2070 tmp1 = gen_reg_rtx (Pmode);
2071 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2072 if (Pmode == DImode)
2073 {
2074 emit_insn (gen_tls_get_tp_di (v1));
2075 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2076 }
2077 else
2078 {
2079 emit_insn (gen_tls_get_tp_si (v1));
2080 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2081 }
2082 dest = gen_reg_rtx (Pmode);
2083 emit_insn (gen_add3_insn (dest, tmp1, v1));
2084 break;
2085
2086 case TLS_MODEL_LOCAL_EXEC:
2087 if (Pmode == DImode)
2088 emit_insn (gen_tls_get_tp_di (v1));
2089 else
2090 emit_insn (gen_tls_get_tp_si (v1));
2091
2092 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2093 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2094 mips_unspec_address (loc, SYMBOL_TPREL));
2095 break;
2096
2097 default:
2098 gcc_unreachable ();
2099 }
2100
2101 return dest;
2102 }
2103
2104 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2105 be legitimized in a way that the generic machinery might not expect,
2106 put the new address in *XLOC and return true. MODE is the mode of
2107 the memory being accessed. */
2108
2109 bool
2110 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2111 {
2112 enum mips_symbol_type symbol_type;
2113
2114 if (mips_tls_operand_p (*xloc))
2115 {
2116 *xloc = mips_legitimize_tls_address (*xloc);
2117 return true;
2118 }
2119
2120 /* See if the address can be split into a high part and a LO_SUM. */
2121 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2122 && mips_symbolic_address_p (symbol_type, mode)
2123 && mips_split_p[symbol_type])
2124 {
2125 *xloc = mips_split_symbol (0, *xloc);
2126 return true;
2127 }
2128
2129 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2130 {
2131 /* Handle REG + CONSTANT using mips_add_offset. */
2132 rtx reg;
2133
2134 reg = XEXP (*xloc, 0);
2135 if (!mips_valid_base_register_p (reg, mode, 0))
2136 reg = copy_to_mode_reg (Pmode, reg);
2137 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2138 return true;
2139 }
2140
2141 return false;
2142 }
2143
2144
2145 /* Subroutine of mips_build_integer (with the same interface).
2146 Assume that the final action in the sequence should be a left shift. */
2147
2148 static unsigned int
2149 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2150 {
2151 unsigned int i, shift;
2152
2153 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2154 since signed numbers are easier to load than unsigned ones. */
2155 shift = 0;
2156 while ((value & 1) == 0)
2157 value /= 2, shift++;
2158
2159 i = mips_build_integer (codes, value);
2160 codes[i].code = ASHIFT;
2161 codes[i].value = shift;
2162 return i + 1;
2163 }
2164
2165
2166 /* As for mips_build_shift, but assume that the final action will be
2167 an IOR or PLUS operation. */
2168
2169 static unsigned int
2170 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2171 {
2172 unsigned HOST_WIDE_INT high;
2173 unsigned int i;
2174
2175 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2176 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2177 {
2178 /* The constant is too complex to load with a simple lui/ori pair
2179 so our goal is to clear as many trailing zeros as possible.
2180 In this case, we know bit 16 is set and that the low 16 bits
2181 form a negative number. If we subtract that number from VALUE,
2182 we will clear at least the lowest 17 bits, maybe more. */
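/* For example (illustrative): 0x123458fff has bits 15 and 16 set, so it
   is rewritten as 0x123460000 + (-0x7001). The high part has 17
   trailing zeros and is handled by mips_build_shift, giving roughly
   "li, dsll 17, daddiu" -- three operations in total. */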
2183 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2184 codes[i].code = PLUS;
2185 codes[i].value = CONST_LOW_PART (value);
2186 }
2187 else
2188 {
2189 i = mips_build_integer (codes, high);
2190 codes[i].code = IOR;
2191 codes[i].value = value & 0xffff;
2192 }
2193 return i + 1;
2194 }
2195
2196
2197 /* Fill CODES with a sequence of rtl operations to load VALUE.
2198 Return the number of operations needed. */
2199
2200 static unsigned int
2201 mips_build_integer (struct mips_integer_op *codes,
2202 unsigned HOST_WIDE_INT value)
2203 {
2204 if (SMALL_OPERAND (value)
2205 || SMALL_OPERAND_UNSIGNED (value)
2206 || LUI_OPERAND (value))
2207 {
2208 /* The value can be loaded with a single instruction. */
2209 codes[0].code = UNKNOWN;
2210 codes[0].value = value;
2211 return 1;
2212 }
2213 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2214 {
2215 /* Either the constant is a simple LUI/ORI combination or its
2216 lowest bit is set. We don't want to shift in this case. */
2217 return mips_build_lower (codes, value);
2218 }
2219 else if ((value & 0xffff) == 0)
2220 {
2221 /* The constant will need at least three actions. The lowest
2222 16 bits are clear, so the final action will be a shift. */
2223 return mips_build_shift (codes, value);
2224 }
2225 else
2226 {
2227 /* The final action could be a shift, add or inclusive OR.
2228 Rather than use a complex condition to select the best
2229 approach, try both mips_build_shift and mips_build_lower
2230 and pick the one that gives the shortest sequence.
2231 Note that this case is only used once per constant. */
2232 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2233 unsigned int cost, alt_cost;
2234
2235 cost = mips_build_shift (codes, value);
2236 alt_cost = mips_build_lower (alt_codes, value);
2237 if (alt_cost < cost)
2238 {
2239 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2240 cost = alt_cost;
2241 }
2242 return cost;
2243 }
2244 }
2245
2246
2247 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2248
2249 void
2250 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2251 {
2252 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2253 enum machine_mode mode;
2254 unsigned int i, cost;
2255 rtx x;
2256
2257 mode = GET_MODE (dest);
2258 cost = mips_build_integer (codes, value);
2259
2260 /* Apply each binary operation to X. Invariant: X is a legitimate
2261 source operand for a SET pattern. */
2262 x = GEN_INT (codes[0].value);
2263 for (i = 1; i < cost; i++)
2264 {
2265 if (no_new_pseudos)
2266 {
2267 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2268 x = temp;
2269 }
2270 else
2271 x = force_reg (mode, x);
2272 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2273 }
2274
2275 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2276 }
2277
2278
2279 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2280 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2281 move_operand. */
2282
2283 static void
2284 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2285 {
2286 rtx base;
2287 HOST_WIDE_INT offset;
2288
2289 /* Split moves of big integers into smaller pieces. */
2290 if (splittable_const_int_operand (src, mode))
2291 {
2292 mips_move_integer (dest, dest, INTVAL (src));
2293 return;
2294 }
2295
2296 /* Split moves of symbolic constants into high/low pairs. */
2297 if (splittable_symbolic_operand (src, mode))
2298 {
2299 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2300 return;
2301 }
2302
2303 if (mips_tls_operand_p (src))
2304 {
2305 emit_move_insn (dest, mips_legitimize_tls_address (src));
2306 return;
2307 }
2308
2309 /* If we have (const (plus symbol offset)), load the symbol first
2310 and then add in the offset. This is usually better than forcing
2311 the constant into memory, at least in non-mips16 code. */
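/* For example (illustrative): for (const (plus SYM 400000)), when new
   pseudos are available, SYM is loaded into a register first and
   mips_add_offset then adds the large offset, instead of spilling the
   whole expression to the constant pool. */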
2312 mips_split_const (src, &base, &offset);
2313 if (!TARGET_MIPS16
2314 && offset != 0
2315 && (!no_new_pseudos || SMALL_OPERAND (offset)))
2316 {
2317 base = mips_force_temporary (dest, base);
2318 emit_move_insn (dest, mips_add_offset (0, base, offset));
2319 return;
2320 }
2321
2322 src = force_const_mem (mode, src);
2323
2324 /* When using explicit relocs, constant pool references are sometimes
2325 not legitimate addresses. */
2326 if (!memory_operand (src, VOIDmode))
2327 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2328 emit_move_insn (dest, src);
2329 }
2330
2331
2332 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2333 sequence that is valid. */
2334
2335 bool
2336 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2337 {
2338 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2339 {
2340 emit_move_insn (dest, force_reg (mode, src));
2341 return true;
2342 }
2343
2344 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2345 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2346 && REG_P (src) && MD_REG_P (REGNO (src))
2347 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2348 {
2349 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2350 if (GET_MODE_SIZE (mode) <= 4)
2351 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2352 gen_rtx_REG (SImode, REGNO (src)),
2353 gen_rtx_REG (SImode, other_regno)));
2354 else
2355 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2356 gen_rtx_REG (DImode, REGNO (src)),
2357 gen_rtx_REG (DImode, other_regno)));
2358 return true;
2359 }
2360
2361 /* We need to deal with constants that would be legitimate
2362 immediate_operands but not legitimate move_operands. */
2363 if (CONSTANT_P (src) && !move_operand (src, mode))
2364 {
2365 mips_legitimize_const_move (mode, dest, src);
2366 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2367 return true;
2368 }
2369 return false;
2370 }
2371
2372 /* We need a lot of little routines to check constant values on the
2373 mips16. These are used to figure out how long the instruction will
2374 be. It would be much better to do this using constraints, but
2375 there aren't nearly enough letters available. */
2376
2377 static int
2378 m16_check_op (rtx op, int low, int high, int mask)
2379 {
2380 return (GET_CODE (op) == CONST_INT
2381 && INTVAL (op) >= low
2382 && INTVAL (op) <= high
2383 && (INTVAL (op) & mask) == 0);
2384 }
2385
2386 int
2387 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2388 {
2389 return m16_check_op (op, 0x1, 0x8, 0);
2390 }
2391
2392 int
2393 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2394 {
2395 return m16_check_op (op, - 0x8, 0x7, 0);
2396 }
2397
2398 int
2399 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2400 {
2401 return m16_check_op (op, - 0x7, 0x8, 0);
2402 }
2403
2404 int
2405 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2406 {
2407 return m16_check_op (op, - 0x10, 0xf, 0);
2408 }
2409
2410 int
2411 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2412 {
2413 return m16_check_op (op, - 0xf, 0x10, 0);
2414 }
2415
2416 int
2417 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2418 {
2419 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2420 }
2421
2422 int
2423 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2424 {
2425 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2426 }
2427
2428 int
2429 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2430 {
2431 return m16_check_op (op, - 0x80, 0x7f, 0);
2432 }
2433
2434 int
2435 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2436 {
2437 return m16_check_op (op, - 0x7f, 0x80, 0);
2438 }
2439
2440 int
2441 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2442 {
2443 return m16_check_op (op, 0x0, 0xff, 0);
2444 }
2445
2446 int
2447 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2448 {
2449 return m16_check_op (op, - 0xff, 0x0, 0);
2450 }
2451
2452 int
2453 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2454 {
2455 return m16_check_op (op, - 0x1, 0xfe, 0);
2456 }
2457
2458 int
2459 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2460 {
2461 return m16_check_op (op, 0x0, 0xff << 2, 3);
2462 }
2463
2464 int
2465 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2466 {
2467 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2468 }
2469
2470 int
2471 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2472 {
2473 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2474 }
2475
2476 int
2477 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2478 {
2479 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2480 }
2481
2482 static bool
2483 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2484 {
2485 enum machine_mode mode = GET_MODE (x);
2486 bool float_mode_p = FLOAT_MODE_P (mode);
2487
2488 switch (code)
2489 {
2490 case CONST_INT:
2491 if (TARGET_MIPS16)
2492 {
2493 /* A number between 1 and 8 inclusive is efficient for a shift.
2494 Otherwise, we will need an extended instruction. */
2495 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2496 || (outer_code) == LSHIFTRT)
2497 {
2498 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2499 *total = 0;
2500 else
2501 *total = COSTS_N_INSNS (1);
2502 return true;
2503 }
2504
2505 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2506 if ((outer_code) == XOR
2507 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2508 {
2509 *total = 0;
2510 return true;
2511 }
2512
2513 /* We may be able to use slt or sltu for a comparison with a
2514 signed 16 bit value. (The boundary conditions aren't quite
2515 right, but this is just a heuristic anyhow.) */
2516 if (((outer_code) == LT || (outer_code) == LE
2517 || (outer_code) == GE || (outer_code) == GT
2518 || (outer_code) == LTU || (outer_code) == LEU
2519 || (outer_code) == GEU || (outer_code) == GTU)
2520 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2521 {
2522 *total = 0;
2523 return true;
2524 }
2525
2526 /* Equality comparisons with 0 are cheap. */
2527 if (((outer_code) == EQ || (outer_code) == NE)
2528 && INTVAL (x) == 0)
2529 {
2530 *total = 0;
2531 return true;
2532 }
2533
2534 /* Constants in the range 0...255 can be loaded with an unextended
2535 instruction. They are therefore as cheap as a register move.
2536
2537 Given the choice between "li R1,0...255" and "move R1,R2"
2538 (where R2 is a known constant), it is usually better to use "li",
2539 since we do not want to unnecessarily extend the lifetime
2540 of R2. */
2541 if (outer_code == SET
2542 && INTVAL (x) >= 0
2543 && INTVAL (x) < 256)
2544 {
2545 *total = 0;
2546 return true;
2547 }
2548 }
2549 else
2550 {
2551 /* These can be used anywhere. */
2552 *total = 0;
2553 return true;
2554 }
2555
2556 /* Otherwise fall through to the handling below because
2557 we'll need to construct the constant. */
2558
2559 case CONST:
2560 case SYMBOL_REF:
2561 case LABEL_REF:
2562 case CONST_DOUBLE:
2563 if (LEGITIMATE_CONSTANT_P (x))
2564 {
2565 *total = COSTS_N_INSNS (1);
2566 return true;
2567 }
2568 else
2569 {
2570 /* The value will need to be fetched from the constant pool. */
2571 *total = CONSTANT_POOL_COST;
2572 return true;
2573 }
2574
2575 case MEM:
2576 {
2577 /* If the address is legitimate, return the number of
2578 instructions it needs, otherwise use the default handling. */
2579 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2580 if (n > 0)
2581 {
2582 *total = COSTS_N_INSNS (n + 1);
2583 return true;
2584 }
2585 return false;
2586 }
2587
2588 case FFS:
2589 *total = COSTS_N_INSNS (6);
2590 return true;
2591
2592 case NOT:
2593 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2594 return true;
2595
2596 case AND:
2597 case IOR:
2598 case XOR:
2599 if (mode == DImode && !TARGET_64BIT)
2600 {
2601 *total = COSTS_N_INSNS (2);
2602 return true;
2603 }
2604 return false;
2605
2606 case ASHIFT:
2607 case ASHIFTRT:
2608 case LSHIFTRT:
2609 if (mode == DImode && !TARGET_64BIT)
2610 {
2611 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2612 ? 4 : 12);
2613 return true;
2614 }
2615 return false;
2616
2617 case ABS:
2618 if (float_mode_p)
2619 *total = COSTS_N_INSNS (1);
2620 else
2621 *total = COSTS_N_INSNS (4);
2622 return true;
2623
2624 case LO_SUM:
2625 *total = COSTS_N_INSNS (1);
2626 return true;
2627
2628 case PLUS:
2629 case MINUS:
2630 if (float_mode_p)
2631 {
2632 *total = mips_cost->fp_add;
2633 return true;
2634 }
2635
2636 else if (mode == DImode && !TARGET_64BIT)
2637 {
2638 *total = COSTS_N_INSNS (4);
2639 return true;
2640 }
2641 return false;
2642
2643 case NEG:
2644 if (mode == DImode && !TARGET_64BIT)
2645 {
2646 *total = COSTS_N_INSNS (4);
2647 return true;
2648 }
2649 return false;
2650
2651 case MULT:
2652 if (mode == SFmode)
2653 *total = mips_cost->fp_mult_sf;
2654
2655 else if (mode == DFmode)
2656 *total = mips_cost->fp_mult_df;
2657
2658 else if (mode == SImode)
2659 *total = mips_cost->int_mult_si;
2660
2661 else
2662 *total = mips_cost->int_mult_di;
2663
2664 return true;
2665
2666 case DIV:
2667 case MOD:
2668 if (float_mode_p)
2669 {
2670 if (mode == SFmode)
2671 *total = mips_cost->fp_div_sf;
2672 else
2673 *total = mips_cost->fp_div_df;
2674
2675 return true;
2676 }
2677 /* Fall through. */
2678
2679 case UDIV:
2680 case UMOD:
2681 if (mode == DImode)
2682 *total = mips_cost->int_div_di;
2683 else
2684 *total = mips_cost->int_div_si;
2685
2686 return true;
2687
2688 case SIGN_EXTEND:
2689 /* A sign extend from SImode to DImode in 64 bit mode is often
2690 zero instructions, because the result can often be used
2691 directly by another instruction; we'll call it one. */
2692 if (TARGET_64BIT && mode == DImode
2693 && GET_MODE (XEXP (x, 0)) == SImode)
2694 *total = COSTS_N_INSNS (1);
2695 else
2696 *total = COSTS_N_INSNS (2);
2697 return true;
2698
2699 case ZERO_EXTEND:
2700 if (TARGET_64BIT && mode == DImode
2701 && GET_MODE (XEXP (x, 0)) == SImode)
2702 *total = COSTS_N_INSNS (2);
2703 else
2704 *total = COSTS_N_INSNS (1);
2705 return true;
2706
2707 case FLOAT:
2708 case UNSIGNED_FLOAT:
2709 case FIX:
2710 case FLOAT_EXTEND:
2711 case FLOAT_TRUNCATE:
2712 case SQRT:
2713 *total = mips_cost->fp_add;
2714 return true;
2715
2716 default:
2717 return false;
2718 }
2719 }
2720
2721 /* Provide the costs of an addressing mode that contains ADDR.
2722 If ADDR is not a valid address, its cost is irrelevant. */
2723
2724 static int
2725 mips_address_cost (rtx addr)
2726 {
2727 return mips_address_insns (addr, SImode);
2728 }
2729
2730 /* Return one word of double-word value OP, taking into account the fixed
2731 endianness of certain registers. HIGH_P is true to select the high part,
2732 false to select the low part. */
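/* For example (illustrative): for a DImode value in memory on a
   little-endian target, the high word is at byte offset UNITS_PER_WORD
   and the low word at offset 0; on a big-endian target it is the other
   way around. The FPR and HI/LO accumulator cases below are handled by
   register number instead, because those register pairs have a fixed
   layout regardless of memory endianness. */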
2733
2734 rtx
2735 mips_subword (rtx op, int high_p)
2736 {
2737 unsigned int byte;
2738 enum machine_mode mode;
2739
2740 mode = GET_MODE (op);
2741 if (mode == VOIDmode)
2742 mode = DImode;
2743
2744 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2745 byte = UNITS_PER_WORD;
2746 else
2747 byte = 0;
2748
2749 if (REG_P (op))
2750 {
2751 if (FP_REG_P (REGNO (op)))
2752 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2753 if (ACC_HI_REG_P (REGNO (op)))
2754 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2755 }
2756
2757 if (MEM_P (op))
2758 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2759
2760 return simplify_gen_subreg (word_mode, op, mode, byte);
2761 }
2762
2763
2764 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2765
2766 bool
2767 mips_split_64bit_move_p (rtx dest, rtx src)
2768 {
2769 if (TARGET_64BIT)
2770 return false;
2771
2772 /* FP->FP moves can be done in a single instruction. */
2773 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2774 return false;
2775
2776 /* Check for floating-point loads and stores. They can be done using
2777 ldc1 and sdc1 on MIPS II and above. */
2778 if (mips_isa > 1)
2779 {
2780 if (FP_REG_RTX_P (dest) && MEM_P (src))
2781 return false;
2782 if (FP_REG_RTX_P (src) && MEM_P (dest))
2783 return false;
2784 }
2785 return true;
2786 }
2787
2788
2789 /* Split a 64-bit move from SRC to DEST assuming that
2790 mips_split_64bit_move_p holds.
2791
2792 Moves into and out of FPRs cause some difficulty here. Such moves
2793 will always be DFmode, since paired FPRs are not allowed to store
2794 DImode values. The most natural representation would be two separate
2795 32-bit moves, such as:
2796
2797 (set (reg:SI $f0) (mem:SI ...))
2798 (set (reg:SI $f1) (mem:SI ...))
2799
2800 However, the second insn is invalid because odd-numbered FPRs are
2801 not allowed to store independent values. Use the patterns load_df_low,
2802 load_df_high and store_df_high instead. */
2803
2804 void
2805 mips_split_64bit_move (rtx dest, rtx src)
2806 {
2807 if (FP_REG_RTX_P (dest))
2808 {
2809 /* Loading an FPR from memory or from GPRs. */
2810 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2811 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2812 copy_rtx (dest)));
2813 }
2814 else if (FP_REG_RTX_P (src))
2815 {
2816 /* Storing an FPR into memory or GPRs. */
2817 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2818 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2819 }
2820 else
2821 {
2822 /* The operation can be split into two normal moves. Decide in
2823 which order to do them. */
2824 rtx low_dest;
2825
2826 low_dest = mips_subword (dest, 0);
2827 if (REG_P (low_dest)
2828 && reg_overlap_mentioned_p (low_dest, src))
2829 {
2830 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2831 emit_move_insn (low_dest, mips_subword (src, 0));
2832 }
2833 else
2834 {
2835 emit_move_insn (low_dest, mips_subword (src, 0));
2836 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2837 }
2838 }
2839 }
2840
2841 /* Return the appropriate instructions to move SRC into DEST. Assume
2842 that SRC is operand 1 and DEST is operand 0. */
2843
2844 const char *
2845 mips_output_move (rtx dest, rtx src)
2846 {
2847 enum rtx_code dest_code, src_code;
2848 bool dbl_p;
2849
2850 dest_code = GET_CODE (dest);
2851 src_code = GET_CODE (src);
2852 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2853
2854 if (dbl_p && mips_split_64bit_move_p (dest, src))
2855 return "#";
2856
2857 if ((src_code == REG && GP_REG_P (REGNO (src)))
2858 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2859 {
2860 if (dest_code == REG)
2861 {
2862 if (GP_REG_P (REGNO (dest)))
2863 return "move\t%0,%z1";
2864
2865 if (MD_REG_P (REGNO (dest)))
2866 return "mt%0\t%z1";
2867
2868 if (DSP_ACC_REG_P (REGNO (dest)))
2869 {
2870 static char retval[] = "mt__\t%z1,%q0";
2871 retval[2] = reg_names[REGNO (dest)][4];
2872 retval[3] = reg_names[REGNO (dest)][5];
2873 return retval;
2874 }
2875
2876 if (FP_REG_P (REGNO (dest)))
2877 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2878
2879 if (ALL_COP_REG_P (REGNO (dest)))
2880 {
2881 static char retval[] = "dmtc_\t%z1,%0";
2882
2883 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2884 return (dbl_p ? retval : retval + 1);
2885 }
2886 }
2887 if (dest_code == MEM)
2888 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2889 }
2890 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2891 {
2892 if (src_code == REG)
2893 {
2894 if (DSP_ACC_REG_P (REGNO (src)))
2895 {
2896 static char retval[] = "mf__\t%0,%q1";
2897 retval[2] = reg_names[REGNO (src)][4];
2898 retval[3] = reg_names[REGNO (src)][5];
2899 return retval;
2900 }
2901
2902 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2903 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2904
2905 if (FP_REG_P (REGNO (src)))
2906 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2907
2908 if (ALL_COP_REG_P (REGNO (src)))
2909 {
2910 static char retval[] = "dmfc_\t%0,%1";
2911
2912 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2913 return (dbl_p ? retval : retval + 1);
2914 }
2915 }
2916
2917 if (src_code == MEM)
2918 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2919
2920 if (src_code == CONST_INT)
2921 {
2922 /* Don't use the X format, because that will give out of
2923 range numbers for 64 bit hosts and 32 bit targets. */
2924 if (!TARGET_MIPS16)
2925 return "li\t%0,%1\t\t\t# %X1";
2926
2927 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2928 return "li\t%0,%1";
2929
2930 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2931 return "#";
2932 }
2933
2934 if (src_code == HIGH)
2935 return "lui\t%0,%h1";
2936
2937 if (CONST_GP_P (src))
2938 return "move\t%0,%1";
2939
2940 if (symbolic_operand (src, VOIDmode))
2941 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2942 }
2943 if (src_code == REG && FP_REG_P (REGNO (src)))
2944 {
2945 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2946 {
2947 if (GET_MODE (dest) == V2SFmode)
2948 return "mov.ps\t%0,%1";
2949 else
2950 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2951 }
2952
2953 if (dest_code == MEM)
2954 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2955 }
2956 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2957 {
2958 if (src_code == MEM)
2959 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2960 }
2961 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2962 {
2963 static char retval[] = "l_c_\t%0,%1";
2964
2965 retval[1] = (dbl_p ? 'd' : 'w');
2966 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2967 return retval;
2968 }
2969 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2970 {
2971 static char retval[] = "s_c_\t%1,%0";
2972
2973 retval[1] = (dbl_p ? 'd' : 'w');
2974 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2975 return retval;
2976 }
2977 gcc_unreachable ();
2978 }
2979
2980 /* Restore $gp from its save slot. Valid only when using o32 or
2981 o64 abicalls. */
2982
2983 void
2984 mips_restore_gp (void)
2985 {
2986 rtx address, slot;
2987
2988 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2989
2990 address = mips_add_offset (pic_offset_table_rtx,
2991 frame_pointer_needed
2992 ? hard_frame_pointer_rtx
2993 : stack_pointer_rtx,
2994 current_function_outgoing_args_size);
2995 slot = gen_rtx_MEM (Pmode, address);
2996
2997 emit_move_insn (pic_offset_table_rtx, slot);
2998 if (!TARGET_EXPLICIT_RELOCS)
2999 emit_insn (gen_blockage ());
3000 }
3001
3002 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3003
3004 static void
3005 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3006 {
3007 emit_insn (gen_rtx_SET (VOIDmode, target,
3008 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3009 }
3010
3011 /* Return true if CMP1 is a suitable second operand for relational
3012 operator CODE. See also the *sCC patterns in mips.md. */
3013
3014 static bool
3015 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3016 {
3017 switch (code)
3018 {
3019 case GT:
3020 case GTU:
3021 return reg_or_0_operand (cmp1, VOIDmode);
3022
3023 case GE:
3024 case GEU:
3025 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3026
3027 case LT:
3028 case LTU:
3029 return arith_operand (cmp1, VOIDmode);
3030
3031 case LE:
3032 return sle_operand (cmp1, VOIDmode);
3033
3034 case LEU:
3035 return sleu_operand (cmp1, VOIDmode);
3036
3037 default:
3038 gcc_unreachable ();
3039 }
3040 }
3041
3042 /* Canonicalize LE or LEU comparisons into LT comparisons when
3043 possible to avoid extra instructions or inverting the
3044 comparison. */
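/* For example (illustrative): (le x 100) can become (lt x 101) with 101
   forced into a register, but (leu x 0xffffffff) in SImode cannot be
   converted, because adding 1 wraps around to 0; the checks below
   reject both overflow cases. */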
3045
3046 static bool
3047 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3048 enum machine_mode mode)
3049 {
3050 HOST_WIDE_INT original, plus_one;
3051
3052 if (GET_CODE (*cmp1) != CONST_INT)
3053 return false;
3054
3055 original = INTVAL (*cmp1);
3056 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3057
3058 switch (*code)
3059 {
3060 case LE:
3061 if (original < plus_one)
3062 {
3063 *code = LT;
3064 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3065 return true;
3066 }
3067 break;
3068
3069 case LEU:
3070 if (plus_one != 0)
3071 {
3072 *code = LTU;
3073 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3074 return true;
3075 }
3076 break;
3077
3078 default:
3079 return false;
3080 }
3081
3082 return false;
3083
3084 }
3085
3086 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3087 result in TARGET. CMP0 and TARGET are register_operands that have
3088 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3089 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
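/* For example (illustrative): for (le CMP0 CMP1) with CMP1 in a register,
   no slt-style instruction matches directly and the constant
   canonicalization does not apply, but the inverse (gt CMP0 CMP1) does;
   we therefore either compute the inverse and flip *INVERT_PTR, or
   compute it into a temporary and xor the result with 1. */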
3090
3091 static void
3092 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3093 rtx target, rtx cmp0, rtx cmp1)
3094 {
3095 /* First see if there is a MIPS instruction that can do this operation
3096 with CMP1 in its current form. If not, try to canonicalize the
3097 comparison to LT. If that fails, try doing the same for the
3098 inverse operation. If that also fails, force CMP1 into a register
3099 and try again. */
3100 if (mips_relational_operand_ok_p (code, cmp1))
3101 mips_emit_binary (code, target, cmp0, cmp1);
3102 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3103 mips_emit_binary (code, target, cmp0, cmp1);
3104 else
3105 {
3106 enum rtx_code inv_code = reverse_condition (code);
3107 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3108 {
3109 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3110 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3111 }
3112 else if (invert_ptr == 0)
3113 {
3114 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3115 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3116 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3117 }
3118 else
3119 {
3120 *invert_ptr = !*invert_ptr;
3121 mips_emit_binary (inv_code, target, cmp0, cmp1);
3122 }
3123 }
3124 }
3125
3126 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3127 The register will have the same mode as CMP0. */
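/* For example (illustrative): comparing a register with the constant 5
   yields "xori tmp,reg,5", and comparing two registers yields
   "subu tmp,reg1,reg2"; in both cases tmp is zero exactly when the
   operands are equal. */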
3128
3129 static rtx
3130 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3131 {
3132 if (cmp1 == const0_rtx)
3133 return cmp0;
3134
3135 if (uns_arith_operand (cmp1, VOIDmode))
3136 return expand_binop (GET_MODE (cmp0), xor_optab,
3137 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3138
3139 return expand_binop (GET_MODE (cmp0), sub_optab,
3140 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3141 }
3142
3143 /* Convert *CODE into a code that can be used in a floating-point
3144 scc instruction (c.<cond>.<fmt>). Return true if the values of
3145 the condition code registers will be inverted, with 0 indicating
3146 that the condition holds. */
3147
3148 static bool
3149 mips_reverse_fp_cond_p (enum rtx_code *code)
3150 {
3151 switch (*code)
3152 {
3153 case NE:
3154 case LTGT:
3155 case ORDERED:
3156 *code = reverse_condition_maybe_unordered (*code);
3157 return true;
3158
3159 default:
3160 return false;
3161 }
3162 }
3163
3164 /* Convert a comparison into something that can be used in a branch or
3165 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3166 being compared and *CODE is the code used to compare them.
3167
3168 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3169 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3170 otherwise any standard branch condition can be used. The standard branch
3171 conditions are:
3172
3173 - EQ/NE between two registers.
3174 - any comparison between a register and zero. */
3175
3176 static void
3177 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3178 {
3179 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3180 {
3181 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3182 {
3183 *op0 = cmp_operands[0];
3184 *op1 = cmp_operands[1];
3185 }
3186 else if (*code == EQ || *code == NE)
3187 {
3188 if (need_eq_ne_p)
3189 {
3190 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3191 *op1 = const0_rtx;
3192 }
3193 else
3194 {
3195 *op0 = cmp_operands[0];
3196 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3197 }
3198 }
3199 else
3200 {
3201 /* The comparison needs a separate scc instruction. Store the
3202 result of the scc in *OP0 and compare it against zero. */
3203 bool invert = false;
3204 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3205 *op1 = const0_rtx;
3206 mips_emit_int_relational (*code, &invert, *op0,
3207 cmp_operands[0], cmp_operands[1]);
3208 *code = (invert ? EQ : NE);
3209 }
3210 }
3211 else
3212 {
3213 enum rtx_code cmp_code;
3214
3215 /* Floating-point tests use a separate c.cond.fmt comparison to
3216 set a condition code register. The branch or conditional move
3217 will then compare that register against zero.
3218
3219 Set CMP_CODE to the code of the comparison instruction and
3220 *CODE to the code that the branch or move should use. */
3221 cmp_code = *code;
3222 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3223 *op0 = (ISA_HAS_8CC
3224 ? gen_reg_rtx (CCmode)
3225 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3226 *op1 = const0_rtx;
3227 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3228 }
3229 }
3230
3231 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3232 Store the result in TARGET and return true if successful.
3233
3234 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3235
3236 bool
3237 mips_emit_scc (enum rtx_code code, rtx target)
3238 {
3239 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3240 return false;
3241
3242 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3243 if (code == EQ || code == NE)
3244 {
3245 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3246 mips_emit_binary (code, target, zie, const0_rtx);
3247 }
3248 else
3249 mips_emit_int_relational (code, 0, target,
3250 cmp_operands[0], cmp_operands[1]);
3251 return true;
3252 }
3253
3254 /* Emit the common code for doing conditional branches.
3255 operand[0] is the label to jump to.
3256 The comparison operands are saved away by cmp{si,di,sf,df}. */
3257
3258 void
3259 gen_conditional_branch (rtx *operands, enum rtx_code code)
3260 {
3261 rtx op0, op1, condition;
3262
3263 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3264 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3265 emit_jump_insn (gen_condjump (condition, operands[0]));
3266 }
3267
3268 /* Implement:
3269
3270 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3271 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3272
3273 void
3274 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3275 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3276 {
3277 rtx cmp_result;
3278 bool reversed_p;
3279
3280 reversed_p = mips_reverse_fp_cond_p (&cond);
3281 cmp_result = gen_reg_rtx (CCV2mode);
3282 emit_insn (gen_scc_ps (cmp_result,
3283 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3284 if (reversed_p)
3285 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3286 cmp_result));
3287 else
3288 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3289 cmp_result));
3290 }
3291
3292 /* Emit the common code for conditional moves. OPERANDS is the array
3293 of operands passed to the conditional move define_expand. */
3294
3295 void
3296 gen_conditional_move (rtx *operands)
3297 {
3298 enum rtx_code code;
3299 rtx op0, op1;
3300
3301 code = GET_CODE (operands[1]);
3302 mips_emit_compare (&code, &op0, &op1, true);
3303 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3304 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3305 gen_rtx_fmt_ee (code,
3306 GET_MODE (op0),
3307 op0, op1),
3308 operands[2], operands[3])));
3309 }
3310
3311 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3312 the conditional_trap expander. */
3313
3314 void
3315 mips_gen_conditional_trap (rtx *operands)
3316 {
3317 rtx op0, op1;
3318 enum rtx_code cmp_code = GET_CODE (operands[0]);
3319 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3320
3321 /* MIPS conditional trap machine instructions don't have GT or LE
3322 flavors, so we must invert the comparison and convert to LT and
3323 GE, respectively. */
3324 switch (cmp_code)
3325 {
3326 case GT: cmp_code = LT; break;
3327 case LE: cmp_code = GE; break;
3328 case GTU: cmp_code = LTU; break;
3329 case LEU: cmp_code = GEU; break;
3330 default: break;
3331 }
3332 if (cmp_code == GET_CODE (operands[0]))
3333 {
3334 op0 = cmp_operands[0];
3335 op1 = cmp_operands[1];
3336 }
3337 else
3338 {
3339 op0 = cmp_operands[1];
3340 op1 = cmp_operands[0];
3341 }
3342 op0 = force_reg (mode, op0);
3343 if (!arith_operand (op1, mode))
3344 op1 = force_reg (mode, op1);
3345
3346 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3347 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3348 operands[1]));
3349 }
3350
3351 /* Load function address ADDR into register DEST. SIBCALL_P is true
3352 if the address is needed for a sibling call. */
3353
3354 static void
3355 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3356 {
3357 /* If we're generating PIC, and this call is to a global function,
3358 try to allow its address to be resolved lazily. This isn't
3359 possible for NewABI sibcalls since the value of $gp on entry
3360 to the stub would be our caller's gp, not ours. */
3361 if (TARGET_EXPLICIT_RELOCS
3362 && !(sibcall_p && TARGET_NEWABI)
3363 && global_got_operand (addr, VOIDmode))
3364 {
3365 rtx high, lo_sum_symbol;
3366
3367 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3368 addr, SYMBOL_GOTOFF_CALL);
3369 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3370 if (Pmode == SImode)
3371 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3372 else
3373 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3374 }
3375 else
3376 emit_move_insn (dest, addr);
3377 }
3378
3379
3380 /* Expand a call or call_value instruction. RESULT is where the
3381 result will go (null for calls), ADDR is the address of the
3382 function, ARGS_SIZE is the size of the arguments and AUX is
3383 the value passed to us by mips_function_arg. SIBCALL_P is true
3384 if we are expanding a sibling call, false if we're expanding
3385 a normal call. */
3386
3387 void
3388 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3389 {
3390 rtx orig_addr, pattern, insn;
3391
3392 orig_addr = addr;
3393 if (!call_insn_operand (addr, VOIDmode))
3394 {
3395 addr = gen_reg_rtx (Pmode);
3396 mips_load_call_address (addr, orig_addr, sibcall_p);
3397 }
3398
3399 if (TARGET_MIPS16
3400 && mips16_hard_float
3401 && build_mips16_call_stub (result, addr, args_size,
3402 aux == 0 ? 0 : (int) GET_MODE (aux)))
3403 return;
3404
3405 if (result == 0)
3406 pattern = (sibcall_p
3407 ? gen_sibcall_internal (addr, args_size)
3408 : gen_call_internal (addr, args_size));
3409 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3410 {
3411 rtx reg1, reg2;
3412
3413 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3414 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3415 pattern =
3416 (sibcall_p
3417 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3418 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3419 }
3420 else
3421 pattern = (sibcall_p
3422 ? gen_sibcall_value_internal (result, addr, args_size)
3423 : gen_call_value_internal (result, addr, args_size));
3424
3425 insn = emit_call_insn (pattern);
3426
3427 /* Lazy-binding stubs require $gp to be valid on entry. */
3428 if (global_got_operand (orig_addr, VOIDmode))
3429 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3430 }
3431
3432
3433 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3434
3435 static bool
3436 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3437 tree exp ATTRIBUTE_UNUSED)
3438 {
3439 return TARGET_SIBCALLS;
3440 }
3441
3442 /* Emit code to move general operand SRC into condition-code
3443 register DEST. SCRATCH is a scratch TFmode float register.
3444 The sequence is:
3445
3446 FP1 = SRC
3447 FP2 = 0.0f
3448 DEST = FP2 < FP1
3449
3450 where FP1 and FP2 are single-precision float registers
3451 taken from SCRATCH. */
3452
3453 void
3454 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3455 {
3456 rtx fp1, fp2;
3457
3458 /* Change the source to SFmode. */
3459 if (MEM_P (src))
3460 src = adjust_address (src, SFmode, 0);
3461 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3462 src = gen_rtx_REG (SFmode, true_regnum (src));
3463
3464 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3465 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3466
3467 emit_move_insn (copy_rtx (fp1), src);
3468 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3469 emit_insn (gen_slt_sf (dest, fp2, fp1));
3470 }
3471
3472 /* Emit code to change the current function's return address to
3473 ADDRESS. SCRATCH is available as a scratch register, if needed.
3474 ADDRESS and SCRATCH are both word-mode GPRs. */
3475
3476 void
3477 mips_set_return_address (rtx address, rtx scratch)
3478 {
3479 rtx slot_address;
3480
3481 compute_frame_size (get_frame_size ());
3482 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3483 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3484 cfun->machine->frame.gp_sp_offset);
3485
3486 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3487 }
3488
3489 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3490 Assume that the areas do not overlap. */
3491
3492 static void
3493 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3494 {
3495 HOST_WIDE_INT offset, delta;
3496 unsigned HOST_WIDE_INT bits;
3497 int i;
3498 enum machine_mode mode;
3499 rtx *regs;
3500
3501 /* Work out how many bits to move at a time. If both operands have
3502 half-word alignment, it is usually better to move in half words.
3503 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3504 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3505 Otherwise move word-sized chunks. */
3506 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3507 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3508 bits = BITS_PER_WORD / 2;
3509 else
3510 bits = BITS_PER_WORD;
3511
3512 mode = mode_for_size (bits, MODE_INT, 0);
3513 delta = bits / BITS_PER_UNIT;
3514
3515 /* Allocate a buffer for the temporary registers. */
3516 regs = alloca (sizeof (rtx) * length / delta);
3517
3518 /* Load as many BITS-sized chunks as possible. Use a normal load if
3519 the source has enough alignment, otherwise use left/right pairs. */
3520 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3521 {
3522 regs[i] = gen_reg_rtx (mode);
3523 if (MEM_ALIGN (src) >= bits)
3524 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3525 else
3526 {
3527 rtx part = adjust_address (src, BLKmode, offset);
3528 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3529 gcc_unreachable ();
3530 }
3531 }
3532
3533 /* Copy the chunks to the destination. */
3534 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3535 if (MEM_ALIGN (dest) >= bits)
3536 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3537 else
3538 {
3539 rtx part = adjust_address (dest, BLKmode, offset);
3540 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3541 gcc_unreachable ();
3542 }
3543
3544 /* Mop up any left-over bytes. */
3545 if (offset < length)
3546 {
3547 src = adjust_address (src, BLKmode, offset);
3548 dest = adjust_address (dest, BLKmode, offset);
3549 move_by_pieces (dest, src, length - offset,
3550 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3551 }
3552 }
3553
3554 #define MAX_MOVE_REGS 4
3555 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3556
3557
3558 /* Helper function for doing a loop-based block operation on memory
3559 reference MEM. Each iteration of the loop will operate on LENGTH
3560 bytes of MEM.
3561
3562 Create a new base register for use within the loop and point it to
3563 the start of MEM. Create a new memory reference that uses this
3564 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3565
3566 static void
3567 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3568 rtx *loop_reg, rtx *loop_mem)
3569 {
3570 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3571
3572 /* Although the new mem does not refer to a known location,
3573 it does keep up to LENGTH bytes of alignment. */
3574 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3575 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3576 }
3577
3578
3579 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3580 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3581 memory regions do not overlap. */
3582
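/* A rough C-level sketch of the code emitted below (illustration only,
   not part of the original source):

     leftover = length % MAX_MOVE_BYTES;
     do
       {
         copy MAX_MOVE_BYTES from SRC to DEST;   /- mips_block_move_straight -/
         src_reg += MAX_MOVE_BYTES;
         dest_reg += MAX_MOVE_BYTES;
       }
     while (src_reg != final_src);
     if (leftover)
       copy LEFTOVER bytes from SRC to DEST;  */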
3583 static void
3584 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3585 {
3586 rtx label, src_reg, dest_reg, final_src;
3587 HOST_WIDE_INT leftover;
3588
3589 leftover = length % MAX_MOVE_BYTES;
3590 length -= leftover;
3591
3592 /* Create registers and memory references for use within the loop. */
3593 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3594 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3595
3596 /* Calculate the value that SRC_REG should have after the last iteration
3597 of the loop. */
3598 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3599 0, 0, OPTAB_WIDEN);
3600
3601 /* Emit the start of the loop. */
3602 label = gen_label_rtx ();
3603 emit_label (label);
3604
3605 /* Emit the loop body. */
3606 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3607
3608 /* Move on to the next block. */
3609 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3610 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3611
3612 /* Emit the loop condition. */
3613 if (Pmode == DImode)
3614 emit_insn (gen_cmpdi (src_reg, final_src));
3615 else
3616 emit_insn (gen_cmpsi (src_reg, final_src));
3617 emit_jump_insn (gen_bne (label));
3618
3619 /* Mop up any left-over bytes. */
3620 if (leftover)
3621 mips_block_move_straight (dest, src, leftover);
3622 }
3623
3624 /* Expand a movmemsi instruction. */
3625
3626 bool
3627 mips_expand_block_move (rtx dest, rtx src, rtx length)
3628 {
3629 if (GET_CODE (length) == CONST_INT)
3630 {
3631 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3632 {
3633 mips_block_move_straight (dest, src, INTVAL (length));
3634 return true;
3635 }
3636 else if (optimize)
3637 {
3638 mips_block_move_loop (dest, src, INTVAL (length));
3639 return true;
3640 }
3641 }
3642 return false;
3643 }
3644
3645 /* Argument support functions. */
3646
3647 /* Initialize CUMULATIVE_ARGS for a function. */
3648
3649 void
3650 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3651 rtx libname ATTRIBUTE_UNUSED)
3652 {
3653 static CUMULATIVE_ARGS zero_cum;
3654 tree param, next_param;
3655
3656 *cum = zero_cum;
3657 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3658
3659 /* Determine if this function has variable arguments. This is
3660 indicated by the last argument being 'void_type_node' if there
3661 are no variable arguments. The standard MIPS calling sequence
3662 passes all arguments in the general purpose registers in this case. */
3663
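  /* Illustration (assumed example, not from the original source): for a
     prototype such as "int f (int, ...)", the TYPE_ARG_TYPES list ends with
     the node for "int" rather than void_type_node, so the loop below sets
     gp_reg_found and all of F's arguments are passed in GPRs.  */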
3664 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3665 param != 0; param = next_param)
3666 {
3667 next_param = TREE_CHAIN (param);
3668 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3669 cum->gp_reg_found = 1;
3670 }
3671 }
3672
3673
3674 /* Fill INFO with information about a single argument. CUM is the
3675 cumulative state for earlier arguments. MODE is the mode of this
3676 argument and TYPE is its type (if known). NAMED is true if this
3677 is a named (fixed) argument rather than a variable one. */
3678
3679 static void
3680 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3681 tree type, int named, struct mips_arg_info *info)
3682 {
3683 bool doubleword_aligned_p;
3684 unsigned int num_bytes, num_words, max_regs;
3685
3686 /* Work out the size of the argument. */
3687 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3688 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3689
3690 /* Decide whether it should go in a floating-point register, assuming
3691 one is free. Later code checks for availability.
3692
3693 The checks against UNITS_PER_FPVALUE handle the soft-float and
3694 single-float cases. */
3695 switch (mips_abi)
3696 {
3697 case ABI_EABI:
3698 /* The EABI conventions have traditionally been defined in terms
3699 of TYPE_MODE, regardless of the actual type. */
3700 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3701 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3702 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3703 break;
3704
3705 case ABI_32:
3706 case ABI_O64:
3707 /* Only leading floating-point scalars are passed in
3708 floating-point registers. We also handle vector floats the same
3709 way, which is OK because they are not covered by the standard ABI. */
3710 info->fpr_p = (!cum->gp_reg_found
3711 && cum->arg_number < 2
3712 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3713 || VECTOR_FLOAT_TYPE_P (type))
3714 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3715 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3716 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3717 break;
3718
3719 case ABI_N32:
3720 case ABI_64:
3721 /* Scalar and complex floating-point types are passed in
3722 floating-point registers. */
3723 info->fpr_p = (named
3724 && (type == 0 || FLOAT_TYPE_P (type))
3725 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3726 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3727 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3728 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3729
3730 /* ??? According to the ABI documentation, the real and imaginary
3731 parts of complex floats should be passed in individual registers.
3732 The real and imaginary parts of stack arguments are supposed
3733 to be contiguous and there should be an extra word of padding
3734 at the end.
3735
3736 This has two problems. First, it makes it impossible to use a
3737 single "void *" va_list type, since register and stack arguments
3738 are passed differently. (At the time of writing, MIPSpro cannot
3739 handle complex float varargs correctly.) Second, it's unclear
3740 what should happen when there is only one register free.
3741
3742 For now, we assume that named complex floats should go into FPRs
3743 if there are two FPRs free, otherwise they should be passed in the
3744 same way as a struct containing two floats. */
3745 if (info->fpr_p
3746 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3747 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3748 {
3749 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3750 info->fpr_p = false;
3751 else
3752 num_words = 2;
3753 }
3754 break;
3755
3756 default:
3757 gcc_unreachable ();
3758 }
3759
3760 /* See whether the argument has doubleword alignment. */
3761 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3762
3763 /* Set REG_OFFSET to the register count we're interested in.
3764 The EABI allocates the floating-point registers separately,
3765 but the other ABIs allocate them like integer registers. */
3766 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3767 ? cum->num_fprs
3768 : cum->num_gprs);
3769
3770 /* Advance to an even register if the argument is doubleword-aligned. */
3771 if (doubleword_aligned_p)
3772 info->reg_offset += info->reg_offset & 1;
3773
3774 /* Work out the offset of a stack argument. */
3775 info->stack_offset = cum->stack_words;
3776 if (doubleword_aligned_p)
3777 info->stack_offset += info->stack_offset & 1;
3778
3779 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3780
3781 /* Partition the argument between registers and stack. */
3782 info->reg_words = MIN (num_words, max_regs);
3783 info->stack_words = num_words - info->reg_words;
3784 }
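/* Worked example (illustrative, assuming the o32 hard-float ABI): for
   "void f (int a, double b)", A is not a float, so gp_reg_found is set and
   B does not qualify for an FPR; B is doubleword-aligned, so reg_offset is
   bumped from 1 to 2 and B occupies $6/$7.  */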
3785
3786
3787 /* Implement FUNCTION_ARG_ADVANCE. */
3788
3789 void
3790 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3791 tree type, int named)
3792 {
3793 struct mips_arg_info info;
3794
3795 mips_arg_info (cum, mode, type, named, &info);
3796
3797 if (!info.fpr_p)
3798 cum->gp_reg_found = true;
3799
3800 /* See the comment above the cumulative args structure in mips.h
3801 for an explanation of what this code does. It assumes the O32
3802 ABI, which passes at most 2 arguments in float registers. */
3803 if (cum->arg_number < 2 && info.fpr_p)
3804 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
3805
3806 if (mips_abi != ABI_EABI || !info.fpr_p)
3807 cum->num_gprs = info.reg_offset + info.reg_words;
3808 else if (info.reg_words > 0)
3809 cum->num_fprs += FP_INC;
3810
3811 if (info.stack_words > 0)
3812 cum->stack_words = info.stack_offset + info.stack_words;
3813
3814 cum->arg_number++;
3815 }
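/* Illustration (assumed example): for "f (float, double)" under o32 hard
   float, the first argument contributes 1 << 0 and the second 2 << 2, so
   cum->fp_code ends up as 0x9.  This is the value encoded as the mode of
   the dummy REG that function_arg returns for mips16 call stubs.  */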
3816
3817 /* Implement FUNCTION_ARG. */
3818
3819 struct rtx_def *
3820 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3821 tree type, int named)
3822 {
3823 struct mips_arg_info info;
3824
3825 /* We will be called with a mode of VOIDmode after the last argument
3826 has been seen. Whatever we return will be passed to the call
3827 insn. If we need a mips16 fp_code, return a REG with the code
3828 stored as the mode. */
3829 if (mode == VOIDmode)
3830 {
3831 if (TARGET_MIPS16 && cum->fp_code != 0)
3832 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3833
3834 else
3835 return 0;
3836 }
3837
3838 mips_arg_info (cum, mode, type, named, &info);
3839
3840 /* Return straight away if the whole argument is passed on the stack. */
3841 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3842 return 0;
3843
3844 if (type != 0
3845 && TREE_CODE (type) == RECORD_TYPE
3846 && TARGET_NEWABI
3847 && TYPE_SIZE_UNIT (type)
3848 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3849 && named)
3850 {
3851 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3852 structure contains a double in its entirety, then that 64 bit
3853 chunk is passed in a floating point register. */
3854 tree field;
3855
3856 /* First check to see if there is any such field. */
3857 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3858 if (TREE_CODE (field) == FIELD_DECL
3859 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3860 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3861 && host_integerp (bit_position (field), 0)
3862 && int_bit_position (field) % BITS_PER_WORD == 0)
3863 break;
3864
3865 if (field != 0)
3866 {
3867 /* Now handle the special case by returning a PARALLEL
3868 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3869 chunks are passed in registers. */
3870 unsigned int i;
3871 HOST_WIDE_INT bitpos;
3872 rtx ret;
3873
3874 /* assign_parms checks the mode of ENTRY_PARM, so we must
3875 use the actual mode here. */
3876 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3877
3878 bitpos = 0;
3879 field = TYPE_FIELDS (type);
3880 for (i = 0; i < info.reg_words; i++)
3881 {
3882 rtx reg;
3883
3884 for (; field; field = TREE_CHAIN (field))
3885 if (TREE_CODE (field) == FIELD_DECL
3886 && int_bit_position (field) >= bitpos)
3887 break;
3888
3889 if (field
3890 && int_bit_position (field) == bitpos
3891 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3892 && !TARGET_SOFT_FLOAT
3893 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3894 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3895 else
3896 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3897
3898 XVECEXP (ret, 0, i)
3899 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3900 GEN_INT (bitpos / BITS_PER_UNIT));
3901
3902 bitpos += BITS_PER_WORD;
3903 }
3904 return ret;
3905 }
3906 }
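  /* Illustration (assumed example): under the n64 ABI with hard float, a
     named argument of type "struct { double d; long i; }" is returned from
     this function as a PARALLEL whose first 64-bit chunk is a DFmode FPR
     and whose second chunk is a DImode GPR.  */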
3907
3908 /* Handle the n32/n64 conventions for passing complex floating-point
3909 arguments in FPR pairs. The real part goes in the lower register
3910 and the imaginary part goes in the upper register. */
3911 if (TARGET_NEWABI
3912 && info.fpr_p
3913 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3914 {
3915 rtx real, imag;
3916 enum machine_mode inner;
3917 int reg;
3918
3919 inner = GET_MODE_INNER (mode);
3920 reg = FP_ARG_FIRST + info.reg_offset;
3921 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
3922 {
3923 /* Real part in registers, imaginary part on stack. */
3924 gcc_assert (info.stack_words == info.reg_words);
3925 return gen_rtx_REG (inner, reg);
3926 }
3927 else
3928 {
3929 gcc_assert (info.stack_words == 0);
3930 real = gen_rtx_EXPR_LIST (VOIDmode,
3931 gen_rtx_REG (inner, reg),
3932 const0_rtx);
3933 imag = gen_rtx_EXPR_LIST (VOIDmode,
3934 gen_rtx_REG (inner,
3935 reg + info.reg_words / 2),
3936 GEN_INT (GET_MODE_SIZE (inner)));
3937 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3938 }
3939 }
3940
3941 if (!info.fpr_p)
3942 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3943 else if (info.reg_offset == 1)
3944 /* This code handles the special o32 case in which the second word
3945 of the argument structure is passed in floating-point registers. */
3946 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3947 else
3948 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3949 }
3950
3951
3952 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3953
3954 static int
3955 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3956 enum machine_mode mode, tree type, bool named)
3957 {
3958 struct mips_arg_info info;
3959
3960 mips_arg_info (cum, mode, type, named, &info);
3961 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3962 }
3963
3964
3965 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3966 PARM_BOUNDARY bits of alignment, but will be given anything up
3967 to STACK_BOUNDARY bits if the type requires it. */
3968
3969 int
3970 function_arg_boundary (enum machine_mode mode, tree type)
3971 {
3972 unsigned int alignment;
3973
3974 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3975 if (alignment < PARM_BOUNDARY)
3976 alignment = PARM_BOUNDARY;
3977 if (alignment > STACK_BOUNDARY)
3978 alignment = STACK_BOUNDARY;
3979 return alignment;
3980 }
3981
3982 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3983 upward rather than downward. In other words, return true if the
3984 first byte of the stack slot has useful data, false if the last
3985 byte does. */
3986
3987 bool
3988 mips_pad_arg_upward (enum machine_mode mode, tree type)
3989 {
3990 /* On little-endian targets, the first byte of every stack argument
3991 is passed in the first byte of the stack slot. */
3992 if (!BYTES_BIG_ENDIAN)
3993 return true;
3994
3995 /* Otherwise, integral types are padded downward: the last byte of a
3996 stack argument is passed in the last byte of the stack slot. */
3997 if (type != 0
3998 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3999 : GET_MODE_CLASS (mode) == MODE_INT)
4000 return false;
4001
4002 /* Big-endian o64 pads floating-point arguments downward. */
4003 if (mips_abi == ABI_O64)
4004 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4005 return false;
4006
4007 /* Other types are padded upward for o32, o64, n32 and n64. */
4008 if (mips_abi != ABI_EABI)
4009 return true;
4010
4011 /* Arguments smaller than a stack slot are padded downward. */
4012 if (mode != BLKmode)
4013 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4014 else
4015 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4016 }
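/* Illustration (assumed example): on a big-endian o32 target, a "char"
   argument that ends up on the stack is padded downward, i.e. it occupies
   the last byte of its word-sized slot, whereas a small structure is padded
   upward and starts at the first byte of the slot.  */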
4017
4018
4019 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4020 if the least significant byte of the register has useful data. Return
4021 the opposite if the most significant byte does. */
4022
4023 bool
4024 mips_pad_reg_upward (enum machine_mode mode, tree type)
4025 {
4026 /* No shifting is required for floating-point arguments. */
4027 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4028 return !BYTES_BIG_ENDIAN;
4029
4030 /* Otherwise, apply the same padding to register arguments as we do
4031 to stack arguments. */
4032 return mips_pad_arg_upward (mode, type);
4033 }
4034
4035 static void
4036 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4037 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4038 int no_rtl)
4039 {
4040 CUMULATIVE_ARGS local_cum;
4041 int gp_saved, fp_saved;
4042
4043 /* The caller has advanced CUM up to, but not beyond, the last named
4044 argument. Advance a local copy of CUM past the last "real" named
4045 argument, to find out how many registers are left over. */
4046
4047 local_cum = *cum;
4048 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4049
4050 /* Find out how many registers we need to save. */
4051 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4052 fp_saved = (EABI_FLOAT_VARARGS_P
4053 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4054 : 0);
4055
4056 if (!no_rtl)
4057 {
4058 if (gp_saved > 0)
4059 {
4060 rtx ptr, mem;
4061
4062 ptr = plus_constant (virtual_incoming_args_rtx,
4063 REG_PARM_STACK_SPACE (cfun->decl)
4064 - gp_saved * UNITS_PER_WORD);
4065 mem = gen_rtx_MEM (BLKmode, ptr);
4066 set_mem_alias_set (mem, get_varargs_alias_set ());
4067
4068 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4069 mem, gp_saved);
4070 }
4071 if (fp_saved > 0)
4072 {
4073 /* We can't use move_block_from_reg, because it will use
4074 the wrong mode. */
4075 enum machine_mode mode;
4076 int off, i;
4077
4078 /* Set OFF to the offset from virtual_incoming_args_rtx of
4079 the first float register. The FP save area lies below
4080 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4081 off = -gp_saved * UNITS_PER_WORD;
4082 off &= ~(UNITS_PER_FPVALUE - 1);
4083 off -= fp_saved * UNITS_PER_FPREG;
4084
4085 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4086
4087 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4088 {
4089 rtx ptr, mem;
4090
4091 ptr = plus_constant (virtual_incoming_args_rtx, off);
4092 mem = gen_rtx_MEM (mode, ptr);
4093 set_mem_alias_set (mem, get_varargs_alias_set ());
4094 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4095 off += UNITS_PER_HWFPVALUE;
4096 }
4097 }
4098 }
4099 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4100 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4101 + fp_saved * UNITS_PER_FPREG);
4102 }
4103
4104 /* Create the va_list data type.
4105 We keep 3 pointers, and two offsets.
4106 Two pointers are to the overflow area, which starts at the CFA.
4107 One of these is constant, for addressing into the GPR save area below it.
4108 The other is advanced up the stack through the overflow region.
4109 The third pointer is to the GPR save area. Since the FPR save area
4110 is just below it, we can address FPR slots off this pointer.
4111 We also keep two one-byte offsets, which are to be subtracted from the
4112 constant pointers to yield addresses in the GPR and FPR save areas.
4113 These are downcounted as float or non-float arguments are used,
4114 and when they get to zero, the argument must be obtained from the
4115 overflow region.
4116 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4117 pointer is enough. It's started at the GPR save area, and is
4118 advanced, period.
4119 Note that the GPR save area is not constant size, due to optimization
4120 in the prologue. Hence, we can't use a design with two pointers
4121 and two offsets, although we could have designed this with two pointers
4122 and three offsets. */
4123
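/* For illustration only, the record built below corresponds roughly to:

     struct __mips_eabi_va_list {
       void *__overflow_argptr;
       void *__gpr_top;
       void *__fpr_top;
       unsigned char __gpr_offset;
       unsigned char __fpr_offset;
       unsigned char __reserved[sizeof (void *) - 2];
     };

   (The tag name is made up; the field names match those created below.)  */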
4124 static tree
4125 mips_build_builtin_va_list (void)
4126 {
4127 if (EABI_FLOAT_VARARGS_P)
4128 {
4129 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4130 tree array, index;
4131
4132 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4133
4134 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4135 ptr_type_node);
4136 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4137 ptr_type_node);
4138 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4139 ptr_type_node);
4140 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4141 unsigned_char_type_node);
4142 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4143 unsigned_char_type_node);
4144 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4145 warn on every user file. */
4146 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4147 array = build_array_type (unsigned_char_type_node,
4148 build_index_type (index));
4149 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4150
4151 DECL_FIELD_CONTEXT (f_ovfl) = record;
4152 DECL_FIELD_CONTEXT (f_gtop) = record;
4153 DECL_FIELD_CONTEXT (f_ftop) = record;
4154 DECL_FIELD_CONTEXT (f_goff) = record;
4155 DECL_FIELD_CONTEXT (f_foff) = record;
4156 DECL_FIELD_CONTEXT (f_res) = record;
4157
4158 TYPE_FIELDS (record) = f_ovfl;
4159 TREE_CHAIN (f_ovfl) = f_gtop;
4160 TREE_CHAIN (f_gtop) = f_ftop;
4161 TREE_CHAIN (f_ftop) = f_goff;
4162 TREE_CHAIN (f_goff) = f_foff;
4163 TREE_CHAIN (f_foff) = f_res;
4164
4165 layout_type (record);
4166 return record;
4167 }
4168 else if (TARGET_IRIX && TARGET_IRIX6)
4169 /* On IRIX 6, this type is 'char *'. */
4170 return build_pointer_type (char_type_node);
4171 else
4172 /* Otherwise, we use 'void *'. */
4173 return ptr_type_node;
4174 }
4175
4176 /* Implement va_start. */
4177
4178 void
4179 mips_va_start (tree valist, rtx nextarg)
4180 {
4181 if (EABI_FLOAT_VARARGS_P)
4182 {
4183 const CUMULATIVE_ARGS *cum;
4184 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4185 tree ovfl, gtop, ftop, goff, foff;
4186 tree t;
4187 int gpr_save_area_size;
4188 int fpr_save_area_size;
4189 int fpr_offset;
4190
4191 cum = &current_function_args_info;
4192 gpr_save_area_size
4193 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4194 fpr_save_area_size
4195 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4196
4197 f_ovfl = TYPE_FIELDS (va_list_type_node);
4198 f_gtop = TREE_CHAIN (f_ovfl);
4199 f_ftop = TREE_CHAIN (f_gtop);
4200 f_goff = TREE_CHAIN (f_ftop);
4201 f_foff = TREE_CHAIN (f_goff);
4202
4203 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4204 NULL_TREE);
4205 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4206 NULL_TREE);
4207 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4208 NULL_TREE);
4209 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4210 NULL_TREE);
4211 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4212 NULL_TREE);
4213
4214 /* Emit code to initialize OVFL, which points to the next varargs
4215 stack argument. CUM->STACK_WORDS gives the number of stack
4216 words used by named arguments. */
4217 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4218 if (cum->stack_words > 0)
4219 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4220 build_int_cst (NULL_TREE,
4221 cum->stack_words * UNITS_PER_WORD));
4222 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4223 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4224
4225 /* Emit code to initialize GTOP, the top of the GPR save area. */
4226 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4227 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4228 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4229
4230 /* Emit code to initialize FTOP, the top of the FPR save area.
4231 This address is gpr_save_area_bytes below GTOP, rounded
4232 down to the next fp-aligned boundary. */
4233 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4234 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4235 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4236 if (fpr_offset)
4237 t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4238 build_int_cst (NULL_TREE, -fpr_offset));
4239 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4240 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4241
4242 /* Emit code to initialize GOFF, the offset from GTOP of the
4243 next GPR argument. */
4244 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4245 build_int_cst (NULL_TREE, gpr_save_area_size));
4246 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4247
4248 /* Likewise emit code to initialize FOFF, the offset from FTOP
4249 of the next FPR argument. */
4250 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4251 build_int_cst (NULL_TREE, fpr_save_area_size));
4252 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4253 }
4254 else
4255 {
4256 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4257 std_expand_builtin_va_start (valist, nextarg);
4258 }
4259 }
4260
4261 /* Implement va_arg. */
4262
4263 static tree
4264 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4265 {
4266 HOST_WIDE_INT size, rsize;
4267 tree addr;
4268 bool indirect;
4269
4270 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4271
4272 if (indirect)
4273 type = build_pointer_type (type);
4274
4275 size = int_size_in_bytes (type);
4276 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4277
4278 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4279 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4280 else
4281 {
4282 /* Not a simple merged stack. */
4283
4284 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4285 tree ovfl, top, off, align;
4286 HOST_WIDE_INT osize;
4287 tree t, u;
4288
4289 f_ovfl = TYPE_FIELDS (va_list_type_node);
4290 f_gtop = TREE_CHAIN (f_ovfl);
4291 f_ftop = TREE_CHAIN (f_gtop);
4292 f_goff = TREE_CHAIN (f_ftop);
4293 f_foff = TREE_CHAIN (f_goff);
4294
4295 /* We maintain separate pointers and offsets for floating-point
4296 and integer arguments, but we need similar code in both cases.
4297 Let:
4298
4299 TOP be the top of the register save area;
4300 OFF be the offset from TOP of the next register;
4301 ADDR_RTX be the address of the argument;
4302 RSIZE be the number of bytes used to store the argument
4303 when it's in the register save area;
4304 OSIZE be the number of bytes used to store it when it's
4305 in the stack overflow area; and
4306 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4307
4308 The code we want is:
4309
4310 1: off &= -rsize; // round down
4311 2: if (off != 0)
4312 3: {
4313 4: addr_rtx = top - off;
4314 5: off -= rsize;
4315 6: }
4316 7: else
4317 8: {
4318 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
4319 10: addr_rtx = ovfl + PADDING;
4320 11: ovfl += osize;
4321 14: }
4322
4323 [1] and [9] can sometimes be optimized away. */
4324
4325 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4326 NULL_TREE);
4327
4328 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4329 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4330 {
4331 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4332 NULL_TREE);
4333 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4334 NULL_TREE);
4335
4336 /* When floating-point registers are saved to the stack,
4337 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4338 of the float's precision. */
4339 rsize = UNITS_PER_HWFPVALUE;
4340
4341 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4342 (= PARM_BOUNDARY bits). This can be different from RSIZE
4343 in two cases:
4344
4345 (1) On 32-bit targets when TYPE is a structure such as:
4346
4347 struct s { float f; };
4348
4349 Such structures are passed in paired FPRs, so RSIZE
4350 will be 8 bytes. However, the structure only takes
4351 up 4 bytes of memory, so OSIZE will only be 4.
4352
4353 (2) In combinations such as -mgp64 -msingle-float
4354 -fshort-double. Doubles passed in registers
4355 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4356 but those passed on the stack take up
4357 UNITS_PER_WORD bytes. */
4358 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4359 }
4360 else
4361 {
4362 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4363 NULL_TREE);
4364 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4365 NULL_TREE);
4366 if (rsize > UNITS_PER_WORD)
4367 {
4368 /* [1] Emit code for: off &= -rsize. */
4369 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4370 build_int_cst (NULL_TREE, -rsize));
4371 t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
4372 gimplify_and_add (t, pre_p);
4373 }
4374 osize = rsize;
4375 }
4376
4377 /* [2] Emit code to branch if off == 0. */
4378 t = build2 (NE_EXPR, boolean_type_node, off,
4379 build_int_cst (TREE_TYPE (off), 0));
4380 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4381
4382 /* [5] Emit code for: off -= rsize. We do this as a form of
4383 post-decrement not available to C. Also widen for the
4384 coming pointer arithmetic. */
4385 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4386 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4387 t = fold_convert (sizetype, t);
4388 t = fold_convert (TREE_TYPE (top), t);
4389
4390 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4391 the argument has RSIZE - SIZE bytes of leading padding. */
4392 t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4393 if (BYTES_BIG_ENDIAN && rsize > size)
4394 {
4395 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4396 rsize - size));
4397 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4398 }
4399 COND_EXPR_THEN (addr) = t;
4400
4401 if (osize > UNITS_PER_WORD)
4402 {
4403 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
4404 u = fold_convert (TREE_TYPE (ovfl),
4405 build_int_cst (NULL_TREE, osize - 1));
4406 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4407 u = fold_convert (TREE_TYPE (ovfl),
4408 build_int_cst (NULL_TREE, -osize));
4409 t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4410 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4411 }
4412 else
4413 align = NULL;
4414
4415 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4416 post-increment ovfl by osize. On big-endian machines,
4417 the argument has OSIZE - SIZE bytes of leading padding. */
4418 u = fold_convert (TREE_TYPE (ovfl),
4419 build_int_cst (NULL_TREE, osize));
4420 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4421 if (BYTES_BIG_ENDIAN && osize > size)
4422 {
4423 u = fold_convert (TREE_TYPE (t),
4424 build_int_cst (NULL_TREE, osize - size));
4425 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4426 }
4427
4428 /* String [9] and [10,11] together. */
4429 if (align)
4430 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4431 COND_EXPR_ELSE (addr) = t;
4432
4433 addr = fold_convert (build_pointer_type (type), addr);
4434 addr = build_va_arg_indirect_ref (addr);
4435 }
4436
4437 if (indirect)
4438 addr = build_va_arg_indirect_ref (addr);
4439
4440 return addr;
4441 }
4442
4443 /* Return true if it is possible to use left/right accesses for a
4444 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4445 returning true, update *OP, *LEFT and *RIGHT as follows:
4446
4447 *OP is a BLKmode reference to the whole field.
4448
4449 *LEFT is a QImode reference to the first byte if big endian or
4450 the last byte if little endian. This address can be used in the
4451 left-side instructions (lwl, swl, ldl, sdl).
4452
4453 *RIGHT is a QImode reference to the opposite end of the field and
4454 can be used in the partnering right-side instruction. */
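/* Illustrative example (assumed): for a 32-bit field starting at byte 0 of
   a misaligned *OP on a big-endian target, *LEFT refers to byte 0 (for
   lwl/swl/ldl/sdl) and *RIGHT to byte 3 (for lwr/swr/ldr/sdr); the two are
   swapped on little-endian targets.  */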
4455
4456 static bool
4457 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4458 rtx *left, rtx *right)
4459 {
4460 rtx first, last;
4461
4462 /* Check that the operand really is a MEM. Not all the extv and
4463 extzv predicates are checked. */
4464 if (!MEM_P (*op))
4465 return false;
4466
4467 /* Check that the size is valid. */
4468 if (width != 32 && (!TARGET_64BIT || width != 64))
4469 return false;
4470
4471 /* We can only access byte-aligned values. Since we are always passed
4472 a reference to the first byte of the field, it is not necessary to
4473 do anything with BITPOS after this check. */
4474 if (bitpos % BITS_PER_UNIT != 0)
4475 return false;
4476
4477 /* Reject aligned bitfields: we want to use a normal load or store
4478 instead of a left/right pair. */
4479 if (MEM_ALIGN (*op) >= width)
4480 return false;
4481
4482 /* Adjust *OP to refer to the whole field. This also has the effect
4483 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4484 *op = adjust_address (*op, BLKmode, 0);
4485 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4486
4487 /* Get references to both ends of the field. We deliberately don't
4488 use the original QImode *OP for FIRST since the new BLKmode one
4489 might have a simpler address. */
4490 first = adjust_address (*op, QImode, 0);
4491 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4492
4493 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4494 be the upper word and RIGHT the lower word. */
4495 if (TARGET_BIG_ENDIAN)
4496 *left = first, *right = last;
4497 else
4498 *left = last, *right = first;
4499
4500 return true;
4501 }
4502
4503
4504 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4505 Return true on success. We only handle cases where zero_extract is
4506 equivalent to sign_extract. */
4507
4508 bool
4509 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4510 {
4511 rtx left, right, temp;
4512
4513 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4514 paradoxical word_mode subreg. This is the only case in which
4515 we allow the destination to be larger than the source. */
4516 if (GET_CODE (dest) == SUBREG
4517 && GET_MODE (dest) == DImode
4518 && SUBREG_BYTE (dest) == 0
4519 && GET_MODE (SUBREG_REG (dest)) == SImode)
4520 dest = SUBREG_REG (dest);
4521
4522 /* After the above adjustment, the destination must be the same
4523 width as the source. */
4524 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4525 return false;
4526
4527 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4528 return false;
4529
4530 temp = gen_reg_rtx (GET_MODE (dest));
4531 if (GET_MODE (dest) == DImode)
4532 {
4533 emit_insn (gen_mov_ldl (temp, src, left));
4534 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4535 }
4536 else
4537 {
4538 emit_insn (gen_mov_lwl (temp, src, left));
4539 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4540 }
4541 return true;
4542 }
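/* Illustration (assumed): for a misaligned SImode load on a big-endian
   target, the expansion above produces roughly

	lwl	$dest, 0($addr)
	lwr	$dest, 3($addr)

   with the analogous ldl/ldr pair for DImode.  */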
4543
4544
4545 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4546 true on success. */
4547
4548 bool
4549 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4550 {
4551 rtx left, right;
4552 enum machine_mode mode;
4553
4554 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4555 return false;
4556
4557 mode = mode_for_size (width, MODE_INT, 0);
4558 src = gen_lowpart (mode, src);
4559
4560 if (mode == DImode)
4561 {
4562 emit_insn (gen_mov_sdl (dest, src, left));
4563 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4564 }
4565 else
4566 {
4567 emit_insn (gen_mov_swl (dest, src, left));
4568 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4569 }
4570 return true;
4571 }
4572
4573 /* Return true if X is a MEM with the same size as MODE. */
4574
4575 bool
4576 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4577 {
4578 rtx size;
4579
4580 if (!MEM_P (x))
4581 return false;
4582
4583 size = MEM_SIZE (x);
4584 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4585 }
4586
4587 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4588 source of an "ext" instruction or the destination of an "ins"
4589 instruction. OP must be a register operand and the following
4590 conditions must hold:
4591
4592 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4593 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4594 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4595
4596 Also reject lengths equal to a word as they are better handled
4597 by the move patterns. */
4598
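/* Illustration (assumed): on a MIPS32R2 or MIPS64R2 target,
   (zero_extract $4 8 3) as a source operand can match "ext $dst,$4,3,8",
   and the same expression as a destination can match "ins $4,$src,3,8".  */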
4599 bool
4600 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4601 {
4602 HOST_WIDE_INT len, pos;
4603
4604 if (!ISA_HAS_EXT_INS
4605 || !register_operand (op, VOIDmode)
4606 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4607 return false;
4608
4609 len = INTVAL (size);
4610 pos = INTVAL (position);
4611
4612 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4613 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4614 return false;
4615
4616 return true;
4617 }
4618
4619 /* Set up globals to generate code for the ISA or processor
4620 described by INFO. */
4621
4622 static void
4623 mips_set_architecture (const struct mips_cpu_info *info)
4624 {
4625 if (info != 0)
4626 {
4627 mips_arch_info = info;
4628 mips_arch = info->cpu;
4629 mips_isa = info->isa;
4630 }
4631 }
4632
4633
4634 /* Likewise for tuning. */
4635
4636 static void
4637 mips_set_tune (const struct mips_cpu_info *info)
4638 {
4639 if (info != 0)
4640 {
4641 mips_tune_info = info;
4642 mips_tune = info->cpu;
4643 }
4644 }
4645
4646 /* Implement TARGET_HANDLE_OPTION. */
4647
4648 static bool
4649 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4650 {
4651 switch (code)
4652 {
4653 case OPT_mabi_:
4654 if (strcmp (arg, "32") == 0)
4655 mips_abi = ABI_32;
4656 else if (strcmp (arg, "o64") == 0)
4657 mips_abi = ABI_O64;
4658 else if (strcmp (arg, "n32") == 0)
4659 mips_abi = ABI_N32;
4660 else if (strcmp (arg, "64") == 0)
4661 mips_abi = ABI_64;
4662 else if (strcmp (arg, "eabi") == 0)
4663 mips_abi = ABI_EABI;
4664 else
4665 return false;
4666 return true;
4667
4668 case OPT_march_:
4669 case OPT_mtune_:
4670 return mips_parse_cpu (arg) != 0;
4671
4672 case OPT_mips:
4673 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4674 return mips_isa_info != 0;
4675
4676 case OPT_mno_flush_func:
4677 mips_cache_flush_func = NULL;
4678 return true;
4679
4680 default:
4681 return true;
4682 }
4683 }
4684
4685 /* Set up the threshold for data to go into the small data area, instead
4686 of the normal data area, and detect any conflicts in the switches. */
4687
4688 void
4689 override_options (void)
4690 {
4691 int i, start, regno;
4692 enum machine_mode mode;
4693
4694 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4695
4696 /* The following code determines the architecture and register size.
4697 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4698 The GAS and GCC code should be kept in sync as much as possible. */
4699
4700 if (mips_arch_string != 0)
4701 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4702
4703 if (mips_isa_info != 0)
4704 {
4705 if (mips_arch_info == 0)
4706 mips_set_architecture (mips_isa_info);
4707 else if (mips_arch_info->isa != mips_isa_info->isa)
4708 error ("-%s conflicts with the other architecture options, "
4709 "which specify a %s processor",
4710 mips_isa_info->name,
4711 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4712 }
4713
4714 if (mips_arch_info == 0)
4715 {
4716 #ifdef MIPS_CPU_STRING_DEFAULT
4717 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4718 #else
4719 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4720 #endif
4721 }
4722
4723 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4724 error ("-march=%s is not compatible with the selected ABI",
4725 mips_arch_info->name);
4726
4727 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4728 if (mips_tune_string != 0)
4729 mips_set_tune (mips_parse_cpu (mips_tune_string));
4730
4731 if (mips_tune_info == 0)
4732 mips_set_tune (mips_arch_info);
4733
4734 /* Set cost structure for the processor. */
4735 mips_cost = &mips_rtx_cost_data[mips_tune];
4736
4737 if ((target_flags_explicit & MASK_64BIT) != 0)
4738 {
4739 /* The user specified the size of the integer registers. Make sure
4740 it agrees with the ABI and ISA. */
4741 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4742 error ("-mgp64 used with a 32-bit processor");
4743 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4744 error ("-mgp32 used with a 64-bit ABI");
4745 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4746 error ("-mgp64 used with a 32-bit ABI");
4747 }
4748 else
4749 {
4750 /* Infer the integer register size from the ABI and processor.
4751 Restrict ourselves to 32-bit registers if that's all the
4752 processor has, or if the ABI cannot handle 64-bit registers. */
4753 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4754 target_flags &= ~MASK_64BIT;
4755 else
4756 target_flags |= MASK_64BIT;
4757 }
4758
4759 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4760 {
4761 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4762 only one right answer here. */
4763 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4764 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4765 else if (!TARGET_64BIT && TARGET_FLOAT64)
4766 error ("unsupported combination: %s", "-mgp32 -mfp64");
4767 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4768 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4769 }
4770 else
4771 {
4772 /* -msingle-float selects 32-bit float registers. Otherwise the
4773 float registers should be the same size as the integer ones. */
4774 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4775 target_flags |= MASK_FLOAT64;
4776 else
4777 target_flags &= ~MASK_FLOAT64;
4778 }
4779
4780 /* End of code shared with GAS. */
4781
4782 if ((target_flags_explicit & MASK_LONG64) == 0)
4783 {
4784 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4785 target_flags |= MASK_LONG64;
4786 else
4787 target_flags &= ~MASK_LONG64;
4788 }
4789
4790 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4791 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4792 {
4793 /* For some configurations, it is useful to have -march control
4794 the default setting of MASK_SOFT_FLOAT. */
4795 switch ((int) mips_arch)
4796 {
4797 case PROCESSOR_R4100:
4798 case PROCESSOR_R4111:
4799 case PROCESSOR_R4120:
4800 case PROCESSOR_R4130:
4801 target_flags |= MASK_SOFT_FLOAT;
4802 break;
4803
4804 default:
4805 target_flags &= ~MASK_SOFT_FLOAT;
4806 break;
4807 }
4808 }
4809
4810 if (!TARGET_OLDABI)
4811 flag_pcc_struct_return = 0;
4812
4813 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4814 {
4815 /* If neither -mbranch-likely nor -mno-branch-likely was given
4816 on the command line, set MASK_BRANCHLIKELY based on the target
4817 architecture.
4818
4819 By default, we enable use of Branch Likely instructions on
4820 all architectures which support them with the following
4821 exceptions: when creating MIPS32 or MIPS64 code, and when
4822 tuning for architectures where their use tends to hurt
4823 performance.
4824
4825 The MIPS32 and MIPS64 architecture specifications say "Software
4826 is strongly encouraged to avoid use of Branch Likely
4827 instructions, as they will be removed from a future revision
4828 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4829 issue those instructions unless instructed to do so by
4830 -mbranch-likely. */
4831 if (ISA_HAS_BRANCHLIKELY
4832 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4833 && !(TUNE_MIPS5500 || TUNE_SB1))
4834 target_flags |= MASK_BRANCHLIKELY;
4835 else
4836 target_flags &= ~MASK_BRANCHLIKELY;
4837 }
4838 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4839 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4840
4841 /* The effect of -mabicalls isn't defined for the EABI. */
4842 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4843 {
4844 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4845 target_flags &= ~MASK_ABICALLS;
4846 }
4847
4848 if (TARGET_ABICALLS)
4849 {
4850 /* We need to set flag_pic for executables as well as DSOs
4851 because we may reference symbols that are not defined in
4852 the final executable. (MIPS does not use things like
4853 copy relocs, for example.)
4854
4855 Also, there is a body of code that uses __PIC__ to distinguish
4856 between -mabicalls and -mno-abicalls code. */
4857 flag_pic = 1;
4858 if (mips_section_threshold > 0)
4859 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4860 }
4861
4862 /* mips_split_addresses is a half-way house between explicit
4863 relocations and the traditional assembler macros. It can
4864 split absolute 32-bit symbolic constants into a high/lo_sum
4865 pair but uses macros for other sorts of access.
4866
4867 Like explicit relocation support for REL targets, it relies
4868 on GNU extensions in the assembler and the linker.
4869
4870 Although this code should work for -O0, it has traditionally
4871 been treated as an optimization. */
4872 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4873 && optimize && !flag_pic
4874 && !ABI_HAS_64BIT_SYMBOLS)
4875 mips_split_addresses = 1;
4876 else
4877 mips_split_addresses = 0;
4878
4879 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4880 faster code, but at the expense of more nops. Enable it at -O3 and
4881 above. */
4882 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4883 target_flags |= MASK_VR4130_ALIGN;
4884
4885 /* When compiling for the mips16, we cannot use floating point. We
4886 record the original hard float value in mips16_hard_float. */
4887 if (TARGET_MIPS16)
4888 {
4889 if (TARGET_SOFT_FLOAT)
4890 mips16_hard_float = 0;
4891 else
4892 mips16_hard_float = 1;
4893 target_flags |= MASK_SOFT_FLOAT;
4894
4895 /* Don't run the scheduler before reload, since it tends to
4896 increase register pressure. */
4897 flag_schedule_insns = 0;
4898
4899 /* Don't do hot/cold partitioning. The constant layout code expects
4900 the whole function to be in a single section. */
4901 flag_reorder_blocks_and_partition = 0;
4902
4903 /* Silently disable -mexplicit-relocs since it doesn't apply
4904 to mips16 code. Even so, it would be overly pedantic to warn
4905 about "-mips16 -mexplicit-relocs", especially given that
4906 we use a %gprel() operator. */
4907 target_flags &= ~MASK_EXPLICIT_RELOCS;
4908 }
4909
4910 /* When using explicit relocs, we call dbr_schedule from within
4911 mips_reorg. */
4912 if (TARGET_EXPLICIT_RELOCS)
4913 {
4914 mips_flag_delayed_branch = flag_delayed_branch;
4915 flag_delayed_branch = 0;
4916 }
4917
4918 #ifdef MIPS_TFMODE_FORMAT
4919 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4920 #endif
4921
4922 /* Make sure that the user didn't turn off paired single support when
4923 MIPS-3D support is requested. */
4924 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4925 && !TARGET_PAIRED_SINGLE_FLOAT)
4926 error ("-mips3d requires -mpaired-single");
4927
4928 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4929 if (TARGET_MIPS3D)
4930 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4931
4932 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4933 and TARGET_HARD_FLOAT are both true. */
4934 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4935 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4936
4937 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4938 enabled. */
4939 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4940 error ("-mips3d/-mpaired-single must be used with -mips64");
4941
4942 if (TARGET_MIPS16 && TARGET_DSP)
4943 error ("-mips16 and -mdsp cannot be used together");
4944
4945 mips_print_operand_punct['?'] = 1;
4946 mips_print_operand_punct['#'] = 1;
4947 mips_print_operand_punct['/'] = 1;
4948 mips_print_operand_punct['&'] = 1;
4949 mips_print_operand_punct['!'] = 1;
4950 mips_print_operand_punct['*'] = 1;
4951 mips_print_operand_punct['@'] = 1;
4952 mips_print_operand_punct['.'] = 1;
4953 mips_print_operand_punct['('] = 1;
4954 mips_print_operand_punct[')'] = 1;
4955 mips_print_operand_punct['['] = 1;
4956 mips_print_operand_punct[']'] = 1;
4957 mips_print_operand_punct['<'] = 1;
4958 mips_print_operand_punct['>'] = 1;
4959 mips_print_operand_punct['{'] = 1;
4960 mips_print_operand_punct['}'] = 1;
4961 mips_print_operand_punct['^'] = 1;
4962 mips_print_operand_punct['$'] = 1;
4963 mips_print_operand_punct['+'] = 1;
4964 mips_print_operand_punct['~'] = 1;
4965
4966 /* Set up array to map GCC register number to debug register number.
4967 Ignore the special purpose register numbers. */
4968
4969 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4970 mips_dbx_regno[i] = -1;
4971
4972 start = GP_DBX_FIRST - GP_REG_FIRST;
4973 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4974 mips_dbx_regno[i] = i + start;
4975
4976 start = FP_DBX_FIRST - FP_REG_FIRST;
4977 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4978 mips_dbx_regno[i] = i + start;
4979
4980 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4981 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4982
4983 /* Set up array giving whether a given register can hold a given mode. */
4984
4985 for (mode = VOIDmode;
4986 mode != MAX_MACHINE_MODE;
4987 mode = (enum machine_mode) ((int)mode + 1))
4988 {
4989 register int size = GET_MODE_SIZE (mode);
4990 register enum mode_class class = GET_MODE_CLASS (mode);
4991
4992 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4993 {
4994 register int temp;
4995
4996 if (mode == CCV2mode)
4997 temp = (ISA_HAS_8CC
4998 && ST_REG_P (regno)
4999 && (regno - ST_REG_FIRST) % 2 == 0);
5000
5001 else if (mode == CCV4mode)
5002 temp = (ISA_HAS_8CC
5003 && ST_REG_P (regno)
5004 && (regno - ST_REG_FIRST) % 4 == 0);
5005
5006 else if (mode == CCmode)
5007 {
5008 if (! ISA_HAS_8CC)
5009 temp = (regno == FPSW_REGNUM);
5010 else
5011 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5012 || FP_REG_P (regno));
5013 }
5014
5015 else if (GP_REG_P (regno))
5016 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5017
5018 else if (FP_REG_P (regno))
5019 temp = ((regno % FP_INC) == 0)
5020 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5021 || class == MODE_VECTOR_FLOAT)
5022 && size <= UNITS_PER_FPVALUE)
5023 /* Allow integer modes that fit into a single
5024 register. We need to put integers into FPRs
5025 when using instructions like cvt and trunc.
5026 We can't allow sizes smaller than a word,
5027 the FPU has no appropriate load/store
5028 instructions for those. */
5029 || (class == MODE_INT
5030 && size >= MIN_UNITS_PER_WORD
5031 && size <= UNITS_PER_FPREG)
5032 /* Allow TFmode for CCmode reloads. */
5033 || (ISA_HAS_8CC && mode == TFmode));
5034
5035 else if (ACC_REG_P (regno))
5036 temp = (INTEGRAL_MODE_P (mode)
5037 && (size <= UNITS_PER_WORD
5038 || (ACC_HI_REG_P (regno)
5039 && size == 2 * UNITS_PER_WORD)));
5040
5041 else if (ALL_COP_REG_P (regno))
5042 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5043 else
5044 temp = 0;
5045
5046 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5047 }
5048 }
5049
5050   /* Save GPRs in word_mode-sized chunks.  word_mode itself hasn't been
5051      initialized yet, so we can't use it here.  */
5052 gpr_mode = TARGET_64BIT ? DImode : SImode;
5053
5054 /* Provide default values for align_* for 64-bit targets. */
5055 if (TARGET_64BIT && !TARGET_MIPS16)
5056 {
5057 if (align_loops == 0)
5058 align_loops = 8;
5059 if (align_jumps == 0)
5060 align_jumps = 8;
5061 if (align_functions == 0)
5062 align_functions = 8;
5063 }
5064
5065 /* Function to allocate machine-dependent function status. */
5066 init_machine_status = &mips_init_machine_status;
5067
5068 if (ABI_HAS_64BIT_SYMBOLS)
5069 {
5070 if (TARGET_EXPLICIT_RELOCS)
5071 {
5072 mips_split_p[SYMBOL_64_HIGH] = true;
5073 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5074 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5075
5076 mips_split_p[SYMBOL_64_MID] = true;
5077 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5078 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5079
5080 mips_split_p[SYMBOL_64_LOW] = true;
5081 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5082 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5083
5084 mips_split_p[SYMBOL_GENERAL] = true;
5085 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5086 }
5087 }
5088 else
5089 {
5090 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5091 {
5092 mips_split_p[SYMBOL_GENERAL] = true;
5093 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5094 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5095 }
5096 }
5097
5098 if (TARGET_MIPS16)
5099 {
5100 /* The high part is provided by a pseudo copy of $gp. */
5101 mips_split_p[SYMBOL_SMALL_DATA] = true;
5102 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5103 }
5104
5105 if (TARGET_EXPLICIT_RELOCS)
5106 {
5107 /* Small data constants are kept whole until after reload,
5108 then lowered by mips_rewrite_small_data. */
5109 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5110
5111 mips_split_p[SYMBOL_GOT_LOCAL] = true;
5112 if (TARGET_NEWABI)
5113 {
5114 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5115 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
5116 }
5117 else
5118 {
5119 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5120 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
5121 }
5122
5123 if (TARGET_XGOT)
5124 {
5125 /* The HIGH and LO_SUM are matched by special .md patterns. */
5126 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
5127
5128 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
5129 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
5130 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
5131
5132 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5133 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5134 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5135 }
5136 else
5137 {
5138 if (TARGET_NEWABI)
5139 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
5140 else
5141 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
5142 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5143 }
5144 }
5145
5146 if (TARGET_NEWABI)
5147 {
5148 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5149 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5150 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5151 }
5152
5153 /* Thread-local relocation operators. */
5154 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5155 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5156 mips_split_p[SYMBOL_DTPREL] = 1;
5157 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5158 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5159 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5160 mips_split_p[SYMBOL_TPREL] = 1;
5161 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5162 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5163
5164 /* We don't have a thread pointer access instruction on MIPS16, or
5165 appropriate TLS relocations. */
5166 if (TARGET_MIPS16)
5167 targetm.have_tls = false;
5168
5169 /* Default to working around R4000 errata only if the processor
5170 was selected explicitly. */
5171 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5172 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5173 target_flags |= MASK_FIX_R4000;
5174
5175 /* Default to working around R4400 errata only if the processor
5176 was selected explicitly. */
5177 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5178 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5179 target_flags |= MASK_FIX_R4400;
5180 }
5181
5182 /* Implement CONDITIONAL_REGISTER_USAGE. */
5183
5184 void
5185 mips_conditional_register_usage (void)
5186 {
5187 if (!TARGET_DSP)
5188 {
5189 int regno;
5190
5191 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5192 fixed_regs[regno] = call_used_regs[regno] = 1;
5193 }
5194 if (!TARGET_HARD_FLOAT)
5195 {
5196 int regno;
5197
5198 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5199 fixed_regs[regno] = call_used_regs[regno] = 1;
5200 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5201 fixed_regs[regno] = call_used_regs[regno] = 1;
5202 }
5203 else if (! ISA_HAS_8CC)
5204 {
5205 int regno;
5206
5207 /* We only have a single condition code register. We
5208 implement this by hiding all the condition code registers,
5209 and generating RTL that refers directly to ST_REG_FIRST. */
5210 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5211 fixed_regs[regno] = call_used_regs[regno] = 1;
5212 }
5213 /* In mips16 mode, we permit the $t temporary registers to be used
5214 for reload. We prohibit the unused $s registers, since they
5215      are callee-saved, and saving them via a mips16 register would
5216 probably waste more time than just reloading the value. */
5217 if (TARGET_MIPS16)
5218 {
5219 fixed_regs[18] = call_used_regs[18] = 1;
5220 fixed_regs[19] = call_used_regs[19] = 1;
5221 fixed_regs[20] = call_used_regs[20] = 1;
5222 fixed_regs[21] = call_used_regs[21] = 1;
5223 fixed_regs[22] = call_used_regs[22] = 1;
5224 fixed_regs[23] = call_used_regs[23] = 1;
5225 fixed_regs[26] = call_used_regs[26] = 1;
5226 fixed_regs[27] = call_used_regs[27] = 1;
5227 fixed_regs[30] = call_used_regs[30] = 1;
5228 }
5229 /* fp20-23 are now caller saved. */
5230 if (mips_abi == ABI_64)
5231 {
5232 int regno;
5233 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5234 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5235 }
5236 /* Odd registers from fp21 to fp31 are now caller saved. */
5237 if (mips_abi == ABI_N32)
5238 {
5239 int regno;
5240 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5241 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5242 }
5243 }
5244
5245 /* Allocate a chunk of memory for per-function machine-dependent data. */
5246 static struct machine_function *
5247 mips_init_machine_status (void)
5248 {
5249 return ((struct machine_function *)
5250 ggc_alloc_cleared (sizeof (struct machine_function)));
5251 }
5252
5253 /* On the mips16, we want to allocate $24 (T_REG) before other
5254 registers for instructions for which it is possible. This helps
5255 avoid shuffling registers around in order to set up for an xor,
5256 encouraging the compiler to use a cmp instead. */
5257
5258 void
5259 mips_order_regs_for_local_alloc (void)
5260 {
5261 register int i;
5262
5263 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5264 reg_alloc_order[i] = i;
5265
5266 if (TARGET_MIPS16)
5267 {
5268 /* It really doesn't matter where we put register 0, since it is
5269 a fixed register anyhow. */
5270 reg_alloc_order[0] = 24;
5271 reg_alloc_order[24] = 0;
5272 }
5273 }
5274
5275
5276 /* The MIPS debug format wants all automatic variables and arguments
5277 to be in terms of the virtual frame pointer (stack pointer before
5278 any adjustment in the function), while the MIPS 3.0 linker wants
5279 the frame pointer to be the stack pointer after the initial
5280 adjustment. So, we do the adjustment here. The arg pointer (which
5281 is eliminated) points to the virtual frame pointer, while the frame
5282 pointer (which may be eliminated) points to the stack pointer after
5283 the initial adjustments. */
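/* For example (illustrative numbers only): if a variable lives at
   8($sp) in a function whose total frame size works out to 32 bytes,
   the code below returns 8 - 32 = -24, i.e. the variable's offset
   from the virtual frame pointer.  */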
5284
5285 HOST_WIDE_INT
5286 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5287 {
5288 rtx offset2 = const0_rtx;
5289 rtx reg = eliminate_constant_term (addr, &offset2);
5290
5291 if (offset == 0)
5292 offset = INTVAL (offset2);
5293
5294 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5295 || reg == hard_frame_pointer_rtx)
5296 {
5297 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5298 ? compute_frame_size (get_frame_size ())
5299 : cfun->machine->frame.total_size;
5300
5301       /* The MIPS16 frame is smaller.  */
5302 if (frame_pointer_needed && TARGET_MIPS16)
5303 frame_size -= cfun->machine->frame.args_size;
5304
5305 offset = offset - frame_size;
5306 }
5307
5308 /* sdbout_parms does not want this to crash for unrecognized cases. */
5309 #if 0
5310 else if (reg != arg_pointer_rtx)
5311 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5312 addr);
5313 #endif
5314
5315 return offset;
5316 }
5317
5318 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5319
5320 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5321 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5322 'h' OP is HIGH, prints %hi(X),
5323 'd' output integer constant in decimal,
5324 'z' if the operand is 0, use $0 instead of normal operand.
5325 'D' print second part of double-word register or memory operand.
5326 'L' print low-order register of double-word register operand.
5327 'M' print high-order register of double-word register operand.
5328 'C' print part of opcode for a branch condition.
5329 'F' print part of opcode for a floating-point branch condition.
5330 'N' print part of opcode for a branch condition, inverted.
5331 'W' print part of opcode for a floating-point branch condition, inverted.
5332 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5333 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5334 't' like 'T', but with the EQ/NE cases reversed
5335 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5336 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5337 'R' print the reloc associated with LO_SUM
5338 'q' print DSP accumulator registers
5339
5340 The punctuation characters are:
5341
5342 '(' Turn on .set noreorder
5343 ')' Turn on .set reorder
5344 '[' Turn on .set noat
5345 ']' Turn on .set at
5346 '<' Turn on .set nomacro
5347 '>' Turn on .set macro
5348 '{' Turn on .set volatile (not GAS)
5349 '}' Turn on .set novolatile (not GAS)
5350 '&' Turn on .set noreorder if filling delay slots
5351 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5352 '!' Turn on .set nomacro if filling delay slots
5353 '#' Print nop if in a .set noreorder section.
5354 '/' Like '#', but does nothing within a delayed branch sequence
5355 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5356 '@' Print the name of the assembler temporary register (at or $1).
5357 '.' Print the name of the register with a hard-wired zero (zero or $0).
5358 '^' Print the name of the pic call-through register (t9 or $25).
5359 '$' Print the name of the stack pointer register (sp or $29).
5360 '+' Print the name of the gp register (usually gp or $28).
5361 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
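/* As an illustration only -- not a template taken from mips.md -- an
   output template such as "%(%<bne\t%1,%.,%l0%#%>%)" would bracket the
   branch in ".set noreorder"/".set nomacro" and the matching ".set
   macro"/".set reorder", print the hard-wired zero register for "%.",
   and emit a nop for the delay slot via "%#" because we are still
   inside the .set noreorder region at that point.  */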
5362
5363 void
5364 print_operand (FILE *file, rtx op, int letter)
5365 {
5366 register enum rtx_code code;
5367
5368 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5369 {
5370 switch (letter)
5371 {
5372 case '?':
5373 if (mips_branch_likely)
5374 putc ('l', file);
5375 break;
5376
5377 case '@':
5378 fputs (reg_names [GP_REG_FIRST + 1], file);
5379 break;
5380
5381 case '^':
5382 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5383 break;
5384
5385 case '.':
5386 fputs (reg_names [GP_REG_FIRST + 0], file);
5387 break;
5388
5389 case '$':
5390 fputs (reg_names[STACK_POINTER_REGNUM], file);
5391 break;
5392
5393 case '+':
5394 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5395 break;
5396
5397 case '&':
5398 if (final_sequence != 0 && set_noreorder++ == 0)
5399 fputs (".set\tnoreorder\n\t", file);
5400 break;
5401
5402 case '*':
5403 if (final_sequence != 0)
5404 {
5405 if (set_noreorder++ == 0)
5406 fputs (".set\tnoreorder\n\t", file);
5407
5408 if (set_nomacro++ == 0)
5409 fputs (".set\tnomacro\n\t", file);
5410 }
5411 break;
5412
5413 case '!':
5414 if (final_sequence != 0 && set_nomacro++ == 0)
5415 fputs ("\n\t.set\tnomacro", file);
5416 break;
5417
5418 case '#':
5419 if (set_noreorder != 0)
5420 fputs ("\n\tnop", file);
5421 break;
5422
5423 case '/':
5424 /* Print an extra newline so that the delayed insn is separated
5425 from the following ones. This looks neater and is consistent
5426 with non-nop delayed sequences. */
5427 if (set_noreorder != 0 && final_sequence == 0)
5428 fputs ("\n\tnop\n", file);
5429 break;
5430
5431 case '(':
5432 if (set_noreorder++ == 0)
5433 fputs (".set\tnoreorder\n\t", file);
5434 break;
5435
5436 case ')':
5437 if (set_noreorder == 0)
5438 error ("internal error: %%) found without a %%( in assembler pattern");
5439
5440 else if (--set_noreorder == 0)
5441 fputs ("\n\t.set\treorder", file);
5442
5443 break;
5444
5445 case '[':
5446 if (set_noat++ == 0)
5447 fputs (".set\tnoat\n\t", file);
5448 break;
5449
5450 case ']':
5451 if (set_noat == 0)
5452 error ("internal error: %%] found without a %%[ in assembler pattern");
5453 else if (--set_noat == 0)
5454 fputs ("\n\t.set\tat", file);
5455
5456 break;
5457
5458 case '<':
5459 if (set_nomacro++ == 0)
5460 fputs (".set\tnomacro\n\t", file);
5461 break;
5462
5463 case '>':
5464 if (set_nomacro == 0)
5465 error ("internal error: %%> found without a %%< in assembler pattern");
5466 else if (--set_nomacro == 0)
5467 fputs ("\n\t.set\tmacro", file);
5468
5469 break;
5470
5471 case '{':
5472 if (set_volatile++ == 0)
5473 fputs ("#.set\tvolatile\n\t", file);
5474 break;
5475
5476 case '}':
5477 if (set_volatile == 0)
5478 error ("internal error: %%} found without a %%{ in assembler pattern");
5479 else if (--set_volatile == 0)
5480 fputs ("\n\t#.set\tnovolatile", file);
5481
5482 break;
5483
5484 case '~':
5485 {
5486 if (align_labels_log > 0)
5487 ASM_OUTPUT_ALIGN (file, align_labels_log);
5488 }
5489 break;
5490
5491 default:
5492 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5493 break;
5494 }
5495
5496 return;
5497 }
5498
5499 if (! op)
5500 {
5501 error ("PRINT_OPERAND null pointer");
5502 return;
5503 }
5504
5505 code = GET_CODE (op);
5506
5507 if (letter == 'C')
5508 switch (code)
5509 {
5510 case EQ: fputs ("eq", file); break;
5511 case NE: fputs ("ne", file); break;
5512 case GT: fputs ("gt", file); break;
5513 case GE: fputs ("ge", file); break;
5514 case LT: fputs ("lt", file); break;
5515 case LE: fputs ("le", file); break;
5516 case GTU: fputs ("gtu", file); break;
5517 case GEU: fputs ("geu", file); break;
5518 case LTU: fputs ("ltu", file); break;
5519 case LEU: fputs ("leu", file); break;
5520 default:
5521 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5522 }
5523
5524 else if (letter == 'N')
5525 switch (code)
5526 {
5527 case EQ: fputs ("ne", file); break;
5528 case NE: fputs ("eq", file); break;
5529 case GT: fputs ("le", file); break;
5530 case GE: fputs ("lt", file); break;
5531 case LT: fputs ("ge", file); break;
5532 case LE: fputs ("gt", file); break;
5533 case GTU: fputs ("leu", file); break;
5534 case GEU: fputs ("ltu", file); break;
5535 case LTU: fputs ("geu", file); break;
5536 case LEU: fputs ("gtu", file); break;
5537 default:
5538 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5539 }
5540
5541 else if (letter == 'F')
5542 switch (code)
5543 {
5544 case EQ: fputs ("c1f", file); break;
5545 case NE: fputs ("c1t", file); break;
5546 default:
5547 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5548 }
5549
5550 else if (letter == 'W')
5551 switch (code)
5552 {
5553 case EQ: fputs ("c1t", file); break;
5554 case NE: fputs ("c1f", file); break;
5555 default:
5556 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5557 }
5558
5559 else if (letter == 'h')
5560 {
5561 if (GET_CODE (op) == HIGH)
5562 op = XEXP (op, 0);
5563
5564 print_operand_reloc (file, op, mips_hi_relocs);
5565 }
5566
5567 else if (letter == 'R')
5568 print_operand_reloc (file, op, mips_lo_relocs);
5569
5570 else if (letter == 'Y')
5571 {
5572 if (GET_CODE (op) == CONST_INT
5573 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5574 < ARRAY_SIZE (mips_fp_conditions)))
5575 fputs (mips_fp_conditions[INTVAL (op)], file);
5576 else
5577 output_operand_lossage ("invalid %%Y value");
5578 }
5579
5580 else if (letter == 'Z')
5581 {
5582 if (ISA_HAS_8CC)
5583 {
5584 print_operand (file, op, 0);
5585 fputc (',', file);
5586 }
5587 }
5588
5589 else if (letter == 'q')
5590 {
5591 int regnum;
5592
5593 if (code != REG)
5594 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5595
5596 regnum = REGNO (op);
5597 if (MD_REG_P (regnum))
5598 fprintf (file, "$ac0");
5599 else if (DSP_ACC_REG_P (regnum))
5600 fprintf (file, "$ac%c", reg_names[regnum][3]);
5601 else
5602 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5603 }
5604
5605 else if (code == REG || code == SUBREG)
5606 {
5607 register int regnum;
5608
5609 if (code == REG)
5610 regnum = REGNO (op);
5611 else
5612 regnum = true_regnum (op);
5613
5614 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5615 || (letter == 'L' && WORDS_BIG_ENDIAN)
5616 || letter == 'D')
5617 regnum++;
5618
5619 fprintf (file, "%s", reg_names[regnum]);
5620 }
5621
5622 else if (code == MEM)
5623 {
5624 if (letter == 'D')
5625 output_address (plus_constant (XEXP (op, 0), 4));
5626 else
5627 output_address (XEXP (op, 0));
5628 }
5629
5630 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5631 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5632
5633 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5634 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5635
5636 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5637 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5638
5639 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5640 fputs (reg_names[GP_REG_FIRST], file);
5641
5642 else if (letter == 'd' || letter == 'x' || letter == 'X')
5643 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5644
5645 else if (letter == 'T' || letter == 't')
5646 {
5647 int truth = (code == NE) == (letter == 'T');
5648 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5649 }
5650
5651 else if (CONST_GP_P (op))
5652 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5653
5654 else
5655 output_addr_const (file, op);
5656 }
5657
5658
5659 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5660 RELOCS is the array of relocations to use. */
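/* For instance (illustrative), with OP = (const (plus (symbol_ref "foo")
   (const_int 4))) and RELOCS = mips_lo_relocs, a SYMBOL_GENERAL operand
   comes out as "%lo(foo+4)": the "%lo(" prefix is the RELOCS entry and a
   closing ")" is appended for each "(" it contains.  */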
5661
5662 static void
5663 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5664 {
5665 enum mips_symbol_type symbol_type;
5666 const char *p;
5667 rtx base;
5668 HOST_WIDE_INT offset;
5669
5670 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5671 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5672
5673 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5674 mips_split_const (op, &base, &offset);
5675 if (UNSPEC_ADDRESS_P (base))
5676 op = plus_constant (UNSPEC_ADDRESS (base), offset);
5677
5678 fputs (relocs[symbol_type], file);
5679 output_addr_const (file, op);
5680 for (p = relocs[symbol_type]; *p != 0; p++)
5681 if (*p == '(')
5682 fputc (')', file);
5683 }
5684
5685 /* Output address operand X to FILE. */
5686
5687 void
5688 print_operand_address (FILE *file, rtx x)
5689 {
5690 struct mips_address_info addr;
5691
5692 if (mips_classify_address (&addr, x, word_mode, true))
5693 switch (addr.type)
5694 {
5695 case ADDRESS_REG:
5696 print_operand (file, addr.offset, 0);
5697 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5698 return;
5699
5700 case ADDRESS_LO_SUM:
5701 print_operand (file, addr.offset, 'R');
5702 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5703 return;
5704
5705 case ADDRESS_CONST_INT:
5706 output_addr_const (file, x);
5707 fprintf (file, "(%s)", reg_names[0]);
5708 return;
5709
5710 case ADDRESS_SYMBOLIC:
5711 output_addr_const (file, x);
5712 return;
5713 }
5714 gcc_unreachable ();
5715 }
5716
5717 /* When using assembler macros, keep track of all of the small-data externs
5718 so that mips_file_end can emit the appropriate declarations for them.
5719
5720 In most cases it would be safe (though pointless) to emit .externs
5721 for other symbols too. One exception is when an object is within
5722 the -G limit but declared by the user to be in a section other
5723 than .sbss or .sdata. */
5724
5725 int
5726 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5727 {
5728 register struct extern_list *p;
5729
5730 default_elf_asm_output_external(file, decl, name);
5731
5732 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5733 {
5734 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5735 p->next = extern_head;
5736 p->name = name;
5737 p->size = int_size_in_bytes (TREE_TYPE (decl));
5738 extern_head = p;
5739 }
5740
5741 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5742 {
5743 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5744 p->next = extern_head;
5745 p->name = name;
5746 p->size = -1;
5747 extern_head = p;
5748 }
5749
5750 return 0;
5751 }
5752
5753 #if TARGET_IRIX
5754 static void
5755 irix_output_external_libcall (rtx fun)
5756 {
5757 register struct extern_list *p;
5758
5759 if (mips_abi == ABI_32)
5760 {
5761 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5762 p->next = extern_head;
5763 p->name = XSTR (fun, 0);
5764 p->size = -1;
5765 extern_head = p;
5766 }
5767 }
5768 #endif
5769
5770 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5771 put out a MIPS ECOFF file and a stab. */
5772
5773 void
5774 mips_output_filename (FILE *stream, const char *name)
5775 {
5776
5777 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5778 directives. */
5779 if (write_symbols == DWARF2_DEBUG)
5780 return;
5781 else if (mips_output_filename_first_time)
5782 {
5783 mips_output_filename_first_time = 0;
5784 num_source_filenames += 1;
5785 current_function_file = name;
5786 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5787 output_quoted_string (stream, name);
5788 putc ('\n', stream);
5789 }
5790
5791 /* If we are emitting stabs, let dbxout.c handle this (except for
5792 the mips_output_filename_first_time case). */
5793 else if (write_symbols == DBX_DEBUG)
5794 return;
5795
5796 else if (name != current_function_file
5797 && strcmp (name, current_function_file) != 0)
5798 {
5799 num_source_filenames += 1;
5800 current_function_file = name;
5801 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5802 output_quoted_string (stream, name);
5803 putc ('\n', stream);
5804 }
5805 }
5806
5807 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5808 that should be written before the opening quote, such as "\t.ascii\t"
5809 for real string data or "\t# " for a comment. */
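/* A small illustrative example: with PREFIX "\t.ascii\t" and the four
   input bytes  a " b <newline> , the loop below emits

	.ascii	"a\"b\012"

   since printable characters are copied (escaping backslash and quote)
   and anything else becomes a three-digit octal escape.  */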
5810
5811 void
5812 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5813 const char *prefix)
5814 {
5815 size_t i;
5816 int cur_pos = 17;
5817 register const unsigned char *string =
5818 (const unsigned char *)string_param;
5819
5820 fprintf (stream, "%s\"", prefix);
5821 for (i = 0; i < len; i++)
5822 {
5823 register int c = string[i];
5824
5825 if (ISPRINT (c))
5826 {
5827 if (c == '\\' || c == '\"')
5828 {
5829 putc ('\\', stream);
5830 cur_pos++;
5831 }
5832 putc (c, stream);
5833 cur_pos++;
5834 }
5835 else
5836 {
5837 fprintf (stream, "\\%03o", c);
5838 cur_pos += 4;
5839 }
5840
5841 if (cur_pos > 72 && i+1 < len)
5842 {
5843 cur_pos = 17;
5844 fprintf (stream, "\"\n%s\"", prefix);
5845 }
5846 }
5847 fprintf (stream, "\"\n");
5848 }
5849
5850 /* Implement TARGET_ASM_FILE_START. */
5851
5852 static void
5853 mips_file_start (void)
5854 {
5855 default_file_start ();
5856
5857 if (!TARGET_IRIX)
5858 {
5859 /* Generate a special section to describe the ABI switches used to
5860 produce the resultant binary. This used to be done by the assembler
5861 setting bits in the ELF header's flags field, but we have run out of
5862 bits. GDB needs this information in order to be able to correctly
5863 debug these binaries. See the function mips_gdbarch_init() in
5864 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5865 causes unnecessary IRIX 6 ld warnings. */
5866 const char * abi_string = NULL;
5867
5868 switch (mips_abi)
5869 {
5870 case ABI_32: abi_string = "abi32"; break;
5871 case ABI_N32: abi_string = "abiN32"; break;
5872 case ABI_64: abi_string = "abi64"; break;
5873 case ABI_O64: abi_string = "abiO64"; break;
5874 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5875 default:
5876 gcc_unreachable ();
5877 }
5878 /* Note - we use fprintf directly rather than calling switch_to_section
5879 because in this way we can avoid creating an allocated section. We
5880 do not want this section to take up any space in the running
5881 executable. */
5882 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5883
5884 /* There is no ELF header flag to distinguish long32 forms of the
5885 EABI from long64 forms. Emit a special section to help tools
5886 such as GDB. Do the same for o64, which is sometimes used with
5887 -mlong64. */
5888 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
5889 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5890 TARGET_LONG64 ? 64 : 32);
5891
5892 /* Restore the default section. */
5893 fprintf (asm_out_file, "\t.previous\n");
5894 }
5895
5896 /* Generate the pseudo ops that System V.4 wants. */
5897 if (TARGET_ABICALLS)
5898 fprintf (asm_out_file, "\t.abicalls\n");
5899
5900 if (TARGET_MIPS16)
5901 fprintf (asm_out_file, "\t.set\tmips16\n");
5902
5903 if (flag_verbose_asm)
5904 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5905 ASM_COMMENT_START,
5906 mips_section_threshold, mips_arch_info->name, mips_isa);
5907 }
5908
5909 #ifdef BSS_SECTION_ASM_OP
5910 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5911 in the use of sbss. */
5912
5913 void
5914 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5915 unsigned HOST_WIDE_INT size, int align)
5916 {
5917 extern tree last_assemble_variable_decl;
5918
5919 if (mips_in_small_data_p (decl))
5920 switch_to_section (get_named_section (NULL, ".sbss", 0));
5921 else
5922 switch_to_section (bss_section);
5923 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5924 last_assemble_variable_decl = decl;
5925 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5926 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5927 }
5928 #endif
5929
5930 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5931 .externs for any small-data variables that turned out to be external. */
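/* For example (a sketch of the intended output): if small-data variable
   "foo" (4 bytes, within the -G limit) is referenced but never defined
   in this translation unit, the loop below emits

	.extern	foo, 4

   so that the assembler continues to treat it as a small-data symbol.  */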
5932
5933 static void
5934 mips_file_end (void)
5935 {
5936 tree name_tree;
5937 struct extern_list *p;
5938
5939 if (extern_head)
5940 {
5941 fputs ("\n", asm_out_file);
5942
5943 for (p = extern_head; p != 0; p = p->next)
5944 {
5945 name_tree = get_identifier (p->name);
5946
5947 /* Positively ensure only one .extern for any given symbol. */
5948 if (!TREE_ASM_WRITTEN (name_tree)
5949 && TREE_SYMBOL_REFERENCED (name_tree))
5950 {
5951 TREE_ASM_WRITTEN (name_tree) = 1;
5952 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5953 `.global name .text' directive for every used but
5954 undefined function. If we don't, the linker may perform
5955 an optimization (skipping over the insns that set $gp)
5956 when it is unsafe. */
5957 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5958 {
5959 fputs ("\t.globl ", asm_out_file);
5960 assemble_name (asm_out_file, p->name);
5961 fputs (" .text\n", asm_out_file);
5962 }
5963 else
5964 {
5965 fputs ("\t.extern\t", asm_out_file);
5966 assemble_name (asm_out_file, p->name);
5967 fprintf (asm_out_file, ", %d\n", p->size);
5968 }
5969 }
5970 }
5971 }
5972 }
5973
5974 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5975 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5976
5977 void
5978 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5979 unsigned HOST_WIDE_INT size,
5980 unsigned int align)
5981 {
5982 /* If the target wants uninitialized const declarations in
5983 .rdata then don't put them in .comm. */
5984 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5985 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5986 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5987 {
5988 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5989 targetm.asm_out.globalize_label (stream, name);
5990
5991 switch_to_section (readonly_data_section);
5992 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5993 mips_declare_object (stream, name, "",
5994 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5995 size);
5996 }
5997 else
5998 mips_declare_common_object (stream, name, "\n\t.comm\t",
5999 size, align, true);
6000 }
6001
6002 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6003 NAME is the name of the object and ALIGN is the required alignment
6004    in bits.  TAKES_ALIGNMENT_P is true if the directive takes a third
6005 alignment argument. */
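/* A rough example of the output: INIT_STRING "\n\t.comm\t" with SIZE 7
   and ALIGN 64 (bits) gives ".comm foo,7,8" when TAKES_ALIGNMENT_P, or
   rounds SIZE up to 8 and gives ".comm foo,8" otherwise.  ("foo" is
   just a placeholder name.)  */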
6006
6007 void
6008 mips_declare_common_object (FILE *stream, const char *name,
6009 const char *init_string,
6010 unsigned HOST_WIDE_INT size,
6011 unsigned int align, bool takes_alignment_p)
6012 {
6013 if (!takes_alignment_p)
6014 {
6015 size += (align / BITS_PER_UNIT) - 1;
6016 size -= size % (align / BITS_PER_UNIT);
6017 mips_declare_object (stream, name, init_string,
6018 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6019 }
6020 else
6021 mips_declare_object (stream, name, init_string,
6022 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6023 size, align / BITS_PER_UNIT);
6024 }
6025
6026 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6027 macros, mark the symbol as written so that mips_file_end won't emit an
6028 .extern for it. STREAM is the output file, NAME is the name of the
6029 symbol, INIT_STRING is the string that should be written before the
6030 symbol and FINAL_STRING is the string that should be written after it.
6031 FINAL_STRING is a printf() format that consumes the remaining arguments. */
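/* For example, mips_declare_object (stream, name, "", ":\n") emits a
   plain "name:" label, which is how mips_declare_object_name below
   uses it.  */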
6032
6033 void
6034 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6035 const char *final_string, ...)
6036 {
6037 va_list ap;
6038
6039 fputs (init_string, stream);
6040 assemble_name (stream, name);
6041 va_start (ap, final_string);
6042 vfprintf (stream, final_string, ap);
6043 va_end (ap);
6044
6045 if (!TARGET_EXPLICIT_RELOCS)
6046 {
6047 tree name_tree = get_identifier (name);
6048 TREE_ASM_WRITTEN (name_tree) = 1;
6049 }
6050 }
6051
6052 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6053 extern int size_directive_output;
6054
6055 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6056 definitions except that it uses mips_declare_object() to emit the label. */
6057
6058 void
6059 mips_declare_object_name (FILE *stream, const char *name,
6060 tree decl ATTRIBUTE_UNUSED)
6061 {
6062 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6063 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6064 #endif
6065
6066 size_directive_output = 0;
6067 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6068 {
6069 HOST_WIDE_INT size;
6070
6071 size_directive_output = 1;
6072 size = int_size_in_bytes (TREE_TYPE (decl));
6073 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6074 }
6075
6076 mips_declare_object (stream, name, "", ":\n");
6077 }
6078
6079 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6080
6081 void
6082 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6083 {
6084 const char *name;
6085
6086 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6087 if (!flag_inhibit_size_directive
6088 && DECL_SIZE (decl) != 0
6089 && !at_end && top_level
6090 && DECL_INITIAL (decl) == error_mark_node
6091 && !size_directive_output)
6092 {
6093 HOST_WIDE_INT size;
6094
6095 size_directive_output = 1;
6096 size = int_size_in_bytes (TREE_TYPE (decl));
6097 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6098 }
6099 }
6100 #endif
6101
6102 /* Return true if X is a small data address that can be rewritten
6103 as a LO_SUM. */
6104
6105 static bool
6106 mips_rewrite_small_data_p (rtx x)
6107 {
6108 enum mips_symbol_type symbol_type;
6109
6110 return (TARGET_EXPLICIT_RELOCS
6111 && mips_symbolic_constant_p (x, &symbol_type)
6112 && symbol_type == SYMBOL_SMALL_DATA);
6113 }
6114
6115
6116 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6117
6118 static int
6119 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6120 {
6121 if (GET_CODE (*loc) == LO_SUM)
6122 return -1;
6123
6124 return mips_rewrite_small_data_p (*loc);
6125 }
6126
6127 /* Return true if OP refers to small data symbols directly, not through
6128 a LO_SUM. */
6129
6130 bool
6131 mips_small_data_pattern_p (rtx op)
6132 {
6133 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6134 }
6135
6136 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6137
6138 static int
6139 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6140 {
6141 if (mips_rewrite_small_data_p (*loc))
6142 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6143
6144 if (GET_CODE (*loc) == LO_SUM)
6145 return -1;
6146
6147 return 0;
6148 }
6149
6150 /* If possible, rewrite OP so that it refers to small data using
6151 explicit relocations. */
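/* For instance (illustrative), a MEM whose address is a bare reference
   to small-data symbol "x" is rewritten here as

	(mem (lo_sum (reg $28) (symbol_ref "x")))

   which print_operand_address later renders as "%gp_rel(x)($28)".  The
   exact register name depends on how $gp is printed for the ABI.  */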
6152
6153 rtx
6154 mips_rewrite_small_data (rtx op)
6155 {
6156 op = copy_insn (op);
6157 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6158 return op;
6159 }
6160
6161 /* Return true if the current function has an insn that implicitly
6162 refers to $gp. */
6163
6164 static bool
6165 mips_function_has_gp_insn (void)
6166 {
6167 /* Don't bother rechecking if we found one last time. */
6168 if (!cfun->machine->has_gp_insn_p)
6169 {
6170 rtx insn;
6171
6172 push_topmost_sequence ();
6173 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6174 if (INSN_P (insn)
6175 && GET_CODE (PATTERN (insn)) != USE
6176 && GET_CODE (PATTERN (insn)) != CLOBBER
6177 && (get_attr_got (insn) != GOT_UNSET
6178 || small_data_pattern (PATTERN (insn), VOIDmode)))
6179 break;
6180 pop_topmost_sequence ();
6181
6182 cfun->machine->has_gp_insn_p = (insn != 0);
6183 }
6184 return cfun->machine->has_gp_insn_p;
6185 }
6186
6187
6188 /* Return the register that should be used as the global pointer
6189 within this function. Return 0 if the function doesn't need
6190 a global pointer. */
6191
6192 static unsigned int
6193 mips_global_pointer (void)
6194 {
6195 unsigned int regno;
6196
6197 /* $gp is always available in non-abicalls code. */
6198 if (!TARGET_ABICALLS)
6199 return GLOBAL_POINTER_REGNUM;
6200
6201 /* We must always provide $gp when it is used implicitly. */
6202 if (!TARGET_EXPLICIT_RELOCS)
6203 return GLOBAL_POINTER_REGNUM;
6204
6205 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6206 a valid gp. */
6207 if (current_function_profile)
6208 return GLOBAL_POINTER_REGNUM;
6209
6210 /* If the function has a nonlocal goto, $gp must hold the correct
6211 global pointer for the target function. */
6212 if (current_function_has_nonlocal_goto)
6213 return GLOBAL_POINTER_REGNUM;
6214
6215 /* If the gp is never referenced, there's no need to initialize it.
6216 Note that reload can sometimes introduce constant pool references
6217 into a function that otherwise didn't need them. For example,
6218 suppose we have an instruction like:
6219
6220 (set (reg:DF R1) (float:DF (reg:SI R2)))
6221
6222 If R2 turns out to be constant such as 1, the instruction may have a
6223 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6224 using this constant if R2 doesn't get allocated to a register.
6225
6226 In cases like these, reload will have added the constant to the pool
6227 but no instruction will yet refer to it. */
6228 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6229 && !current_function_uses_const_pool
6230 && !mips_function_has_gp_insn ())
6231 return 0;
6232
6233 /* We need a global pointer, but perhaps we can use a call-clobbered
6234 register instead of $gp. */
6235 if (TARGET_NEWABI && current_function_is_leaf)
6236 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6237 if (!regs_ever_live[regno]
6238 && call_used_regs[regno]
6239 && !fixed_regs[regno]
6240 && regno != PIC_FUNCTION_ADDR_REGNUM)
6241 return regno;
6242
6243 return GLOBAL_POINTER_REGNUM;
6244 }
6245
6246
6247 /* Return true if the current function must save REGNO. */
6248
6249 static bool
6250 mips_save_reg_p (unsigned int regno)
6251 {
6252 /* We only need to save $gp for NewABI PIC. */
6253 if (regno == GLOBAL_POINTER_REGNUM)
6254 return (TARGET_ABICALLS && TARGET_NEWABI
6255 && cfun->machine->global_pointer == regno);
6256
6257 /* Check call-saved registers. */
6258 if (regs_ever_live[regno] && !call_used_regs[regno])
6259 return true;
6260
6261 /* We need to save the old frame pointer before setting up a new one. */
6262 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6263 return true;
6264
6265 /* We need to save the incoming return address if it is ever clobbered
6266 within the function. */
6267 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
6268 return true;
6269
6270 if (TARGET_MIPS16)
6271 {
6272 tree return_type;
6273
6274 return_type = DECL_RESULT (current_function_decl);
6275
6276 /* $18 is a special case in mips16 code. It may be used to call
6277 a function which returns a floating point value, but it is
6278 marked in call_used_regs. */
6279 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6280 return true;
6281
6282 /* $31 is also a special case. It will be used to copy a return
6283 value into the floating point registers if the return value is
6284 floating point. */
6285 if (regno == GP_REG_FIRST + 31
6286 && mips16_hard_float
6287 && !aggregate_value_p (return_type, current_function_decl)
6288 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6289 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6290 return true;
6291 }
6292
6293 return false;
6294 }
6295
6296
6297 /* Return the bytes needed to compute the frame pointer from the current
6298 stack pointer. SIZE is the size (in bytes) of the local variables.
6299
6300 MIPS stack frames look like:
6301
6302 Before call After call
6303 +-----------------------+ +-----------------------+
6304 high | | | |
6305 mem. | | | |
6306 | caller's temps. | | caller's temps. |
6307 | | | |
6308 +-----------------------+ +-----------------------+
6309 | | | |
6310 | arguments on stack. | | arguments on stack. |
6311 | | | |
6312 +-----------------------+ +-----------------------+
6313 | 4 words to save | | 4 words to save |
6314 | arguments passed | | arguments passed |
6315 | in registers, even | | in registers, even |
6316 SP->| if not passed. | VFP->| if not passed. |
6317 +-----------------------+ +-----------------------+
6318 | |
6319 | fp register save |
6320 | |
6321 +-----------------------+
6322 | |
6323 | gp register save |
6324 | |
6325 +-----------------------+
6326 | |
6327 | local variables |
6328 | |
6329 +-----------------------+
6330 | |
6331 | alloca allocations |
6332 | |
6333 +-----------------------+
6334 | |
6335 | GP save for V.4 abi |
6336 | |
6337 +-----------------------+
6338 | |
6339 | arguments on stack |
6340 | |
6341 +-----------------------+
6342 | 4 words to save |
6343 | arguments passed |
6344 | in registers, even |
6345 low SP->| if not passed. |
6346 memory +-----------------------+
6347
6348 */
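/* A rough worked example (o32, -mno-abicalls, no varargs or pretend
   args): a non-leaf function with no locals, 16 bytes of outgoing
   arguments and $31/$fp to save gets var_size = 0, args_size = 16 and
   gp_reg_size = 8, so total_size = 24.  gp_sp_offset is then
   16 + 8 - 4 = 20, i.e. $31 is saved at 20($sp) and $fp at 16($sp).
   (Illustrative numbers only.)  */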
6349
6350 HOST_WIDE_INT
6351 compute_frame_size (HOST_WIDE_INT size)
6352 {
6353 unsigned int regno;
6354 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6355 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6356 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6357 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6358 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6359 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6360 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6361 unsigned int mask; /* mask of saved gp registers */
6362 unsigned int fmask; /* mask of saved fp registers */
6363
6364 cfun->machine->global_pointer = mips_global_pointer ();
6365
6366 gp_reg_size = 0;
6367 fp_reg_size = 0;
6368 mask = 0;
6369 fmask = 0;
6370 var_size = MIPS_STACK_ALIGN (size);
6371 args_size = current_function_outgoing_args_size;
6372 cprestore_size = (TARGET_ABICALLS && !TARGET_NEWABI
6373 ? MIPS_STACK_ALIGN (UNITS_PER_WORD) : 0);
6374
6375 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6376 functions. If the function has local variables, we're committed
6377 to allocating it anyway. Otherwise reclaim it here. */
6378   if (var_size == 0 && current_function_is_leaf)
6379 cprestore_size = args_size = 0;
6380
6381 /* The MIPS 3.0 linker does not like functions that dynamically
6382 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6383 looks like we are trying to create a second frame pointer to the
6384 function, so allocate some stack space to make it happy. */
6385
6386 if (args_size == 0 && current_function_calls_alloca)
6387 args_size = 4 * UNITS_PER_WORD;
6388
6389 total_size = var_size + args_size + cprestore_size;
6390
6391 /* Calculate space needed for gp registers. */
6392 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6393 if (mips_save_reg_p (regno))
6394 {
6395 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6396 mask |= 1 << (regno - GP_REG_FIRST);
6397 }
6398
6399 /* We need to restore these for the handler. */
6400 if (current_function_calls_eh_return)
6401 {
6402 unsigned int i;
6403 for (i = 0; ; ++i)
6404 {
6405 regno = EH_RETURN_DATA_REGNO (i);
6406 if (regno == INVALID_REGNUM)
6407 break;
6408 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6409 mask |= 1 << (regno - GP_REG_FIRST);
6410 }
6411 }
6412
6413 /* This loop must iterate over the same space as its companion in
6414 save_restore_insns. */
6415 for (regno = (FP_REG_LAST - FP_INC + 1);
6416 regno >= FP_REG_FIRST;
6417 regno -= FP_INC)
6418 {
6419 if (mips_save_reg_p (regno))
6420 {
6421 fp_reg_size += FP_INC * UNITS_PER_FPREG;
6422 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6423 }
6424 }
6425
6426 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6427 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6428
6429 /* Add in the space required for saving incoming register arguments. */
6430 total_size += current_function_pretend_args_size;
6431 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6432
6433 /* Save other computed information. */
6434 cfun->machine->frame.total_size = total_size;
6435 cfun->machine->frame.var_size = var_size;
6436 cfun->machine->frame.args_size = args_size;
6437 cfun->machine->frame.cprestore_size = cprestore_size;
6438 cfun->machine->frame.gp_reg_size = gp_reg_size;
6439 cfun->machine->frame.fp_reg_size = fp_reg_size;
6440 cfun->machine->frame.mask = mask;
6441 cfun->machine->frame.fmask = fmask;
6442 cfun->machine->frame.initialized = reload_completed;
6443 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6444 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
6445
6446 if (mask)
6447 {
6448 HOST_WIDE_INT offset;
6449
6450 offset = (args_size + cprestore_size + var_size
6451 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6452 cfun->machine->frame.gp_sp_offset = offset;
6453 cfun->machine->frame.gp_save_offset = offset - total_size;
6454 }
6455 else
6456 {
6457 cfun->machine->frame.gp_sp_offset = 0;
6458 cfun->machine->frame.gp_save_offset = 0;
6459 }
6460
6461 if (fmask)
6462 {
6463 HOST_WIDE_INT offset;
6464
6465 offset = (args_size + cprestore_size + var_size
6466 + gp_reg_rounded + fp_reg_size
6467 - FP_INC * UNITS_PER_FPREG);
6468 cfun->machine->frame.fp_sp_offset = offset;
6469 cfun->machine->frame.fp_save_offset = offset - total_size;
6470 }
6471 else
6472 {
6473 cfun->machine->frame.fp_sp_offset = 0;
6474 cfun->machine->frame.fp_save_offset = 0;
6475 }
6476
6477 /* Ok, we're done. */
6478 return total_size;
6479 }
6480
6481 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6482 pointer or argument pointer. TO is either the stack pointer or
6483 hard frame pointer. */
6484
6485 HOST_WIDE_INT
6486 mips_initial_elimination_offset (int from, int to)
6487 {
6488 HOST_WIDE_INT offset;
6489
6490 compute_frame_size (get_frame_size ());
6491
6492 /* Set OFFSET to the offset from the stack pointer. */
6493 switch (from)
6494 {
6495 case FRAME_POINTER_REGNUM:
6496 offset = (cfun->machine->frame.args_size
6497 + cfun->machine->frame.cprestore_size
6498 + cfun->machine->frame.var_size);
6499 break;
6500
6501 case ARG_POINTER_REGNUM:
6502 offset = (cfun->machine->frame.total_size
6503 - current_function_pretend_args_size);
6504 break;
6505
6506 default:
6507 gcc_unreachable ();
6508 }
6509
6510 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6511 offset -= cfun->machine->frame.args_size;
6512
6513 return offset;
6514 }
6515
6516 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6517 back to a previous frame. */
6518 rtx
6519 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6520 {
6521 if (count != 0)
6522 return const0_rtx;
6523
6524 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6525 }
6526
6527 /* Use FN to save or restore register REGNO. MODE is the register's
6528 mode and OFFSET is the offset of its save slot from the current
6529 stack pointer. */
6530
6531 static void
6532 mips_save_restore_reg (enum machine_mode mode, int regno,
6533 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6534 {
6535 rtx mem;
6536
6537 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6538
6539 fn (gen_rtx_REG (mode, regno), mem);
6540 }
6541
6542
6543 /* Call FN for each register that is saved by the current function.
6544 SP_OFFSET is the offset of the current stack pointer from the start
6545 of the frame. */
6546
6547 static void
6548 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6549 {
6550 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6551
6552 enum machine_mode fpr_mode;
6553 HOST_WIDE_INT offset;
6554 int regno;
6555
6556   /* Save registers starting from high to low.  The debuggers prefer the
6557      return register to be stored at func+4, and this ordering also means
6558      we do not need a nop in the epilogue if at least one register is
6559      restored in addition to the return address.  */
6560 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6561 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6562 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6563 {
6564 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6565 offset -= GET_MODE_SIZE (gpr_mode);
6566 }
6567
6568 /* This loop must iterate over the same space as its companion in
6569 compute_frame_size. */
6570 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6571 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6572 for (regno = (FP_REG_LAST - FP_INC + 1);
6573 regno >= FP_REG_FIRST;
6574 regno -= FP_INC)
6575 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6576 {
6577 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6578 offset -= GET_MODE_SIZE (fpr_mode);
6579 }
6580 #undef BITSET_P
6581 }
6582
6583 /* If we're generating n32 or n64 abicalls, and the current function
6584 does not use $28 as its global pointer, emit a cplocal directive.
6585 Use pic_offset_table_rtx as the argument to the directive. */
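/* For example (illustrative), if this function's global pointer ends up
   in $2 rather than $28, the directive emitted below would name that
   register, i.e. ".cplocal $2".  */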
6586
6587 static void
6588 mips_output_cplocal (void)
6589 {
6590 if (!TARGET_EXPLICIT_RELOCS
6591 && cfun->machine->global_pointer > 0
6592 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6593 output_asm_insn (".cplocal %+", 0);
6594 }
6595
6596 /* Return the style of GP load sequence that is being used for the
6597 current function. */
6598
6599 enum mips_loadgp_style
6600 mips_current_loadgp_style (void)
6601 {
6602 if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
6603 return LOADGP_NONE;
6604
6605 if (TARGET_ABSOLUTE_ABICALLS)
6606 return LOADGP_ABSOLUTE;
6607
6608 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6609 }
6610
6611 /* The __gnu_local_gp symbol. */
6612
6613 static GTY(()) rtx mips_gnu_local_gp;
6614
6615 /* If we're generating n32 or n64 abicalls, emit instructions
6616 to set up the global pointer. */
6617
6618 static void
6619 mips_emit_loadgp (void)
6620 {
6621 rtx addr, offset, incoming_address;
6622
6623 switch (mips_current_loadgp_style ())
6624 {
6625 case LOADGP_ABSOLUTE:
6626 if (mips_gnu_local_gp == NULL)
6627 {
6628 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6629 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6630 }
6631 emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
6632 break;
6633
6634 case LOADGP_NEWABI:
6635 addr = XEXP (DECL_RTL (current_function_decl), 0);
6636 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6637 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6638 emit_insn (gen_loadgp (offset, incoming_address));
6639 if (!TARGET_EXPLICIT_RELOCS)
6640 emit_insn (gen_loadgp_blockage ());
6641 break;
6642
6643 default:
6644 break;
6645 }
6646 }
6647
6648 /* Set up the stack and frame (if desired) for the function. */
6649
6650 static void
6651 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6652 {
6653 const char *fnname;
6654 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6655
6656 #ifdef SDB_DEBUGGING_INFO
6657 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6658 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6659 #endif
6660
6661   /* In mips16 mode, we may need to generate a 32 bit stub to handle
6662 floating point arguments. The linker will arrange for any 32 bit
6663 functions to call this stub, which will then jump to the 16 bit
6664 function proper. */
6665 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6666 && current_function_args_info.fp_code != 0)
6667 build_mips16_function_stub (file);
6668
6669 if (!FUNCTION_NAME_ALREADY_DECLARED)
6670 {
6671 /* Get the function name the same way that toplev.c does before calling
6672 assemble_start_function. This is needed so that the name used here
6673 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6674 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6675
6676 if (!flag_inhibit_size_directive)
6677 {
6678 fputs ("\t.ent\t", file);
6679 assemble_name (file, fnname);
6680 fputs ("\n", file);
6681 }
6682
6683 assemble_name (file, fnname);
6684 fputs (":\n", file);
6685 }
6686
6687 /* Stop mips_file_end from treating this function as external. */
6688 if (TARGET_IRIX && mips_abi == ABI_32)
6689 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6690
6691 if (!flag_inhibit_size_directive)
6692 {
6693 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6694 fprintf (file,
6695 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6696 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6697 ", args= " HOST_WIDE_INT_PRINT_DEC
6698 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6699 (reg_names[(frame_pointer_needed)
6700 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6701 ((frame_pointer_needed && TARGET_MIPS16)
6702 ? tsize - cfun->machine->frame.args_size
6703 : tsize),
6704 reg_names[GP_REG_FIRST + 31],
6705 cfun->machine->frame.var_size,
6706 cfun->machine->frame.num_gp,
6707 cfun->machine->frame.num_fp,
6708 cfun->machine->frame.args_size,
6709 cfun->machine->frame.cprestore_size);
6710
6711 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6712 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6713 cfun->machine->frame.mask,
6714 cfun->machine->frame.gp_save_offset);
6715 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6716 cfun->machine->frame.fmask,
6717 cfun->machine->frame.fp_save_offset);
6718
6719 /* Require:
6720 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6721 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6722 }
6723
6724 if (mips_current_loadgp_style () == LOADGP_OLDABI)
6725 {
6726 /* Handle the initialization of $gp for SVR4 PIC. */
6727 if (!cfun->machine->all_noreorder_p)
6728 output_asm_insn ("%(.cpload\t%^%)", 0);
6729 else
6730 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6731 }
6732 else if (cfun->machine->all_noreorder_p)
6733 output_asm_insn ("%(%<", 0);
6734
6735 /* Tell the assembler which register we're using as the global
6736 pointer. This is needed for thunks, since they can use either
6737 explicit relocs or assembler macros. */
6738 mips_output_cplocal ();
6739 }
6740
6741 /* Make the last instruction frame related and note that it performs
6742 the operation described by FRAME_PATTERN. */
6743
6744 static void
6745 mips_set_frame_expr (rtx frame_pattern)
6746 {
6747 rtx insn;
6748
6749 insn = get_last_insn ();
6750 RTX_FRAME_RELATED_P (insn) = 1;
6751 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6752 frame_pattern,
6753 REG_NOTES (insn));
6754 }
6755
6756
6757 /* Return a frame-related rtx that stores REG at MEM.
6758 REG must be a single register. */
6759
6760 static rtx
6761 mips_frame_set (rtx mem, rtx reg)
6762 {
6763 rtx set;
6764
6765 /* If we're saving the return address register and the dwarf return
6766 address column differs from the hard register number, adjust the
6767 note reg to refer to the former. */
6768 if (REGNO (reg) == GP_REG_FIRST + 31
6769 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6770 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6771
6772 set = gen_rtx_SET (VOIDmode, mem, reg);
6773 RTX_FRAME_RELATED_P (set) = 1;
6774
6775 return set;
6776 }
6777
6778
6779 /* Save register REG to MEM. Make the instruction frame-related. */
6780
6781 static void
6782 mips_save_reg (rtx reg, rtx mem)
6783 {
6784 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6785 {
6786 rtx x1, x2;
6787
6788 if (mips_split_64bit_move_p (mem, reg))
6789 mips_split_64bit_move (mem, reg);
6790 else
6791 emit_move_insn (mem, reg);
6792
6793 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6794 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6795 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6796 }
6797 else
6798 {
6799 if (TARGET_MIPS16
6800 && REGNO (reg) != GP_REG_FIRST + 31
6801 && !M16_REG_P (REGNO (reg)))
6802 {
6803 /* Save a non-mips16 register by moving it through a temporary.
6804 We don't need to do this for $31 since there's a special
6805 instruction for it. */
6806 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6807 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6808 }
6809 else
6810 emit_move_insn (mem, reg);
6811
6812 mips_set_frame_expr (mips_frame_set (mem, reg));
6813 }
6814 }
6815
6816
6817 /* Expand the prologue into a bunch of separate insns. */
6818
6819 void
6820 mips_expand_prologue (void)
6821 {
6822 HOST_WIDE_INT size;
6823
6824 if (cfun->machine->global_pointer > 0)
6825 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6826
6827 size = compute_frame_size (get_frame_size ());
6828
6829 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6830 bytes beforehand; this is enough to cover the register save area
6831 without going out of range. */
6832 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6833 {
6834 HOST_WIDE_INT step1;
6835
6836 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6837 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6838 stack_pointer_rtx,
6839 GEN_INT (-step1)))) = 1;
6840 size -= step1;
6841 mips_for_each_saved_reg (size, mips_save_reg);
6842 }
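
  /* Illustrative arithmetic only: for a !TARGET_MIPS16 frame of 0x9000
     bytes, STEP1 is MIN (0x9000, 0x7ff0) = 0x7ff0, so the adjustment
     above is a single addiu/daddiu of -0x7ff0.  The register saves then
     use small positive offsets from the new $sp, and SIZE = 0x1010 is
     left for the code below to allocate.  */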
6843
6844 /* Allocate the rest of the frame. */
6845 if (size > 0)
6846 {
6847 if (SMALL_OPERAND (-size))
6848 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6849 stack_pointer_rtx,
6850 GEN_INT (-size)))) = 1;
6851 else
6852 {
6853 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6854 if (TARGET_MIPS16)
6855 {
6856 /* There are no instructions to add or subtract registers
6857 from the stack pointer, so use the frame pointer as a
6858 temporary. We should always be using a frame pointer
6859 in this case anyway. */
6860 gcc_assert (frame_pointer_needed);
6861 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6862 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6863 hard_frame_pointer_rtx,
6864 MIPS_PROLOGUE_TEMP (Pmode)));
6865 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6866 }
6867 else
6868 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6869 stack_pointer_rtx,
6870 MIPS_PROLOGUE_TEMP (Pmode)));
6871
6872 /* Describe the combined effect of the previous instructions. */
6873 mips_set_frame_expr
6874 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6875 plus_constant (stack_pointer_rtx, -size)));
6876 }
6877 }
6878
6879 /* Set up the frame pointer, if we're using one. In mips16 code,
6880 we point the frame pointer ahead of the outgoing argument area.
6881 This should allow more variables & incoming arguments to be
6882 accessed with unextended instructions. */
6883 if (frame_pointer_needed)
6884 {
6885 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6886 {
6887 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6888 if (SMALL_OPERAND (cfun->machine->frame.args_size))
6889 RTX_FRAME_RELATED_P
6890 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6891 stack_pointer_rtx,
6892 offset))) = 1;
6893 else
6894 {
6895 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6896 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6897 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6898 hard_frame_pointer_rtx,
6899 MIPS_PROLOGUE_TEMP (Pmode)));
6900 mips_set_frame_expr
6901 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6902 plus_constant (stack_pointer_rtx,
6903 cfun->machine->frame.args_size)));
6904 }
6905 }
6906 else
6907 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6908 stack_pointer_rtx)) = 1;
6909 }
6910
6911 mips_emit_loadgp ();
6912
6913 /* If generating o32/o64 abicalls, save $gp on the stack. */
6914 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6915 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6916
6917 /* If we are profiling, make sure no instructions are scheduled before
6918 the call to mcount. */
6919
6920 if (current_function_profile)
6921 emit_insn (gen_blockage ());
6922 }
6923
6924 /* Do any necessary cleanup after a function to restore stack, frame,
6925 and regs. */
6926
6927 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6928
6929 static void
6930 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6931 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6932 {
6933 /* Reinstate the normal $gp. */
6934 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6935 mips_output_cplocal ();
6936
6937 if (cfun->machine->all_noreorder_p)
6938 {
6939 /* Avoid using %>%) since it adds excess whitespace. */
6940 output_asm_insn (".set\tmacro", 0);
6941 output_asm_insn (".set\treorder", 0);
6942 set_noreorder = set_nomacro = 0;
6943 }
6944
6945 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6946 {
6947 const char *fnname;
6948
6949 /* Get the function name the same way that toplev.c does before calling
6950 assemble_start_function. This is needed so that the name used here
6951 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6952 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6953 fputs ("\t.end\t", file);
6954 assemble_name (file, fnname);
6955 fputs ("\n", file);
6956 }
6957 }
6958
6959 /* Emit instructions to restore register REG from slot MEM. */
6960
6961 static void
6962 mips_restore_reg (rtx reg, rtx mem)
6963 {
6964 /* There's no mips16 instruction to load $31 directly. Load into
6965 $7 instead and adjust the return insn appropriately. */
6966 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6967 reg = gen_rtx_REG (GET_MODE (reg), 7);
6968
6969 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6970 {
6971 /* Can't restore directly; move through a temporary. */
6972 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6973 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6974 }
6975 else
6976 emit_move_insn (reg, mem);
6977 }
6978
6979
6980 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6981 if this epilogue precedes a sibling call, false if it is for a normal
6982 "epilogue" pattern. */
6983
6984 void
6985 mips_expand_epilogue (int sibcall_p)
6986 {
6987 HOST_WIDE_INT step1, step2;
6988 rtx base, target;
6989
6990 if (!sibcall_p && mips_can_use_return_insn ())
6991 {
6992 emit_jump_insn (gen_return ());
6993 return;
6994 }
6995
6996 /* Split the frame into two. STEP1 is the amount of stack we should
6997 deallocate before restoring the registers. STEP2 is the amount we
6998 should deallocate afterwards.
6999
7000 Start off by assuming that no registers need to be restored. */
7001 step1 = cfun->machine->frame.total_size;
7002 step2 = 0;
7003
7004 /* Work out which register holds the frame address. Account for the
7005 frame pointer offset used by mips16 code. */
7006 if (!frame_pointer_needed)
7007 base = stack_pointer_rtx;
7008 else
7009 {
7010 base = hard_frame_pointer_rtx;
7011 if (TARGET_MIPS16)
7012 step1 -= cfun->machine->frame.args_size;
7013 }
7014
7015 /* If we need to restore registers, deallocate as much stack as
7016 possible in the second step without going out of range. */
7017 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7018 {
7019 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7020 step1 -= step2;
7021 }
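
  /* Illustrative arithmetic only: for a hypothetical 0x9000-byte frame
     with saved registers, STEP2 becomes MIN (0x9000, 0x7ff0) = 0x7ff0
     and STEP1 becomes 0x1010.  The code below first adds STEP1 to the
     frame address, restores the saved registers at small offsets, and
     finally deallocates the remaining STEP2 bytes with a single
     addiu/daddiu.  */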
7022
7023 /* Set TARGET to BASE + STEP1. */
7024 target = base;
7025 if (step1 > 0)
7026 {
7027 rtx adjust;
7028
7029 /* Get an rtx for STEP1 that we can add to BASE. */
7030 adjust = GEN_INT (step1);
7031 if (!SMALL_OPERAND (step1))
7032 {
7033 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7034 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7035 }
7036
7037 /* Normal mode code can copy the result straight into $sp. */
7038 if (!TARGET_MIPS16)
7039 target = stack_pointer_rtx;
7040
7041 emit_insn (gen_add3_insn (target, base, adjust));
7042 }
7043
7044 /* Copy TARGET into the stack pointer. */
7045 if (target != stack_pointer_rtx)
7046 emit_move_insn (stack_pointer_rtx, target);
7047
7048 /* If we're using addressing macros for n32/n64 abicalls, $gp is
7049 implicitly used by all SYMBOL_REFs. We must emit a blockage
7050 insn before restoring it. */
7051 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
7052 emit_insn (gen_blockage ());
7053
7054 /* Restore the registers. */
7055 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7056 mips_restore_reg);
7057
7058 /* Deallocate the final bit of the frame. */
7059 if (step2 > 0)
7060 emit_insn (gen_add3_insn (stack_pointer_rtx,
7061 stack_pointer_rtx,
7062 GEN_INT (step2)));
7063
7064 /* Add in the __builtin_eh_return stack adjustment. We need to
7065 use a temporary in mips16 code. */
7066 if (current_function_calls_eh_return)
7067 {
7068 if (TARGET_MIPS16)
7069 {
7070 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7071 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7072 MIPS_EPILOGUE_TEMP (Pmode),
7073 EH_RETURN_STACKADJ_RTX));
7074 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7075 }
7076 else
7077 emit_insn (gen_add3_insn (stack_pointer_rtx,
7078 stack_pointer_rtx,
7079 EH_RETURN_STACKADJ_RTX));
7080 }
7081
7082 if (!sibcall_p)
7083 {
7084 /* The mips16 epilogue loads the return address into $7, not $31. */
7085 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
7086 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7087 GP_REG_FIRST + 7)));
7088 else
7089 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7090 GP_REG_FIRST + 31)));
7091 }
7092 }
7093
7094 /* Return nonzero if this function is known to have a null epilogue.
7095 This allows the optimizer to omit jumps to jumps if no stack
7096 was created. */
7097
7098 int
7099 mips_can_use_return_insn (void)
7100 {
7101 tree return_type;
7102
7103 if (! reload_completed)
7104 return 0;
7105
7106 if (regs_ever_live[31] || current_function_profile)
7107 return 0;
7108
7109 return_type = DECL_RESULT (current_function_decl);
7110
7111 /* In mips16 mode, a function which returns a floating point value
7112 needs to arrange to copy the return value into the floating point
7113 registers. */
7114 if (TARGET_MIPS16
7115 && mips16_hard_float
7116 && ! aggregate_value_p (return_type, current_function_decl)
7117 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7118 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
7119 return 0;
7120
7121 if (cfun->machine->frame.initialized)
7122 return cfun->machine->frame.total_size == 0;
7123
7124 return compute_frame_size (get_frame_size ()) == 0;
7125 }
7126
7127 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
7128 in order to avoid duplicating too much logic from elsewhere. */
7129
7130 static void
7131 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7132 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7133 tree function)
7134 {
7135 rtx this, temp1, temp2, insn, fnaddr;
7136
7137 /* Pretend to be a post-reload pass while generating rtl. */
7138 no_new_pseudos = 1;
7139 reload_completed = 1;
7140 reset_block_changes ();
7141
7142 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
7143 for TARGET_NEWABI since the latter is a call-saved register. */
7144 if (TARGET_ABICALLS)
7145 cfun->machine->global_pointer
7146 = REGNO (pic_offset_table_rtx)
7147 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
7148
7149 /* Set up the global pointer for n32 or n64 abicalls. */
7150 mips_emit_loadgp ();
7151
7152 /* We need two temporary registers in some cases. */
7153 temp1 = gen_rtx_REG (Pmode, 2);
7154 temp2 = gen_rtx_REG (Pmode, 3);
7155
7156 /* Find out which register contains the "this" pointer. */
7157 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7158 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7159 else
7160 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7161
7162 /* Add DELTA to THIS. */
7163 if (delta != 0)
7164 {
7165 rtx offset = GEN_INT (delta);
7166 if (!SMALL_OPERAND (delta))
7167 {
7168 emit_move_insn (temp1, offset);
7169 offset = temp1;
7170 }
7171 emit_insn (gen_add3_insn (this, this, offset));
7172 }
7173
7174 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
7175 if (vcall_offset != 0)
7176 {
7177 rtx addr;
7178
7179 /* Set TEMP1 to *THIS. */
7180 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7181
7182 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
7183 addr = mips_add_offset (temp2, temp1, vcall_offset);
7184
7185 /* Load the offset and add it to THIS. */
7186 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7187 emit_insn (gen_add3_insn (this, this, temp1));
7188 }
7189
7190 /* Jump to the target function. Use a sibcall if direct jumps are
7191 allowed, otherwise load the address into a register first. */
7192 fnaddr = XEXP (DECL_RTL (function), 0);
7193 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
7194 {
7195 /* This is messy. gas treats "la $25,foo" as part of a call
7196 sequence and may allow a global "foo" to be lazily bound.
7197 The general move patterns therefore reject this combination.
7198
7199 In this context, lazy binding would actually be OK for o32 and o64,
7200 but it's still wrong for n32 and n64; see mips_load_call_address.
7201 We must therefore load the address via a temporary register if
7202 mips_dangerous_for_la25_p.
7203
7204 If we jump to the temporary register rather than $25, the assembler
7205 can use the move insn to fill the jump's delay slot. */
7206 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
7207 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7208 mips_load_call_address (temp1, fnaddr, true);
7209
7210 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7211 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7212 emit_jump_insn (gen_indirect_jump (temp1));
7213 }
7214 else
7215 {
7216 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7217 SIBLING_CALL_P (insn) = 1;
7218 }
7219
7220 /* Run just enough of rest_of_compilation. This sequence was
7221 "borrowed" from alpha.c. */
7222 insn = get_insns ();
7223 insn_locators_initialize ();
7224 split_all_insns_noflow ();
7225 if (TARGET_MIPS16)
7226 mips16_lay_out_constants ();
7227 shorten_branches (insn);
7228 final_start_function (insn, file, 1);
7229 final (insn, file, 1);
7230 final_end_function ();
7231
7232 /* Clean up the vars set above. Note that final_end_function resets
7233 the global pointer for us. */
7234 reload_completed = 0;
7235 no_new_pseudos = 0;
7236 }
7237
7238 /* Returns nonzero if X contains a SYMBOL_REF. */
7239
7240 static int
7241 symbolic_expression_p (rtx x)
7242 {
7243 if (GET_CODE (x) == SYMBOL_REF)
7244 return 1;
7245
7246 if (GET_CODE (x) == CONST)
7247 return symbolic_expression_p (XEXP (x, 0));
7248
7249 if (UNARY_P (x))
7250 return symbolic_expression_p (XEXP (x, 0));
7251
7252 if (ARITHMETIC_P (x))
7253 return (symbolic_expression_p (XEXP (x, 0))
7254 || symbolic_expression_p (XEXP (x, 1)));
7255
7256 return 0;
7257 }
7258
7259 /* Choose the section to use for the constant rtx expression X that has
7260 mode MODE. */
7261
7262 static section *
7263 mips_select_rtx_section (enum machine_mode mode, rtx x,
7264 unsigned HOST_WIDE_INT align)
7265 {
7266 if (TARGET_MIPS16)
7267 {
7268 /* In mips16 mode, the constant table always goes in the same section
7269 as the function, so that constants can be loaded using PC relative
7270 addressing. */
7271 return function_section (current_function_decl);
7272 }
7273 else if (TARGET_EMBEDDED_DATA)
7274 {
7275 /* For embedded applications, always put constants in read-only data,
7276 in order to reduce RAM usage. */
7277 return mergeable_constant_section (mode, align, 0);
7278 }
7279 else
7280 {
7281 /* For hosted applications, always put constants in small data if
7282 possible, as this gives the best performance. */
7283 /* ??? Consider using mergeable small data sections. */
7284
7285 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7286 && mips_section_threshold > 0)
7287 return get_named_section (NULL, ".sdata", 0);
7288 else if (flag_pic && symbolic_expression_p (x))
7289 return get_named_section (NULL, ".data.rel.ro", 3);
7290 else
7291 return mergeable_constant_section (mode, align, 0);
7292 }
7293 }
7294
7295 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7296
7297 The complication here is that, with the combination TARGET_ABICALLS
7298 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7299 therefore not be included in the read-only part of a DSO. Handle such
7300 cases by selecting a normal data section instead of a read-only one.
7301 The logic apes that in default_function_rodata_section. */
7302
7303 static section *
7304 mips_function_rodata_section (tree decl)
7305 {
7306 if (!TARGET_ABICALLS || TARGET_GPWORD)
7307 return default_function_rodata_section (decl);
7308
7309 if (decl && DECL_SECTION_NAME (decl))
7310 {
7311 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7312 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7313 {
7314 char *rname = ASTRDUP (name);
7315 rname[14] = 'd';
7316 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7317 }
7318 else if (flag_function_sections && flag_data_sections
7319 && strncmp (name, ".text.", 6) == 0)
7320 {
7321 char *rname = ASTRDUP (name);
7322 memcpy (rname + 1, "data", 4);
7323 return get_section (rname, SECTION_WRITE, decl);
7324 }
7325 }
7326 return data_section;
7327 }
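
/* Illustrative examples of the renaming above (the function names are
   hypothetical): a one-only function placed in ".gnu.linkonce.t.foo"
   gets its jump tables in ".gnu.linkonce.d.foo", and with
   -ffunction-sections -fdata-sections a function in ".text.foo" gets
   them in ".data.foo".  Anything else falls back to .data.  */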
7328
7329 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
7330 locally-defined objects go in a small data section. It also controls
7331 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
7332 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
7333
7334 static bool
7335 mips_in_small_data_p (tree decl)
7336 {
7337 HOST_WIDE_INT size;
7338
7339 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7340 return false;
7341
7342 /* We don't yet generate small-data references for -mabicalls. See related
7343 -G handling in override_options. */
7344 if (TARGET_ABICALLS)
7345 return false;
7346
7347 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7348 {
7349 const char *name;
7350
7351 /* Reject anything that isn't in a known small-data section. */
7352 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7353 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7354 return false;
7355
7356 /* If a symbol is defined externally, the assembler will use the
7357 usual -G rules when deciding how to implement macros. */
7358 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7359 return true;
7360 }
7361 else if (TARGET_EMBEDDED_DATA)
7362 {
7363 /* Don't put constants into the small data section: we want them
7364 to be in ROM rather than RAM. */
7365 if (TREE_CODE (decl) != VAR_DECL)
7366 return false;
7367
7368 if (TREE_READONLY (decl)
7369 && !TREE_SIDE_EFFECTS (decl)
7370 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7371 return false;
7372 }
7373
7374 size = int_size_in_bytes (TREE_TYPE (decl));
7375 return (size > 0 && size <= mips_section_threshold);
7376 }
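
/* For illustration only: with the default 8-byte threshold (-G 8) and
   without -mabicalls, a definition such as "int buf[2]" (8 bytes)
   passes the size test above, whereas "int buf[3]" (12 bytes) does not
   and stays in the normal data sections.  */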
7377
7378 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7379 anchors for small data: the GP register acts as an anchor in that
7380 case. We also don't want to use them for PC-relative accesses,
7381 where the PC acts as an anchor. */
7382
7383 static bool
7384 mips_use_anchors_for_symbol_p (rtx symbol)
7385 {
7386 switch (mips_classify_symbol (symbol))
7387 {
7388 case SYMBOL_CONSTANT_POOL:
7389 case SYMBOL_SMALL_DATA:
7390 return false;
7391
7392 default:
7393 return true;
7394 }
7395 }
7396
7397 /* See whether VALTYPE is a record whose fields should be returned in
7398 floating-point registers. If so, return the number of fields and
7399 list them in FIELDS (which should have two elements). Return 0
7400 otherwise.
7401
7402 For n32 & n64, a structure with one or two fields is returned in
7403 floating-point registers as long as every field has a floating-point
7404 type. */
7405
7406 static int
7407 mips_fpr_return_fields (tree valtype, tree *fields)
7408 {
7409 tree field;
7410 int i;
7411
7412 if (!TARGET_NEWABI)
7413 return 0;
7414
7415 if (TREE_CODE (valtype) != RECORD_TYPE)
7416 return 0;
7417
7418 i = 0;
7419 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7420 {
7421 if (TREE_CODE (field) != FIELD_DECL)
7422 continue;
7423
7424 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7425 return 0;
7426
7427 if (i == 2)
7428 return 0;
7429
7430 fields[i++] = field;
7431 }
7432 return i;
7433 }
7434
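
/* For example (the types are purely illustrative): under n32/n64,
   "struct { double d; float f; }" has two REAL_TYPE fields and so is
   returned in floating-point registers, whereas
   "struct { double d; int i; }" fails the REAL_TYPE check above and is
   returned in the integer registers.  */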
7435
7436 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
7437 a value in the most significant part of $2/$3 if:
7438
7439 - the target is big-endian;
7440
7441 - the value has a structure or union type (we generalize this to
7442 cover aggregates from other languages too); and
7443
7444 - the structure is not returned in floating-point registers. */
7445
7446 static bool
7447 mips_return_in_msb (tree valtype)
7448 {
7449 tree fields[2];
7450
7451 return (TARGET_NEWABI
7452 && TARGET_BIG_ENDIAN
7453 && AGGREGATE_TYPE_P (valtype)
7454 && mips_fpr_return_fields (valtype, fields) == 0);
7455 }
7456
7457
7458 /* Return a composite value in a pair of floating-point registers.
7459 MODE1 and OFFSET1 are the mode and byte offset for the first value,
7460 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
7461 complete value.
7462
7463 For n32 & n64, $f0 always holds the first value and $f2 the second.
7464 Otherwise the values are packed together as closely as possible. */
7465
7466 static rtx
7467 mips_return_fpr_pair (enum machine_mode mode,
7468 enum machine_mode mode1, HOST_WIDE_INT offset1,
7469 enum machine_mode mode2, HOST_WIDE_INT offset2)
7470 {
7471 int inc;
7472
7473 inc = (TARGET_NEWABI ? 2 : FP_INC);
7474 return gen_rtx_PARALLEL
7475 (mode,
7476 gen_rtvec (2,
7477 gen_rtx_EXPR_LIST (VOIDmode,
7478 gen_rtx_REG (mode1, FP_RETURN),
7479 GEN_INT (offset1)),
7480 gen_rtx_EXPR_LIST (VOIDmode,
7481 gen_rtx_REG (mode2, FP_RETURN + inc),
7482 GEN_INT (offset2))));
7483
7484 }
7485
7486
7487 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
7488 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
7489 VALTYPE is null and MODE is the mode of the return value. */
7490
7491 rtx
7492 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7493 enum machine_mode mode)
7494 {
7495 if (valtype)
7496 {
7497 tree fields[2];
7498 int unsignedp;
7499
7500 mode = TYPE_MODE (valtype);
7501 unsignedp = TYPE_UNSIGNED (valtype);
7502
7503 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
7504 true, we must promote the mode just as PROMOTE_MODE does. */
7505 mode = promote_mode (valtype, mode, &unsignedp, 1);
7506
7507 /* Handle structures whose fields are returned in $f0/$f2. */
7508 switch (mips_fpr_return_fields (valtype, fields))
7509 {
7510 case 1:
7511 return gen_rtx_REG (mode, FP_RETURN);
7512
7513 case 2:
7514 return mips_return_fpr_pair (mode,
7515 TYPE_MODE (TREE_TYPE (fields[0])),
7516 int_byte_position (fields[0]),
7517 TYPE_MODE (TREE_TYPE (fields[1])),
7518 int_byte_position (fields[1]));
7519 }
7520
7521 /* If a value is passed in the most significant part of a register, see
7522 whether we have to round the mode up to a whole number of words. */
7523 if (mips_return_in_msb (valtype))
7524 {
7525 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7526 if (size % UNITS_PER_WORD != 0)
7527 {
7528 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7529 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7530 }
7531 }
7532
7533 /* For EABI, the class of return register depends entirely on MODE.
7534 For example, "struct { some_type x; }" and "union { some_type x; }"
7535 are returned in the same way as a bare "some_type" would be.
7536 Other ABIs only use FPRs for scalar, complex or vector types. */
7537 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7538 return gen_rtx_REG (mode, GP_RETURN);
7539 }
7540
7541 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7542 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7543 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7544 return gen_rtx_REG (mode, FP_RETURN);
7545
7546 /* Handle long doubles for n32 & n64. */
7547 if (mode == TFmode)
7548 return mips_return_fpr_pair (mode,
7549 DImode, 0,
7550 DImode, GET_MODE_SIZE (mode) / 2);
7551
7552 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7553 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7554 return mips_return_fpr_pair (mode,
7555 GET_MODE_INNER (mode), 0,
7556 GET_MODE_INNER (mode),
7557 GET_MODE_SIZE (mode) / 2);
7558
7559 return gen_rtx_REG (mode, GP_RETURN);
7560 }
7561
7562 /* Return nonzero when an argument must be passed by reference. */
7563
7564 static bool
7565 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7566 enum machine_mode mode, tree type,
7567 bool named ATTRIBUTE_UNUSED)
7568 {
7569 if (mips_abi == ABI_EABI)
7570 {
7571 int size;
7572
7573 /* ??? How should SCmode be handled? */
7574 if (mode == DImode || mode == DFmode)
7575 return 0;
7576
7577 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
7578 return size == -1 || size > UNITS_PER_WORD;
7579 }
7580 else
7581 {
7582 /* If we have a variable-sized parameter, we have no choice. */
7583 return targetm.calls.must_pass_in_stack (mode, type);
7584 }
7585 }
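
/* Illustrative only: under EABI with 32-bit registers, a 16-byte
   structure argument (larger than UNITS_PER_WORD) is passed by
   reference, while DImode and DFmode arguments are explicitly kept
   pass-by-value above even though they are wider than a word.  */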
7586
7587 static bool
7588 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7589 enum machine_mode mode ATTRIBUTE_UNUSED,
7590 tree type ATTRIBUTE_UNUSED, bool named)
7591 {
7592 return mips_abi == ABI_EABI && named;
7593 }
7594
7595 /* Return true if registers of class CLASS cannot change from mode FROM
7596 to mode TO. */
7597
7598 bool
7599 mips_cannot_change_mode_class (enum machine_mode from,
7600 enum machine_mode to, enum reg_class class)
7601 {
7602 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7603 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7604 {
7605 if (TARGET_BIG_ENDIAN)
7606 {
7607 /* When a multi-word value is stored in paired floating-point
7608 registers, the first register always holds the low word.
7609 We therefore can't allow FPRs to change between single-word
7610 and multi-word modes. */
7611 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7612 return true;
7613 }
7614 else
7615 {
7616 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7617 in LO and HI, the high word always comes first. We therefore
7618 can't allow values stored in HI to change between single-word
7619 and multi-word modes.
7620 This rule applies to both the original HI/LO pair and the new
7621 DSP accumulators. */
7622 if (reg_classes_intersect_p (ACC_REGS, class))
7623 return true;
7624 }
7625 }
7626 /* Loading a 32-bit value into a 64-bit floating-point register
7627 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7628 We can't allow 64-bit float registers to change from SImode
7629 to a wider mode. */
7630 if (TARGET_FLOAT64
7631 && from == SImode
7632 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7633 && reg_classes_intersect_p (FP_REGS, class))
7634 return true;
7635 return false;
7636 }
7637
7638 /* Return true if X should not be moved directly into register $25.
7639 We need this because many versions of GAS will treat "la $25,foo" as
7640 part of a call sequence and so allow a global "foo" to be lazily bound. */
7641
7642 bool
7643 mips_dangerous_for_la25_p (rtx x)
7644 {
7645 HOST_WIDE_INT offset;
7646
7647 if (TARGET_EXPLICIT_RELOCS)
7648 return false;
7649
7650 mips_split_const (x, &x, &offset);
7651 return global_got_operand (x, VOIDmode);
7652 }
7653
7654 /* Implement PREFERRED_RELOAD_CLASS. */
7655
7656 enum reg_class
7657 mips_preferred_reload_class (rtx x, enum reg_class class)
7658 {
7659 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7660 return LEA_REGS;
7661
7662 if (TARGET_HARD_FLOAT
7663 && FLOAT_MODE_P (GET_MODE (x))
7664 && reg_class_subset_p (FP_REGS, class))
7665 return FP_REGS;
7666
7667 if (reg_class_subset_p (GR_REGS, class))
7668 class = GR_REGS;
7669
7670 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7671 class = M16_REGS;
7672
7673 return class;
7674 }
7675
7676 /* This function returns the register class required for a secondary
7677 register when copying between one of the registers in CLASS, and X,
7678 using MODE. If IN_P is nonzero, the copy is going from X to the
7679 register, otherwise the register is the source. A return value of
7680 NO_REGS means that no secondary register is required. */
7681
7682 enum reg_class
7683 mips_secondary_reload_class (enum reg_class class,
7684 enum machine_mode mode, rtx x, int in_p)
7685 {
7686 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7687 int regno = -1;
7688 int gp_reg_p;
7689
7690 if (REG_P (x) || GET_CODE (x) == SUBREG)
7691 regno = true_regnum (x);
7692
7693 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7694
7695 if (mips_dangerous_for_la25_p (x))
7696 {
7697 gr_regs = LEA_REGS;
7698 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7699 return gr_regs;
7700 }
7701
7702 /* Copying from HI or LO to anywhere other than a general register
7703 requires a general register.
7704 This rule applies to both the original HI/LO pair and the new
7705 DSP accumulators. */
7706 if (reg_class_subset_p (class, ACC_REGS))
7707 {
7708 if (TARGET_MIPS16 && in_p)
7709 {
7710 /* We can't really copy to HI or LO at all in mips16 mode. */
7711 return M16_REGS;
7712 }
7713 return gp_reg_p ? NO_REGS : gr_regs;
7714 }
7715 if (ACC_REG_P (regno))
7716 {
7717 if (TARGET_MIPS16 && ! in_p)
7718 {
7719 /* We can't really copy to HI or LO at all in mips16 mode. */
7720 return M16_REGS;
7721 }
7722 return class == gr_regs ? NO_REGS : gr_regs;
7723 }
7724
7725 /* We can only copy a value to a condition code register from a
7726 floating point register, and even then we require a scratch
7727 floating point register. We can only copy a value out of a
7728 condition code register into a general register. */
7729 if (class == ST_REGS)
7730 {
7731 if (in_p)
7732 return FP_REGS;
7733 return gp_reg_p ? NO_REGS : gr_regs;
7734 }
7735 if (ST_REG_P (regno))
7736 {
7737 if (! in_p)
7738 return FP_REGS;
7739 return class == gr_regs ? NO_REGS : gr_regs;
7740 }
7741
7742 if (class == FP_REGS)
7743 {
7744 if (MEM_P (x))
7745 {
7746 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7747 return NO_REGS;
7748 }
7749 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7750 {
7751 /* We can use the l.s and l.d macros to load floating-point
7752 constants. ??? For l.s, we could probably get better
7753 code by returning GR_REGS here. */
7754 return NO_REGS;
7755 }
7756 else if (gp_reg_p || x == CONST0_RTX (mode))
7757 {
7758 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7759 return NO_REGS;
7760 }
7761 else if (FP_REG_P (regno))
7762 {
7763 /* In this case we can use mov.s or mov.d. */
7764 return NO_REGS;
7765 }
7766 else
7767 {
7768 /* Otherwise, we need to reload through an integer register. */
7769 return gr_regs;
7770 }
7771 }
7772
7773 /* In mips16 mode, going between memory and anything but M16_REGS
7774 requires an M16_REG. */
7775 if (TARGET_MIPS16)
7776 {
7777 if (class != M16_REGS && class != M16_NA_REGS)
7778 {
7779 if (gp_reg_p)
7780 return NO_REGS;
7781 return M16_REGS;
7782 }
7783 if (! gp_reg_p)
7784 {
7785 if (class == M16_REGS || class == M16_NA_REGS)
7786 return NO_REGS;
7787 return M16_REGS;
7788 }
7789 }
7790
7791 return NO_REGS;
7792 }
7793
7794 /* Implement CLASS_MAX_NREGS.
7795
7796 Usually all registers are word-sized. The only supported exception
7797 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7798 registers. A word-based calculation is correct even in that case,
7799 since -msingle-float disallows multi-FPR values.
7800
7801 The FP status registers are an exception to this rule. They are always
7802 4 bytes wide as they only hold condition code modes, and CCmode is always
7803 considered to be 4 bytes wide. */
7804
7805 int
7806 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7807 enum machine_mode mode)
7808 {
7809 if (class == ST_REGS)
7810 return (GET_MODE_SIZE (mode) + 3) / 4;
7811 else
7812 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7813 }
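
/* Worked examples (illustrative): CCmode occupies 4 bytes, so ST_REGS
   always needs a single register; DFmode needs two registers with
   32-bit words but only one with -mgp64, matching the word-based
   calculation above.  */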
7814
7815 static bool
7816 mips_valid_pointer_mode (enum machine_mode mode)
7817 {
7818 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7819 }
7820
7821 /* Target hook for vector_mode_supported_p. */
7822
7823 static bool
7824 mips_vector_mode_supported_p (enum machine_mode mode)
7825 {
7826 switch (mode)
7827 {
7828 case V2SFmode:
7829 return TARGET_PAIRED_SINGLE_FLOAT;
7830
7831 case V2HImode:
7832 case V4QImode:
7833 return TARGET_DSP;
7834
7835 default:
7836 return false;
7837 }
7838 }
7839
7840 /* If we can access small data directly (using gp-relative relocation
7841 operators) return the small data pointer, otherwise return null.
7842
7843 For each mips16 function which refers to GP relative symbols, we
7844 use a pseudo register, initialized at the start of the function, to
7845 hold the $gp value. */
7846
7847 static rtx
7848 mips16_gp_pseudo_reg (void)
7849 {
7850 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7851 {
7852 rtx unspec;
7853 rtx insn, scan;
7854
7855 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7856
7857 /* We want to initialize this to a value which gcc will believe
7858 is constant. */
7859 start_sequence ();
7860 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7861 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7862 gen_rtx_CONST (Pmode, unspec));
7863 insn = get_insns ();
7864 end_sequence ();
7865
7866 push_topmost_sequence ();
7867 /* We need to emit the initialization after the FUNCTION_BEG
7868 note, so that it will be integrated. */
7869 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7870 if (NOTE_P (scan)
7871 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7872 break;
7873 if (scan == NULL_RTX)
7874 scan = get_insns ();
7875 insn = emit_insn_after (insn, scan);
7876 pop_topmost_sequence ();
7877 }
7878
7879 return cfun->machine->mips16_gp_pseudo_rtx;
7880 }
7881
7882 /* Write out code to move floating point arguments in or out of
7883 general registers. Output the instructions to FILE. FP_CODE is
7884 the code describing which arguments are present (see the comment at
7885 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7886 we are copying from the floating point registers. */
7887
7888 static void
7889 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7890 {
7891 const char *s;
7892 int gparg, fparg;
7893 unsigned int f;
7894
7895 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7896 gcc_assert (TARGET_OLDABI);
7897
7898 if (from_fp_p)
7899 s = "mfc1";
7900 else
7901 s = "mtc1";
7902 gparg = GP_ARG_FIRST;
7903 fparg = FP_ARG_FIRST;
7904 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7905 {
7906 if ((f & 3) == 1)
7907 {
7908 if ((fparg & 1) != 0)
7909 ++fparg;
7910 fprintf (file, "\t%s\t%s,%s\n", s,
7911 reg_names[gparg], reg_names[fparg]);
7912 }
7913 else if ((f & 3) == 2)
7914 {
7915 if (TARGET_64BIT)
7916 fprintf (file, "\td%s\t%s,%s\n", s,
7917 reg_names[gparg], reg_names[fparg]);
7918 else
7919 {
7920 if ((fparg & 1) != 0)
7921 ++fparg;
7922 if (TARGET_BIG_ENDIAN)
7923 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7924 reg_names[gparg], reg_names[fparg + 1], s,
7925 reg_names[gparg + 1], reg_names[fparg]);
7926 else
7927 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7928 reg_names[gparg], reg_names[fparg], s,
7929 reg_names[gparg + 1], reg_names[fparg + 1]);
7930 ++gparg;
7931 ++fparg;
7932 }
7933 }
7934 else
7935 gcc_unreachable ();
7936
7937 ++gparg;
7938 ++fparg;
7939 }
7940 }
7941
7942 /* Build a mips16 function stub. This is used for functions which
7943 take arguments in the floating point registers. It is 32 bit code
7944 that moves the floating point args into the general registers, and
7945 then jumps to the 16 bit code. */
7946
7947 static void
7948 build_mips16_function_stub (FILE *file)
7949 {
7950 const char *fnname;
7951 char *secname, *stubname;
7952 tree stubid, stubdecl;
7953 int need_comma;
7954 unsigned int f;
7955
7956 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7957 secname = (char *) alloca (strlen (fnname) + 20);
7958 sprintf (secname, ".mips16.fn.%s", fnname);
7959 stubname = (char *) alloca (strlen (fnname) + 20);
7960 sprintf (stubname, "__fn_stub_%s", fnname);
7961 stubid = get_identifier (stubname);
7962 stubdecl = build_decl (FUNCTION_DECL, stubid,
7963 build_function_type (void_type_node, NULL_TREE));
7964 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7965
7966 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7967 need_comma = 0;
7968 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7969 {
7970 fprintf (file, "%s%s",
7971 need_comma ? ", " : "",
7972 (f & 3) == 1 ? "float" : "double");
7973 need_comma = 1;
7974 }
7975 fprintf (file, ")\n");
7976
7977 fprintf (file, "\t.set\tnomips16\n");
7978 switch_to_section (function_section (stubdecl));
7979 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7980
7981 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7982 within a .ent, and we cannot emit another .ent. */
7983 if (!FUNCTION_NAME_ALREADY_DECLARED)
7984 {
7985 fputs ("\t.ent\t", file);
7986 assemble_name (file, stubname);
7987 fputs ("\n", file);
7988 }
7989
7990 assemble_name (file, stubname);
7991 fputs (":\n", file);
7992
7993 /* We don't want the assembler to insert any nops here. */
7994 fprintf (file, "\t.set\tnoreorder\n");
7995
7996 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7997
7998 fprintf (asm_out_file, "\t.set\tnoat\n");
7999 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8000 assemble_name (file, fnname);
8001 fprintf (file, "\n");
8002 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8003 fprintf (asm_out_file, "\t.set\tat\n");
8004
8005 /* Unfortunately, we can't fill the jump delay slot. We can't fill
8006 it with one of the mfc1 instructions, because the result is not
8007 available for one instruction, so if the very first instruction
8008 in the function refers to the register, it will see the wrong
8009 value. */
8010 fprintf (file, "\tnop\n");
8011
8012 fprintf (file, "\t.set\treorder\n");
8013
8014 if (!FUNCTION_NAME_ALREADY_DECLARED)
8015 {
8016 fputs ("\t.end\t", file);
8017 assemble_name (file, stubname);
8018 fputs ("\n", file);
8019 }
8020
8021 fprintf (file, "\t.set\tmips16\n");
8022
8023 switch_to_section (function_section (current_function_decl));
8024 }
8025
8026 /* We keep a list of functions for which we have already built stubs
8027 in build_mips16_call_stub. */
8028
8029 struct mips16_stub
8030 {
8031 struct mips16_stub *next;
8032 char *name;
8033 int fpret;
8034 };
8035
8036 static struct mips16_stub *mips16_stubs;
8037
8038 /* Build a call stub for a mips16 call. A stub is needed if we are
8039 passing any floating point values which should go into the floating
8040 point registers. If we are, and the call turns out to be to a 32
8041 bit function, the stub will be used to move the values into the
8042 floating point registers before calling the 32 bit function. The
8043 linker will magically adjust the function call to either the 16 bit
8044 function or the 32 bit stub, depending upon where the called function
8045 is actually defined.
8046
8047 Similarly, we need a stub if the return value might come back in a
8048 floating point register.
8049
8050 RETVAL is the location of the return value, or null if this is
8051 a call rather than a call_value. FN is the address of the
8052 function and ARG_SIZE is the size of the arguments. FP_CODE
8053 is the code built by function_arg. This function returns a nonzero
8054 value if it builds the call instruction itself. */
8055
8056 int
8057 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
8058 {
8059 int fpret;
8060 const char *fnname;
8061 char *secname, *stubname;
8062 struct mips16_stub *l;
8063 tree stubid, stubdecl;
8064 int need_comma;
8065 unsigned int f;
8066
8067 /* We don't need to do anything if we aren't in mips16 mode, or if
8068 we were invoked with the -msoft-float option. */
8069 if (! TARGET_MIPS16 || ! mips16_hard_float)
8070 return 0;
8071
8072 /* Figure out whether the value might come back in a floating point
8073 register. */
8074 fpret = (retval != 0
8075 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
8076 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
8077
8078 /* We don't need to do anything if there were no floating point
8079 arguments and the value will not be returned in a floating point
8080 register. */
8081 if (fp_code == 0 && ! fpret)
8082 return 0;
8083
8084 /* We don't need to do anything if this is a call to a special
8085 mips16 support function. */
8086 if (GET_CODE (fn) == SYMBOL_REF
8087 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
8088 return 0;
8089
8090 /* This code will only work for the o32 and o64 ABIs. The other ABIs
8091 require more sophisticated support. */
8092 gcc_assert (TARGET_OLDABI);
8093
8094 /* We can only handle SFmode and DFmode floating point return
8095 values. */
8096 if (fpret)
8097 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8098
8099 /* If we're calling via a function pointer, then we must always call
8100 via a stub. There are magic stubs provided in libgcc.a for each
8101 of the required cases. Each of them expects the function address
8102 to arrive in register $2. */
8103
8104 if (GET_CODE (fn) != SYMBOL_REF)
8105 {
8106 char buf[30];
8107 tree id;
8108 rtx stub_fn, insn;
8109
8110 /* ??? If this code is modified to support other ABI's, we need
8111 to handle PARALLEL return values here. */
8112
8113 sprintf (buf, "__mips16_call_stub_%s%d",
8114 (fpret
8115 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8116 : ""),
8117 fp_code);
8118 id = get_identifier (buf);
8119 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8120
8121 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8122
8123 if (retval == NULL_RTX)
8124 insn = gen_call_internal (stub_fn, arg_size);
8125 else
8126 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8127 insn = emit_call_insn (insn);
8128
8129 /* Put the register usage information on the CALL. */
8130 CALL_INSN_FUNCTION_USAGE (insn) =
8131 gen_rtx_EXPR_LIST (VOIDmode,
8132 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8133 CALL_INSN_FUNCTION_USAGE (insn));
8134
8135 /* If we are handling a floating point return value, we need to
8136 save $18 in the function prologue. Putting a note on the
8137 call will mean that regs_ever_live[$18] will be true if the
8138 call is not eliminated, and we can check that in the prologue
8139 code. */
8140 if (fpret)
8141 CALL_INSN_FUNCTION_USAGE (insn) =
8142 gen_rtx_EXPR_LIST (VOIDmode,
8143 gen_rtx_USE (VOIDmode,
8144 gen_rtx_REG (word_mode, 18)),
8145 CALL_INSN_FUNCTION_USAGE (insn));
8146
8147 /* Return 1 to tell the caller that we've generated the call
8148 insn. */
8149 return 1;
8150 }
8151
8152 /* We know the function we are going to call. If we have already
8153 built a stub, we don't need to do anything further. */
8154
8155 fnname = XSTR (fn, 0);
8156 for (l = mips16_stubs; l != NULL; l = l->next)
8157 if (strcmp (l->name, fnname) == 0)
8158 break;
8159
8160 if (l == NULL)
8161 {
8162 /* Build a special purpose stub. When the linker sees a
8163 function call in mips16 code, it will check where the target
8164 is defined. If the target is 32 bit code, the linker will
8165 search for the section defined here. It can tell which
8166 symbol this section is associated with by looking at the
8167 relocation information (the name is unreliable, since this
8168 might be a static function). If such a section is found, the
8169 linker will redirect the call to the start of the magic
8170 section.
8171
8172 If the function does not return a floating point value, the
8173 special stub section is named
8174 .mips16.call.FNNAME
8175
8176 If the function does return a floating point value, the stub
8177 section is named
8178 .mips16.call.fp.FNNAME
8179 */
8180
8181 secname = (char *) alloca (strlen (fnname) + 40);
8182 sprintf (secname, ".mips16.call.%s%s",
8183 fpret ? "fp." : "",
8184 fnname);
8185 stubname = (char *) alloca (strlen (fnname) + 20);
8186 sprintf (stubname, "__call_stub_%s%s",
8187 fpret ? "fp_" : "",
8188 fnname);
8189 stubid = get_identifier (stubname);
8190 stubdecl = build_decl (FUNCTION_DECL, stubid,
8191 build_function_type (void_type_node, NULL_TREE));
8192 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8193
8194 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8195 (fpret
8196 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
8197 : ""),
8198 fnname);
8199 need_comma = 0;
8200 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8201 {
8202 fprintf (asm_out_file, "%s%s",
8203 need_comma ? ", " : "",
8204 (f & 3) == 1 ? "float" : "double");
8205 need_comma = 1;
8206 }
8207 fprintf (asm_out_file, ")\n");
8208
8209 fprintf (asm_out_file, "\t.set\tnomips16\n");
8210 assemble_start_function (stubdecl, stubname);
8211
8212 if (!FUNCTION_NAME_ALREADY_DECLARED)
8213 {
8214 fputs ("\t.ent\t", asm_out_file);
8215 assemble_name (asm_out_file, stubname);
8216 fputs ("\n", asm_out_file);
8217
8218 assemble_name (asm_out_file, stubname);
8219 fputs (":\n", asm_out_file);
8220 }
8221
8222 /* We build the stub code by hand. That's the only way we can
8223 do it, since we can't generate 32 bit code during a 16 bit
8224 compilation. */
8225
8226 /* We don't want the assembler to insert any nops here. */
8227 fprintf (asm_out_file, "\t.set\tnoreorder\n");
8228
8229 mips16_fp_args (asm_out_file, fp_code, 0);
8230
8231 if (! fpret)
8232 {
8233 fprintf (asm_out_file, "\t.set\tnoat\n");
8234 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8235 fnname);
8236 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8237 fprintf (asm_out_file, "\t.set\tat\n");
8238 /* Unfortunately, we can't fill the jump delay slot. We
8239 can't fill it with one of the mtc1 instructions, because the
8240 result is not available for one instruction, so if the
8241 very first instruction in the function refers to the
8242 register, it will see the wrong value. */
8243 fprintf (asm_out_file, "\tnop\n");
8244 }
8245 else
8246 {
8247 fprintf (asm_out_file, "\tmove\t%s,%s\n",
8248 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8249 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8250 /* As above, we can't fill the delay slot. */
8251 fprintf (asm_out_file, "\tnop\n");
8252 if (GET_MODE (retval) == SFmode)
8253 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8254 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8255 else
8256 {
8257 if (TARGET_BIG_ENDIAN)
8258 {
8259 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8260 reg_names[GP_REG_FIRST + 2],
8261 reg_names[FP_REG_FIRST + 1]);
8262 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8263 reg_names[GP_REG_FIRST + 3],
8264 reg_names[FP_REG_FIRST + 0]);
8265 }
8266 else
8267 {
8268 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8269 reg_names[GP_REG_FIRST + 2],
8270 reg_names[FP_REG_FIRST + 0]);
8271 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8272 reg_names[GP_REG_FIRST + 3],
8273 reg_names[FP_REG_FIRST + 1]);
8274 }
8275 }
8276 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8277 /* As above, we can't fill the delay slot. */
8278 fprintf (asm_out_file, "\tnop\n");
8279 }
8280
8281 fprintf (asm_out_file, "\t.set\treorder\n");
8282
8283 #ifdef ASM_DECLARE_FUNCTION_SIZE
8284 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8285 #endif
8286
8287 if (!FUNCTION_NAME_ALREADY_DECLARED)
8288 {
8289 fputs ("\t.end\t", asm_out_file);
8290 assemble_name (asm_out_file, stubname);
8291 fputs ("\n", asm_out_file);
8292 }
8293
8294 fprintf (asm_out_file, "\t.set\tmips16\n");
8295
8296 /* Record this stub. */
8297 l = (struct mips16_stub *) xmalloc (sizeof *l);
8298 l->name = xstrdup (fnname);
8299 l->fpret = fpret;
8300 l->next = mips16_stubs;
8301 mips16_stubs = l;
8302 }
8303
8304 /* If we expect a floating point return value, but we've built a
8305 stub which does not expect one, then we're in trouble. We can't
8306 use the existing stub, because it won't handle the floating point
8307 value. We can't build a new stub, because the linker won't know
8308 which stub to use for the various calls in this object file.
8309 Fortunately, this case is illegal, since it means that a function
8310 was declared in two different ways in a single compilation. */
8311 if (fpret && ! l->fpret)
8312 error ("cannot handle inconsistent calls to %qs", fnname);
8313
8314 /* If we are calling a stub which handles a floating point return
8315 value, we need to arrange to save $18 in the prologue. We do
8316 this by marking the function call as using the register. The
8317 prologue will later see that it is used, and emit code to save
8318 it. */
8319
8320 if (l->fpret)
8321 {
8322 rtx insn;
8323
8324 if (retval == NULL_RTX)
8325 insn = gen_call_internal (fn, arg_size);
8326 else
8327 insn = gen_call_value_internal (retval, fn, arg_size);
8328 insn = emit_call_insn (insn);
8329
8330 CALL_INSN_FUNCTION_USAGE (insn) =
8331 gen_rtx_EXPR_LIST (VOIDmode,
8332 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8333 CALL_INSN_FUNCTION_USAGE (insn));
8334
8335 /* Return 1 to tell the caller that we've generated the call
8336 insn. */
8337 return 1;
8338 }
8339
8340 /* Return 0 to let the caller generate the call insn. */
8341 return 0;
8342 }
8343
8344 /* An entry in the mips16 constant pool. VALUE is the pool constant,
8345 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
8346
8347 struct mips16_constant {
8348 struct mips16_constant *next;
8349 rtx value;
8350 rtx label;
8351 enum machine_mode mode;
8352 };
8353
8354 /* Information about an incomplete mips16 constant pool. FIRST is the
8355 first constant, HIGHEST_ADDRESS is the highest address that the first
8356 byte of the pool can have, and INSN_ADDRESS is the current instruction
8357 address. */
8358
8359 struct mips16_constant_pool {
8360 struct mips16_constant *first;
8361 int highest_address;
8362 int insn_address;
8363 };
8364
8365 /* Add constant VALUE to POOL and return its label. MODE is the
8366 value's mode (used for CONST_INTs, etc.). */
8367
8368 static rtx
8369 add_constant (struct mips16_constant_pool *pool,
8370 rtx value, enum machine_mode mode)
8371 {
8372 struct mips16_constant **p, *c;
8373 bool first_of_size_p;
8374
8375 /* See whether the constant is already in the pool. If so, return the
8376 existing label, otherwise leave P pointing to the place where the
8377 constant should be added.
8378
8379 Keep the pool sorted in increasing order of mode size so that we can
8380 reduce the number of alignments needed. */
8381 first_of_size_p = true;
8382 for (p = &pool->first; *p != 0; p = &(*p)->next)
8383 {
8384 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8385 return (*p)->label;
8386 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8387 break;
8388 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8389 first_of_size_p = false;
8390 }
8391
8392 /* In the worst case, the constant needed by the earliest instruction
8393 will end up at the end of the pool. The entire pool must then be
8394 accessible from that instruction.
8395
8396 When adding the first constant, set the pool's highest address to
8397 the address of the first out-of-range byte. Adjust this address
8398 downwards each time a new constant is added. */
8399 if (pool->first == 0)
8400 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8401 is the address of the instruction with the lowest two bits clear.
8402 The base PC value for ld has the lowest three bits clear. Assume
8403 the worst case here. */
8404 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8405 pool->highest_address -= GET_MODE_SIZE (mode);
8406 if (first_of_size_p)
8407 /* Take into account the worst possible padding due to alignment. */
8408 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
8409
8410 /* Create a new entry. */
8411 c = (struct mips16_constant *) xmalloc (sizeof *c);
8412 c->value = value;
8413 c->mode = mode;
8414 c->label = gen_label_rtx ();
8415 c->next = *p;
8416 *p = c;
8417
8418 return c->label;
8419 }
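
/* Illustrative arithmetic for the limit maintained above, assuming a
   32-bit target (UNITS_PER_WORD == 4): if the first reference is at
   instruction address I, the initial limit is I - 2 + 0x8000.  Adding
   an 8-byte DFmode constant then subtracts 8 for the constant itself
   and a further 7 for worst-case alignment padding, so the pool must
   start no later than I + 0x7fef.  */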
8420
8421 /* Output constant VALUE after instruction INSN and return the last
8422 instruction emitted. MODE is the mode of the constant. */
8423
8424 static rtx
8425 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8426 {
8427 switch (GET_MODE_CLASS (mode))
8428 {
8429 case MODE_INT:
8430 {
8431 rtx size = GEN_INT (GET_MODE_SIZE (mode));
8432 return emit_insn_after (gen_consttable_int (value, size), insn);
8433 }
8434
8435 case MODE_FLOAT:
8436 return emit_insn_after (gen_consttable_float (value), insn);
8437
8438 case MODE_VECTOR_FLOAT:
8439 case MODE_VECTOR_INT:
8440 {
8441 int i;
8442 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8443 insn = dump_constants_1 (GET_MODE_INNER (mode),
8444 CONST_VECTOR_ELT (value, i), insn);
8445 return insn;
8446 }
8447
8448 default:
8449 gcc_unreachable ();
8450 }
8451 }
8452
8453
8454 /* Dump out the constants in CONSTANTS after INSN. */
8455
8456 static void
8457 dump_constants (struct mips16_constant *constants, rtx insn)
8458 {
8459 struct mips16_constant *c, *next;
8460 int align;
8461
8462 align = 0;
8463 for (c = constants; c != NULL; c = next)
8464 {
8465 /* If necessary, increase the alignment of PC. */
8466 if (align < GET_MODE_SIZE (c->mode))
8467 {
8468 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8469 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8470 }
8471 align = GET_MODE_SIZE (c->mode);
8472
8473 insn = emit_label_after (c->label, insn);
8474 insn = dump_constants_1 (c->mode, c->value, insn);
8475
8476 next = c->next;
8477 free (c);
8478 }
8479
8480 emit_barrier_after (insn);
8481 }
8482
8483 /* Return the length of instruction INSN. */
8484
8485 static int
8486 mips16_insn_length (rtx insn)
8487 {
8488 if (JUMP_P (insn))
8489 {
8490 rtx body = PATTERN (insn);
8491 if (GET_CODE (body) == ADDR_VEC)
8492 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8493 if (GET_CODE (body) == ADDR_DIFF_VEC)
8494 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8495 }
8496 return get_attr_length (insn);
8497 }
8498
8499 /* Rewrite *X so that constant pool references refer to the constant's
8500 label instead. DATA points to the constant pool structure. */
8501
8502 static int
8503 mips16_rewrite_pool_refs (rtx *x, void *data)
8504 {
8505 struct mips16_constant_pool *pool = data;
8506 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8507 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8508 get_pool_constant (*x),
8509 get_pool_mode (*x)));
8510 return 0;
8511 }
8512
8513 /* Build MIPS16 constant pools. */
8514
8515 static void
8516 mips16_lay_out_constants (void)
8517 {
8518 struct mips16_constant_pool pool;
8519 rtx insn, barrier;
8520
8521 barrier = 0;
8522 memset (&pool, 0, sizeof (pool));
8523 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8524 {
8525 /* Rewrite constant pool references in INSN. */
8526 if (INSN_P (insn))
8527 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8528
8529 pool.insn_address += mips16_insn_length (insn);
8530
8531 if (pool.first != NULL)
8532 {
8533 /* If there are no natural barriers between the first user of
8534 the pool and the highest acceptable address, we'll need to
8535 create a new instruction to jump around the constant pool.
8536 In the worst case, this instruction will be 4 bytes long.
8537
8538 If it's too late to do this transformation after INSN,
8539 do it immediately before INSN. */
8540 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8541 {
8542 rtx label, jump;
8543
8544 label = gen_label_rtx ();
8545
8546 jump = emit_jump_insn_before (gen_jump (label), insn);
8547 JUMP_LABEL (jump) = label;
8548 LABEL_NUSES (label) = 1;
8549 barrier = emit_barrier_after (jump);
8550
8551 emit_label_after (label, barrier);
8552 pool.insn_address += 4;
8553 }
8554
8555 /* See whether the constant pool is now out of range of the first
8556 user. If so, output the constants after the previous barrier.
8557 Note that any instructions between BARRIER and INSN (inclusive)
8558 will use negative offsets to refer to the pool. */
8559 if (pool.insn_address > pool.highest_address)
8560 {
8561 dump_constants (pool.first, barrier);
8562 pool.first = NULL;
8563 barrier = 0;
8564 }
8565 else if (BARRIER_P (insn))
8566 barrier = insn;
8567 }
8568 }
8569 dump_constants (pool.first, get_last_insn ());
8570 }
8571
8572 /* A temporary variable used by for_each_rtx callbacks, etc. */
8573 static rtx mips_sim_insn;
8574
8575 /* A structure representing the state of the processor pipeline.
8576 Used by the mips_sim_* family of functions. */
8577 struct mips_sim {
8578 /* The maximum number of instructions that can be issued in a cycle.
8579 (Caches mips_issue_rate.) */
8580 unsigned int issue_rate;
8581
8582 /* The current simulation time. */
8583 unsigned int time;
8584
8585 /* How many more instructions can be issued in the current cycle. */
8586 unsigned int insns_left;
8587
8588 /* LAST_SET[X].INSN is the last instruction to set register X.
8589 LAST_SET[X].TIME is the time at which that instruction was issued.
8590 INSN is null if no instruction has yet set register X. */
8591 struct {
8592 rtx insn;
8593 unsigned int time;
8594 } last_set[FIRST_PSEUDO_REGISTER];
8595
8596 /* The pipeline's current DFA state. */
8597 state_t dfa_state;
8598 };
8599
8600 /* Reset STATE to the initial simulation state. */
8601
8602 static void
8603 mips_sim_reset (struct mips_sim *state)
8604 {
8605 state->time = 0;
8606 state->insns_left = state->issue_rate;
8607 memset (&state->last_set, 0, sizeof (state->last_set));
8608 state_reset (state->dfa_state);
8609 }
8610
8611 /* Initialize STATE before its first use. DFA_STATE points to an
8612 allocated but uninitialized DFA state. */
8613
8614 static void
8615 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8616 {
8617 state->issue_rate = mips_issue_rate ();
8618 state->dfa_state = dfa_state;
8619 mips_sim_reset (state);
8620 }
8621
8622 /* Advance STATE by one clock cycle. */
8623
8624 static void
8625 mips_sim_next_cycle (struct mips_sim *state)
8626 {
8627 state->time++;
8628 state->insns_left = state->issue_rate;
8629 state_transition (state->dfa_state, 0);
8630 }
8631
8632 /* Advance simulation state STATE until instruction INSN can read
8633 register REG. */
8634
8635 static void
8636 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8637 {
8638 unsigned int i;
8639
8640 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8641 if (state->last_set[REGNO (reg) + i].insn != 0)
8642 {
8643 unsigned int t;
8644
8645 t = state->last_set[REGNO (reg) + i].time;
8646 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8647 while (state->time < t)
8648 mips_sim_next_cycle (state);
8649 }
8650 }
8651
8652 /* A for_each_rtx callback. If *X is a register, advance simulation state
8653 DATA until mips_sim_insn can read the register's value. */
8654
8655 static int
8656 mips_sim_wait_regs_2 (rtx *x, void *data)
8657 {
8658 if (REG_P (*x))
8659 mips_sim_wait_reg (data, mips_sim_insn, *x);
8660 return 0;
8661 }
8662
8663 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8664
8665 static void
8666 mips_sim_wait_regs_1 (rtx *x, void *data)
8667 {
8668 for_each_rtx (x, mips_sim_wait_regs_2, data);
8669 }
8670
8671 /* Advance simulation state STATE until all of INSN's register
8672 dependencies are satisfied. */
8673
8674 static void
8675 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8676 {
8677 mips_sim_insn = insn;
8678 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8679 }
8680
8681 /* Advance simulation state STATE until the units required by
8682 instruction INSN are available. */
8683
8684 static void
8685 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8686 {
8687 state_t tmp_state;
8688
8689 tmp_state = alloca (state_size ());
8690 while (state->insns_left == 0
8691 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8692 state_transition (tmp_state, insn) >= 0))
8693 mips_sim_next_cycle (state);
8694 }
8695
8696 /* Advance simulation state STATE until INSN is ready to issue. */
8697
8698 static void
8699 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8700 {
8701 mips_sim_wait_regs (state, insn);
8702 mips_sim_wait_units (state, insn);
8703 }
8704
8705 /* mips_sim_insn has just set X. Update the LAST_SET array
8706 in simulation state DATA. */
8707
8708 static void
8709 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8710 {
8711 struct mips_sim *state;
8712 unsigned int i;
8713
8714 state = data;
8715 if (REG_P (x))
8716 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8717 {
8718 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8719 state->last_set[REGNO (x) + i].time = state->time;
8720 }
8721 }
8722
8723 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8724 can issue immediately (i.e., that mips_sim_wait_insn has already
8725 been called). */
8726
8727 static void
8728 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8729 {
8730 state_transition (state->dfa_state, insn);
8731 state->insns_left--;
8732
8733 mips_sim_insn = insn;
8734 note_stores (PATTERN (insn), mips_sim_record_set, state);
8735 }
8736
8737 /* Simulate issuing a NOP in state STATE. */
8738
8739 static void
8740 mips_sim_issue_nop (struct mips_sim *state)
8741 {
8742 if (state->insns_left == 0)
8743 mips_sim_next_cycle (state);
8744 state->insns_left--;
8745 }
8746
8747 /* Update simulation state STATE so that it's ready to accept the instruction
8748 after INSN. INSN should be part of the main rtl chain, not a member of a
8749 SEQUENCE. */
8750
8751 static void
8752 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8753 {
8754 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8755 if (JUMP_P (insn))
8756 mips_sim_issue_nop (state);
8757
8758 switch (GET_CODE (SEQ_BEGIN (insn)))
8759 {
8760 case CODE_LABEL:
8761 case CALL_INSN:
8762 /* We can't predict the processor state after a call or label. */
8763 mips_sim_reset (state);
8764 break;
8765
8766 case JUMP_INSN:
8767 /* The delay slots of branch likely instructions are only executed
8768 when the branch is taken. Therefore, if the caller has simulated
8769 the delay slot instruction, STATE does not really reflect the state
8770 of the pipeline for the instruction after the delay slot. Also,
8771 branch likely instructions tend to incur a penalty when not taken,
8772 so there will probably be an extra delay between the branch and
8773 the instruction after the delay slot. */
8774 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8775 mips_sim_reset (state);
8776 break;
8777
8778 default:
8779 break;
8780 }
8781 }
8782
8783 /* The VR4130 pipeline issues aligned pairs of instructions together,
8784 but it stalls the second instruction if it depends on the first.
8785 In order to cut down the amount of logic required, this dependence
8786 check is not based on a full instruction decode. Instead, any non-SPECIAL
8787 instruction is assumed to modify the register specified by bits 20-16
8788 (which is usually the "rt" field).
8789
8790 In beq, beql, bne and bnel instructions, the rt field is actually an
8791 input, so we can end up with a false dependence between the branch
8792 and its delay slot. If this situation occurs in instruction INSN,
8793 try to avoid it by swapping rs and rt. */
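/* For example (hypothetical registers): in "beq $4,$5,label" the
   hardware treats $5 (the rt field) as if it were written, so a delay
   slot instruction that reads $5 would appear to depend on the branch.
   Swapping the operands to "beq $5,$4,label" keeps the comparison
   equivalent while avoiding the false dependence.  */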
8794
8795 static void
8796 vr4130_avoid_branch_rt_conflict (rtx insn)
8797 {
8798 rtx first, second;
8799
8800 first = SEQ_BEGIN (insn);
8801 second = SEQ_END (insn);
8802 if (JUMP_P (first)
8803 && NONJUMP_INSN_P (second)
8804 && GET_CODE (PATTERN (first)) == SET
8805 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8806 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8807 {
8808 /* Check for the right kind of condition. */
8809 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8810 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8811 && REG_P (XEXP (cond, 0))
8812 && REG_P (XEXP (cond, 1))
8813 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8814 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8815 {
8816 /* SECOND mentions the rt register but not the rs register. */
8817 rtx tmp = XEXP (cond, 0);
8818 XEXP (cond, 0) = XEXP (cond, 1);
8819 XEXP (cond, 1) = tmp;
8820 }
8821 }
8822 }
8823
8824 /* Implement -mvr4130-align. Go through each basic block and simulate the
8825 processor pipeline. If we find that a pair of instructions could execute
8826 in parallel, and the first of those instructions is not 8-byte aligned,
8827 insert a nop to make it aligned. */
8828
8829 static void
8830 vr4130_align_insns (void)
8831 {
8832 struct mips_sim state;
8833 rtx insn, subinsn, last, last2, next;
8834 bool aligned_p;
8835
8836 dfa_start ();
8837
8838 /* LAST is the last instruction before INSN to have a nonzero length.
8839 LAST2 is the last such instruction before LAST. */
8840 last = 0;
8841 last2 = 0;
8842
8843 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8844 aligned_p = true;
8845
8846 mips_sim_init (&state, alloca (state_size ()));
8847 for (insn = get_insns (); insn != 0; insn = next)
8848 {
8849 unsigned int length;
8850
8851 next = NEXT_INSN (insn);
8852
8853 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8854 This isn't really related to the alignment pass, but we do it on
8855 the fly to avoid a separate instruction walk. */
8856 vr4130_avoid_branch_rt_conflict (insn);
8857
8858 if (USEFUL_INSN_P (insn))
8859 FOR_EACH_SUBINSN (subinsn, insn)
8860 {
8861 mips_sim_wait_insn (&state, subinsn);
8862
8863 /* If we want this instruction to issue in parallel with the
8864 previous one, make sure that the previous instruction is
8865 aligned. There are several reasons why this isn't worthwhile
8866 when the second instruction is a call:
8867
8868 - Calls are less likely to be performance critical.
8869 - There's a good chance that the delay slot can execute
8870 in parallel with the call.
8871 - The return address would then be unaligned.
8872
8873 In general, if we're going to insert a nop between instructions
8874 X and Y, it's better to insert it immediately after X. That
8875 way, if the nop makes Y aligned, it will also align any labels
8876 between X and Y. */
8877 if (state.insns_left != state.issue_rate
8878 && !CALL_P (subinsn))
8879 {
8880 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8881 {
8882 /* SUBINSN is the first instruction in INSN and INSN is
8883 aligned. We want to align the previous instruction
8884 instead, so insert a nop between LAST2 and LAST.
8885
8886 Note that LAST could be either a single instruction
8887 or a branch with a delay slot. In the latter case,
8888 LAST, like INSN, is already aligned, but the delay
8889 slot must have some extra delay that stops it from
8890 issuing at the same time as the branch. We therefore
8891 insert a nop before the branch in order to align its
8892 delay slot. */
8893 emit_insn_after (gen_nop (), last2);
8894 aligned_p = false;
8895 }
8896 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8897 {
8898 /* SUBINSN is the delay slot of INSN, but INSN is
8899 currently unaligned. Insert a nop between
8900 LAST and INSN to align it. */
8901 emit_insn_after (gen_nop (), last);
8902 aligned_p = true;
8903 }
8904 }
8905 mips_sim_issue_insn (&state, subinsn);
8906 }
8907 mips_sim_finish_insn (&state, insn);
8908
8909 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8910 length = get_attr_length (insn);
8911 if (length > 0)
8912 {
8913 /* If the instruction is an asm statement or multi-instruction
8914 mips.md pattern, the length is only an estimate. Insert an
8915 8-byte alignment after it so that the following instructions
8916 can be handled correctly. */
8917 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8918 && (recog_memoized (insn) < 0 || length >= 8))
8919 {
8920 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8921 next = NEXT_INSN (next);
8922 mips_sim_next_cycle (&state);
8923 aligned_p = true;
8924 }
8925 else if (length & 4)
8926 aligned_p = !aligned_p;
8927 last2 = last;
8928 last = insn;
8929 }
8930
8931 /* See whether INSN is an aligned label. */
8932 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8933 aligned_p = true;
8934 }
8935 dfa_finish ();
8936 }
8937
8938 /* Subroutine of mips_reorg. If there is a hazard between INSN
8939 and a previous instruction, avoid it by inserting nops after
8940 instruction AFTER.
8941
8942 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8943 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8944 before using the value of that register. *HILO_DELAY counts the
8945 number of instructions since the last hilo hazard (that is,
8946 the number of instructions since the last mflo or mfhi).
8947
8948 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8949 for the next instruction.
8950
8951 LO_REG is an rtx for the LO register, used in dependence checking. */
8952
8953 static void
8954 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8955 rtx *delayed_reg, rtx lo_reg)
8956 {
8957 rtx pattern, set;
8958 int nops, ninsns;
8959
8960 if (!INSN_P (insn))
8961 return;
8962
8963 pattern = PATTERN (insn);
8964
8965 /* Do not put the whole function in .set noreorder if it contains
8966 an asm statement. We don't know whether there will be hazards
8967 between the asm statement and the gcc-generated code. */
8968 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8969 cfun->machine->all_noreorder_p = false;
8970
8971 /* Ignore zero-length instructions (barriers and the like). */
8972 ninsns = get_attr_length (insn) / 4;
8973 if (ninsns == 0)
8974 return;
8975
8976 /* Work out how many nops are needed. Note that we only care about
8977 registers that are explicitly mentioned in the instruction's pattern.
8978 It doesn't matter that calls use the argument registers or that they
8979 clobber hi and lo. */
8980 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8981 nops = 2 - *hilo_delay;
8982 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8983 nops = 1;
8984 else
8985 nops = 0;
8986
8987 /* Insert the nops between this instruction and the previous one.
8988 Each new nop takes us further from the last hilo hazard. */
8989 *hilo_delay += nops;
8990 while (nops-- > 0)
8991 emit_insn_after (gen_hazard_nop (), after);
8992
8993 /* Set up the state for the next instruction. */
8994 *hilo_delay += ninsns;
8995 *delayed_reg = 0;
8996 if (INSN_CODE (insn) >= 0)
8997 switch (get_attr_hazard (insn))
8998 {
8999 case HAZARD_NONE:
9000 break;
9001
9002 case HAZARD_HILO:
9003 *hilo_delay = 0;
9004 break;
9005
9006 case HAZARD_DELAY:
9007 set = single_set (insn);
9008 gcc_assert (set != 0);
9009 *delayed_reg = SET_DEST (set);
9010 break;
9011 }
9012 }
9013
9014
9015 /* Go through the instruction stream and insert nops where necessary.
9016 See if the whole function can then be put into .set noreorder &
9017 .set nomacro. */
9018
9019 static void
9020 mips_avoid_hazards (void)
9021 {
9022 rtx insn, last_insn, lo_reg, delayed_reg;
9023 int hilo_delay, i;
9024
9025 /* Force all instructions to be split into their final form. */
9026 split_all_insns_noflow ();
9027
9028 /* Recalculate instruction lengths without taking nops into account. */
9029 cfun->machine->ignore_hazard_length_p = true;
9030 shorten_branches (get_insns ());
9031
9032 cfun->machine->all_noreorder_p = true;
9033
9034 /* Profiled functions can't be all noreorder because the profiler
9035 support uses assembler macros. */
9036 if (current_function_profile)
9037 cfun->machine->all_noreorder_p = false;
9038
9039 /* Code compiled with -mfix-vr4120 can't be all noreorder because
9040 we rely on the assembler to work around some errata. */
9041 if (TARGET_FIX_VR4120)
9042 cfun->machine->all_noreorder_p = false;
9043
9044 /* The same is true for -mfix-vr4130 if we might generate mflo or
9045 mfhi instructions. Note that we avoid using mflo and mfhi if
9046 the VR4130 macc and dmacc instructions are available instead;
9047 see the *mfhilo_{si,di}_macc patterns. */
9048 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
9049 cfun->machine->all_noreorder_p = false;
9050
9051 last_insn = 0;
9052 hilo_delay = 2;
9053 delayed_reg = 0;
9054 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
9055
9056 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
9057 if (INSN_P (insn))
9058 {
9059 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
9060 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9061 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
9062 &hilo_delay, &delayed_reg, lo_reg);
9063 else
9064 mips_avoid_hazard (last_insn, insn, &hilo_delay,
9065 &delayed_reg, lo_reg);
9066
9067 last_insn = insn;
9068 }
9069 }
9070
9071
9072 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
9073
9074 static void
9075 mips_reorg (void)
9076 {
9077 if (TARGET_MIPS16)
9078 mips16_lay_out_constants ();
9079 else if (TARGET_EXPLICIT_RELOCS)
9080 {
9081 if (mips_flag_delayed_branch)
9082 dbr_schedule (get_insns ());
9083 mips_avoid_hazards ();
9084 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
9085 vr4130_align_insns ();
9086 }
9087 }
9088
9089 /* This function does three things:
9090
9091 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9092 - Register the mips16 hardware floating point stubs.
9093 - Register the gofast functions if selected using --enable-gofast. */
9094
9095 #include "config/gofast.h"
9096
9097 static void
9098 mips_init_libfuncs (void)
9099 {
9100 if (TARGET_FIX_VR4120)
9101 {
9102 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9103 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9104 }
9105
9106 if (TARGET_MIPS16 && mips16_hard_float)
9107 {
9108 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9109 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9110 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9111 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9112
9113 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9114 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9115 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9116 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9117 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9118 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9119
9120 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9121 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9122
9123 if (TARGET_DOUBLE_FLOAT)
9124 {
9125 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9126 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9127 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9128 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9129
9130 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9131 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9132 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9133 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9134 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9135 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9136
9137 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9138 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9139
9140 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9141 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9142 }
9143 }
9144 else
9145 gofast_maybe_init_libfuncs ();
9146 }
9147
9148 /* Return a number assessing the cost of moving a register in class
9149 FROM to class TO. The classes are expressed using the enumeration
9150 values such as `GENERAL_REGS'. A value of 2 is the default; other
9151 values are interpreted relative to that.
9152
9153 It is not required that the cost always equal 2 when FROM is the
9154 same as TO; on some machines it is expensive to move between
9155 registers if they are not general registers.
9156
9157 If reload sees an insn consisting of a single `set' between two
9158 hard registers, and if `REGISTER_MOVE_COST' applied to their
9159 classes returns a value of 2, reload does not check to ensure that
9160 the constraints of the insn are met. Setting a cost of other than
9161 2 will allow reload to verify that the constraints are met. You
9162 should do this if the `movM' pattern's constraints do not allow
9163 such copying.
9164
9165 ??? We make the cost of moving from HI/LO into general
9166 registers the same as for one of moving general registers to
9167 HI/LO for TARGET_MIPS16 in order to prevent allocating a
9168 pseudo to HI/LO. This might hurt optimizations, though; it
9169 isn't clear if it is wise. And it might not work in all cases. We
9170 could solve the DImode LO reg problem by using a multiply, just
9171 like reload_{in,out}si. We could solve the SImode/HImode HI reg
9172 problem by using divide instructions. divu puts the remainder in
9173 the HI reg, so doing a divide by -1 will move the value in the HI
9174 reg for all values except -1. We could handle that case by using a
9175 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
9176 a compare/branch to test the input value to see which instruction
9177 we need to use. This gets pretty messy, but it is feasible. */
9178
9179 int
9180 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9181 enum reg_class to, enum reg_class from)
9182 {
9183 if (from == M16_REGS && GR_REG_CLASS_P (to))
9184 return 2;
9185 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9186 return 2;
9187 else if (GR_REG_CLASS_P (from))
9188 {
9189 if (to == M16_REGS)
9190 return 2;
9191 else if (to == M16_NA_REGS)
9192 return 2;
9193 else if (GR_REG_CLASS_P (to))
9194 {
9195 if (TARGET_MIPS16)
9196 return 4;
9197 else
9198 return 2;
9199 }
9200 else if (to == FP_REGS)
9201 return 4;
9202 else if (reg_class_subset_p (to, ACC_REGS))
9203 {
9204 if (TARGET_MIPS16)
9205 return 12;
9206 else
9207 return 6;
9208 }
9209 else if (COP_REG_CLASS_P (to))
9210 {
9211 return 5;
9212 }
9213 }
9214 else if (from == FP_REGS)
9215 {
9216 if (GR_REG_CLASS_P (to))
9217 return 4;
9218 else if (to == FP_REGS)
9219 return 2;
9220 else if (to == ST_REGS)
9221 return 8;
9222 }
9223 else if (reg_class_subset_p (from, ACC_REGS))
9224 {
9225 if (GR_REG_CLASS_P (to))
9226 {
9227 if (TARGET_MIPS16)
9228 return 12;
9229 else
9230 return 6;
9231 }
9232 }
9233 else if (from == ST_REGS && GR_REG_CLASS_P (to))
9234 return 4;
9235 else if (COP_REG_CLASS_P (from))
9236 {
9237 return 5;
9238 }
9239
9240 /* Fall through.
9241 ??? What cases are these? Shouldn't we return 2 here? */
9242
9243 return 12;
9244 }
9245
9246 /* Return the length of INSN. LENGTH is the initial length computed by
9247 attributes in the machine-description file. */
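/* For example (illustrative only): a call gets 4 bytes added for its
   unfilled delay slot, an instruction with a HAZARD_HILO hazard gets 8
   bytes added for two nops, and on MIPS16 the resulting length is then
   halved.  */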
9248
9249 int
9250 mips_adjust_insn_length (rtx insn, int length)
9251 {
9252 /* An unconditional jump has an unfilled delay slot if it is not part
9253 of a sequence. A conditional jump normally has a delay slot, but
9254 does not on MIPS16. */
9255 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9256 length += 4;
9257
9258 /* See how many nops might be needed to avoid hardware hazards. */
9259 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9260 switch (get_attr_hazard (insn))
9261 {
9262 case HAZARD_NONE:
9263 break;
9264
9265 case HAZARD_DELAY:
9266 length += 4;
9267 break;
9268
9269 case HAZARD_HILO:
9270 length += 8;
9271 break;
9272 }
9273
9274 /* All MIPS16 instructions are a measly two bytes. */
9275 if (TARGET_MIPS16)
9276 length /= 2;
9277
9278 return length;
9279 }
9280
9281
9282 /* Return an asm sequence to start a noat block and load the address
9283 of a label into $1. */
9284
9285 const char *
9286 mips_output_load_label (void)
9287 {
9288 if (TARGET_EXPLICIT_RELOCS)
9289 switch (mips_abi)
9290 {
9291 case ABI_N32:
9292 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9293
9294 case ABI_64:
9295 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9296
9297 default:
9298 if (ISA_HAS_LOAD_DELAY)
9299 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9300 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9301 }
9302 else
9303 {
9304 if (Pmode == DImode)
9305 return "%[dla\t%@,%0";
9306 else
9307 return "%[la\t%@,%0";
9308 }
9309 }
9310
9311 /* Return the assembly code for INSN, which has the operands given by
9312 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9313 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9314 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9315 version of BRANCH_IF_TRUE. */
9316
9317 const char *
9318 mips_output_conditional_branch (rtx insn, rtx *operands,
9319 const char *branch_if_true,
9320 const char *branch_if_false)
9321 {
9322 unsigned int length;
9323 rtx taken, not_taken;
9324
9325 length = get_attr_length (insn);
9326 if (length <= 8)
9327 {
9328 /* Just a simple conditional branch. */
9329 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9330 return branch_if_true;
9331 }
9332
9333 /* Generate a reversed branch around a direct jump. This fallback does
9334 not use branch-likely instructions. */
9335 mips_branch_likely = false;
9336 not_taken = gen_label_rtx ();
9337 taken = operands[1];
9338
9339 /* Generate the reversed branch to NOT_TAKEN. */
9340 operands[1] = not_taken;
9341 output_asm_insn (branch_if_false, operands);
9342
9343 /* If INSN has a delay slot, we must provide delay slots for both the
9344 branch to NOT_TAKEN and the conditional jump. We must also ensure
9345 that INSN's delay slot is executed in the appropriate cases. */
9346 if (final_sequence)
9347 {
9348 /* This first delay slot will always be executed, so use INSN's
9349 delay slot if it is not annulled. */
9350 if (!INSN_ANNULLED_BRANCH_P (insn))
9351 {
9352 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9353 asm_out_file, optimize, 1, NULL);
9354 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9355 }
9356 else
9357 output_asm_insn ("nop", 0);
9358 fprintf (asm_out_file, "\n");
9359 }
9360
9361 /* Output the unconditional branch to TAKEN. */
9362 if (length <= 16)
9363 output_asm_insn ("j\t%0%/", &taken);
9364 else
9365 {
9366 output_asm_insn (mips_output_load_label (), &taken);
9367 output_asm_insn ("jr\t%@%]%/", 0);
9368 }
9369
9370 /* Now deal with its delay slot; see above. */
9371 if (final_sequence)
9372 {
9373 /* This delay slot will only be executed if the branch is taken.
9374 Use INSN's delay slot if it is annulled. */
9375 if (INSN_ANNULLED_BRANCH_P (insn))
9376 {
9377 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9378 asm_out_file, optimize, 1, NULL);
9379 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9380 }
9381 else
9382 output_asm_insn ("nop", 0);
9383 fprintf (asm_out_file, "\n");
9384 }
9385
9386 /* Output NOT_TAKEN. */
9387 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9388 CODE_LABEL_NUMBER (not_taken));
9389 return "";
9390 }
9391
9392 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9393 if some ordered condition is true. The condition is given by
9394 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9395 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9396 its second is always zero. */
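/* For example: an unsigned "x > 0" test is equivalent to "x != 0", so
   GTU is emitted as "bne %2,%.,%1", while GEU against zero is always
   true and becomes the unconditional "beq %.,%.,%1".  (Illustrative;
   derived from the cases handled below.)  */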
9397
9398 const char *
9399 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9400 {
9401 const char *branch[2];
9402
9403 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9404 Make BRANCH[0] branch on the inverse condition. */
9405 switch (GET_CODE (operands[0]))
9406 {
9407 /* These cases are equivalent to comparisons against zero. */
9408 case LEU:
9409 inverted_p = !inverted_p;
9410 /* Fall through. */
9411 case GTU:
9412 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9413 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9414 break;
9415
9416 /* These cases are always true or always false. */
9417 case LTU:
9418 inverted_p = !inverted_p;
9419 /* Fall through. */
9420 case GEU:
9421 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9422 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9423 break;
9424
9425 default:
9426 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9427 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9428 break;
9429 }
9430 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9431 }
9432
9433 /* Used to output div or ddiv instruction DIVISION, which has the operands
9434 given by OPERANDS. Add in a divide-by-zero check if needed.
9435
9436 When working around R4000 and R4400 errata, we need to make sure that
9437 the division is not immediately followed by a shift[1][2]. We also
9438 need to stop the division from being put into a branch delay slot[3].
9439 The easiest way to avoid both problems is to add a nop after the
9440 division. When a divide-by-zero check is needed, this nop can be
9441 used to fill the branch delay slot.
9442
9443 [1] If a double-word or a variable shift executes immediately
9444 after starting an integer division, the shift may give an
9445 incorrect result. See quotations of errata #16 and #28 from
9446 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9447 in mips.md for details.
9448
9449 [2] A similar bug to [1] exists for all revisions of the
9450 R4000 and the R4400 when run in an MC configuration.
9451 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9452
9453 "19. In this following sequence:
9454
9455 ddiv (or ddivu or div or divu)
9456 dsll32 (or dsrl32, dsra32)
9457
9458 if an MPT stall occurs, while the divide is slipping the cpu
9459 pipeline, then the following double shift would end up with an
9460 incorrect result.
9461
9462 Workaround: The compiler needs to avoid generating any
9463 sequence with divide followed by extended double shift."
9464
9465 This erratum is also present in "MIPS R4400MC Errata, Processor
9466 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9467 & 3.0" as errata #10 and #4, respectively.
9468
9469 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9470 (also valid for MIPS R4000MC processors):
9471
9472 "52. R4000SC: This bug does not apply for the R4000PC.
9473
9474 There are two flavors of this bug:
9475
9476 1) If the instruction just after divide takes an RF exception
9477 (tlb-refill, tlb-invalid) and gets an instruction cache
9478 miss (both primary and secondary) and the line which is
9479 currently in secondary cache at this index had the first
9480 data word, where the bits 5..2 are set, then R4000 would
9481 get a wrong result for the div.
9482
9483 ##1
9484 nop
9485 div r8, r9
9486 ------------------- # end-of page. -tlb-refill
9487 nop
9488 ##2
9489 nop
9490 div r8, r9
9491 ------------------- # end-of page. -tlb-invalid
9492 nop
9493
9494 2) If the divide is in the taken branch delay slot, where the
9495 target takes RF exception and gets an I-cache miss for the
9496 exception vector or where I-cache miss occurs for the
9497 target address, under the above mentioned scenarios, the
9498 div would get wrong results.
9499
9500 ##1
9501 j r2 # to next page mapped or unmapped
9502 div r8,r9 # this bug would be there as long
9503 # as there is an ICache miss and
9504 nop # the "data pattern" is present
9505
9506 ##2
9507 beq r0, r0, NextPage # to Next page
9508 div r8,r9
9509 nop
9510
9511 This bug is present for div, divu, ddiv, and ddivu
9512 instructions.
9513
9514 Workaround: For item 1), OS could make sure that the next page
9515 after the divide instruction is also mapped. For item 2), the
9516 compiler could make sure that the divide instruction is not in
9517 the branch delay slot."
9518
9519 These processors have PRId values of 0x00004220 and 0x00004300 for
9520 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
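/* A worked example of the output (illustrative): with TARGET_FIX_R4000,
   TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS all in effect, the
   code below emits the division itself, then a "nop" to keep shifts and
   delay slots away from it, and finally returns "teq\t%2,%.,7" as the
   divide-by-zero check.  */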
9521
9522 const char *
9523 mips_output_division (const char *division, rtx *operands)
9524 {
9525 const char *s;
9526
9527 s = division;
9528 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9529 {
9530 output_asm_insn (s, operands);
9531 s = "nop";
9532 }
9533 if (TARGET_CHECK_ZERO_DIV)
9534 {
9535 if (TARGET_MIPS16)
9536 {
9537 output_asm_insn (s, operands);
9538 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9539 }
9540 else if (GENERATE_DIVIDE_TRAPS)
9541 {
9542 output_asm_insn (s, operands);
9543 s = "teq\t%2,%.,7";
9544 }
9545 else
9546 {
9547 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9548 output_asm_insn (s, operands);
9549 s = "break\t7%)\n1:";
9550 }
9551 }
9552 return s;
9553 }
9554
9555 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9556 with a final "000" replaced by "k". Ignore case.
9557
9558 Note: this function is shared between GCC and GAS. */
9559
9560 static bool
9561 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9562 {
9563 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9564 given++, canonical++;
9565
9566 return ((*given == 0 && *canonical == 0)
9567 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9568 }
9569
9570
9571 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9572 CPU name. We've traditionally allowed a lot of variation here.
9573
9574 Note: this function is shared between GCC and GAS. */
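/* Some illustrative matches implied by the rules below: "r4k" matches
   the canonical name "r4000" (final "000" -> "k"), and "4100" or
   "r4100" match "vr4100" once the "vr" prefix is skipped.  */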
9575
9576 static bool
9577 mips_matching_cpu_name_p (const char *canonical, const char *given)
9578 {
9579 /* First see if the name matches exactly, or with a final "000"
9580 turned into "k". */
9581 if (mips_strict_matching_cpu_name_p (canonical, given))
9582 return true;
9583
9584 /* If not, try comparing based on numerical designation alone.
9585 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9586 if (TOLOWER (*given) == 'r')
9587 given++;
9588 if (!ISDIGIT (*given))
9589 return false;
9590
9591 /* Skip over some well-known prefixes in the canonical name,
9592 hoping to find a number there too. */
9593 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9594 canonical += 2;
9595 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9596 canonical += 2;
9597 else if (TOLOWER (canonical[0]) == 'r')
9598 canonical += 1;
9599
9600 return mips_strict_matching_cpu_name_p (canonical, given);
9601 }
9602
9603
9604 /* Return the mips_cpu_info entry for the processor or ISA given
9605 by CPU_STRING. Return null if the string isn't recognized.
9606
9607 A similar function exists in GAS. */
9608
9609 static const struct mips_cpu_info *
9610 mips_parse_cpu (const char *cpu_string)
9611 {
9612 const struct mips_cpu_info *p;
9613 const char *s;
9614
9615 /* In the past, we allowed upper-case CPU names, but it doesn't
9616 work well with the multilib machinery. */
9617 for (s = cpu_string; *s != 0; s++)
9618 if (ISUPPER (*s))
9619 {
9620 warning (0, "the cpu name must be lower case");
9621 break;
9622 }
9623
9624 /* 'from-abi' selects the most compatible architecture for the given
9625 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9626 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9627 version. Look first at the -mgp options, if given, otherwise base
9628 the choice on MASK_64BIT in TARGET_DEFAULT. */
9629 if (strcasecmp (cpu_string, "from-abi") == 0)
9630 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9631 : ABI_NEEDS_64BIT_REGS ? 3
9632 : (TARGET_64BIT ? 3 : 1));
9633
9634 /* 'default' has traditionally been a no-op. Probably not very useful. */
9635 if (strcasecmp (cpu_string, "default") == 0)
9636 return 0;
9637
9638 for (p = mips_cpu_info_table; p->name != 0; p++)
9639 if (mips_matching_cpu_name_p (p->name, cpu_string))
9640 return p;
9641
9642 return 0;
9643 }
9644
9645
9646 /* Return the processor associated with the given ISA level, or null
9647 if the ISA isn't valid. */
9648
9649 static const struct mips_cpu_info *
9650 mips_cpu_info_from_isa (int isa)
9651 {
9652 const struct mips_cpu_info *p;
9653
9654 for (p = mips_cpu_info_table; p->name != 0; p++)
9655 if (p->isa == isa)
9656 return p;
9657
9658 return 0;
9659 }
9660
9661 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9662 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9663 they only hold condition code modes, and CCmode is always considered to
9664 be 4 bytes wide. All other registers are word sized. */
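/* For example: a CCmode value in a status register counts as one
   register, a DFmode value needs two FP registers when UNITS_PER_FPREG
   is 4 but only one when it is 8, and a DImode value needs two word
   registers on a 32-bit target.  (Illustrative modes only.)  */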
9665
9666 unsigned int
9667 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9668 {
9669 if (ST_REG_P (regno))
9670 return ((GET_MODE_SIZE (mode) + 3) / 4);
9671 else if (! FP_REG_P (regno))
9672 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9673 else
9674 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9675 }
9676
9677 /* Implement TARGET_RETURN_IN_MEMORY. Under the old ABIs (32 and O64),
9678 all BLKmode objects are returned in memory. Under the new ABIs (N32
9679 and 64-bit), small structures are returned in a register.
9680 Objects with varying size must still be returned in memory, of
9681 course. */
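/* For example, under the newer ABIs with 64-bit registers
   (UNITS_PER_WORD == 8), a 16-byte structure fits in two registers and
   is returned there, while a 17-byte one goes to memory.  (Illustrative
   sizes only.)  */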
9682
9683 static bool
9684 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9685 {
9686 if (TARGET_OLDABI)
9687 return (TYPE_MODE (type) == BLKmode);
9688 else
9689 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9690 || (int_size_in_bytes (type) == -1));
9691 }
9692
9693 static bool
9694 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9695 {
9696 return !TARGET_OLDABI;
9697 }
9698
9699 /* Return true if INSN is a multiply-add or multiply-subtract
9700 instruction and PREV assigns to the accumulator operand. */
9701
9702 bool
9703 mips_linked_madd_p (rtx prev, rtx insn)
9704 {
9705 rtx x;
9706
9707 x = single_set (insn);
9708 if (x == 0)
9709 return false;
9710
9711 x = SET_SRC (x);
9712
9713 if (GET_CODE (x) == PLUS
9714 && GET_CODE (XEXP (x, 0)) == MULT
9715 && reg_set_p (XEXP (x, 1), prev))
9716 return true;
9717
9718 if (GET_CODE (x) == MINUS
9719 && GET_CODE (XEXP (x, 1)) == MULT
9720 && reg_set_p (XEXP (x, 0), prev))
9721 return true;
9722
9723 return false;
9724 }
9725
9726 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9727 that may clobber hi or lo. */
9728
9729 static rtx mips_macc_chains_last_hilo;
9730
9731 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9732 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9733
9734 static void
9735 mips_macc_chains_record (rtx insn)
9736 {
9737 if (get_attr_may_clobber_hilo (insn))
9738 mips_macc_chains_last_hilo = insn;
9739 }
9740
9741 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9742 has NREADY elements, looking for a multiply-add or multiply-subtract
9743 instruction that is cumulative with mips_macc_chains_last_hilo.
9744 If there is one, promote it ahead of anything else that might
9745 clobber hi or lo. */
9746
9747 static void
9748 mips_macc_chains_reorder (rtx *ready, int nready)
9749 {
9750 int i, j;
9751
9752 if (mips_macc_chains_last_hilo != 0)
9753 for (i = nready - 1; i >= 0; i--)
9754 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9755 {
9756 for (j = nready - 1; j > i; j--)
9757 if (recog_memoized (ready[j]) >= 0
9758 && get_attr_may_clobber_hilo (ready[j]))
9759 {
9760 mips_promote_ready (ready, i, j);
9761 break;
9762 }
9763 break;
9764 }
9765 }
9766
9767 /* The last instruction to be scheduled. */
9768
9769 static rtx vr4130_last_insn;
9770
9771 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9772 points to an rtx that is initially an instruction. Nullify the rtx
9773 if the instruction uses the value of register X. */
9774
9775 static void
9776 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9777 {
9778 rtx *insn_ptr = data;
9779 if (REG_P (x)
9780 && *insn_ptr != 0
9781 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9782 *insn_ptr = 0;
9783 }
9784
9785 /* Return true if there is true register dependence between vr4130_last_insn
9786 and INSN. */
9787
9788 static bool
9789 vr4130_true_reg_dependence_p (rtx insn)
9790 {
9791 note_stores (PATTERN (vr4130_last_insn),
9792 vr4130_true_reg_dependence_p_1, &insn);
9793 return insn == 0;
9794 }
9795
9796 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9797 the ready queue and that INSN2 is the instruction after it, return
9798 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9799 in which INSN1 and INSN2 can probably issue in parallel, but for
9800 which (INSN2, INSN1) should be less sensitive to instruction
9801 alignment than (INSN1, INSN2). See 4130.md for more details. */
9802
9803 static bool
9804 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9805 {
9806 rtx dep;
9807
9808 /* Check for the following case:
9809
9810 1) there is some other instruction X with an anti dependence on INSN1;
9811 2) X has a higher priority than INSN2; and
9812 3) X is an arithmetic instruction (and thus has no unit restrictions).
9813
9814 If INSN1 is the last instruction blocking X, it would be better to
9815 choose (INSN1, X) over (INSN2, INSN1). */
9816 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9817 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9818 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9819 && recog_memoized (XEXP (dep, 0)) >= 0
9820 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9821 return false;
9822
9823 if (vr4130_last_insn != 0
9824 && recog_memoized (insn1) >= 0
9825 && recog_memoized (insn2) >= 0)
9826 {
9827 /* See whether INSN1 and INSN2 use different execution units,
9828 or if they are both ALU-type instructions. If so, they can
9829 probably execute in parallel. */
9830 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9831 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9832 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9833 {
9834 /* If only one of the instructions has a dependence on
9835 vr4130_last_insn, prefer to schedule the other one first. */
9836 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9837 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9838 if (dep1 != dep2)
9839 return dep1;
9840
9841 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9842 is not an ALU-type instruction and if INSN1 uses the same
9843 execution unit. (Note that if this condition holds, we already
9844 know that INSN2 uses a different execution unit.) */
9845 if (class1 != VR4130_CLASS_ALU
9846 && recog_memoized (vr4130_last_insn) >= 0
9847 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9848 return true;
9849 }
9850 }
9851 return false;
9852 }
9853
9854 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9855 queue with at least two instructions. Swap the first two if
9856 vr4130_swap_insns_p says that it could be worthwhile. */
9857
9858 static void
9859 vr4130_reorder (rtx *ready, int nready)
9860 {
9861 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9862 mips_promote_ready (ready, nready - 2, nready - 1);
9863 }
9864
9865 /* Remove the instruction at index LOWER from ready queue READY and
9866 reinsert it in front of the instruction at index HIGHER. LOWER must
9867 be <= HIGHER. */
9868
9869 static void
9870 mips_promote_ready (rtx *ready, int lower, int higher)
9871 {
9872 rtx new_head;
9873 int i;
9874
9875 new_head = ready[lower];
9876 for (i = lower; i < higher; i++)
9877 ready[i] = ready[i + 1];
9878 ready[i] = new_head;
9879 }
9880
9881 /* Implement TARGET_SCHED_REORDER. */
9882
9883 static int
9884 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9885 rtx *ready, int *nreadyp, int cycle)
9886 {
9887 if (!reload_completed && TUNE_MACC_CHAINS)
9888 {
9889 if (cycle == 0)
9890 mips_macc_chains_last_hilo = 0;
9891 if (*nreadyp > 0)
9892 mips_macc_chains_reorder (ready, *nreadyp);
9893 }
9894 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9895 {
9896 if (cycle == 0)
9897 vr4130_last_insn = 0;
9898 if (*nreadyp > 1)
9899 vr4130_reorder (ready, *nreadyp);
9900 }
9901 return mips_issue_rate ();
9902 }
9903
9904 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9905
9906 static int
9907 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9908 rtx insn, int more)
9909 {
9910 switch (GET_CODE (PATTERN (insn)))
9911 {
9912 case USE:
9913 case CLOBBER:
9914 /* Don't count USEs and CLOBBERs against the issue rate. */
9915 break;
9916
9917 default:
9918 more--;
9919 if (!reload_completed && TUNE_MACC_CHAINS)
9920 mips_macc_chains_record (insn);
9921 vr4130_last_insn = insn;
9922 break;
9923 }
9924 return more;
9925 }
9926
9927 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9928 dependencies have no cost. */
9929
9930 static int
9931 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9932 rtx dep ATTRIBUTE_UNUSED, int cost)
9933 {
9934 if (REG_NOTE_KIND (link) != 0)
9935 return 0;
9936 return cost;
9937 }
9938
9939 /* Return the number of instructions that can be issued per cycle. */
9940
9941 static int
9942 mips_issue_rate (void)
9943 {
9944 switch (mips_tune)
9945 {
9946 case PROCESSOR_R4130:
9947 case PROCESSOR_R5400:
9948 case PROCESSOR_R5500:
9949 case PROCESSOR_R7000:
9950 case PROCESSOR_R9000:
9951 return 2;
9952
9953 case PROCESSOR_SB1:
9954 case PROCESSOR_SB1A:
9955 /* This is actually 4, but we get better performance if we claim 3.
9956 This is partly because of unwanted speculative code motion with the
9957 larger number, and partly because in most common cases we can't
9958 reach the theoretical max of 4. */
9959 return 3;
9960
9961 default:
9962 return 1;
9963 }
9964 }
9965
9966 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9967 be as wide as the scheduling freedom in the DFA. */
9968
9969 static int
9970 mips_multipass_dfa_lookahead (void)
9971 {
9972 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9973 if (TUNE_SB1)
9974 return 4;
9975
9976 return 0;
9977 }
9978
9979 /* Implements a store data bypass check. We need this because the cprestore
9980 pattern has type "store" but is defined using an UNSPEC. This UNSPEC causes the
9981 default routine to abort. We just return false for that case. */
9982 /* ??? Should try to give a better result here than assuming false. */
9983
9984 int
9985 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9986 {
9987 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9988 return false;
9989
9990 return ! store_data_bypass_p (out_insn, in_insn);
9991 }
9992
9993 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9994 return the first operand of the associated "pref" or "prefx" insn. */
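/* For example (illustrative operand values): a load hint (WRITE == 0)
   with LOCALITY 0 yields cookie 4 (load_streamed), a store hint
   (WRITE == 1) with LOCALITY 1 or 2 yields 1 (store), and a load with
   LOCALITY 3 yields 6 (load_retained).  */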
9995
9996 rtx
9997 mips_prefetch_cookie (rtx write, rtx locality)
9998 {
9999 /* store_streamed / load_streamed. */
10000 if (INTVAL (locality) <= 0)
10001 return GEN_INT (INTVAL (write) + 4);
10002
10003 /* store / load. */
10004 if (INTVAL (locality) <= 2)
10005 return write;
10006
10007 /* store_retained / load_retained. */
10008 return GEN_INT (INTVAL (write) + 6);
10009 }
10010
10011 /* MIPS builtin function support. */
10012
10013 struct builtin_description
10014 {
10015 /* The code of the main .md file instruction. See mips_builtin_type
10016 for more information. */
10017 enum insn_code icode;
10018
10019 /* The floating-point comparison code to use with ICODE, if any. */
10020 enum mips_fp_condition cond;
10021
10022 /* The name of the builtin function. */
10023 const char *name;
10024
10025 /* Specifies how the function should be expanded. */
10026 enum mips_builtin_type builtin_type;
10027
10028 /* The function's prototype. */
10029 enum mips_function_type function_type;
10030
10031 /* The target flags required for this function. */
10032 int target_flags;
10033 };
10034
10035 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
10036 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
10037 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10038 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10039 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
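/* For illustration, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   MASK_PAIRED_SINGLE_FLOAT) expands to the initializer

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }

   i.e. the builtin is named after the instruction and expands directly
   to that instruction's insn pattern.  */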
10040
10041 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10042 TARGET_FLAGS. */
10043 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
10044 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
10045 "__builtin_mips_" #INSN "_" #COND "_s", \
10046 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
10047 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
10048 "__builtin_mips_" #INSN "_" #COND "_d", \
10049 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10050
10051 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10052 The lower and upper forms require TARGET_FLAGS while the any and all
10053 forms require MASK_MIPS3D. */
10054 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
10055 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10056 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
10057 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10058 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10059 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
10060 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10061 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10062 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
10063 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
10064 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10065 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
10066 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10067
10068 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10069 require MASK_MIPS3D. */
10070 #define CMP_4S_BUILTINS(INSN, COND) \
10071 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10072 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
10073 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10074 MASK_MIPS3D }, \
10075 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10076 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
10077 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10078 MASK_MIPS3D }
10079
10080 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10081 instruction requires TARGET_FLAGS. */
10082 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
10083 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10084 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
10085 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10086 TARGET_FLAGS }, \
10087 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10088 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
10089 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10090 TARGET_FLAGS }
10091
10092 /* Define all the builtins related to c.cond.fmt condition COND. */
10093 #define CMP_BUILTINS(COND) \
10094 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10095 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
10096 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
10097 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10098 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10099 CMP_4S_BUILTINS (c, COND), \
10100 CMP_4S_BUILTINS (cabs, COND)
10101
10102 static const struct builtin_description mips_bdesc[] =
10103 {
10104 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10105 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10106 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10107 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10108 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10109 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10110 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10111 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10112
10113 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10114 MASK_PAIRED_SINGLE_FLOAT),
10115 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10116 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10117 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10118 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10119
10120 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10121 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10122 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10123 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10124 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10125 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10126
10127 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10128 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10129 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10130 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10131 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10132 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10133
10134 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10135 };
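
/* Illustrative use of the paired-single builtins above; a sketch of
   hypothetical user code compiled with -mpaired-single, not part of
   this file.  The v2sf typedef is an assumption about how the caller
   declares the vector type:

     typedef float v2sf __attribute__ ((vector_size (8)));

     v2sf
     interleave_lower (v2sf a, v2sf b)
     {
       return __builtin_mips_pll_ps (a, b);
     }
*/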
10136
10137 /* Builtin functions for the SB-1 processor. */
10138
10139 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10140
10141 static const struct builtin_description sb1_bdesc[] =
10142 {
10143 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10144 };
10145
10146 /* Builtin functions for DSP ASE. */
10147
10148 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10149 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10150 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10151 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10152
10153 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10154 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10155 builtin_description fields. */
10156 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10157 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10158 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10159
10160 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10161 branch instruction. TARGET_FLAGS is a builtin_description field. */
10162 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10163 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10164 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
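
/* For reference, BPOSGE_BUILTIN (32, MASK_DSP) in the table below
   expands to the entry:

     { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge32",
       MIPS_BUILTIN_BPOSGE32, MIPS_SI_FTYPE_VOID, MASK_DSP }  */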
10165
10166 static const struct builtin_description dsp_bdesc[] =
10167 {
10168 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10169 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10170 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10171 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10172 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10173 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10174 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10175 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10176 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10177 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10178 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10179 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10180 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10181 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10182 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10183 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10184 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10185 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10186 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10187 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10188 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10189 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10190 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10191 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10192 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10193 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10194 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10195 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10196 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10197 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10198 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10199 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10200 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10201 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10202 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10203 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10204 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10205 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10206 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10207 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10208 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10209 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10210 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10211 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10212 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10213 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10214 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10215 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10216 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10217 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10218 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10219 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10220 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10221 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10222 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10223 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10224 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10225 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10226 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10227 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10228 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10229 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10230 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10231 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10232 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10233 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10234 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10235 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10236 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10237 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10238 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10239 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10240 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10241 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10242 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10243 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10244 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10245 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10246 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10247 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10248 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10249 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10250 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10251 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10252 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10253 BPOSGE_BUILTIN (32, MASK_DSP)
10254 };
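
/* Illustrative use of the DSP builtins above; hypothetical user code
   compiled with -mdsp, not part of this file.  The typedef is an
   assumption about how the caller declares the 2 x 16-bit vector type:

     typedef short v2hi __attribute__ ((vector_size (4)));

     v2hi
     saturating_add (v2hi a, v2hi b)
     {
       return __builtin_mips_addq_s_ph (a, b);
     }
*/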
10255
10256 /* Provides the mapping from builtin function codes to the bdesc
10257    arrays that describe them.  */
10258
10259 struct bdesc_map
10260 {
10261 /* The builtin function table that this entry describes. */
10262 const struct builtin_description *bdesc;
10263
10264 /* The number of entries in the builtin function table. */
10265 unsigned int size;
10266
10267 /* The target processor that supports these builtin functions.
10268 PROCESSOR_MAX means we enable them for all processors. */
10269 enum processor_type proc;
10270 };
10271
10272 static const struct bdesc_map bdesc_arrays[] =
10273 {
10274 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
10275 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
10276 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
10277 };
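
/* Note on builtin function codes: mips_init_builtins assigns them array
   by array, adding each array's size to a running offset, so codes
   0 .. ARRAY_SIZE (mips_bdesc) - 1 refer to mips_bdesc, the next
   ARRAY_SIZE (sb1_bdesc) codes refer to sb1_bdesc, and the remaining
   codes refer to dsp_bdesc.  mips_expand_builtin recovers the entry by
   walking bdesc_arrays and subtracting each size in turn.  */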
10278
10279 /* Take the head of argument list *ARGLIST and convert it into a form
10280 suitable for input operand OP of instruction ICODE. Return the value
10281 and point *ARGLIST at the next element of the list. */
10282
10283 static rtx
10284 mips_prepare_builtin_arg (enum insn_code icode,
10285 unsigned int op, tree *arglist)
10286 {
10287 rtx value;
10288 enum machine_mode mode;
10289
10290 value = expand_normal (TREE_VALUE (*arglist));
10291 mode = insn_data[icode].operand[op].mode;
10292 if (!insn_data[icode].operand[op].predicate (value, mode))
10293 {
10294 value = copy_to_mode_reg (mode, value);
10295 /* Check the predicate again. */
10296 if (!insn_data[icode].operand[op].predicate (value, mode))
10297 {
10298 error ("invalid argument to builtin function");
10299 return const0_rtx;
10300 }
10301 }
10302
10303 *arglist = TREE_CHAIN (*arglist);
10304 return value;
10305 }
10306
10307 /* Return an rtx suitable for output operand OP of instruction ICODE.
10308 If TARGET is non-null, try to use it where possible. */
10309
10310 static rtx
10311 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10312 {
10313 enum machine_mode mode;
10314
10315 mode = insn_data[icode].operand[op].mode;
10316 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10317 target = gen_reg_rtx (mode);
10318
10319 return target;
10320 }
10321
10322 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10323
10324 rtx
10325 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10326 enum machine_mode mode ATTRIBUTE_UNUSED,
10327 int ignore ATTRIBUTE_UNUSED)
10328 {
10329 enum insn_code icode;
10330 enum mips_builtin_type type;
10331 tree fndecl, arglist;
10332 unsigned int fcode;
10333 const struct builtin_description *bdesc;
10334 const struct bdesc_map *m;
10335
10336 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
10337 arglist = TREE_OPERAND (exp, 1);
10338 fcode = DECL_FUNCTION_CODE (fndecl);
10339
10340 bdesc = NULL;
10341 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10342 {
10343 if (fcode < m->size)
10344 {
10345 bdesc = m->bdesc;
10346 icode = bdesc[fcode].icode;
10347 type = bdesc[fcode].builtin_type;
10348 break;
10349 }
10350 fcode -= m->size;
10351 }
10352 if (bdesc == NULL)
10353 return 0;
10354
10355 switch (type)
10356 {
10357 case MIPS_BUILTIN_DIRECT:
10358 return mips_expand_builtin_direct (icode, target, arglist, true);
10359
10360 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10361 return mips_expand_builtin_direct (icode, target, arglist, false);
10362
10363 case MIPS_BUILTIN_MOVT:
10364 case MIPS_BUILTIN_MOVF:
10365 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10366 target, arglist);
10367
10368 case MIPS_BUILTIN_CMP_ANY:
10369 case MIPS_BUILTIN_CMP_ALL:
10370 case MIPS_BUILTIN_CMP_UPPER:
10371 case MIPS_BUILTIN_CMP_LOWER:
10372 case MIPS_BUILTIN_CMP_SINGLE:
10373 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10374 target, arglist);
10375
10376 case MIPS_BUILTIN_BPOSGE32:
10377 return mips_expand_builtin_bposge (type, target);
10378
10379 default:
10380 return 0;
10381 }
10382 }
10383
10384 /* Initialize builtin functions.  Called from TARGET_INIT_BUILTINS.  */
10385
10386 void
10387 mips_init_builtins (void)
10388 {
10389 const struct builtin_description *d;
10390 const struct bdesc_map *m;
10391 tree types[(int) MIPS_MAX_FTYPE_MAX];
10392 tree V2SF_type_node;
10393 tree V2HI_type_node;
10394 tree V4QI_type_node;
10395 unsigned int offset;
10396
10397 /* We have builtins only for -mpaired-single, -mips3d and -mdsp.  */
10398 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10399 return;
10400
10401 if (TARGET_PAIRED_SINGLE_FLOAT)
10402 {
10403 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10404
10405 types[MIPS_V2SF_FTYPE_V2SF]
10406 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10407
10408 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10409 = build_function_type_list (V2SF_type_node,
10410 V2SF_type_node, V2SF_type_node, NULL_TREE);
10411
10412 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10413 = build_function_type_list (V2SF_type_node,
10414 V2SF_type_node, V2SF_type_node,
10415 integer_type_node, NULL_TREE);
10416
10417 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10418 = build_function_type_list (V2SF_type_node,
10419 V2SF_type_node, V2SF_type_node,
10420 V2SF_type_node, V2SF_type_node, NULL_TREE);
10421
10422 types[MIPS_V2SF_FTYPE_SF_SF]
10423 = build_function_type_list (V2SF_type_node,
10424 float_type_node, float_type_node, NULL_TREE);
10425
10426 types[MIPS_INT_FTYPE_V2SF_V2SF]
10427 = build_function_type_list (integer_type_node,
10428 V2SF_type_node, V2SF_type_node, NULL_TREE);
10429
10430 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10431 = build_function_type_list (integer_type_node,
10432 V2SF_type_node, V2SF_type_node,
10433 V2SF_type_node, V2SF_type_node, NULL_TREE);
10434
10435 types[MIPS_INT_FTYPE_SF_SF]
10436 = build_function_type_list (integer_type_node,
10437 float_type_node, float_type_node, NULL_TREE);
10438
10439 types[MIPS_INT_FTYPE_DF_DF]
10440 = build_function_type_list (integer_type_node,
10441 double_type_node, double_type_node, NULL_TREE);
10442
10443 types[MIPS_SF_FTYPE_V2SF]
10444 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10445
10446 types[MIPS_SF_FTYPE_SF]
10447 = build_function_type_list (float_type_node,
10448 float_type_node, NULL_TREE);
10449
10450 types[MIPS_SF_FTYPE_SF_SF]
10451 = build_function_type_list (float_type_node,
10452 float_type_node, float_type_node, NULL_TREE);
10453
10454 types[MIPS_DF_FTYPE_DF]
10455 = build_function_type_list (double_type_node,
10456 double_type_node, NULL_TREE);
10457
10458 types[MIPS_DF_FTYPE_DF_DF]
10459 = build_function_type_list (double_type_node,
10460 double_type_node, double_type_node, NULL_TREE);
10461 }
10462
10463 if (TARGET_DSP)
10464 {
10465 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10466 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10467
10468 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10469 = build_function_type_list (V2HI_type_node,
10470 V2HI_type_node, V2HI_type_node,
10471 NULL_TREE);
10472
10473 types[MIPS_SI_FTYPE_SI_SI]
10474 = build_function_type_list (intSI_type_node,
10475 intSI_type_node, intSI_type_node,
10476 NULL_TREE);
10477
10478 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10479 = build_function_type_list (V4QI_type_node,
10480 V4QI_type_node, V4QI_type_node,
10481 NULL_TREE);
10482
10483 types[MIPS_SI_FTYPE_V4QI]
10484 = build_function_type_list (intSI_type_node,
10485 V4QI_type_node,
10486 NULL_TREE);
10487
10488 types[MIPS_V2HI_FTYPE_V2HI]
10489 = build_function_type_list (V2HI_type_node,
10490 V2HI_type_node,
10491 NULL_TREE);
10492
10493 types[MIPS_SI_FTYPE_SI]
10494 = build_function_type_list (intSI_type_node,
10495 intSI_type_node,
10496 NULL_TREE);
10497
10498 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10499 = build_function_type_list (V4QI_type_node,
10500 V2HI_type_node, V2HI_type_node,
10501 NULL_TREE);
10502
10503 types[MIPS_V2HI_FTYPE_SI_SI]
10504 = build_function_type_list (V2HI_type_node,
10505 intSI_type_node, intSI_type_node,
10506 NULL_TREE);
10507
10508 types[MIPS_SI_FTYPE_V2HI]
10509 = build_function_type_list (intSI_type_node,
10510 V2HI_type_node,
10511 NULL_TREE);
10512
10513 types[MIPS_V2HI_FTYPE_V4QI]
10514 = build_function_type_list (V2HI_type_node,
10515 V4QI_type_node,
10516 NULL_TREE);
10517
10518 types[MIPS_V4QI_FTYPE_V4QI_SI]
10519 = build_function_type_list (V4QI_type_node,
10520 V4QI_type_node, intSI_type_node,
10521 NULL_TREE);
10522
10523 types[MIPS_V2HI_FTYPE_V2HI_SI]
10524 = build_function_type_list (V2HI_type_node,
10525 V2HI_type_node, intSI_type_node,
10526 NULL_TREE);
10527
10528 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10529 = build_function_type_list (V2HI_type_node,
10530 V4QI_type_node, V2HI_type_node,
10531 NULL_TREE);
10532
10533 types[MIPS_SI_FTYPE_V2HI_V2HI]
10534 = build_function_type_list (intSI_type_node,
10535 V2HI_type_node, V2HI_type_node,
10536 NULL_TREE);
10537
10538 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10539 = build_function_type_list (intDI_type_node,
10540 intDI_type_node, V4QI_type_node, V4QI_type_node,
10541 NULL_TREE);
10542
10543 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10544 = build_function_type_list (intDI_type_node,
10545 intDI_type_node, V2HI_type_node, V2HI_type_node,
10546 NULL_TREE);
10547
10548 types[MIPS_DI_FTYPE_DI_SI_SI]
10549 = build_function_type_list (intDI_type_node,
10550 intDI_type_node, intSI_type_node, intSI_type_node,
10551 NULL_TREE);
10552
10553 types[MIPS_V4QI_FTYPE_SI]
10554 = build_function_type_list (V4QI_type_node,
10555 intSI_type_node,
10556 NULL_TREE);
10557
10558 types[MIPS_V2HI_FTYPE_SI]
10559 = build_function_type_list (V2HI_type_node,
10560 intSI_type_node,
10561 NULL_TREE);
10562
10563 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10564 = build_function_type_list (void_type_node,
10565 V4QI_type_node, V4QI_type_node,
10566 NULL_TREE);
10567
10568 types[MIPS_SI_FTYPE_V4QI_V4QI]
10569 = build_function_type_list (intSI_type_node,
10570 V4QI_type_node, V4QI_type_node,
10571 NULL_TREE);
10572
10573 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10574 = build_function_type_list (void_type_node,
10575 V2HI_type_node, V2HI_type_node,
10576 NULL_TREE);
10577
10578 types[MIPS_SI_FTYPE_DI_SI]
10579 = build_function_type_list (intSI_type_node,
10580 intDI_type_node, intSI_type_node,
10581 NULL_TREE);
10582
10583 types[MIPS_DI_FTYPE_DI_SI]
10584 = build_function_type_list (intDI_type_node,
10585 intDI_type_node, intSI_type_node,
10586 NULL_TREE);
10587
10588 types[MIPS_VOID_FTYPE_SI_SI]
10589 = build_function_type_list (void_type_node,
10590 intSI_type_node, intSI_type_node,
10591 NULL_TREE);
10592
10593 types[MIPS_SI_FTYPE_PTR_SI]
10594 = build_function_type_list (intSI_type_node,
10595 ptr_type_node, intSI_type_node,
10596 NULL_TREE);
10597
10598 types[MIPS_SI_FTYPE_VOID]
10599 = build_function_type (intSI_type_node, void_list_node);
10600 }
10601
10602 /* Iterate through all of the bdesc arrays, initializing all of the
10603 builtin functions. */
10604
10605 offset = 0;
10606 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10607 {
10608 if (m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10609 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10610 if ((d->target_flags & target_flags) == d->target_flags)
10611 lang_hooks.builtin_function (d->name, types[d->function_type],
10612 d - m->bdesc + offset,
10613 BUILT_IN_MD, NULL, NULL);
10614 offset += m->size;
10615 }
10616 }
10617
10618 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10619 .md pattern and ARGLIST is the list of function arguments. TARGET,
10620 if nonnull, suggests a good place to put the result.
10621    HAS_TARGET is true if the function returns a value.  */
10622
10623 static rtx
10624 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
10625 bool has_target)
10626 {
10627 rtx ops[MAX_RECOG_OPERANDS];
10628 int i = 0;
10629
10630 if (has_target)
10631 {
10632 /* Save the target in ops[0].  */
10633 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10634 i = 1;
10635 }
10636
10637 /* Stop when ARGLIST runs out; some instructions have more operands
10638    than arguments because of extra clobber registers.  */
10639 for (; i < insn_data[icode].n_operands && arglist != 0; i++)
10640 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10641
10642 switch (i)
10643 {
10644 case 2:
10645 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10646 break;
10647
10648 case 3:
10649 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10650 break;
10651
10652 case 4:
10653 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10654 break;
10655
10656 default:
10657 gcc_unreachable ();
10658 }
10659 return target;
10660 }
10661
10662 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10663 function (TYPE says which). ARGLIST is the list of arguments to the
10664 function, ICODE is the instruction that should be used to compare
10665 the first two arguments, and COND is the condition it should test.
10666 TARGET, if nonnull, suggests a good place to put the result. */
10667
10668 static rtx
10669 mips_expand_builtin_movtf (enum mips_builtin_type type,
10670 enum insn_code icode, enum mips_fp_condition cond,
10671 rtx target, tree arglist)
10672 {
10673 rtx cmp_result, op0, op1;
10674
10675 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10676 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10677 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10678 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10679
10680 icode = CODE_FOR_mips_cond_move_tf_ps;
10681 target = mips_prepare_builtin_target (icode, 0, target);
10682 if (type == MIPS_BUILTIN_MOVT)
10683 {
10684 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10685 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10686 }
10687 else
10688 {
10689 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10690 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10691 }
10692 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10693 return target;
10694 }
10695
10696 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10697 into TARGET otherwise. Return TARGET. */
10698
10699 static rtx
10700 mips_builtin_branch_and_move (rtx condition, rtx target,
10701 rtx value_if_true, rtx value_if_false)
10702 {
10703 rtx true_label, done_label;
10704
10705 true_label = gen_label_rtx ();
10706 done_label = gen_label_rtx ();
10707
10708 /* First assume that CONDITION is false. */
10709 emit_move_insn (target, value_if_false);
10710
10711 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10712 emit_jump_insn (gen_condjump (condition, true_label));
10713 emit_jump_insn (gen_jump (done_label));
10714 emit_barrier ();
10715
10716 /* Fix TARGET if CONDITION is true. */
10717 emit_label (true_label);
10718 emit_move_insn (target, value_if_true);
10719
10720 emit_label (done_label);
10721 return target;
10722 }
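
/* Schematically, the code emitted by the function above is:

       target = value_if_false;
       if (condition) goto true_label;
       goto done_label;
     true_label:
       target = value_if_true;
     done_label:

   and the function returns TARGET.  */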
10723
10724 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10725 of the comparison instruction and COND is the condition it should test.
10726 ARGLIST is the list of function arguments and TARGET, if nonnull,
10727 suggests a good place to put the boolean result. */
10728
10729 static rtx
10730 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10731 enum insn_code icode, enum mips_fp_condition cond,
10732 rtx target, tree arglist)
10733 {
10734 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10735 int i;
10736
10737 if (target == 0 || GET_MODE (target) != SImode)
10738 target = gen_reg_rtx (SImode);
10739
10740 /* Prepare the operands to the comparison. */
10741 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10742 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10743 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10744
10745 switch (insn_data[icode].n_operands)
10746 {
10747 case 4:
10748 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10749 break;
10750
10751 case 6:
10752 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10753 ops[3], ops[4], GEN_INT (cond)));
10754 break;
10755
10756 default:
10757 gcc_unreachable ();
10758 }
10759
10760 /* If the comparison sets more than one register, we define the result
10761 to be 0 if all registers are false and -1 if all registers are true.
10762 The value of the complete result is indeterminate otherwise. */
10763 switch (builtin_type)
10764 {
10765 case MIPS_BUILTIN_CMP_ALL:
10766 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10767 return mips_builtin_branch_and_move (condition, target,
10768 const0_rtx, const1_rtx);
10769
10770 case MIPS_BUILTIN_CMP_UPPER:
10771 case MIPS_BUILTIN_CMP_LOWER:
10772 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10773 condition = gen_single_cc (cmp_result, offset);
10774 return mips_builtin_branch_and_move (condition, target,
10775 const1_rtx, const0_rtx);
10776
10777 default:
10778 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10779 return mips_builtin_branch_and_move (condition, target,
10780 const1_rtx, const0_rtx);
10781 }
10782 }
10783
10784 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10785 suggests a good place to put the boolean result. */
10786
10787 static rtx
10788 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10789 {
10790 rtx condition, cmp_result;
10791 int cmp_value;
10792
10793 if (target == 0 || GET_MODE (target) != SImode)
10794 target = gen_reg_rtx (SImode);
10795
10796 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10797
10798 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10799 cmp_value = 32;
10800 else
10801 gcc_unreachable ();
10802
10803 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10804 return mips_builtin_branch_and_move (condition, target,
10805 const1_rtx, const0_rtx);
10806 }
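
/* Illustrative use (hypothetical user code, compiled with -mdsp):

     if (__builtin_mips_bposge32 ())
       ...

   The builtin evaluates to 1 when the DSP "pos" value, modeled above as
   register CCDSP_PO_REGNUM, is at least 32, and to 0 otherwise.  */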
10807
10808 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10809 FIRST is true if this is the first time handling this decl. */
10810
10811 static void
10812 mips_encode_section_info (tree decl, rtx rtl, int first)
10813 {
10814 default_encode_section_info (decl, rtl, first);
10815
10816 if (TREE_CODE (decl) == FUNCTION_DECL
10817 && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10818 {
10819 rtx symbol = XEXP (rtl, 0);
10820 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10821 }
10822 }
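
/* Illustrative trigger for the hook above (hypothetical user code):

     void far_away (void) __attribute__ ((long_call));

   Such a declaration arrives here with "long_call" among the attributes
   of the function type, and the SYMBOL_FLAG_LONG_CALL flag set here is
   consulted elsewhere when calls to far_away are expanded.  */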
10823
10824 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. PIC_FUNCTION_ADDR_REGNUM is live
10825 on entry to a function when generating -mshared abicalls code. */
10826
10827 static void
10828 mips_extra_live_on_entry (bitmap regs)
10829 {
10830 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
10831 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10832 }
10833
10834 /* SImode values are represented as sign-extended to DImode. */
10835
10836 int
10837 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10838 {
10839 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10840 return SIGN_EXTEND;
10841
10842 return UNKNOWN;
10843 }
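
/* The rationale for the hook above: on 64-bit MIPS targets, 32-bit loads
   and arithmetic already leave their results sign-extended to 64 bits,
   so a sign_extend from SImode to DImode costs nothing; reporting
   SIGN_EXTEND lets the optimizers rely on that representation.  */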
10844
10845 #include "gt-mips.h"
10846