/* Subroutines used for MIPS code generation.
   Copyright (C) 1989-2019 Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "attribs.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "insn-attr.h"
#include "output.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "explow.h"
#include "expr.h"
#include "libfuncs.h"
#include "reload.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "sched-int.h"
#include "gimplify.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
#include "rtl-iter.h"

/* This file should be included last.  */
#include "target-def.h"

/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.
   Symbolic addresses are wrapped in UNSPECs whose XINT encodes the
   mips_symbol_type, offset from UNSPEC_ADDRESS_FIRST.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   microMIPS LWM and SWM support 12-bit offsets (from -0x800 to 0x7ff),
   so we use a maximum of 0x7f0 for TARGET_MICROMIPS.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_COMPRESSION ? 0x7ff0						\
   : TARGET_MICROMIPS || GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8		\
   : TARGET_64BIT ? 0x100 : 0x400)

/* True if INSN is a mips.md pattern or asm statement.  */
/* ???	This test exists through the compiler, perhaps it should be
	moved to rtl.h.  */
#define USEFUL_INSN_P(INSN)						\
  (NONDEBUG_INSN_P (INSN)						\
   && GET_CODE (PATTERN (INSN)) != USE					\
   && GET_CODE (PATTERN (INSN)) != CLOBBER)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN), 0, 0))			\
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? as_a <rtx_insn *> (XVECEXP (PATTERN (INSN),			\
				 0,					\
				 XVECLEN (PATTERN (INSN), 0) - 1))	\
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)

/* Return the opcode for a ptr_mode load of the form:

       l[wd]    DEST, OFFSET(BASE).

   0x37 is the LD major opcode, 0x23 the LW major opcode.  */
#define MIPS_LOAD_PTR(DEST, OFFSET, BASE)	\
  (((ptr_mode == DImode ? 0x37 : 0x23) << 26)	\
   | ((BASE) << 21)				\
   | ((DEST) << 16)				\
   | (OFFSET))

/* Return the opcode to move register SRC into register DEST.  Uses
   DADDU (0x2d) for 64-bit targets and ADDU (0x21) otherwise.  */
#define MIPS_MOVE(DEST, SRC)		\
  ((TARGET_64BIT ? 0x2d : 0x21)		\
   | ((DEST) << 11)			\
   | ((SRC) << 21))

/* Return the opcode for:

       lui      DEST, VALUE.  */
#define MIPS_LUI(DEST, VALUE) \
  ((0xf << 26) | ((DEST) << 16) | (VALUE))

/* Return the opcode to jump to register DEST.  When the JR opcode is not
   available use JALR $0, DEST.  */
#define MIPS_JR(DEST) \
  (TARGET_CB_ALWAYS ? ((0x1b << 27) | ((DEST) << 16)) \
		    : (((DEST) << 21) | (ISA_HAS_JR ? 0x8 : 0x9)))

/* Return the opcode for:

       bal     . + (1 + OFFSET) * 4.  */
#define MIPS_BAL(OFFSET) \
  ((0x1 << 26) | (0x11 << 16) | (OFFSET))

/* Return the usual opcode for a nop.  */
#define MIPS_NOP 0

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Classifies an unconditional branch of interest for the P6600.  */

enum mips_ucbranch_type
{
  /* May not even be a branch.  */
  UC_UNDEFINED,
  UC_BALC,
  UC_OTHER
};

211 /* Macros to create an enumeration identifier for a function prototype.  */
212 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
213 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
214 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
215 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
216 
217 /* Classifies the prototype of a built-in function.  */
218 enum mips_function_type {
219 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
220 #include "config/mips/mips-ftypes.def"
221 #undef DEF_MIPS_FTYPE
222   MIPS_MAX_FTYPE_MAX
223 };
224 
/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* The function corresponds to an MSA conditional branch instruction
     combined with a compare instruction.  */
  MIPS_BUILTIN_MSA_TEST_BRANCH,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};

/* Invoke MACRO (COND) for each C.cond.fmt condition.  The order matches
   the hardware encoding of the condition field (0 to 15).  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f),	\
  MACRO (un),	\
  MACRO (eq),	\
  MACRO (ueq),	\
  MACRO (olt),	\
  MACRO (ult),	\
  MACRO (ole),	\
  MACRO (ule),	\
  MACRO (sf),	\
  MACRO (ngle),	\
  MACRO (seq),	\
  MACRO (ngl),	\
  MACRO (lt),	\
  MACRO (nge),	\
  MACRO (le),	\
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};
#undef DECLARE_MIPS_COND

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
#undef STRINGIFY

/* A class used to control a comdat-style stub that we output in each
   translation unit that needs it.  */
class mips_one_only_stub {
public:
  virtual ~mips_one_only_stub () {}

  /* Return the name of the stub.  */
  virtual const char *get_name () = 0;

  /* Output the body of the function to asm_out_file.  */
  virtual void output_body () = 0;
};

315 /* Tuning information that is automatically derived from other sources
316    (such as the scheduler).  */
317 static struct {
318   /* The architecture and tuning settings that this structure describes.  */
319   enum processor arch;
320   enum processor tune;
321 
322   /* True if this structure describes MIPS16 settings.  */
323   bool mips16_p;
324 
325   /* True if the structure has been initialized.  */
326   bool initialized_p;
327 
328   /* True if "MULT $0, $0" is preferable to "MTLO $0; MTHI $0"
329      when optimizing for speed.  */
330   bool fast_mult_zero_zero_p;
331 } mips_tuning_info;
332 
/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};

359 /* Information about an address described by mips_address_type.
360 
361    ADDRESS_CONST_INT
362        No fields are used.
363 
364    ADDRESS_REG
365        REG is the base register and OFFSET is the constant offset.
366 
367    ADDRESS_LO_SUM
368        REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
369        is the type of symbol it references.
370 
371    ADDRESS_SYMBOLIC
372        SYMBOL_TYPE is the type of symbol that the address references.  */
373 struct mips_address_info {
374   enum mips_address_type type;
375   rtx reg;
376   rtx offset;
377   enum mips_symbol_type symbol_type;
378 };
379 
380 /* One stage in a constant building sequence.  These sequences have
381    the form:
382 
383 	A = VALUE[0]
384 	A = A CODE[1] VALUE[1]
385 	A = A CODE[2] VALUE[2]
386 	...
387 
388    where A is an accumulator, each CODE[i] is a binary rtl operation
389    and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
390 struct mips_integer_op {
391   enum rtx_code code;
392   unsigned HOST_WIDE_INT value;
393 };
394 
395 /* The largest number of operations needed to load an integer constant.
396    The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
397    When the lowest bit is clear, we can try, but reject a sequence with
398    an extra SLL at the end.  */
399 #define MIPS_MAX_INTEGER_OPS 7
400 
401 /* Information about a MIPS16e SAVE or RESTORE instruction.  */
402 struct mips16e_save_restore_info {
403   /* The number of argument registers saved by a SAVE instruction.
404      0 for RESTORE instructions.  */
405   unsigned int nargs;
406 
407   /* Bit X is set if the instruction saves or restores GPR X.  */
408   unsigned int mask;
409 
410   /* The total number of bytes to allocate.  */
411   HOST_WIDE_INT size;
412 };
413 
/* Costs of various operations on the different architectures.  All costs
   are in COSTS_N_INSNS units except BRANCH_COST and MEMORY_LATENCY,
   which are plain cycle-ish counts.  */

struct mips_rtx_cost_data
{
  unsigned short fp_add;
  unsigned short fp_mult_sf;
  unsigned short fp_mult_df;
  unsigned short fp_div_sf;
  unsigned short fp_div_df;
  unsigned short int_mult_si;
  unsigned short int_mult_di;
  unsigned short int_div_si;
  unsigned short int_div_di;
  unsigned short branch_cost;
  unsigned short memory_latency;
};

431 /* Global variables for machine-dependent things.  */
432 
433 /* The -G setting, or the configuration's default small-data limit if
434    no -G option is given.  */
435 static unsigned int mips_small_data_threshold;
436 
437 /* The number of file directives written by mips_output_filename.  */
438 int num_source_filenames;
439 
440 /* The name that appeared in the last .file directive written by
441    mips_output_filename, or "" if mips_output_filename hasn't
442    written anything yet.  */
443 const char *current_function_file = "";
444 
445 /* Arrays that map GCC register numbers to debugger register numbers.  */
446 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
447 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
448 
449 /* Information about the current function's epilogue, used only while
450    expanding it.  */
451 static struct {
452   /* A list of queued REG_CFA_RESTORE notes.  */
453   rtx cfa_restores;
454 
455   /* The CFA is currently defined as CFA_REG + CFA_OFFSET.  */
456   rtx cfa_reg;
457   HOST_WIDE_INT cfa_offset;
458 
459   /* The offset of the CFA from the stack pointer while restoring
460      registers.  */
461   HOST_WIDE_INT cfa_restore_sp_offset;
462 } mips_epilogue;
463 
464 /* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */
465 struct mips_asm_switch mips_noreorder = { "reorder", 0 };
466 struct mips_asm_switch mips_nomacro = { "macro", 0 };
467 struct mips_asm_switch mips_noat = { "at", 0 };
468 
469 /* True if we're writing out a branch-likely instruction rather than a
470    normal branch.  */
471 static bool mips_branch_likely;
472 
473 /* The current instruction-set architecture.  */
474 enum processor mips_arch;
475 const struct mips_cpu_info *mips_arch_info;
476 
477 /* The processor that we should tune the code for.  */
478 enum processor mips_tune;
479 const struct mips_cpu_info *mips_tune_info;
480 
481 /* The ISA level associated with mips_arch.  */
482 int mips_isa;
483 
484 /* The ISA revision level.  This is 0 for MIPS I to V and N for
485    MIPS{32,64}rN.  */
486 int mips_isa_rev;
487 
488 /* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
489 static const struct mips_cpu_info *mips_isa_option_info;
490 
491 /* Which cost information to use.  */
492 static const struct mips_rtx_cost_data *mips_cost;
493 
494 /* The ambient target flags, excluding MASK_MIPS16.  */
495 static int mips_base_target_flags;
496 
497 /* The default compression mode.  */
498 unsigned int mips_base_compression_flags;
499 
500 /* The ambient values of other global variables.  */
501 static int mips_base_schedule_insns; /* flag_schedule_insns */
502 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
503 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
504 static const char *mips_base_align_loops; /* align_loops */
505 static const char *mips_base_align_jumps; /* align_jumps */
506 static const char *mips_base_align_functions; /* align_functions */
507 
508 /* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
509 static bool mips_hard_regno_mode_ok_p[MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
510 
511 /* Index C is true if character C is a valid PRINT_OPERAND punctation
512    character.  */
513 static bool mips_print_operand_punct[256];
514 
515 static GTY (()) int mips_output_filename_first_time = 1;
516 
517 /* mips_split_p[X] is true if symbols of type X can be split by
518    mips_split_symbol.  */
519 bool mips_split_p[NUM_SYMBOL_TYPES];
520 
521 /* mips_split_hi_p[X] is true if the high parts of symbols of type X
522    can be split by mips_split_symbol.  */
523 bool mips_split_hi_p[NUM_SYMBOL_TYPES];
524 
525 /* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
526    forced into a PC-relative constant pool.  */
527 bool mips_use_pcrel_pool_p[NUM_SYMBOL_TYPES];
528 
529 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
530    appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
531    if they are matched by a special .md file pattern.  */
532 const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
533 
534 /* Likewise for HIGHs.  */
535 const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
536 
537 /* Target state for MIPS16.  */
538 struct target_globals *mips16_globals;
539 
540 /* Target state for MICROMIPS.  */
541 struct target_globals *micromips_globals;
542 
543 /* Cached value of can_issue_more. This is cached in mips_variable_issue hook
544    and returned from mips_sched_reorder2.  */
545 static int cached_can_issue_more;
546 
547 /* The stubs for various MIPS16 support functions, if used.   */
548 static mips_one_only_stub *mips16_rdhwr_stub;
549 static mips_one_only_stub *mips16_get_fcsr_stub;
550 static mips_one_only_stub *mips16_set_fcsr_stub;
551 
552 /* Index R is the smallest register class that contains register R.  */
553 const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
554   LEA_REGS,        LEA_REGS,        M16_STORE_REGS,  V1_REG,
555   M16_STORE_REGS,  M16_STORE_REGS,  M16_STORE_REGS,  M16_STORE_REGS,
556   LEA_REGS,        LEA_REGS,        LEA_REGS,        LEA_REGS,
557   LEA_REGS,        LEA_REGS,        LEA_REGS,        LEA_REGS,
558   M16_REGS,        M16_STORE_REGS,  LEA_REGS,        LEA_REGS,
559   LEA_REGS,        LEA_REGS,        LEA_REGS,        LEA_REGS,
560   T_REG,           PIC_FN_ADDR_REG, LEA_REGS,        LEA_REGS,
561   LEA_REGS,        M16_SP_REGS,     LEA_REGS,        LEA_REGS,
562 
563   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
564   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
565   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
566   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
567   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
568   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
569   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
570   FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
571   MD0_REG,	MD1_REG,	NO_REGS,	ST_REGS,
572   ST_REGS,	ST_REGS,	ST_REGS,	ST_REGS,
573   ST_REGS,	ST_REGS,	ST_REGS,	NO_REGS,
574   NO_REGS,	FRAME_REGS,	FRAME_REGS,	NO_REGS,
575   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
576   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
577   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
578   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
579   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
580   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
581   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
582   COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
583   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
584   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
585   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
586   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
587   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
588   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
589   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
590   COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
591   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
592   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
593   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
594   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
595   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
596   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
597   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
598   COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
599   DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,
600   DSP_ACC_REGS,	DSP_ACC_REGS,	ALL_REGS,	ALL_REGS,
601   ALL_REGS,	ALL_REGS,	ALL_REGS,	ALL_REGS
602 };
603 
604 static tree mips_handle_interrupt_attr (tree *, tree, tree, int, bool *);
605 static tree mips_handle_use_shadow_register_set_attr (tree *, tree, tree, int,
606 						      bool *);
607 
608 /* The value of TARGET_ATTRIBUTE_TABLE.  */
609 static const struct attribute_spec mips_attribute_table[] = {
610   /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
611        affects_type_identity, handler, exclude } */
612   { "long_call",   0, 0, false, true,  true,  false, NULL, NULL },
613   { "short_call",  0, 0, false, true,  true,  false, NULL, NULL },
614   { "far",     	   0, 0, false, true,  true,  false, NULL, NULL },
615   { "near",        0, 0, false, true,  true,  false, NULL, NULL },
616   /* We would really like to treat "mips16" and "nomips16" as type
617      attributes, but GCC doesn't provide the hooks we need to support
618      the right conversion rules.  As declaration attributes, they affect
619      code generation but don't carry other semantics.  */
620   { "mips16", 	   0, 0, true,  false, false, false, NULL, NULL },
621   { "nomips16",    0, 0, true,  false, false, false, NULL, NULL },
622   { "micromips",   0, 0, true,  false, false, false, NULL, NULL },
623   { "nomicromips", 0, 0, true,  false, false, false, NULL, NULL },
624   { "nocompression", 0, 0, true,  false, false, false, NULL, NULL },
625   /* Allow functions to be specified as interrupt handlers */
626   { "interrupt",   0, 1, false, true,  true, false, mips_handle_interrupt_attr,
627     NULL },
628   { "use_shadow_register_set",	0, 1, false, true,  true, false,
629     mips_handle_use_shadow_register_set_attr, NULL },
630   { "keep_interrupts_masked",	0, 0, false, true,  true, false, NULL, NULL },
631   { "use_debug_exception_return", 0, 0, false, true, true, false, NULL, NULL },
632   { NULL,	   0, 0, false, false, false, false, NULL, NULL }
633 };
634 
635 /* A table describing all the processors GCC knows about; see
636    mips-cpus.def for details.  */
637 static const struct mips_cpu_info mips_cpu_info_table[] = {
638 #define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
639   { NAME, CPU, ISA, FLAGS },
640 #include "mips-cpus.def"
641 #undef MIPS_CPU
642 };
643 
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */

666 /* Costs to use when optimizing for size.  */
667 static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
668   COSTS_N_INSNS (1),            /* fp_add */
669   COSTS_N_INSNS (1),            /* fp_mult_sf */
670   COSTS_N_INSNS (1),            /* fp_mult_df */
671   COSTS_N_INSNS (1),            /* fp_div_sf */
672   COSTS_N_INSNS (1),            /* fp_div_df */
673   COSTS_N_INSNS (1),            /* int_mult_si */
674   COSTS_N_INSNS (1),            /* int_mult_di */
675   COSTS_N_INSNS (1),            /* int_div_si */
676   COSTS_N_INSNS (1),            /* int_div_di */
677 		   2,           /* branch_cost */
678 		   4            /* memory_latency */
679 };
680 
681 /* Costs to use when optimizing for speed, indexed by processor.  */
682 static const struct mips_rtx_cost_data
683   mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = {
684   { /* R3000 */
685     COSTS_N_INSNS (2),            /* fp_add */
686     COSTS_N_INSNS (4),            /* fp_mult_sf */
687     COSTS_N_INSNS (5),            /* fp_mult_df */
688     COSTS_N_INSNS (12),           /* fp_div_sf */
689     COSTS_N_INSNS (19),           /* fp_div_df */
690     COSTS_N_INSNS (12),           /* int_mult_si */
691     COSTS_N_INSNS (12),           /* int_mult_di */
692     COSTS_N_INSNS (35),           /* int_div_si */
693     COSTS_N_INSNS (35),           /* int_div_di */
694 		     1,           /* branch_cost */
695 		     4            /* memory_latency */
696   },
697   { /* 4KC */
698     SOFT_FP_COSTS,
699     COSTS_N_INSNS (6),            /* int_mult_si */
700     COSTS_N_INSNS (6),            /* int_mult_di */
701     COSTS_N_INSNS (36),           /* int_div_si */
702     COSTS_N_INSNS (36),           /* int_div_di */
703 		     1,           /* branch_cost */
704 		     4            /* memory_latency */
705   },
706   { /* 4KP */
707     SOFT_FP_COSTS,
708     COSTS_N_INSNS (36),           /* int_mult_si */
709     COSTS_N_INSNS (36),           /* int_mult_di */
710     COSTS_N_INSNS (37),           /* int_div_si */
711     COSTS_N_INSNS (37),           /* int_div_di */
712 		     1,           /* branch_cost */
713 		     4            /* memory_latency */
714   },
715   { /* 5KC */
716     SOFT_FP_COSTS,
717     COSTS_N_INSNS (4),            /* int_mult_si */
718     COSTS_N_INSNS (11),           /* int_mult_di */
719     COSTS_N_INSNS (36),           /* int_div_si */
720     COSTS_N_INSNS (68),           /* int_div_di */
721 		     1,           /* branch_cost */
722 		     4            /* memory_latency */
723   },
724   { /* 5KF */
725     COSTS_N_INSNS (4),            /* fp_add */
726     COSTS_N_INSNS (4),            /* fp_mult_sf */
727     COSTS_N_INSNS (5),            /* fp_mult_df */
728     COSTS_N_INSNS (17),           /* fp_div_sf */
729     COSTS_N_INSNS (32),           /* fp_div_df */
730     COSTS_N_INSNS (4),            /* int_mult_si */
731     COSTS_N_INSNS (11),           /* int_mult_di */
732     COSTS_N_INSNS (36),           /* int_div_si */
733     COSTS_N_INSNS (68),           /* int_div_di */
734 		     1,           /* branch_cost */
735 		     4            /* memory_latency */
736   },
737   { /* 20KC */
738     COSTS_N_INSNS (4),            /* fp_add */
739     COSTS_N_INSNS (4),            /* fp_mult_sf */
740     COSTS_N_INSNS (5),            /* fp_mult_df */
741     COSTS_N_INSNS (17),           /* fp_div_sf */
742     COSTS_N_INSNS (32),           /* fp_div_df */
743     COSTS_N_INSNS (4),            /* int_mult_si */
744     COSTS_N_INSNS (7),            /* int_mult_di */
745     COSTS_N_INSNS (42),           /* int_div_si */
746     COSTS_N_INSNS (72),           /* int_div_di */
747 		     1,           /* branch_cost */
748 		     4            /* memory_latency */
749   },
750   { /* 24KC */
751     SOFT_FP_COSTS,
752     COSTS_N_INSNS (5),            /* int_mult_si */
753     COSTS_N_INSNS (5),            /* int_mult_di */
754     COSTS_N_INSNS (41),           /* int_div_si */
755     COSTS_N_INSNS (41),           /* int_div_di */
756 		     1,           /* branch_cost */
757 		     4            /* memory_latency */
758   },
759   { /* 24KF2_1 */
760     COSTS_N_INSNS (8),            /* fp_add */
761     COSTS_N_INSNS (8),            /* fp_mult_sf */
762     COSTS_N_INSNS (10),           /* fp_mult_df */
763     COSTS_N_INSNS (34),           /* fp_div_sf */
764     COSTS_N_INSNS (64),           /* fp_div_df */
765     COSTS_N_INSNS (5),            /* int_mult_si */
766     COSTS_N_INSNS (5),            /* int_mult_di */
767     COSTS_N_INSNS (41),           /* int_div_si */
768     COSTS_N_INSNS (41),           /* int_div_di */
769 		     1,           /* branch_cost */
770 		     4            /* memory_latency */
771   },
772   { /* 24KF1_1 */
773     COSTS_N_INSNS (4),            /* fp_add */
774     COSTS_N_INSNS (4),            /* fp_mult_sf */
775     COSTS_N_INSNS (5),            /* fp_mult_df */
776     COSTS_N_INSNS (17),           /* fp_div_sf */
777     COSTS_N_INSNS (32),           /* fp_div_df */
778     COSTS_N_INSNS (5),            /* int_mult_si */
779     COSTS_N_INSNS (5),            /* int_mult_di */
780     COSTS_N_INSNS (41),           /* int_div_si */
781     COSTS_N_INSNS (41),           /* int_div_di */
782 		     1,           /* branch_cost */
783 		     4            /* memory_latency */
784   },
785   { /* 74KC */
786     SOFT_FP_COSTS,
787     COSTS_N_INSNS (5),            /* int_mult_si */
788     COSTS_N_INSNS (5),            /* int_mult_di */
789     COSTS_N_INSNS (41),           /* int_div_si */
790     COSTS_N_INSNS (41),           /* int_div_di */
791 		     1,           /* branch_cost */
792 		     4            /* memory_latency */
793   },
794   { /* 74KF2_1 */
795     COSTS_N_INSNS (8),            /* fp_add */
796     COSTS_N_INSNS (8),            /* fp_mult_sf */
797     COSTS_N_INSNS (10),           /* fp_mult_df */
798     COSTS_N_INSNS (34),           /* fp_div_sf */
799     COSTS_N_INSNS (64),           /* fp_div_df */
800     COSTS_N_INSNS (5),            /* int_mult_si */
801     COSTS_N_INSNS (5),            /* int_mult_di */
802     COSTS_N_INSNS (41),           /* int_div_si */
803     COSTS_N_INSNS (41),           /* int_div_di */
804 		     1,           /* branch_cost */
805 		     4            /* memory_latency */
806   },
807   { /* 74KF1_1 */
808     COSTS_N_INSNS (4),            /* fp_add */
809     COSTS_N_INSNS (4),            /* fp_mult_sf */
810     COSTS_N_INSNS (5),            /* fp_mult_df */
811     COSTS_N_INSNS (17),           /* fp_div_sf */
812     COSTS_N_INSNS (32),           /* fp_div_df */
813     COSTS_N_INSNS (5),            /* int_mult_si */
814     COSTS_N_INSNS (5),            /* int_mult_di */
815     COSTS_N_INSNS (41),           /* int_div_si */
816     COSTS_N_INSNS (41),           /* int_div_di */
817 		     1,           /* branch_cost */
818 		     4            /* memory_latency */
819   },
820   { /* 74KF3_2 */
821     COSTS_N_INSNS (6),            /* fp_add */
822     COSTS_N_INSNS (6),            /* fp_mult_sf */
823     COSTS_N_INSNS (7),            /* fp_mult_df */
824     COSTS_N_INSNS (25),           /* fp_div_sf */
825     COSTS_N_INSNS (48),           /* fp_div_df */
826     COSTS_N_INSNS (5),            /* int_mult_si */
827     COSTS_N_INSNS (5),            /* int_mult_di */
828     COSTS_N_INSNS (41),           /* int_div_si */
829     COSTS_N_INSNS (41),           /* int_div_di */
830 		     1,           /* branch_cost */
831 		     4            /* memory_latency */
832   },
833   { /* Loongson-2E */
834     DEFAULT_COSTS
835   },
836   { /* Loongson-2F */
837     DEFAULT_COSTS
838   },
839   { /* Loongson gs464.  */
840     DEFAULT_COSTS
841   },
842   { /* Loongson gs464e.  */
843     DEFAULT_COSTS
844   },
845   { /* Loongson gs264e.  */
846     DEFAULT_COSTS
847   },
848   { /* M4k */
849     DEFAULT_COSTS
850   },
851     /* Octeon */
852   {
853     SOFT_FP_COSTS,
854     COSTS_N_INSNS (5),            /* int_mult_si */
855     COSTS_N_INSNS (5),            /* int_mult_di */
856     COSTS_N_INSNS (72),           /* int_div_si */
857     COSTS_N_INSNS (72),           /* int_div_di */
858                      1,		  /* branch_cost */
859                      4		  /* memory_latency */
860   },
861     /* Octeon II */
862   {
863     SOFT_FP_COSTS,
864     COSTS_N_INSNS (6),            /* int_mult_si */
865     COSTS_N_INSNS (6),            /* int_mult_di */
866     COSTS_N_INSNS (18),           /* int_div_si */
867     COSTS_N_INSNS (35),           /* int_div_di */
868                      4,		  /* branch_cost */
869                      4		  /* memory_latency */
870   },
871     /* Octeon III */
872   {
873     COSTS_N_INSNS (6),            /* fp_add */
874     COSTS_N_INSNS (6),            /* fp_mult_sf */
875     COSTS_N_INSNS (7),            /* fp_mult_df */
876     COSTS_N_INSNS (25),           /* fp_div_sf */
877     COSTS_N_INSNS (48),           /* fp_div_df */
878     COSTS_N_INSNS (6),            /* int_mult_si */
879     COSTS_N_INSNS (6),            /* int_mult_di */
880     COSTS_N_INSNS (18),           /* int_div_si */
881     COSTS_N_INSNS (35),           /* int_div_di */
882                      4,		  /* branch_cost */
883                      4		  /* memory_latency */
884   },
885   { /* R3900 */
886     COSTS_N_INSNS (2),            /* fp_add */
887     COSTS_N_INSNS (4),            /* fp_mult_sf */
888     COSTS_N_INSNS (5),            /* fp_mult_df */
889     COSTS_N_INSNS (12),           /* fp_div_sf */
890     COSTS_N_INSNS (19),           /* fp_div_df */
891     COSTS_N_INSNS (2),            /* int_mult_si */
892     COSTS_N_INSNS (2),            /* int_mult_di */
893     COSTS_N_INSNS (35),           /* int_div_si */
894     COSTS_N_INSNS (35),           /* int_div_di */
895 		     1,           /* branch_cost */
896 		     4            /* memory_latency */
897   },
898   { /* R6000 */
899     COSTS_N_INSNS (3),            /* fp_add */
900     COSTS_N_INSNS (5),            /* fp_mult_sf */
901     COSTS_N_INSNS (6),            /* fp_mult_df */
902     COSTS_N_INSNS (15),           /* fp_div_sf */
903     COSTS_N_INSNS (16),           /* fp_div_df */
904     COSTS_N_INSNS (17),           /* int_mult_si */
905     COSTS_N_INSNS (17),           /* int_mult_di */
906     COSTS_N_INSNS (38),           /* int_div_si */
907     COSTS_N_INSNS (38),           /* int_div_di */
908 		     2,           /* branch_cost */
909 		     6            /* memory_latency */
910   },
911   { /* R4000 */
912      COSTS_N_INSNS (6),           /* fp_add */
913      COSTS_N_INSNS (7),           /* fp_mult_sf */
914      COSTS_N_INSNS (8),           /* fp_mult_df */
915      COSTS_N_INSNS (23),          /* fp_div_sf */
916      COSTS_N_INSNS (36),          /* fp_div_df */
917      COSTS_N_INSNS (10),          /* int_mult_si */
918      COSTS_N_INSNS (10),          /* int_mult_di */
919      COSTS_N_INSNS (69),          /* int_div_si */
920      COSTS_N_INSNS (69),          /* int_div_di */
921 		      2,          /* branch_cost */
922 		      6           /* memory_latency */
923   },
924   { /* R4100 */
925     DEFAULT_COSTS
926   },
927   { /* R4111 */
928     DEFAULT_COSTS
929   },
930   { /* R4120 */
931     DEFAULT_COSTS
932   },
933   { /* R4130 */
934     /* The only costs that appear to be updated here are
935        integer multiplication.  */
936     SOFT_FP_COSTS,
937     COSTS_N_INSNS (4),            /* int_mult_si */
938     COSTS_N_INSNS (6),            /* int_mult_di */
939     COSTS_N_INSNS (69),           /* int_div_si */
940     COSTS_N_INSNS (69),           /* int_div_di */
941 		     1,           /* branch_cost */
942 		     4            /* memory_latency */
943   },
944   { /* R4300 */
945     DEFAULT_COSTS
946   },
947   { /* R4600 */
948     DEFAULT_COSTS
949   },
950   { /* R4650 */
951     DEFAULT_COSTS
952   },
953   { /* R4700 */
954     DEFAULT_COSTS
955   },
956   { /* R5000 */
957     COSTS_N_INSNS (6),            /* fp_add */
958     COSTS_N_INSNS (4),            /* fp_mult_sf */
959     COSTS_N_INSNS (5),            /* fp_mult_df */
960     COSTS_N_INSNS (23),           /* fp_div_sf */
961     COSTS_N_INSNS (36),           /* fp_div_df */
962     COSTS_N_INSNS (5),            /* int_mult_si */
963     COSTS_N_INSNS (5),            /* int_mult_di */
964     COSTS_N_INSNS (36),           /* int_div_si */
965     COSTS_N_INSNS (36),           /* int_div_di */
966 		     1,           /* branch_cost */
967 		     4            /* memory_latency */
968   },
969   { /* R5400 */
970     COSTS_N_INSNS (6),            /* fp_add */
971     COSTS_N_INSNS (5),            /* fp_mult_sf */
972     COSTS_N_INSNS (6),            /* fp_mult_df */
973     COSTS_N_INSNS (30),           /* fp_div_sf */
974     COSTS_N_INSNS (59),           /* fp_div_df */
975     COSTS_N_INSNS (3),            /* int_mult_si */
976     COSTS_N_INSNS (4),            /* int_mult_di */
977     COSTS_N_INSNS (42),           /* int_div_si */
978     COSTS_N_INSNS (74),           /* int_div_di */
979 		     1,           /* branch_cost */
980 		     4            /* memory_latency */
981   },
982   { /* R5500 */
983     COSTS_N_INSNS (6),            /* fp_add */
984     COSTS_N_INSNS (5),            /* fp_mult_sf */
985     COSTS_N_INSNS (6),            /* fp_mult_df */
986     COSTS_N_INSNS (30),           /* fp_div_sf */
987     COSTS_N_INSNS (59),           /* fp_div_df */
988     COSTS_N_INSNS (5),            /* int_mult_si */
989     COSTS_N_INSNS (9),            /* int_mult_di */
990     COSTS_N_INSNS (42),           /* int_div_si */
991     COSTS_N_INSNS (74),           /* int_div_di */
992 		     1,           /* branch_cost */
993 		     4            /* memory_latency */
994   },
995   { /* R5900 */
996     COSTS_N_INSNS (4),            /* fp_add */
997     COSTS_N_INSNS (4),            /* fp_mult_sf */
998     COSTS_N_INSNS (256),          /* fp_mult_df */
999     COSTS_N_INSNS (8),            /* fp_div_sf */
1000     COSTS_N_INSNS (256),          /* fp_div_df */
1001     COSTS_N_INSNS (4),            /* int_mult_si */
1002     COSTS_N_INSNS (256),          /* int_mult_di */
1003     COSTS_N_INSNS (37),           /* int_div_si */
1004     COSTS_N_INSNS (256),          /* int_div_di */
1005 		     1,           /* branch_cost */
1006 		     4            /* memory_latency */
1007   },
1008   { /* R7000 */
1009     /* The only costs that are changed here are
1010        integer multiplication.  */
1011     COSTS_N_INSNS (6),            /* fp_add */
1012     COSTS_N_INSNS (7),            /* fp_mult_sf */
1013     COSTS_N_INSNS (8),            /* fp_mult_df */
1014     COSTS_N_INSNS (23),           /* fp_div_sf */
1015     COSTS_N_INSNS (36),           /* fp_div_df */
1016     COSTS_N_INSNS (5),            /* int_mult_si */
1017     COSTS_N_INSNS (9),            /* int_mult_di */
1018     COSTS_N_INSNS (69),           /* int_div_si */
1019     COSTS_N_INSNS (69),           /* int_div_di */
1020 		     1,           /* branch_cost */
1021 		     4            /* memory_latency */
1022   },
1023   { /* R8000 */
1024     DEFAULT_COSTS
1025   },
1026   { /* R9000 */
1027     /* The only costs that are changed here are
1028        integer multiplication.  */
1029     COSTS_N_INSNS (6),            /* fp_add */
1030     COSTS_N_INSNS (7),            /* fp_mult_sf */
1031     COSTS_N_INSNS (8),            /* fp_mult_df */
1032     COSTS_N_INSNS (23),           /* fp_div_sf */
1033     COSTS_N_INSNS (36),           /* fp_div_df */
1034     COSTS_N_INSNS (3),            /* int_mult_si */
1035     COSTS_N_INSNS (8),            /* int_mult_di */
1036     COSTS_N_INSNS (69),           /* int_div_si */
1037     COSTS_N_INSNS (69),           /* int_div_di */
1038 		     1,           /* branch_cost */
1039 		     4            /* memory_latency */
1040   },
1041   { /* R1x000 */
1042     COSTS_N_INSNS (2),            /* fp_add */
1043     COSTS_N_INSNS (2),            /* fp_mult_sf */
1044     COSTS_N_INSNS (2),            /* fp_mult_df */
1045     COSTS_N_INSNS (12),           /* fp_div_sf */
1046     COSTS_N_INSNS (19),           /* fp_div_df */
1047     COSTS_N_INSNS (5),            /* int_mult_si */
1048     COSTS_N_INSNS (9),            /* int_mult_di */
1049     COSTS_N_INSNS (34),           /* int_div_si */
1050     COSTS_N_INSNS (66),           /* int_div_di */
1051 		     1,           /* branch_cost */
1052 		     4            /* memory_latency */
1053   },
1054   { /* SB1 */
1055     /* These costs are the same as the SB-1A below.  */
1056     COSTS_N_INSNS (4),            /* fp_add */
1057     COSTS_N_INSNS (4),            /* fp_mult_sf */
1058     COSTS_N_INSNS (4),            /* fp_mult_df */
1059     COSTS_N_INSNS (24),           /* fp_div_sf */
1060     COSTS_N_INSNS (32),           /* fp_div_df */
1061     COSTS_N_INSNS (3),            /* int_mult_si */
1062     COSTS_N_INSNS (4),            /* int_mult_di */
1063     COSTS_N_INSNS (36),           /* int_div_si */
1064     COSTS_N_INSNS (68),           /* int_div_di */
1065 		     1,           /* branch_cost */
1066 		     4            /* memory_latency */
1067   },
1068   { /* SB1-A */
1069     /* These costs are the same as the SB-1 above.  */
1070     COSTS_N_INSNS (4),            /* fp_add */
1071     COSTS_N_INSNS (4),            /* fp_mult_sf */
1072     COSTS_N_INSNS (4),            /* fp_mult_df */
1073     COSTS_N_INSNS (24),           /* fp_div_sf */
1074     COSTS_N_INSNS (32),           /* fp_div_df */
1075     COSTS_N_INSNS (3),            /* int_mult_si */
1076     COSTS_N_INSNS (4),            /* int_mult_di */
1077     COSTS_N_INSNS (36),           /* int_div_si */
1078     COSTS_N_INSNS (68),           /* int_div_di */
1079 		     1,           /* branch_cost */
1080 		     4            /* memory_latency */
1081   },
1082   { /* SR71000 */
1083     DEFAULT_COSTS
1084   },
1085   { /* XLR */
1086     SOFT_FP_COSTS,
1087     COSTS_N_INSNS (8),            /* int_mult_si */
1088     COSTS_N_INSNS (8),            /* int_mult_di */
1089     COSTS_N_INSNS (72),           /* int_div_si */
1090     COSTS_N_INSNS (72),           /* int_div_di */
1091 		     1,           /* branch_cost */
1092 		     4            /* memory_latency */
1093   },
1094   { /* XLP */
1095     /* These costs are the same as 5KF above.  */
1096     COSTS_N_INSNS (4),            /* fp_add */
1097     COSTS_N_INSNS (4),            /* fp_mult_sf */
1098     COSTS_N_INSNS (5),            /* fp_mult_df */
1099     COSTS_N_INSNS (17),           /* fp_div_sf */
1100     COSTS_N_INSNS (32),           /* fp_div_df */
1101     COSTS_N_INSNS (4),            /* int_mult_si */
1102     COSTS_N_INSNS (11),           /* int_mult_di */
1103     COSTS_N_INSNS (36),           /* int_div_si */
1104     COSTS_N_INSNS (68),           /* int_div_di */
1105 		     1,           /* branch_cost */
1106 		     4            /* memory_latency */
1107   },
1108   { /* P5600 */
1109     COSTS_N_INSNS (4),            /* fp_add */
1110     COSTS_N_INSNS (5),            /* fp_mult_sf */
1111     COSTS_N_INSNS (5),            /* fp_mult_df */
1112     COSTS_N_INSNS (17),           /* fp_div_sf */
1113     COSTS_N_INSNS (17),           /* fp_div_df */
1114     COSTS_N_INSNS (5),            /* int_mult_si */
1115     COSTS_N_INSNS (5),            /* int_mult_di */
1116     COSTS_N_INSNS (8),            /* int_div_si */
1117     COSTS_N_INSNS (8),            /* int_div_di */
1118 		    2,            /* branch_cost */
1119 		    4             /* memory_latency */
1120   },
1121   { /* M5100 */
1122     COSTS_N_INSNS (4),            /* fp_add */
1123     COSTS_N_INSNS (4),            /* fp_mult_sf */
1124     COSTS_N_INSNS (5),            /* fp_mult_df */
1125     COSTS_N_INSNS (17),           /* fp_div_sf */
1126     COSTS_N_INSNS (32),           /* fp_div_df */
1127     COSTS_N_INSNS (5),            /* int_mult_si */
1128     COSTS_N_INSNS (5),            /* int_mult_di */
1129     COSTS_N_INSNS (34),           /* int_div_si */
1130     COSTS_N_INSNS (68),           /* int_div_di */
1131 		     1,           /* branch_cost */
1132 		     4            /* memory_latency */
1133   },
1134   { /* I6400 */
1135     COSTS_N_INSNS (4),            /* fp_add */
1136     COSTS_N_INSNS (5),            /* fp_mult_sf */
1137     COSTS_N_INSNS (5),            /* fp_mult_df */
1138     COSTS_N_INSNS (32),           /* fp_div_sf */
1139     COSTS_N_INSNS (32),           /* fp_div_df */
1140     COSTS_N_INSNS (5),            /* int_mult_si */
1141     COSTS_N_INSNS (5),            /* int_mult_di */
1142     COSTS_N_INSNS (36),           /* int_div_si */
1143     COSTS_N_INSNS (36),           /* int_div_di */
1144 		    2,            /* branch_cost */
1145 		    4             /* memory_latency */
1146   },
1147   { /* P6600 */
1148     COSTS_N_INSNS (4),            /* fp_add */
1149     COSTS_N_INSNS (5),            /* fp_mult_sf */
1150     COSTS_N_INSNS (5),            /* fp_mult_df */
1151     COSTS_N_INSNS (17),           /* fp_div_sf */
1152     COSTS_N_INSNS (17),           /* fp_div_df */
1153     COSTS_N_INSNS (5),            /* int_mult_si */
1154     COSTS_N_INSNS (5),            /* int_mult_di */
1155     COSTS_N_INSNS (8),            /* int_div_si */
1156     COSTS_N_INSNS (8),            /* int_div_di */
1157 		    2,            /* branch_cost */
1158 		    4             /* memory_latency */
1159   }
1160 };
1161 
1162 static rtx mips_find_pic_call_symbol (rtx_insn *, rtx, bool);
1163 static int mips_register_move_cost (machine_mode, reg_class_t,
1164 				    reg_class_t);
1165 static unsigned int mips_function_arg_boundary (machine_mode, const_tree);
1166 static rtx mips_gen_const_int_vector_shuffle (machine_mode, int);
1167 
1168 /* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1169    for -mflip_mips16.  It maps decl names onto a boolean mode setting.  */
1170 static GTY (()) hash_map<nofree_string_hash, bool> *mflip_mips16_htab;
1171 
1172 /* True if -mflip-mips16 should next add an attribute for the default MIPS16
1173    mode, false if it should next add an attribute for the opposite mode.  */
1174 static GTY(()) bool mips16_flipper;
1175 
1176 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1177    for -mflip-mips16.  Return true if it should use "mips16" and false if
1178    it should use "nomips16".  */
1179 
static bool
mflip_mips16_use_mips16_p (tree decl)
{
  const char *name;
  bool base_is_mips16 = (mips_base_compression_flags & MASK_MIPS16) != 0;

  /* Use the opposite of the command-line setting for anonymous decls.  */
  if (!DECL_NAME (decl))
    return !base_is_mips16;

  /* Create the name -> setting map lazily; 37 is just an initial size.  */
  if (!mflip_mips16_htab)
    mflip_mips16_htab = hash_map<nofree_string_hash, bool>::create_ggc (37);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));

  /* The first time we see NAME, flip MIPS16_FLIPPER and record the resulting
     choice in the hash table, so that every later declaration of the same
     function gets the same answer.  */
  bool existed;
  bool *slot = &mflip_mips16_htab->get_or_insert (name, &existed);
  if (!existed)
    {
      mips16_flipper = !mips16_flipper;
      *slot = mips16_flipper ? !base_is_mips16 : base_is_mips16;
    }
  return *slot;
}
1204 
1205 /* Predicates to test for presence of "near"/"short_call" and "far"/"long_call"
1206    attributes on the given TYPE.  */
1207 
1208 static bool
mips_near_type_p(const_tree type)1209 mips_near_type_p (const_tree type)
1210 {
1211   return (lookup_attribute ("short_call", TYPE_ATTRIBUTES (type)) != NULL
1212 	  || lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL);
1213 }
1214 
1215 static bool
mips_far_type_p(const_tree type)1216 mips_far_type_p (const_tree type)
1217 {
1218   return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1219 	  || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1220 }
1221 
1222 
1223 /* Check if the interrupt attribute is set for a function.  */
1224 
1225 static bool
mips_interrupt_type_p(tree type)1226 mips_interrupt_type_p (tree type)
1227 {
1228   return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
1229 }
1230 
1231 /* Return the mask for the "interrupt" attribute.  */
1232 
static enum mips_int_mask
mips_interrupt_mask (tree type)
{
  tree attr = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type));
  tree args, cst;
  const char *str;

  /* For missing attributes or no arguments then return 'eic' as a safe
     fallback.  */
  if (attr == NULL)
    return INT_MASK_EIC;

  args = TREE_VALUE (attr);

  if (args == NULL)
    return INT_MASK_EIC;

  cst = TREE_VALUE (args);

  /* An explicit "eic" argument also selects the EIC mask.  */
  if (strcmp (TREE_STRING_POINTER (cst), "eic") == 0)
    return INT_MASK_EIC;

  /* The validation code in mips_handle_interrupt_attr guarantees that the
     argument is now in the form:
     vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5).  */
  str = TREE_STRING_POINTER (cst);

  gcc_assert (strlen (str) == strlen ("vector=sw0"));

  /* STR[7] is 's' for software or 'h' for hardware interrupt lines;
     STR[9] is the decimal line number within that group.  */
  if (str[7] == 's')
    return (enum mips_int_mask) (INT_MASK_SW0 + (str[9] - '0'));

  return (enum mips_int_mask) (INT_MASK_HW0 + (str[9] - '0'));
}
1267 
1268 /* Return the mips_shadow_set if the "use_shadow_register_set" attribute is
1269    set for a function.  */
1270 
1271 static enum mips_shadow_set
mips_use_shadow_register_set(tree type)1272 mips_use_shadow_register_set (tree type)
1273 {
1274   tree attr = lookup_attribute ("use_shadow_register_set",
1275 				TYPE_ATTRIBUTES (type));
1276   tree args;
1277 
1278   /* The validation code in mips_handle_use_shadow_register_set_attr guarantees
1279      that if an argument is present then it means: Assume the shadow register
1280      set has a valid stack pointer in it.  */
1281   if (attr == NULL)
1282     return SHADOW_SET_NO;
1283 
1284   args = TREE_VALUE (attr);
1285 
1286   if (args == NULL)
1287     return SHADOW_SET_YES;
1288 
1289   return SHADOW_SET_INTSTACK;
1290 }
1291 
1292 /* Check if the attribute to keep interrupts masked is set for a function.  */
1293 
1294 static bool
mips_keep_interrupts_masked_p(tree type)1295 mips_keep_interrupts_masked_p (tree type)
1296 {
1297   return lookup_attribute ("keep_interrupts_masked",
1298 			   TYPE_ATTRIBUTES (type)) != NULL;
1299 }
1300 
1301 /* Check if the attribute to use debug exception return is set for
1302    a function.  */
1303 
1304 static bool
mips_use_debug_exception_return_p(tree type)1305 mips_use_debug_exception_return_p (tree type)
1306 {
1307   return lookup_attribute ("use_debug_exception_return",
1308 			   TYPE_ATTRIBUTES (type)) != NULL;
1309 }
1310 
1311 /* Return the set of compression modes that are explicitly required
1312    by the attributes in ATTRIBUTES.  */
1313 
1314 static unsigned int
mips_get_compress_on_flags(tree attributes)1315 mips_get_compress_on_flags (tree attributes)
1316 {
1317   unsigned int flags = 0;
1318 
1319   if (lookup_attribute ("mips16", attributes) != NULL)
1320     flags |= MASK_MIPS16;
1321 
1322   if (lookup_attribute ("micromips", attributes) != NULL)
1323     flags |= MASK_MICROMIPS;
1324 
1325   return flags;
1326 }
1327 
1328 /* Return the set of compression modes that are explicitly forbidden
1329    by the attributes in ATTRIBUTES.  */
1330 
1331 static unsigned int
mips_get_compress_off_flags(tree attributes)1332 mips_get_compress_off_flags (tree attributes)
1333 {
1334   unsigned int flags = 0;
1335 
1336   if (lookup_attribute ("nocompression", attributes) != NULL)
1337     flags |= MASK_MIPS16 | MASK_MICROMIPS;
1338 
1339   if (lookup_attribute ("nomips16", attributes) != NULL)
1340     flags |= MASK_MIPS16;
1341 
1342   if (lookup_attribute ("nomicromips", attributes) != NULL)
1343     flags |= MASK_MICROMIPS;
1344 
1345   return flags;
1346 }
1347 
1348 /* Return the compression mode that should be used for function DECL.
1349    Return the ambient setting if DECL is null.  */
1350 
static unsigned int
mips_get_compress_mode (tree decl)
{
  unsigned int flags, force_on;

  /* Start from the ambient (command-line) compression setting.  */
  flags = mips_base_compression_flags;
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
	 parent and must therefore use the same ISA mode.  */
      tree parent = decl_function_context (decl);
      if (parent)
	decl = parent;
      /* An explicit "mips16"/"micromips" attribute wins outright.  */
      force_on = mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
      if (force_on)
	return force_on;
      /* Otherwise honor any "no..." attributes by clearing those modes.  */
      flags &= ~mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
    }
  return flags;
}
1371 
1372 /* Return the attribute name associated with MASK_MIPS16 and MASK_MICROMIPS
1373    flags FLAGS.  */
1374 
1375 static const char *
mips_get_compress_on_name(unsigned int flags)1376 mips_get_compress_on_name (unsigned int flags)
1377 {
1378   if (flags == MASK_MIPS16)
1379     return "mips16";
1380   return "micromips";
1381 }
1382 
1383 /* Return the attribute name that forbids MASK_MIPS16 and MASK_MICROMIPS
1384    flags FLAGS.  */
1385 
1386 static const char *
mips_get_compress_off_name(unsigned int flags)1387 mips_get_compress_off_name (unsigned int flags)
1388 {
1389   if (flags == MASK_MIPS16)
1390     return "nomips16";
1391   if (flags == MASK_MICROMIPS)
1392     return "nomicromips";
1393   return "nocompression";
1394 }
1395 
1396 /* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */
1397 
1398 static int
mips_comp_type_attributes(const_tree type1,const_tree type2)1399 mips_comp_type_attributes (const_tree type1, const_tree type2)
1400 {
1401   /* Disallow mixed near/far attributes.  */
1402   if (mips_far_type_p (type1) && mips_near_type_p (type2))
1403     return 0;
1404   if (mips_near_type_p (type1) && mips_far_type_p (type2))
1405     return 0;
1406   return 1;
1407 }
1408 
1409 /* Implement TARGET_INSERT_ATTRIBUTES.  */
1410 
1411 static void
mips_insert_attributes(tree decl,tree * attributes)1412 mips_insert_attributes (tree decl, tree *attributes)
1413 {
1414   const char *name;
1415   unsigned int compression_flags, nocompression_flags;
1416 
1417   /* Check for "mips16" and "nomips16" attributes.  */
1418   compression_flags = mips_get_compress_on_flags (*attributes);
1419   nocompression_flags = mips_get_compress_off_flags (*attributes);
1420 
1421   if (TREE_CODE (decl) != FUNCTION_DECL)
1422     {
1423       if (nocompression_flags)
1424 	error ("%qs attribute only applies to functions",
1425 	       mips_get_compress_off_name (nocompression_flags));
1426 
1427       if (compression_flags)
1428 	error ("%qs attribute only applies to functions",
1429 	       mips_get_compress_on_name (nocompression_flags));
1430     }
1431   else
1432     {
1433       compression_flags |= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
1434       nocompression_flags |=
1435 	mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
1436 
1437       if (compression_flags && nocompression_flags)
1438 	error ("%qE cannot have both %qs and %qs attributes",
1439 	       DECL_NAME (decl), mips_get_compress_on_name (compression_flags),
1440 	       mips_get_compress_off_name (nocompression_flags));
1441 
1442       if (compression_flags & MASK_MIPS16
1443           && compression_flags & MASK_MICROMIPS)
1444 	error ("%qE cannot have both %qs and %qs attributes",
1445 	       DECL_NAME (decl), "mips16", "micromips");
1446 
1447       if (TARGET_FLIP_MIPS16
1448 	  && !DECL_ARTIFICIAL (decl)
1449 	  && compression_flags == 0
1450 	  && nocompression_flags == 0)
1451 	{
1452 	  /* Implement -mflip-mips16.  If DECL has neither a "nomips16" nor a
1453 	     "mips16" attribute, arbitrarily pick one.  We must pick the same
1454 	     setting for duplicate declarations of a function.  */
1455 	  name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1456 	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1457 	  name = "nomicromips";
1458 	  *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1459 	}
1460     }
1461 }
1462 
1463 /* Implement TARGET_MERGE_DECL_ATTRIBUTES.  */
1464 
1465 static tree
mips_merge_decl_attributes(tree olddecl,tree newdecl)1466 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1467 {
1468   unsigned int diff;
1469 
1470   diff = (mips_get_compress_on_flags (DECL_ATTRIBUTES (olddecl))
1471 	  ^ mips_get_compress_on_flags (DECL_ATTRIBUTES (newdecl)));
1472   if (diff)
1473     error ("%qE redeclared with conflicting %qs attributes",
1474 	   DECL_NAME (newdecl), mips_get_compress_on_name (diff));
1475 
1476   diff = (mips_get_compress_off_flags (DECL_ATTRIBUTES (olddecl))
1477 	  ^ mips_get_compress_off_flags (DECL_ATTRIBUTES (newdecl)));
1478   if (diff)
1479     error ("%qE redeclared with conflicting %qs attributes",
1480 	   DECL_NAME (newdecl), mips_get_compress_off_name (diff));
1481 
1482   return merge_attributes (DECL_ATTRIBUTES (olddecl),
1483 			   DECL_ATTRIBUTES (newdecl));
1484 }
1485 
1486 /* Implement TARGET_CAN_INLINE_P.  */
1487 
1488 static bool
mips_can_inline_p(tree caller,tree callee)1489 mips_can_inline_p (tree caller, tree callee)
1490 {
1491   if (mips_get_compress_mode (callee) != mips_get_compress_mode (caller))
1492     return false;
1493   return default_target_can_inline_p (caller, callee);
1494 }
1495 
1496 /* Handle an "interrupt" attribute with an optional argument.  */
1497 
static tree
mips_handle_interrupt_attr (tree *node ATTRIBUTE_UNUSED, tree name, tree args,
			    int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  /* Check for an argument.  */
  if (is_attribute_p ("interrupt", name) && args != NULL)
    {
      tree cst;

      cst = TREE_VALUE (args);
      /* The argument, if any, must be a string constant.  */
      if (TREE_CODE (cst) != STRING_CST)
	{
	  warning (OPT_Wattributes,
		   "%qE attribute requires a string argument",
		   name);
	  *no_add_attrs = true;
	}
      /* The only accepted forms are "eic" and "vector=<line>".  */
      else if (strcmp (TREE_STRING_POINTER (cst), "eic") != 0
	       && strncmp (TREE_STRING_POINTER (cst), "vector=", 7) != 0)
	{
	  warning (OPT_Wattributes,
		   "argument to %qE attribute is neither eic, nor "
		   "vector=<line>", name);
	  *no_add_attrs = true;
	}
      /* For "vector=..." arguments, validate the interrupt line name.  */
      else if (strncmp (TREE_STRING_POINTER (cst), "vector=", 7) == 0)
	{
	  const char *arg = TREE_STRING_POINTER (cst) + 7;

	  /* Acceptable names are: sw0,sw1,hw0,hw1,hw2,hw3,hw4,hw5.  */
	  if (strlen (arg) != 3
	      || (arg[0] != 's' && arg[0] != 'h')
	      || arg[1] != 'w'
	      || (arg[0] == 's' && arg[2] != '0' && arg[2] != '1')
	      || (arg[0] == 'h' && (arg[2] < '0' || arg[2] > '5')))
	    {
	      warning (OPT_Wattributes,
		       "interrupt vector to %qE attribute is not "
		       "vector=(sw0|sw1|hw0|hw1|hw2|hw3|hw4|hw5)",
		       name);
	      *no_add_attrs = true;
	    }
	}

      return NULL_TREE;
    }

  return NULL_TREE;
}
1547 
1548 /* Handle a "use_shadow_register_set" attribute with an optional argument.  */
1549 
static tree
mips_handle_use_shadow_register_set_attr (tree *node ATTRIBUTE_UNUSED,
					  tree name, tree args,
					  int flags ATTRIBUTE_UNUSED,
					  bool *no_add_attrs)
{
  /* Check for an argument.  */
  if (is_attribute_p ("use_shadow_register_set", name) && args != NULL)
    {
      tree cst;

      cst = TREE_VALUE (args);
      /* The argument, if any, must be a string constant.  */
      if (TREE_CODE (cst) != STRING_CST)
	{
	  warning (OPT_Wattributes,
		   "%qE attribute requires a string argument",
		   name);
	  *no_add_attrs = true;
	}
      /* "intstack" is the only accepted argument value.  */
      else if (strcmp (TREE_STRING_POINTER (cst), "intstack") != 0)
	{
	  warning (OPT_Wattributes,
		   "argument to %qE attribute is not intstack", name);
	  *no_add_attrs = true;
	}

      return NULL_TREE;
    }

  return NULL_TREE;
}
1581 
1582 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1583    and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */
1584 
1585 static void
mips_split_plus(rtx x,rtx * base_ptr,HOST_WIDE_INT * offset_ptr)1586 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1587 {
1588   if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
1589     {
1590       *base_ptr = XEXP (x, 0);
1591       *offset_ptr = INTVAL (XEXP (x, 1));
1592     }
1593   else
1594     {
1595       *base_ptr = x;
1596       *offset_ptr = 0;
1597     }
1598 }
1599 
1600 static unsigned int mips_build_integer (struct mips_integer_op *,
1601 					unsigned HOST_WIDE_INT);
1602 
1603 /* A subroutine of mips_build_integer, with the same interface.
1604    Assume that the final action in the sequence should be a left shift.  */
1605 
1606 static unsigned int
mips_build_shift(struct mips_integer_op * codes,HOST_WIDE_INT value)1607 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1608 {
1609   unsigned int i, shift;
1610 
1611   /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
1612      since signed numbers are easier to load than unsigned ones.  */
1613   shift = 0;
1614   while ((value & 1) == 0)
1615     value /= 2, shift++;
1616 
1617   i = mips_build_integer (codes, value);
1618   codes[i].code = ASHIFT;
1619   codes[i].value = shift;
1620   return i + 1;
1621 }
1622 
1623 /* As for mips_build_shift, but assume that the final action will be
1624    an IOR or PLUS operation.  */
1625 
1626 static unsigned int
mips_build_lower(struct mips_integer_op * codes,unsigned HOST_WIDE_INT value)1627 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1628 {
1629   unsigned HOST_WIDE_INT high;
1630   unsigned int i;
1631 
1632   high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1633   if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1634     {
1635       /* The constant is too complex to load with a simple LUI/ORI pair,
1636 	 so we want to give the recursive call as many trailing zeros as
1637 	 possible.  In this case, we know bit 16 is set and that the
1638 	 low 16 bits form a negative number.  If we subtract that number
1639 	 from VALUE, we will clear at least the lowest 17 bits, maybe more.  */
1640       i = mips_build_integer (codes, CONST_HIGH_PART (value));
1641       codes[i].code = PLUS;
1642       codes[i].value = CONST_LOW_PART (value);
1643     }
1644   else
1645     {
1646       /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1647 	 bits gives a value with at least 17 trailing zeros.  */
1648       i = mips_build_integer (codes, high);
1649       codes[i].code = IOR;
1650       codes[i].value = value & 0xffff;
1651     }
1652   return i + 1;
1653 }
1654 
1655 /* Fill CODES with a sequence of rtl operations to load VALUE.
1656    Return the number of operations needed.  */
1657 
1658 static unsigned int
mips_build_integer(struct mips_integer_op * codes,unsigned HOST_WIDE_INT value)1659 mips_build_integer (struct mips_integer_op *codes,
1660 		    unsigned HOST_WIDE_INT value)
1661 {
1662   if (SMALL_OPERAND (value)
1663       || SMALL_OPERAND_UNSIGNED (value)
1664       || LUI_OPERAND (value))
1665     {
1666       /* The value can be loaded with a single instruction.  */
1667       codes[0].code = UNKNOWN;
1668       codes[0].value = value;
1669       return 1;
1670     }
1671   else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1672     {
1673       /* Either the constant is a simple LUI/ORI combination or its
1674 	 lowest bit is set.  We don't want to shift in this case.  */
1675       return mips_build_lower (codes, value);
1676     }
1677   else if ((value & 0xffff) == 0)
1678     {
1679       /* The constant will need at least three actions.  The lowest
1680 	 16 bits are clear, so the final action will be a shift.  */
1681       return mips_build_shift (codes, value);
1682     }
1683   else
1684     {
1685       /* The final action could be a shift, add or inclusive OR.
1686 	 Rather than use a complex condition to select the best
1687 	 approach, try both mips_build_shift and mips_build_lower
1688 	 and pick the one that gives the shortest sequence.
1689 	 Note that this case is only used once per constant.  */
1690       struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1691       unsigned int cost, alt_cost;
1692 
1693       cost = mips_build_shift (codes, value);
1694       alt_cost = mips_build_lower (alt_codes, value);
1695       if (alt_cost < cost)
1696 	{
1697 	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1698 	  cost = alt_cost;
1699 	}
1700       return cost;
1701     }
1702 }
1703 
1704 /* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
1705 
1706 static bool
mips_legitimate_constant_p(machine_mode mode ATTRIBUTE_UNUSED,rtx x)1707 mips_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1708 {
1709   return mips_const_insns (x) > 0;
1710 }
1711 
1712 /* Return a SYMBOL_REF for a MIPS16 function called NAME.  */
1713 
1714 static rtx
mips16_stub_function(const char * name)1715 mips16_stub_function (const char *name)
1716 {
1717   rtx x;
1718 
1719   x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
1720   SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
1721   return x;
1722 }
1723 
1724 /* Return a legitimate call address for STUB, given that STUB is a MIPS16
1725    support function.  */
1726 
1727 static rtx
mips16_stub_call_address(mips_one_only_stub * stub)1728 mips16_stub_call_address (mips_one_only_stub *stub)
1729 {
1730   rtx fn = mips16_stub_function (stub->get_name ());
1731   SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_LOCAL;
1732   if (!call_insn_operand (fn, VOIDmode))
1733     fn = force_reg (Pmode, fn);
1734   return fn;
1735 }
1736 
1737 /* A stub for moving the thread pointer into TLS_GET_TP_REGNUM.  */
1738 
class mips16_rdhwr_one_only_stub : public mips_one_only_stub
{
  /* Return the stub's symbol name ("__mips16_rdhwr").  */
  virtual const char *get_name ();
  /* Emit the stub's assembly body to asm_out_file.  */
  virtual void output_body ();
};
1744 
1745 const char *
get_name()1746 mips16_rdhwr_one_only_stub::get_name ()
1747 {
1748   return "__mips16_rdhwr";
1749 }
1750 
1751 void
output_body()1752 mips16_rdhwr_one_only_stub::output_body ()
1753 {
1754   fprintf (asm_out_file,
1755 	   "\t.set\tpush\n"
1756 	   "\t.set\tmips32r2\n"
1757 	   "\t.set\tnoreorder\n"
1758 	   "\trdhwr\t$3,$29\n"
1759 	   "\t.set\tpop\n"
1760 	   "\tj\t$31\n");
1761 }
1762 
/* A stub for moving the FCSR into GET_FCSR_REGNUM.  */
class mips16_get_fcsr_one_only_stub : public mips_one_only_stub
{
  /* Return the stub's symbol name ("__mips16_get_fcsr").  */
  virtual const char *get_name ();
  /* Emit the stub's assembly body to asm_out_file.  */
  virtual void output_body ();
};
1769 
1770 const char *
get_name()1771 mips16_get_fcsr_one_only_stub::get_name ()
1772 {
1773   return "__mips16_get_fcsr";
1774 }
1775 
1776 void
output_body()1777 mips16_get_fcsr_one_only_stub::output_body ()
1778 {
1779   fprintf (asm_out_file,
1780 	   "\tcfc1\t%s,$31\n"
1781 	   "\tj\t$31\n", reg_names[GET_FCSR_REGNUM]);
1782 }
1783 
/* A stub for moving SET_FCSR_REGNUM into the FCSR.  */
class mips16_set_fcsr_one_only_stub : public mips_one_only_stub
{
  /* Return the stub's symbol name ("__mips16_set_fcsr").  */
  virtual const char *get_name ();
  /* Emit the stub's assembly body to asm_out_file.  */
  virtual void output_body ();
};
1790 
1791 const char *
get_name()1792 mips16_set_fcsr_one_only_stub::get_name ()
1793 {
1794   return "__mips16_set_fcsr";
1795 }
1796 
1797 void
output_body()1798 mips16_set_fcsr_one_only_stub::output_body ()
1799 {
1800   fprintf (asm_out_file,
1801 	   "\tctc1\t%s,$31\n"
1802 	   "\tj\t$31\n", reg_names[SET_FCSR_REGNUM]);
1803 }
1804 
1805 /* Return true if symbols of type TYPE require a GOT access.  */
1806 
1807 static bool
mips_got_symbol_type_p(enum mips_symbol_type type)1808 mips_got_symbol_type_p (enum mips_symbol_type type)
1809 {
1810   switch (type)
1811     {
1812     case SYMBOL_GOT_PAGE_OFST:
1813     case SYMBOL_GOT_DISP:
1814       return true;
1815 
1816     default:
1817       return false;
1818     }
1819 }
1820 
1821 /* Return true if X is a thread-local symbol.  */
1822 
1823 static bool
mips_tls_symbol_p(rtx x)1824 mips_tls_symbol_p (rtx x)
1825 {
1826   return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1827 }
1828 
1829 /* Return true if SYMBOL_REF X is associated with a global symbol
1830    (in the STB_GLOBAL sense).  */
1831 
1832 static bool
mips_global_symbol_p(const_rtx x)1833 mips_global_symbol_p (const_rtx x)
1834 {
1835   const_tree decl = SYMBOL_REF_DECL (x);
1836 
1837   if (!decl)
1838     return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1839 
1840   /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1841      or weak symbols.  Relocations in the object file will be against
1842      the target symbol, so it's that symbol's binding that matters here.  */
1843   return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1844 }
1845 
1846 /* Return true if function X is a libgcc MIPS16 stub function.  */
1847 
1848 static bool
mips16_stub_function_p(const_rtx x)1849 mips16_stub_function_p (const_rtx x)
1850 {
1851   return (GET_CODE (x) == SYMBOL_REF
1852 	  && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1853 }
1854 
1855 /* Return true if function X is a locally-defined and locally-binding
1856    MIPS16 function.  */
1857 
1858 static bool
mips16_local_function_p(const_rtx x)1859 mips16_local_function_p (const_rtx x)
1860 {
1861   return (GET_CODE (x) == SYMBOL_REF
1862 	  && SYMBOL_REF_LOCAL_P (x)
1863 	  && !SYMBOL_REF_EXTERNAL_P (x)
1864 	  && (mips_get_compress_mode (SYMBOL_REF_DECL (x)) & MASK_MIPS16));
1865 }
1866 
1867 /* Return true if SYMBOL_REF X binds locally.  */
1868 
1869 static bool
mips_symbol_binds_local_p(const_rtx x)1870 mips_symbol_binds_local_p (const_rtx x)
1871 {
1872   return (SYMBOL_REF_DECL (x)
1873 	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1874 	  : SYMBOL_REF_LOCAL_P (x));
1875 }
1876 
1877 /* Return true if OP is a constant vector with the number of units in MODE,
1878    and each unit has the same bit set.  */
1879 
1880 bool
mips_const_vector_bitimm_set_p(rtx op,machine_mode mode)1881 mips_const_vector_bitimm_set_p (rtx op, machine_mode mode)
1882 {
1883   if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode))
1884     {
1885       unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
1886       int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
1887 
1888       if (vlog2 != -1)
1889 	{
1890 	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
1891 	  gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
1892 	  return mips_const_vector_same_val_p (op, mode);
1893 	}
1894     }
1895 
1896   return false;
1897 }
1898 
1899 /* Return true if OP is a constant vector with the number of units in MODE,
1900    and each unit has the same bit clear.  */
1901 
1902 bool
mips_const_vector_bitimm_clr_p(rtx op,machine_mode mode)1903 mips_const_vector_bitimm_clr_p (rtx op, machine_mode mode)
1904 {
1905   if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode))
1906     {
1907       unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0));
1908       int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
1909 
1910       if (vlog2 != -1)
1911 	{
1912 	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
1913 	  gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
1914 	  return mips_const_vector_same_val_p (op, mode);
1915 	}
1916     }
1917 
1918   return false;
1919 }
1920 
1921 /* Return true if OP is a constant vector with the number of units in MODE,
1922    and each unit has the same value.  */
1923 
1924 bool
mips_const_vector_same_val_p(rtx op,machine_mode mode)1925 mips_const_vector_same_val_p (rtx op, machine_mode mode)
1926 {
1927   int i, nunits = GET_MODE_NUNITS (mode);
1928   rtx first;
1929 
1930   if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode)
1931     return false;
1932 
1933   first = CONST_VECTOR_ELT (op, 0);
1934   for (i = 1; i < nunits; i++)
1935     if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i)))
1936       return false;
1937 
1938   return true;
1939 }
1940 
1941 /* Return true if OP is a constant vector with the number of units in MODE,
1942    and each unit has the same value as well as replicated bytes in the value.
1943 */
1944 
1945 bool
mips_const_vector_same_bytes_p(rtx op,machine_mode mode)1946 mips_const_vector_same_bytes_p (rtx op, machine_mode mode)
1947 {
1948   int i, bytes;
1949   HOST_WIDE_INT val, first_byte;
1950   rtx first;
1951 
1952   if (!mips_const_vector_same_val_p (op, mode))
1953     return false;
1954 
1955   first = CONST_VECTOR_ELT (op, 0);
1956   bytes = GET_MODE_UNIT_SIZE (mode);
1957   val = INTVAL (first);
1958   first_byte = val & 0xff;
1959   for (i = 1; i < bytes; i++)
1960     {
1961       val >>= 8;
1962       if ((val & 0xff) != first_byte)
1963 	return false;
1964     }
1965 
1966   return true;
1967 }
1968 
1969 /* Return true if OP is a constant vector with the number of units in MODE,
1970    and each unit has the same integer value in the range [LOW, HIGH].  */
1971 
1972 bool
mips_const_vector_same_int_p(rtx op,machine_mode mode,HOST_WIDE_INT low,HOST_WIDE_INT high)1973 mips_const_vector_same_int_p (rtx op, machine_mode mode, HOST_WIDE_INT low,
1974 			      HOST_WIDE_INT high)
1975 {
1976   HOST_WIDE_INT value;
1977   rtx elem0;
1978 
1979   if (!mips_const_vector_same_val_p (op, mode))
1980     return false;
1981 
1982   elem0 = CONST_VECTOR_ELT (op, 0);
1983   if (!CONST_INT_P (elem0))
1984     return false;
1985 
1986   value = INTVAL (elem0);
1987   return (value >= low && value <= high);
1988 }
1989 
1990 /* Return true if OP is a constant vector with repeated 4-element sets
1991    in mode MODE.  */
1992 
1993 bool
mips_const_vector_shuffle_set_p(rtx op,machine_mode mode)1994 mips_const_vector_shuffle_set_p (rtx op, machine_mode mode)
1995 {
1996   int nunits = GET_MODE_NUNITS (mode);
1997   int nsets = nunits / 4;
1998   int set = 0;
1999   int i, j;
2000 
2001   /* Check if we have the same 4-element sets.  */
2002   for (j = 0; j < nsets; j++, set = 4 * j)
2003     for (i = 0; i < 4; i++)
2004       if ((INTVAL (XVECEXP (op, 0, i))
2005 	   != (INTVAL (XVECEXP (op, 0, set + i)) - set))
2006 	  || !IN_RANGE (INTVAL (XVECEXP (op, 0, set + i)), 0, set + 3))
2007 	return false;
2008   return true;
2009 }
2010 
2011 /* Return true if rtx constants of mode MODE should be put into a small
2012    data section.  */
2013 
2014 static bool
mips_rtx_constant_in_small_data_p(machine_mode mode)2015 mips_rtx_constant_in_small_data_p (machine_mode mode)
2016 {
2017   return (!TARGET_EMBEDDED_DATA
2018 	  && TARGET_LOCAL_SDATA
2019 	  && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
2020 }
2021 
2022 /* Return true if X should not be moved directly into register $25.
2023    We need this because many versions of GAS will treat "la $25,foo" as
2024    part of a call sequence and so allow a global "foo" to be lazily bound.  */
2025 
2026 bool
mips_dangerous_for_la25_p(rtx x)2027 mips_dangerous_for_la25_p (rtx x)
2028 {
2029   return (!TARGET_EXPLICIT_RELOCS
2030 	  && TARGET_USE_GOT
2031 	  && GET_CODE (x) == SYMBOL_REF
2032 	  && mips_global_symbol_p (x));
2033 }
2034 
2035 /* Return true if calls to X might need $25 to be valid on entry.  */
2036 
2037 bool
mips_use_pic_fn_addr_reg_p(const_rtx x)2038 mips_use_pic_fn_addr_reg_p (const_rtx x)
2039 {
2040   if (!TARGET_USE_PIC_FN_ADDR_REG)
2041     return false;
2042 
2043   /* MIPS16 stub functions are guaranteed not to use $25.  */
2044   if (mips16_stub_function_p (x))
2045     return false;
2046 
2047   if (GET_CODE (x) == SYMBOL_REF)
2048     {
2049       /* If PLTs and copy relocations are available, the static linker
2050 	 will make sure that $25 is valid on entry to the target function.  */
2051       if (TARGET_ABICALLS_PIC0)
2052 	return false;
2053 
2054       /* Locally-defined functions use absolute accesses to set up
2055 	 the global pointer.  */
2056       if (TARGET_ABSOLUTE_ABICALLS
2057 	  && mips_symbol_binds_local_p (x)
2058 	  && !SYMBOL_REF_EXTERNAL_P (x))
2059 	return false;
2060     }
2061 
2062   return true;
2063 }
2064 
2065 /* Return the method that should be used to access SYMBOL_REF or
2066    LABEL_REF X in context CONTEXT.  */
2067 
static enum mips_symbol_type
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
{
  /* In RTP PIC mode every symbol is accessed through a GOT
     displacement.  */
  if (TARGET_RTP_PIC)
    return SYMBOL_GOT_DISP;

  if (GET_CODE (x) == LABEL_REF)
    {
      /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
	 code and if we know that the label is in the current function's
	 text section.  LABEL_REFs are used for jump tables as well as
	 text labels, so we must check whether jump tables live in the
	 text section.  */
      if (TARGET_MIPS16_SHORT_JUMP_TABLES
	  && !LABEL_REF_NONLOCAL_P (x))
	return SYMBOL_PC_RELATIVE;

      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_PAGE_OFST;

      return SYMBOL_ABSOLUTE;
    }

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  /* TLS symbols are classified separately; callers reject SYMBOL_TLS.  */
  if (SYMBOL_REF_TLS_MODEL (x))
    return SYMBOL_TLS;

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (TARGET_MIPS16_TEXT_LOADS)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
	return SYMBOL_PC_RELATIVE;

      /* Small constants can live in the small-data section and be
	 accessed via $gp.  */
      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
	return SYMBOL_GP_RELATIVE;
    }

  /* Do not use small-data accesses for weak symbols; they may end up
     being zero.  */
  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
    return SYMBOL_GP_RELATIVE;

  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
     is in effect.  */
  if (TARGET_ABICALLS_PIC2
      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
    {
      /* There are three cases to consider:

	    - o32 PIC (either with or without explicit relocs)
	    - n32/n64 PIC without explicit relocs
	    - n32/n64 PIC with explicit relocs

	 In the first case, both local and global accesses will use an
	 R_MIPS_GOT16 relocation.  We must correctly predict which of
	 the two semantics (local or global) the assembler and linker
	 will apply.  The choice depends on the symbol's binding rather
	 than its visibility.

	 In the second case, the assembler will not use R_MIPS_GOT16
	 relocations, but it chooses between local and global accesses
	 in the same way as for o32 PIC.

	 In the third case we have more freedom since both forms of
	 access will work for any kind of symbol.  However, there seems
	 little point in doing things differently.  */
      if (mips_global_symbol_p (x))
	return SYMBOL_GOT_DISP;

      return SYMBOL_GOT_PAGE_OFST;
    }

  return SYMBOL_ABSOLUTE;
}
2145 
2146 /* Classify the base of symbolic expression X, given that X appears in
2147    context CONTEXT.  */
2148 
2149 static enum mips_symbol_type
mips_classify_symbolic_expression(rtx x,enum mips_symbol_context context)2150 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
2151 {
2152   rtx offset;
2153 
2154   split_const (x, &x, &offset);
2155   if (UNSPEC_ADDRESS_P (x))
2156     return UNSPEC_ADDRESS_TYPE (x);
2157 
2158   return mips_classify_symbol (x, context);
2159 }
2160 
2161 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
2162    is the alignment in bytes of SYMBOL_REF X.  */
2163 
2164 static bool
mips_offset_within_alignment_p(rtx x,HOST_WIDE_INT offset)2165 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
2166 {
2167   HOST_WIDE_INT align;
2168 
2169   align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
2170   return IN_RANGE (offset, 0, align - 1);
2171 }
2172 
2173 /* Return true if X is a symbolic constant that can be used in context
2174    CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */
2175 
bool
mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
			  enum mips_symbol_type *symbol_type)
{
  rtx offset;

  /* Split X into its base symbol and constant offset.  */
  split_const (x, &x, &offset);
  if (UNSPEC_ADDRESS_P (x))
    {
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
      x = UNSPEC_ADDRESS (x);
    }
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    {
      *symbol_type = mips_classify_symbol (x, context);
      /* Bare TLS symbols are never valid constants.  */
      if (*symbol_type == SYMBOL_TLS)
	return false;
    }
  else
    return false;

  /* A symbol with no offset is always acceptable.  */
  if (offset == const0_rtx)
    return true;

  /* Check whether a nonzero offset is valid for the underlying
     relocations.  */
  switch (*symbol_type)
    {
    case SYMBOL_ABSOLUTE:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
      /* If the target has 64-bit pointers and the object file only
	 supports 32-bit symbols, the values of those symbols will be
	 sign-extended.  In this case we can't allow an arbitrary offset
	 in case the 32-bit value X + OFFSET has a different sign from X.  */
      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
	return offset_within_block_p (x, INTVAL (offset));

      /* In other cases the relocations can handle any offset.  */
      return true;

    case SYMBOL_PC_RELATIVE:
      /* Allow constant pool references to be converted to LABEL+CONSTANT.
	 In this case, we no longer have access to the underlying constant,
	 but the original symbol-based access was known to be valid.  */
      if (GET_CODE (x) == LABEL_REF)
	return true;

      /* Fall through.  */

    case SYMBOL_GP_RELATIVE:
      /* Make sure that the offset refers to something within the
	 same object block.  This should guarantee that the final
	 PC- or GP-relative offset is within the 16-bit limit.  */
      return offset_within_block_p (x, INTVAL (offset));

    case SYMBOL_GOT_PAGE_OFST:
    case SYMBOL_GOTOFF_PAGE:
      /* If the symbol is global, the GOT entry will contain the symbol's
	 address, and we will apply a 16-bit offset after loading it.
	 If the symbol is local, the linker should provide enough local
	 GOT entries for a 16-bit offset, but larger offsets may lead
	 to GOT overflow.  */
      return SMALL_INT (offset);

    case SYMBOL_TPREL:
    case SYMBOL_DTPREL:
      /* There is no carry between the HI and LO REL relocations, so the
	 offset is only valid if we know it won't lead to such a carry.  */
      return mips_offset_within_alignment_p (x, INTVAL (offset));

    case SYMBOL_GOT_DISP:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TLS:
    case SYMBOL_HALF:
      /* These relocation types never accept an offset.  */
      return false;
    }
  gcc_unreachable ();
}
2261 
2262 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
2263    single instruction.  We rely on the fact that, in the worst case,
2264    all instructions involved in a MIPS16 address calculation are usually
2265    extended ones.  */
2266 
static int
mips_symbol_insns_1 (enum mips_symbol_type type, machine_mode mode)
{
  /* Symbols that we have chosen to place in a PC-relative constant pool
     behave like SYMBOL_PC_RELATIVE for LEAs; as addresses they must be
     loaded first and so are rejected here.  */
  if (mips_use_pcrel_pool_p[(int) type])
    {
      if (mode == MAX_MACHINE_MODE)
	/* LEAs will be converted into constant-pool references by
	   mips_reorg.  */
	type = SYMBOL_PC_RELATIVE;
      else
	/* The constant must be loaded and then dereferenced.  */
	return 0;
    }

  switch (type)
    {
    case SYMBOL_ABSOLUTE:
      /* When using 64-bit symbols, we need 5 preparatory instructions,
	 such as:

	     lui     $at,%highest(symbol)
	     daddiu  $at,$at,%higher(symbol)
	     dsll    $at,$at,16
	     daddiu  $at,$at,%hi(symbol)
	     dsll    $at,$at,16

	 The final address is then $at + %lo(symbol).  With 32-bit
	 symbols we just need a preparatory LUI for normal mode and
	 a preparatory LI and SLL for MIPS16.  */
      return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_GP_RELATIVE:
      /* Treat GP-relative accesses as taking a single instruction on
	 MIPS16 too; the copy of $gp can often be shared.  */
      return 1;

    case SYMBOL_PC_RELATIVE:
      /* PC-relative constants can be only be used with ADDIUPC,
	 DADDIUPC, LWPC and LDPC.  */
      if (mode == MAX_MACHINE_MODE
	  || GET_MODE_SIZE (mode) == 4
	  || GET_MODE_SIZE (mode) == 8)
	return 1;

      /* The constant must be loaded using ADDIUPC or DADDIUPC first.  */
      return 0;

    case SYMBOL_GOT_DISP:
      /* The constant will have to be loaded from the GOT before it
	 is used in an address.  */
      if (mode != MAX_MACHINE_MODE)
	return 0;

      /* Fall through.  */

    case SYMBOL_GOT_PAGE_OFST:
      /* Unless -funit-at-a-time is in effect, we can't be sure whether the
	 local/global classification is accurate.  The worst cases are:

	 (1) For local symbols when generating o32 or o64 code.  The assembler
	     will use:

		 lw	      $at,%got(symbol)
		 nop

	     ...and the final address will be $at + %lo(symbol).

	 (2) For global symbols when -mxgot.  The assembler will use:

	         lui     $at,%got_hi(symbol)
	         (d)addu $at,$at,$gp

	     ...and the final address will be $at + %got_lo(symbol).  */
      return 3;

    case SYMBOL_GOTOFF_PAGE:
    case SYMBOL_GOTOFF_DISP:
    case SYMBOL_GOTOFF_CALL:
    case SYMBOL_GOTOFF_LOADGP:
    case SYMBOL_64_HIGH:
    case SYMBOL_64_MID:
    case SYMBOL_64_LOW:
    case SYMBOL_TLSGD:
    case SYMBOL_TLSLDM:
    case SYMBOL_DTPREL:
    case SYMBOL_GOTTPREL:
    case SYMBOL_TPREL:
    case SYMBOL_HALF:
      /* A 16-bit constant formed by a single relocation, or a 32-bit
	 constant formed from a high 16-bit relocation and a low 16-bit
	 relocation.  Use mips_split_p to determine which.  32-bit
	 constants need an "lui; addiu" sequence for normal mode and
	 an "li; sll; addiu" sequence for MIPS16 mode.  */
      return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;

    case SYMBOL_TLS:
      /* We don't treat a bare TLS symbol as a constant.  */
      return 0;
    }
  gcc_unreachable ();
}
2368 
2369 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
2370    to load symbols of type TYPE into a register.  Return 0 if the given
2371    type of symbol cannot be used as an immediate operand.
2372 
2373    Otherwise, return the number of instructions needed to load or store
2374    values of mode MODE to or from addresses of type TYPE.  Return 0 if
2375    the given type of symbol is not valid in addresses.
2376 
2377    In both cases, instruction counts are based off BASE_INSN_LENGTH.  */
2378 
2379 static int
mips_symbol_insns(enum mips_symbol_type type,machine_mode mode)2380 mips_symbol_insns (enum mips_symbol_type type, machine_mode mode)
2381 {
2382   /* MSA LD.* and ST.* cannot support loading symbols via an immediate
2383      operand.  */
2384   if (MSA_SUPPORTED_MODE_P (mode))
2385     return 0;
2386 
2387   return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
2388 }
2389 
2390 /* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
2391 
2392 static bool
mips_cannot_force_const_mem(machine_mode mode,rtx x)2393 mips_cannot_force_const_mem (machine_mode mode, rtx x)
2394 {
2395   enum mips_symbol_type type;
2396   rtx base, offset;
2397 
2398   /* There is no assembler syntax for expressing an address-sized
2399      high part.  */
2400   if (GET_CODE (x) == HIGH)
2401     return true;
2402 
2403   /* As an optimization, reject constants that mips_legitimize_move
2404      can expand inline.
2405 
2406      Suppose we have a multi-instruction sequence that loads constant C
2407      into register R.  If R does not get allocated a hard register, and
2408      R is used in an operand that allows both registers and memory
2409      references, reload will consider forcing C into memory and using
2410      one of the instruction's memory alternatives.  Returning false
2411      here will force it to use an input reload instead.  */
2412   if (CONST_INT_P (x) && mips_legitimate_constant_p (mode, x))
2413     return true;
2414 
2415   split_const (x, &base, &offset);
2416   if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type))
2417     {
2418       /* See whether we explicitly want these symbols in the pool.  */
2419       if (mips_use_pcrel_pool_p[(int) type])
2420 	return false;
2421 
2422       /* The same optimization as for CONST_INT.  */
2423       if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
2424 	return true;
2425 
2426       /* If MIPS16 constant pools live in the text section, they should
2427 	 not refer to anything that might need run-time relocation.  */
2428       if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
2429 	return true;
2430     }
2431 
2432   /* TLS symbols must be computed by mips_legitimize_move.  */
2433   if (tls_referenced_p (x))
2434     return true;
2435 
2436   return false;
2437 }
2438 
2439 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
2440    constants when we're using a per-function constant pool.  */
2441 
2442 static bool
mips_use_blocks_for_constant_p(machine_mode mode ATTRIBUTE_UNUSED,const_rtx x ATTRIBUTE_UNUSED)2443 mips_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
2444 				const_rtx x ATTRIBUTE_UNUSED)
2445 {
2446   return !TARGET_MIPS16_PCREL_LOADS;
2447 }
2448 
2449 /* Return true if register REGNO is a valid base register for mode MODE.
2450    STRICT_P is true if REG_OK_STRICT is in effect.  */
2451 
2452 int
mips_regno_mode_ok_for_base_p(int regno,machine_mode mode,bool strict_p)2453 mips_regno_mode_ok_for_base_p (int regno, machine_mode mode,
2454 			       bool strict_p)
2455 {
2456   if (!HARD_REGISTER_NUM_P (regno))
2457     {
2458       if (!strict_p)
2459 	return true;
2460       regno = reg_renumber[regno];
2461     }
2462 
2463   /* These fake registers will be eliminated to either the stack or
2464      hard frame pointer, both of which are usually valid base registers.
2465      Reload deals with the cases where the eliminated form isn't valid.  */
2466   if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
2467     return true;
2468 
2469   /* In MIPS16 mode, the stack pointer can only address word and doubleword
2470      values, nothing smaller.  */
2471   if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
2472     return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
2473 
2474   return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
2475 }
2476 
2477 /* Return true if X is a valid base register for mode MODE.
2478    STRICT_P is true if REG_OK_STRICT is in effect.  */
2479 
2480 static bool
mips_valid_base_register_p(rtx x,machine_mode mode,bool strict_p)2481 mips_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
2482 {
2483   if (!strict_p && GET_CODE (x) == SUBREG)
2484     x = SUBREG_REG (x);
2485 
2486   return (REG_P (x)
2487 	  && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
2488 }
2489 
2490 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2491    can address a value of mode MODE.  */
2492 
2493 static bool
mips_valid_offset_p(rtx x,machine_mode mode)2494 mips_valid_offset_p (rtx x, machine_mode mode)
2495 {
2496   /* Check that X is a signed 16-bit number.  */
2497   if (!const_arith_operand (x, Pmode))
2498     return false;
2499 
2500   /* We may need to split multiword moves, so make sure that every word
2501      is accessible.  */
2502   if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2503       && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
2504     return false;
2505 
2506   /* MSA LD.* and ST.* supports 10-bit signed offsets.  */
2507   if (MSA_SUPPORTED_MODE_P (mode)
2508       && !mips_signed_immediate_p (INTVAL (x), 10,
2509 				   mips_ldst_scaled_shift (mode)))
2510     return false;
2511 
2512   return true;
2513 }
2514 
2515 /* Return true if a LO_SUM can address a value of mode MODE when the
2516    LO_SUM symbol has type SYMBOL_TYPE.  */
2517 
2518 static bool
mips_valid_lo_sum_p(enum mips_symbol_type symbol_type,machine_mode mode)2519 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, machine_mode mode)
2520 {
2521   /* Check that symbols of type SYMBOL_TYPE can be used to access values
2522      of mode MODE.  */
2523   if (mips_symbol_insns (symbol_type, mode) == 0)
2524     return false;
2525 
2526   /* Check that there is a known low-part relocation.  */
2527   if (mips_lo_relocs[symbol_type] == NULL)
2528     return false;
2529 
2530   /* We may need to split multiword moves, so make sure that each word
2531      can be accessed without inducing a carry.  This is mainly needed
2532      for o64, which has historically only guaranteed 64-bit alignment
2533      for 128-bit types.  */
2534   if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2535       && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
2536     return false;
2537 
2538   /* MSA LD.* and ST.* cannot support loading symbols via %lo($base).  */
2539   if (MSA_SUPPORTED_MODE_P (mode))
2540     return false;
2541 
2542   return true;
2543 }
2544 
/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
   effect.  */

static bool
mips_classify_address (struct mips_address_info *info, rtx x,
		       machine_mode mode, bool strict_p)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      /* A bare (possibly SUBREG'd) base register; record a zero offset.  */
      info->type = ADDRESS_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return mips_valid_base_register_p (info->reg, mode, strict_p);

    case PLUS:
      /* Base register plus constant offset.  */
      info->type = ADDRESS_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
	      && mips_valid_offset_p (info->offset, mode));

    case LO_SUM:
      info->type = ADDRESS_LO_SUM;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      /* We have to trust the creator of the LO_SUM to do something vaguely
	 sane.  Target-independent code that creates a LO_SUM should also
	 create and verify the matching HIGH.  Target-independent code that
	 adds an offset to a LO_SUM must prove that the offset will not
	 induce a carry.  Failure to do either of these things would be
	 a bug, and we are not required to check for it here.  The MIPS
	 backend itself should only create LO_SUMs for valid symbolic
	 constants, with the high part being either a HIGH or a copy
	 of _gp. */
      info->symbol_type
	= mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
	      && mips_valid_lo_sum_p (info->symbol_type, mode));

    case CONST_INT:
      /* Small-integer addresses don't occur very often, but they
	 are legitimate if $0 is a valid base register.  */
      info->type = ADDRESS_CONST_INT;
      return !TARGET_MIPS16 && SMALL_INT (x);

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      /* Direct symbolic references are only valid if the symbol type
	 needs no high/low splitting and supports MEM-context accesses
	 of this mode.  */
      info->type = ADDRESS_SYMBOLIC;
      return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
					&info->symbol_type)
	      && mips_symbol_insns (info->symbol_type, mode) > 0
	      && !mips_split_p[info->symbol_type]);

    default:
      return false;
    }
}
2606 
2607 /* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
2608 
2609 static bool
mips_legitimate_address_p(machine_mode mode,rtx x,bool strict_p)2610 mips_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
2611 {
2612   struct mips_address_info addr;
2613 
2614   return mips_classify_address (&addr, x, mode, strict_p);
2615 }
2616 
2617 /* Return true if X is a legitimate $sp-based address for mode MODE.  */
2618 
2619 bool
mips_stack_address_p(rtx x,machine_mode mode)2620 mips_stack_address_p (rtx x, machine_mode mode)
2621 {
2622   struct mips_address_info addr;
2623 
2624   return (mips_classify_address (&addr, x, mode, false)
2625 	  && addr.type == ADDRESS_REG
2626 	  && addr.reg == stack_pointer_rtx);
2627 }
2628 
2629 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2630    address instruction.  Note that such addresses are not considered
2631    legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2632    is so restricted.  */
2633 
2634 static bool
mips_lwxs_address_p(rtx addr)2635 mips_lwxs_address_p (rtx addr)
2636 {
2637   if (ISA_HAS_LWXS
2638       && GET_CODE (addr) == PLUS
2639       && REG_P (XEXP (addr, 1)))
2640     {
2641       rtx offset = XEXP (addr, 0);
2642       if (GET_CODE (offset) == MULT
2643 	  && REG_P (XEXP (offset, 0))
2644 	  && CONST_INT_P (XEXP (offset, 1))
2645 	  && INTVAL (XEXP (offset, 1)) == 4)
2646 	return true;
2647     }
2648   return false;
2649 }
2650 
2651 /* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
2652    indexed address instruction.  Note that such addresses are
2653    not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
2654    sense, because their use is so restricted.  */
2655 
2656 static bool
mips_lx_address_p(rtx addr,machine_mode mode)2657 mips_lx_address_p (rtx addr, machine_mode mode)
2658 {
2659   if (GET_CODE (addr) != PLUS
2660       || !REG_P (XEXP (addr, 0))
2661       || !REG_P (XEXP (addr, 1)))
2662     return false;
2663   if (ISA_HAS_LBX && mode == QImode)
2664     return true;
2665   if (ISA_HAS_LHX && mode == HImode)
2666     return true;
2667   if (ISA_HAS_LWX && mode == SImode)
2668     return true;
2669   if (ISA_HAS_LDX && mode == DImode)
2670     return true;
2671   if (MSA_SUPPORTED_MODE_P (mode))
2672     return true;
2673   return false;
2674 }
2675 
2676 /* Return true if a value at OFFSET bytes from base register BASE can be
2677    accessed using an unextended MIPS16 instruction.  MODE is the mode of
2678    the value.
2679 
2680    Usually the offset in an unextended instruction is a 5-bit field.
2681    The offset is unsigned and shifted left once for LH and SH, twice
2682    for LW and SW, and so on.  An exception is LWSP and SWSP, which have
2683    an 8-bit immediate field that's shifted left twice.  */
2684 
2685 static bool
mips16_unextended_reference_p(machine_mode mode,rtx base,unsigned HOST_WIDE_INT offset)2686 mips16_unextended_reference_p (machine_mode mode, rtx base,
2687 			       unsigned HOST_WIDE_INT offset)
2688 {
2689   if (mode != BLKmode && offset % GET_MODE_SIZE (mode) == 0)
2690     {
2691       if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2692 	return offset < 256U * GET_MODE_SIZE (mode);
2693       return offset < 32U * GET_MODE_SIZE (mode);
2694     }
2695   return false;
2696 }
2697 
/* Return the number of instructions needed to load or store a value
   of mode MODE at address X, assuming that BASE_INSN_LENGTH is the
   length of one instruction.  Return 0 if X isn't valid for MODE.
   Assume that multiword moves may need to be split into word moves
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
   enough.  */

int
mips_address_insns (rtx x, machine_mode mode, bool might_split_p)
{
  struct mips_address_info addr;
  /* Number of word moves the access splits into (1 if not split).  */
  int factor;
  /* MSA accesses are never split and have their own offset rules.  */
  bool msa_p = (!might_split_p && MSA_SUPPORTED_MODE_P (mode));

  /* BLKmode is used for single unaligned loads and stores and should
     not count as a multiword mode.  (GET_MODE_SIZE (BLKmode) is pretty
     meaningless, so we have to single it out as a special case one way
     or the other.)  */
  if (mode != BLKmode && might_split_p)
    factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  else
    factor = 1;

  if (mips_classify_address (&addr, x, mode, false))
    switch (addr.type)
      {
      case ADDRESS_REG:
	if (msa_p)
	  {
	    /* MSA LD.* and ST.* supports 10-bit signed offsets.  */
	    if (mips_signed_immediate_p (INTVAL (addr.offset), 10,
					 mips_ldst_scaled_shift (mode)))
	      return 1;
	    else
	      return 0;
	  }
	/* MIPS16 references that don't fit the unextended encodings
	   need extended (double-length) instructions.  */
	if (TARGET_MIPS16
	    && !mips16_unextended_reference_p (mode, addr.reg,
					       UINTVAL (addr.offset)))
	  return factor * 2;
	return factor;

      case ADDRESS_LO_SUM:
	/* MIPS16 LO_SUM accesses always use extended instructions.  */
	return msa_p ? 0 : TARGET_MIPS16 ? factor * 2 : factor;

      case ADDRESS_CONST_INT:
	return msa_p ? 0 : factor;

      case ADDRESS_SYMBOLIC:
	/* Each word move also pays the cost of materializing the symbol.  */
	return msa_p ? 0 : factor * mips_symbol_insns (addr.symbol_type, mode);
      }
  return 0;
}
2751 
2752 /* Return true if X fits within an unsigned field of BITS bits that is
2753    shifted left SHIFT bits before being used.  */
2754 
2755 bool
2756 mips_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
2757 {
2758   return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits));
2759 }
2760 
2761 /* Return true if X fits within a signed field of BITS bits that is
2762    shifted left SHIFT bits before being used.  */
2763 
2764 bool
2765 mips_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
2766 {
2767   x += 1 << (bits + shift - 1);
2768   return mips_unsigned_immediate_p (x, bits, shift);
2769 }
2770 
2771 /* Return the scale shift that applied to MSA LD/ST address offset.  */
2772 
2773 int
mips_ldst_scaled_shift(machine_mode mode)2774 mips_ldst_scaled_shift (machine_mode mode)
2775 {
2776   int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode));
2777 
2778   if (shift < 0 || shift > 8)
2779     gcc_unreachable ();
2780 
2781   return shift;
2782 }
2783 
2784 /* Return true if X is legitimate for accessing values of mode MODE,
2785    if it is based on a MIPS16 register, and if the offset satisfies
2786    OFFSET_PREDICATE.  */
2787 
2788 bool
m16_based_address_p(rtx x,machine_mode mode,insn_operand_predicate_fn offset_predicate)2789 m16_based_address_p (rtx x, machine_mode mode,
2790 		     insn_operand_predicate_fn offset_predicate)
2791 {
2792   struct mips_address_info addr;
2793 
2794   return (mips_classify_address (&addr, x, mode, false)
2795 	  && addr.type == ADDRESS_REG
2796 	  && M16_REG_P (REGNO (addr.reg))
2797 	  && offset_predicate (addr.offset, mode));
2798 }
2799 
2800 /* Return true if X is a legitimate address that conforms to the requirements
2801    for a microMIPS LWSP or SWSP insn.  */
2802 
2803 bool
lwsp_swsp_address_p(rtx x,machine_mode mode)2804 lwsp_swsp_address_p (rtx x, machine_mode mode)
2805 {
2806   struct mips_address_info addr;
2807 
2808   return (mips_classify_address (&addr, x, mode, false)
2809 	  && addr.type == ADDRESS_REG
2810 	  && REGNO (addr.reg) == STACK_POINTER_REGNUM
2811 	  && uw5_operand (addr.offset, mode));
2812 }
2813 
2814 /* Return true if X is a legitimate address with a 12-bit offset.
2815    MODE is the mode of the value being accessed.  */
2816 
2817 bool
umips_12bit_offset_address_p(rtx x,machine_mode mode)2818 umips_12bit_offset_address_p (rtx x, machine_mode mode)
2819 {
2820   struct mips_address_info addr;
2821 
2822   return (mips_classify_address (&addr, x, mode, false)
2823 	  && addr.type == ADDRESS_REG
2824 	  && CONST_INT_P (addr.offset)
2825 	  && UMIPS_12BIT_OFFSET_P (INTVAL (addr.offset)));
2826 }
2827 
2828 /* Return true if X is a legitimate address with a 9-bit offset.
2829    MODE is the mode of the value being accessed.  */
2830 
2831 bool
mips_9bit_offset_address_p(rtx x,machine_mode mode)2832 mips_9bit_offset_address_p (rtx x, machine_mode mode)
2833 {
2834   struct mips_address_info addr;
2835 
2836   return (mips_classify_address (&addr, x, mode, false)
2837 	  && addr.type == ADDRESS_REG
2838 	  && CONST_INT_P (addr.offset)
2839 	  && MIPS_9BIT_OFFSET_P (INTVAL (addr.offset)));
2840 }
2841 
/* Return the number of instructions needed to load constant X,
   assuming that BASE_INSN_LENGTH is the length of one instruction.
   Return 0 if X isn't a valid constant.  */

int
mips_const_insns (rtx x)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum mips_symbol_type symbol_type;
  rtx offset;

  switch (GET_CODE (x))
    {
    case HIGH:
      /* Only symbols we split into high/low parts have a usable HIGH.  */
      if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
				     &symbol_type)
	  || !mips_split_p[symbol_type])
	return 0;

      /* This is simply an LUI for normal mode.  It is an extended
	 LI followed by an extended SLL for MIPS16.  */
      return TARGET_MIPS16 ? 4 : 1;

    case CONST_INT:
      if (TARGET_MIPS16)
	/* Unsigned 8-bit constants can be loaded using an unextended
	   LI instruction.  Unsigned 16-bit constants can be loaded
	   using an extended LI.  Negative constants must be loaded
	   using LI and then negated.  */
	return (IN_RANGE (INTVAL (x), 0, 255) ? 1
		: SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
		: IN_RANGE (-INTVAL (x), 0, 255) ? 2
		: SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
		: 0);

      /* Normal mode: cost of the cheapest LUI/ORI/shift sequence;
	 CODES is just scratch space here.  */
      return mips_build_integer (codes, INTVAL (x));

    case CONST_VECTOR:
      /* MSA can load small integer splat constants in one instruction.  */
      if (ISA_HAS_MSA
	  && mips_const_vector_same_int_p (x, GET_MODE (x), -512, 511))
	return 1;
      /* Fall through.  */
    case CONST_DOUBLE:
      /* Allow zeros for normal mode, where we can use $0.  */
      return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;

    case CONST:
      if (CONST_GP_P (x))
	return 1;

      /* See if we can refer to X directly.  */
      if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
	return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);

      /* Otherwise try splitting the constant into a base and offset.
	 If the offset is a 16-bit value, we can load the base address
	 into a register and then use (D)ADDIU to add in the offset.
	 If the offset is larger, we can load the base and offset
	 into separate registers and add them together with (D)ADDU.
	 However, the latter is only possible before reload; during
	 and after reload, we must have the option of forcing the
	 constant into the pool instead.  */
      split_const (x, &x, &offset);
      if (offset != 0)
	{
	  /* Recurse on the symbolic base.  */
	  int n = mips_const_insns (x);
	  if (n != 0)
	    {
	      if (SMALL_INT (offset))
		return n + 1;
	      else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
		return n + 1 + mips_build_integer (codes, INTVAL (offset));
	    }
	}
      return 0;

    case SYMBOL_REF:
    case LABEL_REF:
      /* A bare symbol or label costs whatever its classification costs.  */
      return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
				MAX_MACHINE_MODE);

    default:
      return 0;
    }
}
2927 
2928 /* X is a doubleword constant that can be handled by splitting it into
2929    two words and loading each word separately.  Return the number of
2930    instructions required to do this, assuming that BASE_INSN_LENGTH
2931    is the length of one instruction.  */
2932 
2933 int
mips_split_const_insns(rtx x)2934 mips_split_const_insns (rtx x)
2935 {
2936   unsigned int low, high;
2937 
2938   low = mips_const_insns (mips_subword (x, false));
2939   high = mips_const_insns (mips_subword (x, true));
2940   gcc_assert (low > 0 && high > 0);
2941   return low + high;
2942 }
2943 
2944 /* Return one word of 128-bit value OP, taking into account the fixed
2945    endianness of certain registers.  BYTE selects from the byte address.  */
2946 
2947 rtx
mips_subword_at_byte(rtx op,unsigned int byte)2948 mips_subword_at_byte (rtx op, unsigned int byte)
2949 {
2950   machine_mode mode;
2951 
2952   mode = GET_MODE (op);
2953   if (mode == VOIDmode)
2954     mode = TImode;
2955 
2956   gcc_assert (!FP_REG_RTX_P (op));
2957 
2958   if (MEM_P (op))
2959     return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2960 
2961   return simplify_gen_subreg (word_mode, op, mode, byte);
2962 }
2963 
/* Return the number of instructions needed to implement INSN,
   given that it loads from or stores to MEM.  Assume that
   BASE_INSN_LENGTH is the length of one instruction.  */

int
mips_load_store_insns (rtx mem, rtx_insn *insn)
{
  machine_mode mode;
  bool might_split_p;
  rtx set;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (mem);

  /* Try to prove that INSN does not need to be split.  */
  might_split_p = GET_MODE_SIZE (mode) > UNITS_PER_WORD;
  if (might_split_p)
    {
      /* A multiword access only splits if mips_split_move_insn_p says
	 this particular single-set move must be split.  */
      set = single_set (insn);
      if (set && !mips_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn))
	might_split_p = false;
    }

  return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
}
2989 
2990 /* Return the number of instructions needed for an integer division,
2991    assuming that BASE_INSN_LENGTH is the length of one instruction.  */
2992 
2993 int
mips_idiv_insns(machine_mode mode)2994 mips_idiv_insns (machine_mode mode)
2995 {
2996   int count;
2997 
2998   count = 1;
2999   if (TARGET_CHECK_ZERO_DIV)
3000     {
3001       if (GENERATE_DIVIDE_TRAPS && !MSA_SUPPORTED_MODE_P (mode))
3002         count++;
3003       else
3004         count += 2;
3005     }
3006 
3007   if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
3008     count++;
3009   return count;
3010 }
3011 
3012 
3013 /* Emit a move from SRC to DEST.  Assume that the move expanders can
3014    handle all moves if !can_create_pseudo_p ().  The distinction is
3015    important because, unlike emit_move_insn, the move expanders know
3016    how to force Pmode objects into the constant pool even when the
3017    constant pool address is not itself legitimate.  */
3018 
3019 rtx_insn *
mips_emit_move(rtx dest,rtx src)3020 mips_emit_move (rtx dest, rtx src)
3021 {
3022   return (can_create_pseudo_p ()
3023 	  ? emit_move_insn (dest, src)
3024 	  : emit_move_insn_1 (dest, src));
3025 }
3026 
3027 /* Emit a move from SRC to DEST, splitting compound moves into individual
3028    instructions.  SPLIT_TYPE is the type of split to perform.  */
3029 
3030 static void
mips_emit_move_or_split(rtx dest,rtx src,enum mips_split_type split_type)3031 mips_emit_move_or_split (rtx dest, rtx src, enum mips_split_type split_type)
3032 {
3033   if (mips_split_move_p (dest, src, split_type))
3034     mips_split_move (dest, src, split_type, NULL);
3035   else
3036     mips_emit_move (dest, src);
3037 }
3038 
3039 /* Emit an instruction of the form (set TARGET (CODE OP0)).  */
3040 
3041 static void
mips_emit_unary(enum rtx_code code,rtx target,rtx op0)3042 mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
3043 {
3044   emit_insn (gen_rtx_SET (target, gen_rtx_fmt_e (code, GET_MODE (op0), op0)));
3045 }
3046 
3047 /* Compute (CODE OP0) and store the result in a new register of mode MODE.
3048    Return that new register.  */
3049 
3050 static rtx
mips_force_unary(machine_mode mode,enum rtx_code code,rtx op0)3051 mips_force_unary (machine_mode mode, enum rtx_code code, rtx op0)
3052 {
3053   rtx reg;
3054 
3055   reg = gen_reg_rtx (mode);
3056   mips_emit_unary (code, reg, op0);
3057   return reg;
3058 }
3059 
3060 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
3061 
3062 void
mips_emit_binary(enum rtx_code code,rtx target,rtx op0,rtx op1)3063 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3064 {
3065   emit_insn (gen_rtx_SET (target, gen_rtx_fmt_ee (code, GET_MODE (target),
3066 						  op0, op1)));
3067 }
3068 
3069 /* Compute (CODE OP0 OP1) and store the result in a new register
3070    of mode MODE.  Return that new register.  */
3071 
3072 static rtx
mips_force_binary(machine_mode mode,enum rtx_code code,rtx op0,rtx op1)3073 mips_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
3074 {
3075   rtx reg;
3076 
3077   reg = gen_reg_rtx (mode);
3078   mips_emit_binary (code, reg, op0, op1);
3079   return reg;
3080 }
3081 
3082 /* Copy VALUE to a register and return that register.  If new pseudos
3083    are allowed, copy it into a new register, otherwise use DEST.  */
3084 
3085 static rtx
mips_force_temporary(rtx dest,rtx value)3086 mips_force_temporary (rtx dest, rtx value)
3087 {
3088   if (can_create_pseudo_p ())
3089     return force_reg (Pmode, value);
3090   else
3091     {
3092       mips_emit_move (dest, value);
3093       return dest;
3094     }
3095 }
3096 
/* Emit a call sequence with call pattern PATTERN and return the call
   instruction itself (which is not necessarily the last instruction
   emitted).  ORIG_ADDR is the original, unlegitimized address,
   ADDR is the legitimized form, and LAZY_P is true if the call
   address is lazily-bound.  */

static rtx_insn *
mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
{
  rtx_insn *insn;
  rtx reg;

  insn = emit_call_insn (pattern);

  if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
    {
      /* MIPS16 JALRs only take MIPS16 registers.  If the target
	 function requires $25 to be valid on entry, we must copy it
	 there separately.  The move instruction can be put in the
	 call's delay slot.  */
      reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn_before (gen_move_insn (reg, addr), insn);
      /* Record the use so the move is not deleted as dead.  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
    }

  if (lazy_p)
    /* Lazy-binding stubs require $gp to be valid on entry.  */
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);

  if (TARGET_USE_GOT)
    {
      /* See the comment above load_call<mode> for details.  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
	       gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
      emit_insn (gen_update_got_version ());
    }

  if (TARGET_MIPS16
      && TARGET_EXPLICIT_RELOCS
      && TARGET_CALL_CLOBBERED_GP)
    {
      /* Mark the temporary used to restore $gp after the call as
	 clobbered by the call.  */
      rtx post_call_tmp_reg = gen_rtx_REG (word_mode, POST_CALL_TMP_REG);
      clobber_reg (&CALL_INSN_FUNCTION_USAGE (insn), post_call_tmp_reg);
    }

  return insn;
}
3144 
3145 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
3146    then add CONST_INT OFFSET to the result.  */
3147 
3148 static rtx
mips_unspec_address_offset(rtx base,rtx offset,enum mips_symbol_type symbol_type)3149 mips_unspec_address_offset (rtx base, rtx offset,
3150 			    enum mips_symbol_type symbol_type)
3151 {
3152   base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
3153 			 UNSPEC_ADDRESS_FIRST + symbol_type);
3154   if (offset != const0_rtx)
3155     base = gen_rtx_PLUS (Pmode, base, offset);
3156   return gen_rtx_CONST (Pmode, base);
3157 }
3158 
3159 /* Return an UNSPEC address with underlying address ADDRESS and symbol
3160    type SYMBOL_TYPE.  */
3161 
3162 rtx
mips_unspec_address(rtx address,enum mips_symbol_type symbol_type)3163 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
3164 {
3165   rtx base, offset;
3166 
3167   split_const (address, &base, &offset);
3168   return mips_unspec_address_offset (base, offset, symbol_type);
3169 }
3170 
3171 /* If OP is an UNSPEC address, return the address to which it refers,
3172    otherwise return OP itself.  */
3173 
3174 rtx
mips_strip_unspec_address(rtx op)3175 mips_strip_unspec_address (rtx op)
3176 {
3177   rtx base, offset;
3178 
3179   split_const (op, &base, &offset);
3180   if (UNSPEC_ADDRESS_P (base))
3181     op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
3182   return op;
3183 }
3184 
/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for mips_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
			 enum mips_symbol_type symbol_type)
{
  /* Only symbol types in mips_split_p have separate high/low parts.  */
  if (mips_split_p[symbol_type])
    {
      /* Materialize HIGH(addr) and then add it to BASE; TEMP may be
	 reused for both intermediate values.  */
      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
      addr = mips_force_temporary (temp, addr);
      base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
    }
  return base;
}
3203 
/* Return an instruction that copies $gp into register REG.  We want
   GCC to treat the register's value as constant, so that its value
   can be rematerialized on demand.  */

static rtx
gen_load_const_gp (rtx reg)
{
  /* Dispatch to the SImode or DImode pattern according to Pmode.  */
  return PMODE_INSN (gen_load_const_gp, (reg));
}
3213 
/* Return a pseudo register that contains the value of $gp throughout
   the current function.  Such registers are needed by MIPS16 functions,
   for which $gp itself is not a valid base register or addition operand.
   The pseudo is created lazily and cached in cfun->machine.  */

static rtx
mips16_gp_pseudo_reg (void)
{
  if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
    {
      rtx_insn *scan;

      cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);

      push_topmost_sequence ();

      /* Advance SCAN to just before the function's first real insn,
	 skipping over leading notes.  */
      scan = get_insns ();
      while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
	scan = NEXT_INSN (scan);

      rtx set = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
      rtx_insn *insn = emit_insn_after (set, scan);
      /* Don't attribute the load to any source line.  */
      INSN_LOCATION (insn) = 0;

      pop_topmost_sequence ();
    }

  return cfun->machine->mips16_gp_pseudo_rtx;
}
3242 
/* Return a base register that holds pic_offset_table_rtx.
   TEMP, if nonnull, is a scratch Pmode base register.  */

rtx
mips_pic_base_register (rtx temp)
{
  /* Non-MIPS16 code can use $gp directly.  */
  if (!TARGET_MIPS16)
    return pic_offset_table_rtx;

  /* During initial expansion, use the cached per-function pseudo.  */
  if (currently_expanding_to_rtl)
    return mips16_gp_pseudo_reg ();

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (Pmode);

  if (TARGET_USE_GOT)
    /* The first post-reload split exposes all references to $gp
       (both uses and definitions).  All references must remain
       explicit after that point.

       It is safe to introduce uses of $gp at any time, so for
       simplicity, we do that before the split too.  */
    mips_emit_move (temp, pic_offset_table_rtx);
  else
    emit_insn (gen_load_const_gp (temp));
  return temp;
}
3270 
3271 /* Return the RHS of a load_call<mode> insn.  */
3272 
3273 static rtx
mips_unspec_call(rtx reg,rtx symbol)3274 mips_unspec_call (rtx reg, rtx symbol)
3275 {
3276   rtvec vec;
3277 
3278   vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
3279   return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
3280 }
3281 
3282 /* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
3283    reference.  Return NULL_RTX otherwise.  */
3284 
3285 static rtx
mips_strip_unspec_call(rtx src)3286 mips_strip_unspec_call (rtx src)
3287 {
3288   if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
3289     return mips_strip_unspec_address (XVECEXP (src, 0, 1));
3290   return NULL_RTX;
3291 }
3292 
/* Create and return a GOT reference of type TYPE for address ADDR.
   TEMP, if nonnull, is a scratch Pmode base register.  */

rtx
mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
{
  rtx base, high, lo_sum_symbol;

  base = mips_pic_base_register (temp);

  /* If we used the temporary register to load $gp, we can't use
     it for the high part as well.  */
  if (temp != NULL && reg_overlap_mentioned_p (base, temp))
    temp = NULL;

  high = mips_unspec_offset_high (temp, base, addr, type);
  lo_sum_symbol = mips_unspec_address (addr, type);

  /* Call symbols get the special load_call form; everything else uses
     the generic GOT-load pattern.  */
  if (type == SYMBOL_GOTOFF_CALL)
    return mips_unspec_call (high, lo_sum_symbol);
  else
    return PMODE_INSN (gen_unspec_got, (high, lo_sum_symbol));
}
3316 
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for mips_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimize SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
mips_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
{
  enum mips_symbol_context context;
  enum mips_symbol_type symbol_type;
  rtx high;

  context = (mode == MAX_MACHINE_MODE
	     ? SYMBOL_CONTEXT_LEA
	     : SYMBOL_CONTEXT_MEM);
  if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
    {
      /* A HIGH of a symbol whose high part must itself be split
	 (mips_split_hi_p).  */
      addr = XEXP (addr, 0);
      if (mips_symbolic_constant_p (addr, context, &symbol_type)
	  && mips_symbol_insns (symbol_type, mode) > 0
	  && mips_split_hi_p[symbol_type])
	{
	  if (low_out)
	    switch (symbol_type)
	      {
	      case SYMBOL_GOT_PAGE_OFST:
		/* The high part of a page/ofst pair is loaded from the GOT.  */
		*low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
		break;

	      default:
		gcc_unreachable ();
	      }
	  return true;
	}
    }
  else
    {
      /* A plain symbolic constant whose access splits into high and
	 low parts (mips_split_p).  */
      if (mips_symbolic_constant_p (addr, context, &symbol_type)
	  && mips_symbol_insns (symbol_type, mode) > 0
	  && mips_split_p[symbol_type])
	{
	  if (low_out)
	    switch (symbol_type)
	      {
	      case SYMBOL_GOT_DISP:
		/* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
		*low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
		break;

	      case SYMBOL_GP_RELATIVE:
		/* GP-relative symbols use $gp itself as the high part.  */
		high = mips_pic_base_register (temp);
		*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
		break;

	      default:
		/* The generic case: HIGH/LO_SUM splitting.  */
		high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
		high = mips_force_temporary (temp, high);
		*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
		break;
	      }
	  return true;
	}
    }
  return false;
}
3391 
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   mips_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      if (TARGET_MIPS16)
	{
	  /* Load the full offset into a register so that we can use
	     an unextended instruction for the address itself.  */
	  high = GEN_INT (offset);
	  offset = 0;
	}
      else
	{
	  /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
	     The addition inside the macro CONST_HIGH_PART may cause an
	     overflow, so we need to force a sign-extension check.  */
	  high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
	  offset = CONST_LOW_PART (offset);
	}
      /* Add the (possibly full) high part to REG; the remaining OFFSET
	 is small enough for plus_constant below.  */
      high = mips_force_temporary (temp, high);
      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (Pmode, reg, offset);
}
3423 
/* The __tls_get_addr symbol.  */
3425 static GTY(()) rtx mips_tls_symbol;
3426 
/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  V0 is an RTX for the
   return value location.  */

static rtx_insn *
mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
{
  rtx loc, a0;
  rtx_insn *insn;

  /* The single argument is passed in the first GPR argument register.  */
  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Create the SYMBOL_REF for __tls_get_addr lazily, on first use.  */
  if (!mips_tls_symbol)
    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");

  /* LOC is SYM wrapped in the relocation operator for TYPE.  */
  loc = mips_unspec_address (sym, type);

  start_sequence ();

  /* Load the argument register with the GOT-based address of LOC.  */
  emit_insn (gen_rtx_SET (a0, gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx,
					      loc)));
  insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
			   const0_rtx, NULL_RTX, false);
  /* Mark the call const so that the optimizers know it has no side
     effects beyond its return value, and record that it uses A0.  */
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}
3459 
/* Return a pseudo register that contains the current thread pointer.
   TP is the register to load; the emitted code sets it and it is also
   the return value.  */

rtx
mips_expand_thread_pointer (rtx tp)
{
  rtx fn;

  if (TARGET_MIPS16)
    {
      /* MIPS16 code reads the thread pointer via an out-of-line rdhwr
	 stub; create the one-only stub lazily on first use.  */
      if (!mips16_rdhwr_stub)
	mips16_rdhwr_stub = new mips16_rdhwr_one_only_stub ();
      fn = mips16_stub_call_address (mips16_rdhwr_stub);
      emit_insn (PMODE_INSN (gen_tls_get_tp_mips16, (tp, fn)));
    }
  else
    emit_insn (PMODE_INSN (gen_tls_get_tp, (tp)));
  return tp;
}
3478 
3479 static rtx
mips_get_tp(void)3480 mips_get_tp (void)
3481 {
3482   return mips_expand_thread_pointer (gen_reg_rtx (Pmode));
3483 }
3484 
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
mips_legitimize_tls_address (rtx loc)
{
  rtx dest, v0, tp, tmp1, tmp2, eqv, offset;
  enum tls_model model;

  model = SYMBOL_REF_TLS_MODEL (loc);
  /* Only TARGET_ABICALLS code can have more than one module; other
     code must be static and should not use a GOT.  All TLS models
     reduce to local exec in this situation.  */
  if (!TARGET_ABICALLS)
    model = TLS_MODEL_LOCAL_EXEC;

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      {
	/* Call __tls_get_addr with a TLSGD relocation; the call
	   returns the address of LOC.  emit_libcall_block equates the
	   result with LOC so equivalent accesses can be shared.  */
	v0 = gen_rtx_REG (Pmode, GP_RETURN);
	rtx_insn *insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
	dest = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, dest, v0, loc);
	break;
      }

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	/* Call __tls_get_addr once for the whole module (TLSLDM),
	   then add LOC's DTPREL offset from the module base.  */
	v0 = gen_rtx_REG (Pmode, GP_RETURN);
	rtx_insn *insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
	tmp1 = gen_reg_rtx (Pmode);

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_TLS_LDM);
	emit_libcall_block (insn, tmp1, v0, eqv);

	offset = mips_unspec_address (loc, SYMBOL_DTPREL);
	if (mips_split_p[SYMBOL_DTPREL])
	  {
	    /* Add the DTPREL offset as a high/LO_SUM pair.  */
	    tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
	    dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
	  }
	else
	  dest = expand_binop (Pmode, add_optab, tmp1, offset,
			       0, 0, OPTAB_DIRECT);
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      /* Load LOC's offset from the thread pointer out of the GOT
	 (SYMBOL_GOTTPREL) and add it to the thread pointer.  */
      tp = mips_get_tp ();
      tmp1 = gen_reg_rtx (Pmode);
      tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
      if (Pmode == DImode)
	emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
      else
	emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp1, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      /* Add LOC's link-time TPREL offset directly to the thread
	 pointer.  */
      tmp1 = mips_get_tp ();
      offset = mips_unspec_address (loc, SYMBOL_TPREL);
      if (mips_split_p[SYMBOL_TPREL])
	{
	  /* Add the TPREL offset as a high/LO_SUM pair.  */
	  tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_TPREL);
	  dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
	}
      else
	dest = expand_binop (Pmode, add_optab, tmp1, offset,
			     0, 0, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
3567 
/* Implement "TARGET = __builtin_mips_get_fcsr ()" for MIPS16,
   using a stub.  MIPS16 code cannot access the FCSR directly, so
   the value is obtained by calling an out-of-line helper.  */

void
mips16_expand_get_fcsr (rtx target)
{
  /* Create the one-only stub lazily, on first use.  */
  if (!mips16_get_fcsr_stub)
    mips16_get_fcsr_stub = new mips16_get_fcsr_one_only_stub ();
  rtx fn = mips16_stub_call_address (mips16_get_fcsr_stub);
  /* The stub call leaves the FCSR value in GET_FCSR_REGNUM; copy it
     from there into TARGET.  */
  emit_insn (PMODE_INSN (gen_mips_get_fcsr_mips16, (fn)));
  emit_move_insn (target, gen_rtx_REG (SImode, GET_FCSR_REGNUM));
}
3580 
/* Implement __builtin_mips_set_fcsr (TARGET) for MIPS16, using a stub.
   NEWVAL is the value to store into the FCSR; the stub performs the
   actual write, which MIPS16 code cannot do directly.  */

void
mips16_expand_set_fcsr (rtx newval)
{
  /* Create the one-only stub lazily, on first use.  */
  if (!mips16_set_fcsr_stub)
    mips16_set_fcsr_stub = new mips16_set_fcsr_one_only_stub ();
  rtx fn = mips16_stub_call_address (mips16_set_fcsr_stub);
  /* The stub expects the new value in SET_FCSR_REGNUM, so load it
     there before making the call.  */
  emit_move_insn (gen_rtx_REG (SImode, SET_FCSR_REGNUM), newval);
  emit_insn (PMODE_INSN (gen_mips_set_fcsr_mips16, (fn)));
}
3592 
3593 /* If X is not a valid address for mode MODE, force it into a register.  */
3594 
3595 static rtx
mips_force_address(rtx x,machine_mode mode)3596 mips_force_address (rtx x, machine_mode mode)
3597 {
3598   if (!mips_legitimate_address_p (mode, x, false))
3599     x = force_reg (Pmode, x);
3600   return x;
3601 }
3602 
/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
  rtx base, addr;
  HOST_WIDE_INT offset;

  /* TLS symbols require their model-specific access sequence.  */
  if (mips_tls_symbol_p (x))
    return mips_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (mips_split_symbol (NULL, x, mode, &addr))
    return mips_force_address (addr, mode);

  /* Handle BASE + OFFSET using mips_add_offset.  */
  mips_split_plus (x, &base, &offset);
  if (offset != 0)
    {
      /* Make sure the base is a valid address register before
	 adding the (possibly large) offset to it.  */
      if (!mips_valid_base_register_p (base, mode, false))
	base = copy_to_mode_reg (Pmode, base);
      addr = mips_add_offset (NULL, base, offset);
      return mips_force_address (addr, mode);
    }

  /* No special handling applied; return X unchanged.  */
  return x;
}
3634 
/* Load VALUE into DEST.  TEMP is as for mips_force_temporary.  */

void
mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  machine_mode mode;
  unsigned int i, num_ops;
  rtx x;

  mode = GET_MODE (dest);
  /* Split VALUE into a sequence of operations: codes[0].value is the
     initial constant and each later element applies a binary operation
     to the previous result.  */
  num_ops = mips_build_integer (codes, value);

  /* Apply each binary operation to X.  Invariant: X is a legitimate
     source operand for a SET pattern.  */
  x = GEN_INT (codes[0].value);
  for (i = 1; i < num_ops; i++)
    {
      if (!can_create_pseudo_p ())
	{
	  /* We cannot create new pseudos here, so store the
	     intermediate result in TEMP instead.  */
	  emit_insn (gen_rtx_SET (temp, x));
	  x = temp;
	}
      else
	x = force_reg (mode, x);
      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
    }

  /* The final operation writes directly to DEST.  */
  emit_insn (gen_rtx_SET (dest, x));
}
3665 
/* Subroutine of mips_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
mips_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  DEST doubles as
     the temporary register for the intermediate results.  */
  if (splittable_const_int_operand (src, mode))
    {
      mips_move_integer (dest, dest, INTVAL (src));
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
    {
      emit_insn (gen_rtx_SET (dest, src));
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (mips_tls_symbol_p (src))
    {
      mips_emit_move (dest, mips_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.
     In non-MIPS16 mode, prefer to do this even if the constant _can_ be
     forced into memory, as it usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (mode, src)
	  || (!TARGET_MIPS16 && can_create_pseudo_p ())))
    {
      base = mips_force_temporary (dest, base);
      mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  /* Fall back to loading the constant from the constant pool.  */
  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
  mips_emit_move (dest, src);
}
3717 
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  Return true if such a sequence was emitted,
   false if (set DEST SRC) can be used as-is.  */

bool
mips_legitimize_move (machine_mode mode, rtx dest, rtx src)
{
  /* Both src and dest are non-registers;  one special case is supported where
     the source is (const_int 0) and the store can source the zero register.
     MIPS16 and MSA are never able to source the zero register directly in
     memory operations.  */
  if (!register_operand (dest, mode)
      && !register_operand (src, mode)
      && (TARGET_MIPS16 || !const_0_operand (src, mode)
	  || MSA_SUPPORTED_MODE_P (mode)))
    {
      /* Copy SRC through a register so that the memory-to-memory move
	 becomes register-to-memory.  */
      mips_emit_move (dest, force_reg (mode, src));
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      mips_legitimize_const_move (mode, dest, src);
      /* Record the original constant so the optimizers can still see
	 the value DEST ends up holding.  */
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }
  return false;
}
3747 
3748 /* Return true if value X in context CONTEXT is a small-data address
3749    that can be rewritten as a LO_SUM.  */
3750 
3751 static bool
mips_rewrite_small_data_p(rtx x,enum mips_symbol_context context)3752 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
3753 {
3754   enum mips_symbol_type symbol_type;
3755 
3756   return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
3757 	  && !mips_split_p[SYMBOL_GP_RELATIVE]
3758 	  && mips_symbolic_constant_p (x, context, &symbol_type)
3759 	  && symbol_type == SYMBOL_GP_RELATIVE);
3760 }
3761 
/* Return true if X refers to small data symbols directly, not through
   a LO_SUM.  CONTEXT is the context in which X appears.  */

static int
mips_small_data_pattern_1 (rtx x, enum mips_symbol_context context)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
    {
      rtx x = *iter;

      /* Ignore things like "g" constraints in asms.  We make no particular
	 guarantee about which symbolic constants are acceptable as asm operands
	 versus which must be forced into a GPR.  */
      if (GET_CODE (x) == LO_SUM || GET_CODE (x) == ASM_OPERANDS)
	iter.skip_subrtxes ();
      else if (MEM_P (x))
	{
	  /* Addresses inside a MEM are in a memory context, so recurse
	     with SYMBOL_CONTEXT_MEM and don't rescan the subrtxes.  */
	  if (mips_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM))
	    return true;
	  iter.skip_subrtxes ();
	}
      else if (mips_rewrite_small_data_p (x, context))
	return true;
    }
  return false;
}
3789 
3790 /* Return true if OP refers to small data symbols directly, not through
3791    a LO_SUM.  */
3792 
3793 bool
mips_small_data_pattern_p(rtx op)3794 mips_small_data_pattern_p (rtx op)
3795 {
3796   return mips_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA);
3797 }
3798 
/* Rewrite *LOC so that it refers to small data using explicit
   relocations.  CONTEXT is the context in which *LOC appears.  */

static void
mips_rewrite_small_data_1 (rtx *loc, enum mips_symbol_context context)
{
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (MEM_P (*loc))
	{
	  /* Addresses inside a MEM are in a memory context; recurse
	     with SYMBOL_CONTEXT_MEM and don't rescan the subrtxes.  */
	  mips_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM);
	  iter.skip_subrtxes ();
	}
      else if (mips_rewrite_small_data_p (*loc, context))
	{
	  /* Rewrite the small-data reference as $gp + %gp_rel offset.  */
	  *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
	  iter.skip_subrtxes ();
	}
      else if (GET_CODE (*loc) == LO_SUM)
	/* Already in LO_SUM form; nothing inside needs rewriting.  */
	iter.skip_subrtxes ();
    }
}
3823 
3824 /* Rewrite instruction pattern PATTERN so that it refers to small data
3825    using explicit relocations.  */
3826 
3827 rtx
mips_rewrite_small_data(rtx pattern)3828 mips_rewrite_small_data (rtx pattern)
3829 {
3830   pattern = copy_insn (pattern);
3831   mips_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA);
3832   return pattern;
3833 }
3834 
3835 /* The cost of loading values from the constant pool.  It should be
3836    larger than the cost of any constant we want to synthesize inline.  */
3837 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3838 
/* Return the cost of X when used as an operand to the MIPS16 instruction
   that implements CODE.  Return -1 if there is no such instruction, or if
   X is not a valid immediate operand for it.  A cost of 0 means the
   immediate fits in an unextended (16-bit) encoding; COSTS_N_INSNS (1)
   means an extended encoding is needed.  */

static int
mips16_constant_cost (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* Shifts by between 1 and 8 bits (inclusive) are unextended,
	 other shifts are extended.  The shift patterns truncate the shift
	 count to the right size, so there are no out-of-range values.  */
      if (IN_RANGE (x, 1, 8))
	return 0;
      return COSTS_N_INSNS (1);

    case PLUS:
      /* Immediates in [-128, 127] fit in an unextended encoding;
	 other 16-bit signed immediates need an extended one.  */
      if (IN_RANGE (x, -128, 127))
	return 0;
      if (SMALL_OPERAND (x))
	return COSTS_N_INSNS (1);
      return -1;

    case LEU:
      /* Like LE, but reject the always-true case.  (X + 1 would
	 wrap to 0 below.)  */
      if (x == -1)
	return -1;
      /* FALLTHRU */
    case LE:
      /* We add 1 to the immediate and use SLT.  */
      x += 1;
      /* FALLTHRU */
    case XOR:
      /* We can use CMPI for an xor with an unsigned 16-bit X.  */
    case LT:
    case LTU:
      if (IN_RANGE (x, 0, 255))
	return 0;
      if (SMALL_OPERAND_UNSIGNED (x))
	return COSTS_N_INSNS (1);
      return -1;

    case EQ:
    case NE:
      /* Equality comparisons with 0 are cheap.  */
      if (x == 0)
	return 0;
      return -1;

    default:
      return -1;
    }
}
3895 
/* Return true if there is a non-MIPS16 instruction that implements CODE
   and if that instruction accepts X as an immediate operand.  */

static int
mips_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case ROTATE:
    case ROTATERT:
      /* Likewise rotates, if the target supports rotates at all.  */
      return ISA_HAS_ROR;

    case AND:
    case IOR:
    case XOR:
      /* These instructions take 16-bit unsigned immediates.  */
      return SMALL_OPERAND_UNSIGNED (x);

    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 16-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case EQ:
    case NE:
    case GT:
    case GTU:
      /* The "immediate" forms of these instructions are really
	 implemented as comparisons with register 0.  */
      return x == 0;

    case GE:
    case GEU:
      /* Likewise, meaning that the only valid immediate operand is 1.  */
      return x == 1;

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  (X + 1 == 0
	 would mean X is the maximum unsigned value.)  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* The bit position and size are immediate operands.  */
      return ISA_HAS_EXT_INS;

    default:
      /* By default assume that $0 can be used for 0.  */
      return x == 0;
    }
}
3958 
3959 /* Return the cost of binary operation X, given that the instruction
3960    sequence for a word-sized or smaller operation has cost SINGLE_COST
3961    and that the sequence of a double-word operation has cost DOUBLE_COST.
3962    If SPEED is true, optimize for speed otherwise optimize for size.  */
3963 
3964 static int
mips_binary_cost(rtx x,int single_cost,int double_cost,bool speed)3965 mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
3966 {
3967   int cost;
3968 
3969   if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3970     cost = double_cost;
3971   else
3972     cost = single_cost;
3973   return (cost
3974 	  + set_src_cost (XEXP (x, 0), GET_MODE (x), speed)
3975 	  + rtx_cost (XEXP (x, 1), GET_MODE (x), GET_CODE (x), 1, speed));
3976 }
3977 
3978 /* Return the cost of floating-point multiplications of mode MODE.  */
3979 
3980 static int
mips_fp_mult_cost(machine_mode mode)3981 mips_fp_mult_cost (machine_mode mode)
3982 {
3983   return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3984 }
3985 
3986 /* Return the cost of floating-point divisions of mode MODE.  */
3987 
3988 static int
mips_fp_div_cost(machine_mode mode)3989 mips_fp_div_cost (machine_mode mode)
3990 {
3991   return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3992 }
3993 
/* Return the cost of sign-extending OP to mode MODE, not including the
   cost of OP itself.  */

static int
mips_sign_extend_cost (machine_mode mode, rtx op)
{
  if (MEM_P (op))
    /* Extended loads are as cheap as unextended ones.  */
    return 0;

  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
    return 0;

  if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
    /* We can use SEB or SEH.  */
    return COSTS_N_INSNS (1);

  /* We need to use a shift left and a shift right.  MIPS16 shifts may
     need extended encodings, hence the higher cost.  */
  return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
}
4015 
/* Return the cost of zero-extending OP to mode MODE, not including the
   cost of OP itself.  */

static int
mips_zero_extend_cost (machine_mode mode, rtx op)
{
  if (MEM_P (op))
    /* Extended loads are as cheap as unextended ones.  */
    return 0;

  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
    return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);

  if (GENERATE_MIPS16E)
    /* We can use ZEB or ZEH.  */
    return COSTS_N_INSNS (1);

  if (TARGET_MIPS16)
    /* We need to load 0xff or 0xffff into a register and use AND;
       QImode masks load in fewer instructions than wider ones.  */
    return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);

  /* We can use ANDI.  */
  return COSTS_N_INSNS (1);
}
4041 
4042 /* Return the cost of moving between two registers of mode MODE,
4043    assuming that the move will be in pieces of at most UNITS bytes.  */
4044 
4045 static int
mips_set_reg_reg_piece_cost(machine_mode mode,unsigned int units)4046 mips_set_reg_reg_piece_cost (machine_mode mode, unsigned int units)
4047 {
4048   return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
4049 }
4050 
/* Return the cost of moving between two registers of mode MODE.  */

static int
mips_set_reg_reg_cost (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_CC:
      /* Condition-code values move in CCmode-sized pieces.  */
      return mips_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode));

    case MODE_FLOAT:
    case MODE_COMPLEX_FLOAT:
    case MODE_VECTOR_FLOAT:
      /* Hard-float values move in FP-register-sized pieces; without
	 hardware floating point they move like integer data.  */
      if (TARGET_HARD_FLOAT)
	return mips_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE);
      /* Fall through */

    default:
      return mips_set_reg_reg_piece_cost (mode, UNITS_PER_WORD);
    }
}
4072 
4073 /* Implement TARGET_RTX_COSTS.  */
4074 
4075 static bool
mips_rtx_costs(rtx x,machine_mode mode,int outer_code,int opno ATTRIBUTE_UNUSED,int * total,bool speed)4076 mips_rtx_costs (rtx x, machine_mode mode, int outer_code,
4077 		int opno ATTRIBUTE_UNUSED, int *total, bool speed)
4078 {
4079   int code = GET_CODE (x);
4080   bool float_mode_p = FLOAT_MODE_P (mode);
4081   int cost;
4082   rtx addr;
4083 
4084   /* The cost of a COMPARE is hard to define for MIPS.  COMPAREs don't
4085      appear in the instruction stream, and the cost of a comparison is
4086      really the cost of the branch or scc condition.  At the time of
4087      writing, GCC only uses an explicit outer COMPARE code when optabs
4088      is testing whether a constant is expensive enough to force into a
4089      register.  We want optabs to pass such constants through the MIPS
4090      expanders instead, so make all constants very cheap here.  */
4091   if (outer_code == COMPARE)
4092     {
4093       gcc_assert (CONSTANT_P (x));
4094       *total = 0;
4095       return true;
4096     }
4097 
4098   switch (code)
4099     {
4100     case CONST_INT:
4101       /* Treat *clear_upper32-style ANDs as having zero cost in the
4102 	 second operand.  The cost is entirely in the first operand.
4103 
4104 	 ??? This is needed because we would otherwise try to CSE
4105 	 the constant operand.  Although that's the right thing for
4106 	 instructions that continue to be a register operation throughout
4107 	 compilation, it is disastrous for instructions that could
4108 	 later be converted into a memory operation.  */
4109       if (TARGET_64BIT
4110 	  && outer_code == AND
4111 	  && UINTVAL (x) == 0xffffffff)
4112 	{
4113 	  *total = 0;
4114 	  return true;
4115 	}
4116 
4117       if (TARGET_MIPS16)
4118 	{
4119 	  cost = mips16_constant_cost (outer_code, INTVAL (x));
4120 	  if (cost >= 0)
4121 	    {
4122 	      *total = cost;
4123 	      return true;
4124 	    }
4125 	}
4126       else
4127 	{
4128 	  /* When not optimizing for size, we care more about the cost
4129 	     of hot code, and hot code is often in a loop.  If a constant
4130 	     operand needs to be forced into a register, we will often be
4131 	     able to hoist the constant load out of the loop, so the load
4132 	     should not contribute to the cost.  */
4133 	  if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
4134 	    {
4135 	      *total = 0;
4136 	      return true;
4137 	    }
4138 	}
4139       /* Fall through.  */
4140 
4141     case CONST:
4142     case SYMBOL_REF:
4143     case LABEL_REF:
4144     case CONST_DOUBLE:
4145       if (force_to_mem_operand (x, VOIDmode))
4146 	{
4147 	  *total = COSTS_N_INSNS (1);
4148 	  return true;
4149 	}
4150       cost = mips_const_insns (x);
4151       if (cost > 0)
4152 	{
4153 	  /* If the constant is likely to be stored in a GPR, SETs of
4154 	     single-insn constants are as cheap as register sets; we
4155 	     never want to CSE them.
4156 
4157 	     Don't reduce the cost of storing a floating-point zero in
4158 	     FPRs.  If we have a zero in an FPR for other reasons, we
4159 	     can get better cfg-cleanup and delayed-branch results by
4160 	     using it consistently, rather than using $0 sometimes and
4161 	     an FPR at other times.  Also, moves between floating-point
4162 	     registers are sometimes cheaper than (D)MTC1 $0.  */
4163 	  if (cost == 1
4164 	      && outer_code == SET
4165 	      && !(float_mode_p && TARGET_HARD_FLOAT))
4166 	    cost = 0;
4167 	  /* When non-MIPS16 code loads a constant N>1 times, we rarely
4168 	     want to CSE the constant itself.  It is usually better to
4169 	     have N copies of the last operation in the sequence and one
4170 	     shared copy of the other operations.  (Note that this is
4171 	     not true for MIPS16 code, where the final operation in the
4172 	     sequence is often an extended instruction.)
4173 
4174 	     Also, if we have a CONST_INT, we don't know whether it is
4175 	     for a word or doubleword operation, so we cannot rely on
4176 	     the result of mips_build_integer.  */
4177 	  else if (!TARGET_MIPS16
4178 		   && (outer_code == SET || GET_MODE (x) == VOIDmode))
4179 	    cost = 1;
4180 	  *total = COSTS_N_INSNS (cost);
4181 	  return true;
4182 	}
4183       /* The value will need to be fetched from the constant pool.  */
4184       *total = CONSTANT_POOL_COST;
4185       return true;
4186 
4187     case MEM:
4188       /* If the address is legitimate, return the number of
4189 	 instructions it needs.  */
4190       addr = XEXP (x, 0);
4191       cost = mips_address_insns (addr, mode, true);
4192       if (cost > 0)
4193 	{
4194 	  *total = COSTS_N_INSNS (cost + 1);
4195 	  return true;
4196 	}
4197       /* Check for a scaled indexed address.  */
4198       if (mips_lwxs_address_p (addr)
4199 	  || mips_lx_address_p (addr, mode))
4200 	{
4201 	  *total = COSTS_N_INSNS (2);
4202 	  return true;
4203 	}
4204       /* Otherwise use the default handling.  */
4205       return false;
4206 
4207     case FFS:
4208       *total = COSTS_N_INSNS (6);
4209       return false;
4210 
4211     case NOT:
4212       *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
4213       return false;
4214 
4215     case AND:
4216       /* Check for a *clear_upper32 pattern and treat it like a zero
4217 	 extension.  See the pattern's comment for details.  */
4218       if (TARGET_64BIT
4219 	  && mode == DImode
4220 	  && CONST_INT_P (XEXP (x, 1))
4221 	  && UINTVAL (XEXP (x, 1)) == 0xffffffff)
4222 	{
4223 	  *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
4224 		    + set_src_cost (XEXP (x, 0), mode, speed));
4225 	  return true;
4226 	}
4227       if (ISA_HAS_CINS && CONST_INT_P (XEXP (x, 1)))
4228 	{
4229 	  rtx op = XEXP (x, 0);
4230 	  if (GET_CODE (op) == ASHIFT
4231 	      && CONST_INT_P (XEXP (op, 1))
4232 	      && mask_low_and_shift_p (mode, XEXP (x, 1), XEXP (op, 1), 32))
4233 	    {
4234 	      *total = COSTS_N_INSNS (1);
4235 	      *total += set_src_cost (XEXP (op, 0), mode, speed);
4236 	      return true;
4237 	    }
4238 	}
4239       /* (AND (NOT op0) (NOT op1) is a nor operation that can be done in
4240 	 a single instruction.  */
4241       if (!TARGET_MIPS16
4242 	  && GET_CODE (XEXP (x, 0)) == NOT
4243 	  && GET_CODE (XEXP (x, 1)) == NOT)
4244 	{
4245 	  cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1;
4246           *total = (COSTS_N_INSNS (cost)
4247 		    + set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed)
4248 		    + set_src_cost (XEXP (XEXP (x, 1), 0), mode, speed));
4249 	  return true;
4250 	}
4251 
4252       /* Fall through.  */
4253 
4254     case IOR:
4255     case XOR:
4256       /* Double-word operations use two single-word operations.  */
4257       *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
4258 				 speed);
4259       return true;
4260 
4261     case ASHIFT:
4262     case ASHIFTRT:
4263     case LSHIFTRT:
4264     case ROTATE:
4265     case ROTATERT:
4266       if (CONSTANT_P (XEXP (x, 1)))
4267 	*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4268 				   speed);
4269       else
4270 	*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
4271 				   speed);
4272       return true;
4273 
4274     case ABS:
4275       if (float_mode_p)
4276         *total = mips_cost->fp_add;
4277       else
4278         *total = COSTS_N_INSNS (4);
4279       return false;
4280 
4281     case LO_SUM:
4282       /* Low-part immediates need an extended MIPS16 instruction.  */
4283       *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
4284 		+ set_src_cost (XEXP (x, 0), mode, speed));
4285       return true;
4286 
4287     case LT:
4288     case LTU:
4289     case LE:
4290     case LEU:
4291     case GT:
4292     case GTU:
4293     case GE:
4294     case GEU:
4295     case EQ:
4296     case NE:
4297     case UNORDERED:
4298     case LTGT:
4299     case UNGE:
4300     case UNGT:
4301     case UNLE:
4302     case UNLT:
4303       /* Branch comparisons have VOIDmode, so use the first operand's
4304 	 mode instead.  */
4305       mode = GET_MODE (XEXP (x, 0));
4306       if (FLOAT_MODE_P (mode))
4307 	{
4308 	  *total = mips_cost->fp_add;
4309 	  return false;
4310 	}
4311       *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
4312 				 speed);
4313       return true;
4314 
4315     case MINUS:
4316       if (float_mode_p && ISA_HAS_UNFUSED_MADD4 && !HONOR_SIGNED_ZEROS (mode))
4317 	{
4318 	  /* See if we can use NMADD or NMSUB via the *nmadd4<mode>_fastmath
4319 	     or *nmsub4<mode>_fastmath patterns.  These patterns check for
4320 	     HONOR_SIGNED_ZEROS so we check here too.  */
4321 	  rtx op0 = XEXP (x, 0);
4322 	  rtx op1 = XEXP (x, 1);
4323 	  if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
4324 	    {
4325 	      *total = (mips_fp_mult_cost (mode)
4326 			+ set_src_cost (XEXP (XEXP (op0, 0), 0), mode, speed)
4327 			+ set_src_cost (XEXP (op0, 1), mode, speed)
4328 			+ set_src_cost (op1, mode, speed));
4329 	      return true;
4330 	    }
4331 	  if (GET_CODE (op1) == MULT)
4332 	    {
4333 	      *total = (mips_fp_mult_cost (mode)
4334 			+ set_src_cost (op0, mode, speed)
4335 			+ set_src_cost (XEXP (op1, 0), mode, speed)
4336 			+ set_src_cost (XEXP (op1, 1), mode, speed));
4337 	      return true;
4338 	    }
4339 	}
4340       /* Fall through.  */
4341 
4342     case PLUS:
4343       if (float_mode_p)
4344 	{
4345 	  /* If this is part of a MADD or MSUB, treat the PLUS as
4346 	     being free.  */
4347 	  if (ISA_HAS_UNFUSED_MADD4 && GET_CODE (XEXP (x, 0)) == MULT)
4348 	    *total = 0;
4349 	  else
4350 	    *total = mips_cost->fp_add;
4351 	  return false;
4352 	}
4353 
4354       /* If it's an add + mult (which is equivalent to shift left) and
4355          it's immediate operand satisfies const_immlsa_operand predicate.  */
4356       if (((ISA_HAS_LSA && mode == SImode)
4357 	   || (ISA_HAS_DLSA && mode == DImode))
4358 	  && GET_CODE (XEXP (x, 0)) == MULT)
4359 	{
4360 	  rtx op2 = XEXP (XEXP (x, 0), 1);
4361 	  if (const_immlsa_operand (op2, mode))
4362 	    {
4363 	      *total = (COSTS_N_INSNS (1)
4364 			+ set_src_cost (XEXP (XEXP (x, 0), 0), mode, speed)
4365 			+ set_src_cost (XEXP (x, 1), mode, speed));
4366 	      return true;
4367 	    }
4368 	}
4369 
4370       /* Double-word operations require three single-word operations and
4371 	 an SLTU.  The MIPS16 version then needs to move the result of
4372 	 the SLTU from $24 to a MIPS16 register.  */
4373       *total = mips_binary_cost (x, COSTS_N_INSNS (1),
4374 				 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
4375 				 speed);
4376       return true;
4377 
4378     case NEG:
4379       if (float_mode_p && ISA_HAS_UNFUSED_MADD4)
4380 	{
4381 	  /* See if we can use NMADD or NMSUB via the *nmadd4<mode> or
4382 	     *nmsub4<mode> patterns.  */
4383 	  rtx op = XEXP (x, 0);
4384 	  if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
4385 	      && GET_CODE (XEXP (op, 0)) == MULT)
4386 	    {
4387 	      *total = (mips_fp_mult_cost (mode)
4388 			+ set_src_cost (XEXP (XEXP (op, 0), 0), mode, speed)
4389 			+ set_src_cost (XEXP (XEXP (op, 0), 1), mode, speed)
4390 			+ set_src_cost (XEXP (op, 1), mode, speed));
4391 	      return true;
4392 	    }
4393 	}
4394 
4395       if (float_mode_p)
4396 	*total = mips_cost->fp_add;
4397       else
4398 	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
4399       return false;
4400 
4401     case FMA:
4402       *total = mips_fp_mult_cost (mode);
4403       return false;
4404 
4405     case MULT:
4406       if (float_mode_p)
4407 	*total = mips_fp_mult_cost (mode);
4408       else if (mode == DImode && !TARGET_64BIT)
4409 	/* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
4410 	   where the mulsidi3 always includes an MFHI and an MFLO.  */
4411 	*total = (speed
4412 		  ? mips_cost->int_mult_si * 3 + 6
4413 		  : COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
4414       else if (!speed)
4415 	*total = COSTS_N_INSNS ((ISA_HAS_MUL3 || ISA_HAS_R6MUL) ? 1 : 2) + 1;
4416       else if (mode == DImode)
4417 	*total = mips_cost->int_mult_di;
4418       else
4419 	*total = mips_cost->int_mult_si;
4420       return false;
4421 
4422     case DIV:
4423       /* Check for a reciprocal.  */
4424       if (float_mode_p
4425 	  && ISA_HAS_FP_RECIP_RSQRT (mode)
4426 	  && flag_unsafe_math_optimizations
4427 	  && XEXP (x, 0) == CONST1_RTX (mode))
4428 	{
4429 	  if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
4430 	    /* An rsqrt<mode>a or rsqrt<mode>b pattern.  Count the
4431 	       division as being free.  */
4432 	    *total = set_src_cost (XEXP (x, 1), mode, speed);
4433 	  else
4434 	    *total = (mips_fp_div_cost (mode)
4435 		      + set_src_cost (XEXP (x, 1), mode, speed));
4436 	  return true;
4437 	}
4438       /* Fall through.  */
4439 
4440     case SQRT:
4441     case MOD:
4442       if (float_mode_p)
4443 	{
4444 	  *total = mips_fp_div_cost (mode);
4445 	  return false;
4446 	}
4447       /* Fall through.  */
4448 
4449     case UDIV:
4450     case UMOD:
4451       if (!speed)
4452 	{
4453 	  /* It is our responsibility to make division by a power of 2
4454 	     as cheap as 2 register additions if we want the division
4455 	     expanders to be used for such operations; see the setting
4456 	     of sdiv_pow2_cheap in optabs.c.  Using (D)DIV for MIPS16
4457 	     should always produce shorter code than using
4458 	     expand_sdiv2_pow2.  */
4459 	  if (TARGET_MIPS16
4460 	      && CONST_INT_P (XEXP (x, 1))
4461 	      && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
4462 	    {
4463 	      *total = COSTS_N_INSNS (2);
4464 	      *total += set_src_cost (XEXP (x, 0), mode, speed);
4465 	      return true;
4466 	    }
4467 	  *total = COSTS_N_INSNS (mips_idiv_insns (mode));
4468 	}
4469       else if (mode == DImode)
4470         *total = mips_cost->int_div_di;
4471       else
4472 	*total = mips_cost->int_div_si;
4473       return false;
4474 
4475     case SIGN_EXTEND:
4476       *total = mips_sign_extend_cost (mode, XEXP (x, 0));
4477       return false;
4478 
4479     case ZERO_EXTEND:
4480       if (outer_code == SET
4481 	  && ISA_HAS_BADDU
4482 	  && (GET_CODE (XEXP (x, 0)) == TRUNCATE
4483 	      || GET_CODE (XEXP (x, 0)) == SUBREG)
4484 	  && GET_MODE (XEXP (x, 0)) == QImode
4485 	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
4486 	{
4487 	  *total = set_src_cost (XEXP (XEXP (x, 0), 0), VOIDmode, speed);
4488 	  return true;
4489 	}
4490       *total = mips_zero_extend_cost (mode, XEXP (x, 0));
4491       return false;
4492     case TRUNCATE:
4493       /* Costings for highpart multiplies.  Matching patterns of the form:
4494 
4495 	 (lshiftrt:DI (mult:DI (sign_extend:DI (...)
4496 			       (sign_extend:DI (...))
4497 		      (const_int 32)
4498       */
4499       if (ISA_HAS_R6MUL
4500 	  && (GET_CODE (XEXP (x, 0)) == ASHIFTRT
4501 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT)
4502 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4503 	  && ((INTVAL (XEXP (XEXP (x, 0), 1)) == 32
4504 	       && GET_MODE (XEXP (x, 0)) == DImode)
4505 	      || (ISA_HAS_R6DMUL
4506 		  && INTVAL (XEXP (XEXP (x, 0), 1)) == 64
4507 		  && GET_MODE (XEXP (x, 0)) == TImode))
4508 	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4509 	  && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
4510 	       && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND)
4511 	      || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4512 		  && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
4513 		      == ZERO_EXTEND))))
4514 	{
4515 	  if (!speed)
4516 	    *total = COSTS_N_INSNS (1) + 1;
4517 	  else if (mode == DImode)
4518 	    *total = mips_cost->int_mult_di;
4519 	  else
4520 	    *total = mips_cost->int_mult_si;
4521 
4522 	  /* Sign extension is free, zero extension costs for DImode when
4523 	     on a 64bit core / when DMUL is present.  */
4524 	  for (int i = 0; i < 2; ++i)
4525 	    {
4526 	      rtx op = XEXP (XEXP (XEXP (x, 0), 0), i);
4527 	      if (ISA_HAS_R6DMUL
4528 		  && GET_CODE (op) == ZERO_EXTEND
4529 		  && GET_MODE (op) == DImode)
4530 		*total += rtx_cost (op, DImode, MULT, i, speed);
4531 	      else
4532 		*total += rtx_cost (XEXP (op, 0), VOIDmode, GET_CODE (op),
4533 				    0, speed);
4534 	    }
4535 
4536 	  return true;
4537 	}
4538       return false;
4539 
4540     case FLOAT:
4541     case UNSIGNED_FLOAT:
4542     case FIX:
4543     case FLOAT_EXTEND:
4544     case FLOAT_TRUNCATE:
4545       *total = mips_cost->fp_add;
4546       return false;
4547 
4548     case SET:
4549       if (register_operand (SET_DEST (x), VOIDmode)
4550 	  && reg_or_0_operand (SET_SRC (x), VOIDmode))
4551 	{
4552 	  *total = mips_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
4553 	  return true;
4554 	}
4555       return false;
4556 
4557     default:
4558       return false;
4559     }
4560 }
4561 
4562 /* Implement TARGET_ADDRESS_COST.  */
4563 
4564 static int
mips_address_cost(rtx addr,machine_mode mode,addr_space_t as ATTRIBUTE_UNUSED,bool speed ATTRIBUTE_UNUSED)4565 mips_address_cost (rtx addr, machine_mode mode,
4566 		   addr_space_t as ATTRIBUTE_UNUSED,
4567 		   bool speed ATTRIBUTE_UNUSED)
4568 {
4569   return mips_address_insns (addr, mode, false);
4570 }
4571 
4572 /* Implement TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P.  */
4573 
4574 static bool
mips_no_speculation_in_delay_slots_p()4575 mips_no_speculation_in_delay_slots_p ()
4576 {
4577   return TARGET_CB_MAYBE;
4578 }
4579 
/* Information about a single instruction in a multi-instruction
   asm sequence.  */
struct mips_multi_member {
  /* True if this member is a label definition, false if it is an
     instruction.  When true, FORMAT holds the label text and
     OPERANDS is unused.  */
  bool is_label_p;

  /* The output_asm_insn format of the instruction, or the label
     text (including the colon) if IS_LABEL_P.  */
  const char *format;

  /* The operands to the instruction.  */
  rtx operands[MAX_RECOG_OPERANDS];
};
typedef struct mips_multi_member mips_multi_member;
4593 
/* The instructions and labels that make up the current multi-insn
   sequence, in emission order.  */
static vec<mips_multi_member> mips_multi_members;

/* How many instructions (as opposed to labels) are in the current
   multi-insn sequence.  Incremented by mips_multi_add_insn.  */
static unsigned int mips_multi_num_insns;
4600 
4601 /* Start a new multi-insn sequence.  */
4602 
4603 static void
mips_multi_start(void)4604 mips_multi_start (void)
4605 {
4606   mips_multi_members.truncate (0);
4607   mips_multi_num_insns = 0;
4608 }
4609 
4610 /* Add a new, zero initialized member to the current multi-insn sequence.  */
4611 
4612 static struct mips_multi_member *
mips_multi_add(void)4613 mips_multi_add (void)
4614 {
4615   mips_multi_member empty;
4616   memset (&empty, 0, sizeof (empty));
4617   return mips_multi_members.safe_push (empty);
4618 }
4619 
4620 /* Add a normal insn with the given asm format to the current multi-insn
4621    sequence.  The other arguments are a null-terminated list of operands.  */
4622 
4623 static void
mips_multi_add_insn(const char * format,...)4624 mips_multi_add_insn (const char *format, ...)
4625 {
4626   struct mips_multi_member *member;
4627   va_list ap;
4628   unsigned int i;
4629   rtx op;
4630 
4631   member = mips_multi_add ();
4632   member->is_label_p = false;
4633   member->format = format;
4634   va_start (ap, format);
4635   i = 0;
4636   while ((op = va_arg (ap, rtx)))
4637     member->operands[i++] = op;
4638   va_end (ap);
4639   mips_multi_num_insns++;
4640 }
4641 
4642 /* Add the given label definition to the current multi-insn sequence.
4643    The definition should include the colon.  */
4644 
4645 static void
mips_multi_add_label(const char * label)4646 mips_multi_add_label (const char *label)
4647 {
4648   struct mips_multi_member *member;
4649 
4650   member = mips_multi_add ();
4651   member->is_label_p = true;
4652   member->format = label;
4653 }
4654 
4655 /* Return the index of the last member of the current multi-insn sequence.  */
4656 
4657 static unsigned int
mips_multi_last_index(void)4658 mips_multi_last_index (void)
4659 {
4660   return mips_multi_members.length () - 1;
4661 }
4662 
4663 /* Add a copy of an existing instruction to the current multi-insn
4664    sequence.  I is the index of the instruction that should be copied.  */
4665 
4666 static void
mips_multi_copy_insn(unsigned int i)4667 mips_multi_copy_insn (unsigned int i)
4668 {
4669   struct mips_multi_member *member;
4670 
4671   member = mips_multi_add ();
4672   memcpy (member, &mips_multi_members[i], sizeof (*member));
4673   gcc_assert (!member->is_label_p);
4674 }
4675 
4676 /* Change the operand of an existing instruction in the current
4677    multi-insn sequence.  I is the index of the instruction,
4678    OP is the index of the operand, and X is the new value.  */
4679 
4680 static void
mips_multi_set_operand(unsigned int i,unsigned int op,rtx x)4681 mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
4682 {
4683   mips_multi_members[i].operands[op] = x;
4684 }
4685 
4686 /* Write out the asm code for the current multi-insn sequence.  */
4687 
4688 static void
mips_multi_write(void)4689 mips_multi_write (void)
4690 {
4691   struct mips_multi_member *member;
4692   unsigned int i;
4693 
4694   FOR_EACH_VEC_ELT (mips_multi_members, i, member)
4695     if (member->is_label_p)
4696       fprintf (asm_out_file, "%s\n", member->format);
4697     else
4698       output_asm_insn (member->format, member->operands);
4699 }
4700 
/* Return one word of double-word value OP, taking into account the fixed
   endianness of certain registers.  HIGH_P is true to select the high part,
   false to select the low part.  */

rtx
mips_subword (rtx op, bool high_p)
{
  unsigned int byte, offset;
  machine_mode mode;

  mode = GET_MODE (op);
  /* Constants (VOIDmode) are treated as having double-word size.  */
  if (mode == VOIDmode)
    mode = TARGET_64BIT ? TImode : DImode;

  /* Compute the byte offset of the requested word.  On big-endian
     targets the high word comes first (offset 0); on little-endian
     targets it is the low word that comes first.  */
  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
    byte = UNITS_PER_WORD;
  else
    byte = 0;

  if (FP_REG_RTX_P (op))
    {
      /* Paired FPRs are always ordered little-endian.  */
      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
      return gen_rtx_REG (word_mode, REGNO (op) + offset);
    }

  if (MEM_P (op))
    /* Rewrite the address in case the adjusted reference now qualifies
       for (or no longer qualifies for) small-data access.  */
    return mips_rewrite_small_data (adjust_address (op, word_mode, byte));

  return simplify_gen_subreg (word_mode, op, mode, byte);
}
4732 
4733 /* Return true if SRC should be moved into DEST using "MULT $0, $0".
4734    SPLIT_TYPE is the condition under which moves should be split.  */
4735 
4736 static bool
mips_mult_move_p(rtx dest,rtx src,enum mips_split_type split_type)4737 mips_mult_move_p (rtx dest, rtx src, enum mips_split_type split_type)
4738 {
4739   return ((split_type != SPLIT_FOR_SPEED
4740 	   || mips_tuning_info.fast_mult_zero_zero_p)
4741 	  && src == const0_rtx
4742 	  && REG_P (dest)
4743 	  && GET_MODE_SIZE (GET_MODE (dest)) == 2 * UNITS_PER_WORD
4744 	  && (ISA_HAS_DSP_MULT
4745 	      ? ACC_REG_P (REGNO (dest))
4746 	      : MD_REG_P (REGNO (dest))));
4747 }
4748 
4749 /* Return true if a move from SRC to DEST should be split into two.
4750    SPLIT_TYPE describes the split condition.  */
4751 
4752 bool
mips_split_move_p(rtx dest,rtx src,enum mips_split_type split_type)4753 mips_split_move_p (rtx dest, rtx src, enum mips_split_type split_type)
4754 {
4755   /* Check whether the move can be done using some variant of MULT $0,$0.  */
4756   if (mips_mult_move_p (dest, src, split_type))
4757     return false;
4758 
4759   /* FPR-to-FPR moves can be done in a single instruction, if they're
4760      allowed at all.  */
4761   unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
4762   if (size == 8 && FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
4763     return false;
4764 
4765   /* Check for floating-point loads and stores.  */
4766   if (size == 8 && ISA_HAS_LDC1_SDC1)
4767     {
4768       if (FP_REG_RTX_P (dest) && MEM_P (src))
4769 	return false;
4770       if (FP_REG_RTX_P (src) && MEM_P (dest))
4771 	return false;
4772     }
4773 
4774   /* Check if MSA moves need splitting.  */
4775   if (MSA_SUPPORTED_MODE_P (GET_MODE (dest)))
4776     return mips_split_128bit_move_p (dest, src);
4777 
4778   /* Otherwise split all multiword moves.  */
4779   return size > UNITS_PER_WORD;
4780 }
4781 
/* Split a move from SRC to DEST, given that mips_split_move_p holds.
   SPLIT_TYPE describes the split condition.  INSN is the insn being
   split, if we know it, NULL otherwise.  */

void
mips_split_move (rtx dest, rtx src, enum mips_split_type split_type, rtx insn_)
{
  rtx low_dest;

  gcc_checking_assert (mips_split_move_p (dest, src, split_type));
  if (MSA_SUPPORTED_MODE_P (GET_MODE (dest)))
    /* 128-bit MSA moves have a dedicated splitter.  */
    mips_split_128bit_move (dest, src);
  else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
    {
      /* Doubleword moves involving an FPR are handled by
	 mode-specific move_doubleword_fpr* patterns.  */
      if (!TARGET_64BIT && GET_MODE (dest) == DImode)
	emit_insn (gen_move_doubleword_fprdi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
	emit_insn (gen_move_doubleword_fprdf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
	emit_insn (gen_move_doubleword_fprv2sf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
	emit_insn (gen_move_doubleword_fprv2si (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
	emit_insn (gen_move_doubleword_fprv4hi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
	emit_insn (gen_move_doubleword_fprv8qi (dest, src));
      else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
	emit_insn (gen_move_doubleword_fprtf (dest, src));
      else
	gcc_unreachable ();
    }
  else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
    {
      /* Moving into HI/LO: move the low word normally, then use
	 MTHI to install the high word alongside it.  */
      low_dest = mips_subword (dest, false);
      mips_emit_move (low_dest, mips_subword (src, false));
      if (TARGET_64BIT)
	emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
      else
	emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
    }
  else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
    {
      /* Moving out of HI/LO: move the low word normally and use MFHI
	 for the high word.  */
      mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
      if (TARGET_64BIT)
	emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
      else
	emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
    }
  else
    {
      /* The operation can be split into two normal moves.  Decide in
	 which order to do them: write the high word first if the low
	 destination would clobber part of SRC.  */
      low_dest = mips_subword (dest, false);
      if (REG_P (low_dest)
	  && reg_overlap_mentioned_p (low_dest, src))
	{
	  mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
	  mips_emit_move (low_dest, mips_subword (src, false));
	}
      else
	{
	  mips_emit_move (low_dest, mips_subword (src, false));
	  mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
	}
    }

  /* This is a hack.  See if the next insn uses DEST and if so, see if we
     can forward SRC for DEST.  This is most useful if the next insn is a
     simple store.   */
  rtx_insn *insn = (rtx_insn *)insn_;
  struct mips_address_info addr = {};
  if (insn)
    {
      rtx_insn *next = next_nonnote_nondebug_insn_bb (insn);
      if (next)
	{
	  rtx set = single_set (next);
	  if (set && SET_SRC (set) == dest)
	    {
	      if (MEM_P (src))
		{
		  /* Forwarding a memory SRC is only safe if its address
		     registers are not clobbered by this move.  */
		  rtx tmp = XEXP (src, 0);
		  mips_classify_address (&addr, tmp, GET_MODE (tmp), true);
		  if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg))
		    validate_change (next, &SET_SRC (set), src, false);
		}
	      else
		validate_change (next, &SET_SRC (set), src, false);
	    }
	}
    }
}
4874 
4875 /* Return the split type for instruction INSN.  */
4876 
4877 static enum mips_split_type
mips_insn_split_type(rtx insn)4878 mips_insn_split_type (rtx insn)
4879 {
4880   basic_block bb = BLOCK_FOR_INSN (insn);
4881   if (bb)
4882     {
4883       if (optimize_bb_for_speed_p (bb))
4884 	return SPLIT_FOR_SPEED;
4885       else
4886 	return SPLIT_FOR_SIZE;
4887     }
4888   /* Once CFG information has been removed, we should trust the optimization
4889      decisions made by previous passes and only split where necessary.  */
4890   return SPLIT_IF_NECESSARY;
4891 }
4892 
4893 /* Return true if a 128-bit move from SRC to DEST should be split.  */
4894 
4895 bool
mips_split_128bit_move_p(rtx dest,rtx src)4896 mips_split_128bit_move_p (rtx dest, rtx src)
4897 {
4898   /* MSA-to-MSA moves can be done in a single instruction.  */
4899   if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
4900     return false;
4901 
4902   /* Check for MSA loads and stores.  */
4903   if (FP_REG_RTX_P (dest) && MEM_P (src))
4904     return false;
4905   if (FP_REG_RTX_P (src) && MEM_P (dest))
4906     return false;
4907 
4908   /* Check for MSA set to an immediate const vector with valid replicated
4909      element.  */
4910   if (FP_REG_RTX_P (dest)
4911       && mips_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
4912     return false;
4913 
4914   /* Check for MSA load zero immediate.  */
4915   if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
4916     return false;
4917 
4918   return true;
4919 }
4920 
/* Split a 128-bit move from SRC to DEST.  */

void
mips_split_128bit_move (rtx dest, rtx src)
{
  int byte, index;
  rtx low_dest, low_src, d, s;

  if (FP_REG_RTX_P (dest))
    {
      /* GPR/constant to MSA register: insert one word at a time with
	 INSERT.W (32-bit) or INSERT.D (64-bit).  */
      gcc_assert (!MEM_P (src));

      /* View the destination as a V4SI or V2DI vector so that the
	 insert patterns apply regardless of its original mode.  */
      rtx new_dest = dest;
      if (!TARGET_64BIT)
	{
	  if (GET_MODE (dest) != V4SImode)
	    new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
	}
      else
	{
	  if (GET_MODE (dest) != V2DImode)
	    new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0);
	}

      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
	   byte += UNITS_PER_WORD, index++)
	{
	  s = mips_subword_at_byte (src, byte);
	  if (!TARGET_64BIT)
	    emit_insn (gen_msa_insert_w (new_dest, s, new_dest,
					 GEN_INT (1 << index)));
	  else
	    emit_insn (gen_msa_insert_d (new_dest, s, new_dest,
					 GEN_INT (1 << index)));
	}
    }
  else if (FP_REG_RTX_P (src))
    {
      /* MSA register to GPRs: extract one word at a time with
	 COPY_S.W or COPY_S.D.  */
      gcc_assert (!MEM_P (dest));

      rtx new_src = src;
      if (!TARGET_64BIT)
	{
	  if (GET_MODE (src) != V4SImode)
	    new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
	}
      else
	{
	  if (GET_MODE (src) != V2DImode)
	    new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0);
	}

      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
	   byte += UNITS_PER_WORD, index++)
	{
	  d = mips_subword_at_byte (dest, byte);
	  if (!TARGET_64BIT)
	    emit_insn (gen_msa_copy_s_w (d, new_src, GEN_INT (index)));
	  else
	    emit_insn (gen_msa_copy_s_d (d, new_src, GEN_INT (index)));
	}
    }
  else
    {
      /* GPR-to-GPR move: copy word by word.  */
      low_dest = mips_subword_at_byte (dest, 0);
      low_src = mips_subword_at_byte (src, 0);
      gcc_assert (REG_P (low_dest) && REG_P (low_src));
      /* Make sure the source register is not written before reading:
	 iterate upwards or downwards depending on how the register
	 ranges overlap.  */
      if (REGNO (low_dest) <= REGNO (low_src))
	{
	  for (byte = 0; byte < GET_MODE_SIZE (TImode);
	       byte += UNITS_PER_WORD)
	    {
	      d = mips_subword_at_byte (dest, byte);
	      s = mips_subword_at_byte (src, byte);
	      mips_emit_move (d, s);
	    }
	}
      else
	{
	  for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
	       byte -= UNITS_PER_WORD)
	    {
	      d = mips_subword_at_byte (dest, byte);
	      s = mips_subword_at_byte (src, byte);
	      mips_emit_move (d, s);
	    }
	}
    }
}
5011 
5012 /* Split a COPY_S.D with operands DEST, SRC and INDEX.  GEN is a function
5013    used to generate subregs.  */
5014 
5015 void
mips_split_msa_copy_d(rtx dest,rtx src,rtx index,rtx (* gen_fn)(rtx,rtx,rtx))5016 mips_split_msa_copy_d (rtx dest, rtx src, rtx index,
5017 		       rtx (*gen_fn)(rtx, rtx, rtx))
5018 {
5019   gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode)
5020 	      || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode));
5021 
5022   /* Note that low is always from the lower index, and high is always
5023      from the higher index.  */
5024   rtx low = mips_subword (dest, false);
5025   rtx high = mips_subword (dest, true);
5026   rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
5027 
5028   emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2)));
5029   emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1)));
5030 }
5031 
5032 /* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2.  */
5033 
5034 void
mips_split_msa_insert_d(rtx dest,rtx src1,rtx index,rtx src2)5035 mips_split_msa_insert_d (rtx dest, rtx src1, rtx index, rtx src2)
5036 {
5037   int i;
5038   gcc_assert (GET_MODE (dest) == GET_MODE (src1));
5039   gcc_assert ((GET_MODE (dest) == V2DImode
5040 	       && (GET_MODE (src2) == DImode || src2 == const0_rtx))
5041 	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode));
5042 
5043   /* Note that low is always from the lower index, and high is always
5044      from the higher index.  */
5045   rtx low = mips_subword (src2, false);
5046   rtx high = mips_subword (src2, true);
5047   rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
5048   rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0);
5049   i = exact_log2 (INTVAL (index));
5050   gcc_assert (i != -1);
5051 
5052   emit_insn (gen_msa_insert_w (new_dest, low, new_src1,
5053 			       GEN_INT (1 << (i * 2))));
5054   emit_insn (gen_msa_insert_w (new_dest, high, new_dest,
5055 			       GEN_INT (1 << (i * 2 + 1))));
5056 }
5057 
5058 /* Split FILL.D.  */
5059 
5060 void
mips_split_msa_fill_d(rtx dest,rtx src)5061 mips_split_msa_fill_d (rtx dest, rtx src)
5062 {
5063   gcc_assert ((GET_MODE (dest) == V2DImode
5064 	       && (GET_MODE (src) == DImode || src == const0_rtx))
5065 	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode));
5066 
5067   /* Note that low is always from the lower index, and high is always
5068      from the higher index.  */
5069   rtx low, high;
5070   if (src == const0_rtx)
5071     {
5072       low = src;
5073       high = src;
5074     }
5075   else
5076     {
5077       low = mips_subword (src, false);
5078       high = mips_subword (src, true);
5079     }
5080   rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
5081   emit_insn (gen_msa_fill_w (new_dest, low));
5082   emit_insn (gen_msa_insert_w (new_dest, high, new_dest, GEN_INT (1 << 1)));
5083   emit_insn (gen_msa_insert_w (new_dest, high, new_dest, GEN_INT (1 << 3)));
5084 }
5085 
5086 /* Return true if a move from SRC to DEST in INSN should be split.  */
5087 
5088 bool
mips_split_move_insn_p(rtx dest,rtx src,rtx insn)5089 mips_split_move_insn_p (rtx dest, rtx src, rtx insn)
5090 {
5091   return mips_split_move_p (dest, src, mips_insn_split_type (insn));
5092 }
5093 
5094 /* Split a move from SRC to DEST in INSN, given that mips_split_move_insn_p
5095    holds.  */
5096 
5097 void
mips_split_move_insn(rtx dest,rtx src,rtx insn)5098 mips_split_move_insn (rtx dest, rtx src, rtx insn)
5099 {
5100   mips_split_move (dest, src, mips_insn_split_type (insn), insn);
5101 }
5102 
5103 /* Return the appropriate instructions to move SRC into DEST.  Assume
5104    that SRC is operand 1 and DEST is operand 0.  */
5105 
5106 const char *
mips_output_move(rtx dest,rtx src)5107 mips_output_move (rtx dest, rtx src)
5108 {
5109   enum rtx_code dest_code = GET_CODE (dest);
5110   enum rtx_code src_code = GET_CODE (src);
5111   machine_mode mode = GET_MODE (dest);
5112   bool dbl_p = (GET_MODE_SIZE (mode) == 8);
5113   bool msa_p = MSA_SUPPORTED_MODE_P (mode);
5114   enum mips_symbol_type symbol_type;
5115 
5116   if (mips_split_move_p (dest, src, SPLIT_IF_NECESSARY))
5117     return "#";
5118 
5119   if (msa_p
5120       && dest_code == REG && FP_REG_P (REGNO (dest))
5121       && src_code == CONST_VECTOR
5122       && CONST_INT_P (CONST_VECTOR_ELT (src, 0)))
5123     {
5124       gcc_assert (mips_const_vector_same_int_p (src, mode, -512, 511));
5125       return "ldi.%v0\t%w0,%E1";
5126     }
5127 
5128   if ((src_code == REG && GP_REG_P (REGNO (src)))
5129       || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
5130     {
5131       if (dest_code == REG)
5132 	{
5133 	  if (GP_REG_P (REGNO (dest)))
5134 	    return "move\t%0,%z1";
5135 
5136 	  if (mips_mult_move_p (dest, src, SPLIT_IF_NECESSARY))
5137 	    {
5138 	      if (ISA_HAS_DSP_MULT)
5139 		return "mult\t%q0,%.,%.";
5140 	      else
5141 		return "mult\t%.,%.";
5142 	    }
5143 
5144 	  /* Moves to HI are handled by special .md insns.  */
5145 	  if (REGNO (dest) == LO_REGNUM)
5146 	    return "mtlo\t%z1";
5147 
5148 	  if (DSP_ACC_REG_P (REGNO (dest)))
5149 	    {
5150 	      static char retval[] = "mt__\t%z1,%q0";
5151 
5152 	      retval[2] = reg_names[REGNO (dest)][4];
5153 	      retval[3] = reg_names[REGNO (dest)][5];
5154 	      return retval;
5155 	    }
5156 
5157 	  if (FP_REG_P (REGNO (dest)))
5158 	    {
5159 	      if (msa_p)
5160 		{
5161 		  gcc_assert (src == CONST0_RTX (GET_MODE (src)));
5162 		  return "ldi.%v0\t%w0,0";
5163 		}
5164 
5165 	      return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
5166 	    }
5167 
5168 	  if (ALL_COP_REG_P (REGNO (dest)))
5169 	    {
5170 	      static char retval[] = "dmtc_\t%z1,%0";
5171 
5172 	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
5173 	      return dbl_p ? retval : retval + 1;
5174 	    }
5175 	}
5176       if (dest_code == MEM)
5177 	switch (GET_MODE_SIZE (mode))
5178 	  {
5179 	  case 1: return "sb\t%z1,%0";
5180 	  case 2: return "sh\t%z1,%0";
5181 	  case 4: return "sw\t%z1,%0";
5182 	  case 8: return "sd\t%z1,%0";
5183 	  default: gcc_unreachable ();
5184 	  }
5185     }
5186   if (dest_code == REG && GP_REG_P (REGNO (dest)))
5187     {
5188       if (src_code == REG)
5189 	{
5190 	  /* Moves from HI are handled by special .md insns.  */
5191 	  if (REGNO (src) == LO_REGNUM)
5192 	    {
5193 	      /* When generating VR4120 or VR4130 code, we use MACC and
5194 		 DMACC instead of MFLO.  This avoids both the normal
5195 		 MIPS III HI/LO hazards and the errata related to
5196 		 -mfix-vr4130.  */
5197 	      if (ISA_HAS_MACCHI)
5198 		return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
5199 	      return "mflo\t%0";
5200 	    }
5201 
5202 	  if (DSP_ACC_REG_P (REGNO (src)))
5203 	    {
5204 	      static char retval[] = "mf__\t%0,%q1";
5205 
5206 	      retval[2] = reg_names[REGNO (src)][4];
5207 	      retval[3] = reg_names[REGNO (src)][5];
5208 	      return retval;
5209 	    }
5210 
5211 	  if (FP_REG_P (REGNO (src)))
5212 	    {
5213 	      gcc_assert (!msa_p);
5214 	      return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
5215 	    }
5216 
5217 	  if (ALL_COP_REG_P (REGNO (src)))
5218 	    {
5219 	      static char retval[] = "dmfc_\t%0,%1";
5220 
5221 	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
5222 	      return dbl_p ? retval : retval + 1;
5223 	    }
5224 	}
5225 
5226       if (src_code == MEM)
5227 	switch (GET_MODE_SIZE (mode))
5228 	  {
5229 	  case 1: return "lbu\t%0,%1";
5230 	  case 2: return "lhu\t%0,%1";
5231 	  case 4: return "lw\t%0,%1";
5232 	  case 8: return "ld\t%0,%1";
5233 	  default: gcc_unreachable ();
5234 	  }
5235 
5236       if (src_code == CONST_INT)
5237 	{
5238 	  /* Don't use the X format for the operand itself, because that
5239 	     will give out-of-range numbers for 64-bit hosts and 32-bit
5240 	     targets.  */
5241 	  if (!TARGET_MIPS16)
5242 	    return "li\t%0,%1\t\t\t# %X1";
5243 
5244 	  if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
5245 	    return "li\t%0,%1";
5246 
5247 	  if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
5248 	    return "#";
5249 	}
5250 
5251       if (src_code == HIGH)
5252 	return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
5253 
5254       if (CONST_GP_P (src))
5255 	return "move\t%0,%1";
5256 
5257       if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
5258 	  && mips_lo_relocs[symbol_type] != 0)
5259 	{
5260 	  /* A signed 16-bit constant formed by applying a relocation
5261 	     operator to a symbolic address.  */
5262 	  gcc_assert (!mips_split_p[symbol_type]);
5263 	  return "li\t%0,%R1";
5264 	}
5265 
5266       if (symbolic_operand (src, VOIDmode))
5267 	{
5268 	  gcc_assert (TARGET_MIPS16
5269 		      ? TARGET_MIPS16_TEXT_LOADS
5270 		      : !TARGET_EXPLICIT_RELOCS);
5271 	  return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
5272 	}
5273     }
5274   if (src_code == REG && FP_REG_P (REGNO (src)))
5275     {
5276       if (dest_code == REG && FP_REG_P (REGNO (dest)))
5277 	{
5278 	  if (GET_MODE (dest) == V2SFmode)
5279 	    return "mov.ps\t%0,%1";
5280 	  else if (msa_p)
5281 	    return "move.v\t%w0,%w1";
5282 	  else
5283 	    return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
5284 	}
5285 
5286       if (dest_code == MEM)
5287 	{
5288 	  if (msa_p)
5289 	    return "st.%v1\t%w1,%0";
5290 
5291 	  return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
5292 	}
5293     }
5294   if (dest_code == REG && FP_REG_P (REGNO (dest)))
5295     {
5296       if (src_code == MEM)
5297 	{
5298 	  if (msa_p)
5299 	    return "ld.%v0\t%w0,%1";
5300 
5301 	  return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
5302 	}
5303     }
5304   if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
5305     {
5306       static char retval[] = "l_c_\t%0,%1";
5307 
5308       retval[1] = (dbl_p ? 'd' : 'w');
5309       retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
5310       return retval;
5311     }
5312   if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
5313     {
5314       static char retval[] = "s_c_\t%1,%0";
5315 
5316       retval[1] = (dbl_p ? 'd' : 'w');
5317       retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
5318       return retval;
5319     }
5320   gcc_unreachable ();
5321 }
5322 
5323 /* Return true if CMP1 is a suitable second operand for integer ordering
5324    test CODE.  See also the *sCC patterns in mips.md.  */
5325 
5326 static bool
mips_int_order_operand_ok_p(enum rtx_code code,rtx cmp1)5327 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
5328 {
5329   switch (code)
5330     {
5331     case GT:
5332     case GTU:
5333       return reg_or_0_operand (cmp1, VOIDmode);
5334 
5335     case GE:
5336     case GEU:
5337       return !TARGET_MIPS16 && cmp1 == const1_rtx;
5338 
5339     case LT:
5340     case LTU:
5341       return arith_operand (cmp1, VOIDmode);
5342 
5343     case LE:
5344       return sle_operand (cmp1, VOIDmode);
5345 
5346     case LEU:
5347       return sleu_operand (cmp1, VOIDmode);
5348 
5349     default:
5350       gcc_unreachable ();
5351     }
5352 }
5353 
5354 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
5355    integer ordering test *CODE, or if an equivalent combination can
5356    be formed by adjusting *CODE and *CMP1.  When returning true, update
5357    *CODE and *CMP1 with the chosen code and operand, otherwise leave
5358    them alone.  */
5359 
5360 static bool
mips_canonicalize_int_order_test(enum rtx_code * code,rtx * cmp1,machine_mode mode)5361 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
5362 				  machine_mode mode)
5363 {
5364   HOST_WIDE_INT plus_one;
5365 
5366   if (mips_int_order_operand_ok_p (*code, *cmp1))
5367     return true;
5368 
5369   if (CONST_INT_P (*cmp1))
5370     switch (*code)
5371       {
5372       case LE:
5373 	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
5374 	if (INTVAL (*cmp1) < plus_one)
5375 	  {
5376 	    *code = LT;
5377 	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
5378 	    return true;
5379 	  }
5380 	break;
5381 
5382       case LEU:
5383 	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
5384 	if (plus_one != 0)
5385 	  {
5386 	    *code = LTU;
5387 	    *cmp1 = force_reg (mode, GEN_INT (plus_one));
5388 	    return true;
5389 	  }
5390 	break;
5391 
5392       default:
5393 	break;
5394       }
5395   return false;
5396 }
5397 
5398 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
5399    in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
5400    is nonnull, it's OK to set TARGET to the inverse of the result and
5401    flip *INVERT_PTR instead.  */
5402 
static void
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
			  rtx target, rtx cmp0, rtx cmp1)
{
  machine_mode mode;

  /* First see if there is a MIPS instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
    mips_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
	{
	  /* Neither the test nor its inverse can be used directly.
	     Retry with CMP1 in a register; with a register operand one
	     of the two forms is accepted, so the recursion terminates.  */
	  cmp1 = force_reg (mode, cmp1);
	  mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
	}
      else if (invert_ptr == 0)
	{
	  /* The caller cannot accept an inverted result, so compute the
	     inverse test into a temporary and undo the inversion by
	     XORing with 1.  */
	  rtx inv_target;

	  inv_target = mips_force_binary (GET_MODE (target),
					  inv_code, cmp0, cmp1);
	  mips_emit_binary (XOR, target, inv_target, const1_rtx);
	}
      else
	{
	  /* Record the inversion for the caller and emit the inverse
	     test directly into TARGET.  */
	  *invert_ptr = !*invert_ptr;
	  mips_emit_binary (inv_code, target, cmp0, cmp1);
	}
    }
}
5438 
5439 /* Return a register that is zero iff CMP0 and CMP1 are equal.
5440    The register will have the same mode as CMP0.  */
5441 
5442 static rtx
mips_zero_if_equal(rtx cmp0,rtx cmp1)5443 mips_zero_if_equal (rtx cmp0, rtx cmp1)
5444 {
5445   if (cmp1 == const0_rtx)
5446     return cmp0;
5447 
5448   if (uns_arith_operand (cmp1, VOIDmode))
5449     return expand_binop (GET_MODE (cmp0), xor_optab,
5450 			 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
5451 
5452   return expand_binop (GET_MODE (cmp0), sub_optab,
5453 		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
5454 }
5455 
5456 /* Convert *CODE into a code that can be used in a floating-point
5457    scc instruction (C.cond.fmt).  Return true if the values of
5458    the condition code registers will be inverted, with 0 indicating
5459    that the condition holds.  */
5460 
5461 static bool
mips_reversed_fp_cond(enum rtx_code * code)5462 mips_reversed_fp_cond (enum rtx_code *code)
5463 {
5464   switch (*code)
5465     {
5466     case NE:
5467     case LTGT:
5468     case ORDERED:
5469       *code = reverse_condition_maybe_unordered (*code);
5470       return true;
5471 
5472     default:
5473       return false;
5474     }
5475 }
5476 
5477 /* Allocate a floating-point condition-code register of mode MODE.
5478 
5479    These condition code registers are used for certain kinds
5480    of compound operation, such as compare and branches, vconds,
5481    and built-in functions.  At expand time, their use is entirely
5482    controlled by MIPS-specific code and is entirely internal
5483    to these compound operations.
5484 
5485    We could (and did in the past) expose condition-code values
5486    as pseudo registers and leave the register allocator to pick
5487    appropriate registers.  The problem is that it is not practically
5488    possible for the rtl optimizers to guarantee that no spills will
5489    be needed, even when AVOID_CCMODE_COPIES is defined.  We would
5490    therefore need spill and reload sequences to handle the worst case.
5491 
5492    Although such sequences do exist, they are very expensive and are
5493    not something we'd want to use.  This is especially true of CCV2 and
5494    CCV4, where all the shuffling would greatly outweigh whatever benefit
5495    the vectorization itself provides.
5496 
5497    The main benefit of having more than one condition-code register
5498    is to allow the pipelining of operations, especially those involving
5499    comparisons and conditional moves.  We don't really expect the
5500    registers to be live for long periods, and certainly never want
5501    them to be live across calls.
5502 
5503    Also, there should be no penalty attached to using all the available
5504    registers.  They are simply bits in the same underlying FPU control
5505    register.
5506 
5507    We therefore expose the hardware registers from the outset and use
5508    a simple round-robin allocation scheme.  */
5509 
static rtx
mips_allocate_fcc (machine_mode mode)
{
  unsigned int regno, count;

  gcc_assert (TARGET_HARD_FLOAT && ISA_HAS_8CC);

  /* COUNT is the number of consecutive condition-code registers
     that MODE occupies.  */
  if (mode == CCmode)
    count = 1;
  else if (mode == CCV2mode)
    count = 2;
  else if (mode == CCV4mode)
    count = 4;
  else
    gcc_unreachable ();

  /* Round NEXT_FCC up to a multiple of COUNT; COUNT is a power of 2,
     so "-x & (count - 1)" is the distance to the next boundary.  */
  cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1);
  /* Wrap around to register 0 once the ST registers are exhausted.  */
  if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST)
    cfun->machine->next_fcc = 0;
  regno = ST_REG_FIRST + cfun->machine->next_fcc;
  cfun->machine->next_fcc += count;
  return gen_rtx_REG (mode, regno);
}
5533 
5534 /* Convert a comparison into something that can be used in a branch or
5535    conditional move.  On entry, *OP0 and *OP1 are the values being
5536    compared and *CODE is the code used to compare them.
5537 
5538    Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
5539    If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
5540    otherwise any standard branch condition can be used.  The standard branch
5541    conditions are:
5542 
5543       - EQ or NE between two registers.
5544       - any comparison between a register and zero.
5545       - if compact branches are available then any condition is valid.  */
5546 
static void
mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
  rtx cmp_op0 = *op0;
  rtx cmp_op1 = *op1;

  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
    {
      /* Any comparison of a register against zero is already a
	 standard branch condition, unless the context only allows
	 EQ/NE against zero.  */
      if (!need_eq_ne_p && *op1 == const0_rtx)
	;
      else if (*code == EQ || *code == NE)
	{
	  if (need_eq_ne_p)
	    {
	      /* Reduce to an EQ/NE test of a register against zero.  */
	      *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
	      *op1 = const0_rtx;
	    }
	  else
	    /* EQ/NE between two registers is directly branchable.  */
	    *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
	}
      else if (!need_eq_ne_p && TARGET_CB_MAYBE)
	{
	  /* Compact branches accept any register-register condition,
	     but only the GE/LT/GEU/LTU codes; canonicalize the other
	     ordering codes by swapping the operands.  */
	  bool swap = false;
	  switch (*code)
	    {
	    case LE:
	      swap = true;
	      *code = GE;
	      break;
	    case GT:
	      swap = true;
	      *code = LT;
	      break;
	    case LEU:
	      swap = true;
	      *code = GEU;
	      break;
	    case GTU:
	      swap = true;
	      *code = LTU;
	      break;
	    case GE:
	    case LT:
	    case GEU:
	    case LTU:
	      /* Do nothing.  */
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
	  if (swap)
	    {
	      rtx tmp = *op1;
	      *op1 = *op0;
	      *op0 = tmp;
	    }
	}
      else
	{
	  /* The comparison needs a separate scc instruction.  Store the
	     result of the scc in *OP0 and compare it against zero.  */
	  bool invert = false;
	  *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
	  mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
	  *code = (invert ? EQ : NE);
	  *op1 = const0_rtx;
	}
    }
  else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
    {
      /* Fixed-point comparisons set the DSP condition-code register;
	 branch on that register being nonzero.  */
      *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
      mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
      *code = NE;
      *op1 = const0_rtx;
    }
  else
    {
      enum rtx_code cmp_code;

      /* Floating-point tests use a separate C.cond.fmt or CMP.cond.fmt
	 comparison to set a register.  The branch or conditional move will
	 then compare that register against zero.

	 Set CMP_CODE to the code of the comparison instruction and
	 *CODE to the code that the branch or move should use.  */
      cmp_code = *code;
      if (ISA_HAS_CCF)
	{
	  /* All FP conditions can be implemented directly with CMP.cond.fmt
	     or by reversing the operands.  */
	  *code = NE;
	  *op0 = gen_reg_rtx (CCFmode);
	}
      else
	{
	  /* Three FP conditions cannot be implemented by reversing the
	     operands for C.cond.fmt, instead a reversed condition code is
	     required and a test for false.  */
	  *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
	  if (ISA_HAS_8CC)
	    *op0 = mips_allocate_fcc (CCmode);
	  else
	    *op0 = gen_rtx_REG (CCmode, FPSW_REGNUM);
	}

      *op1 = const0_rtx;
      mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
    }
}
5657 
5658 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
5659    and OPERAND[3].  Store the result in OPERANDS[0].
5660 
5661    On 64-bit targets, the mode of the comparison and target will always be
5662    SImode, thus possibly narrower than that of the comparison's operands.  */
5663 
5664 void
mips_expand_scc(rtx operands[])5665 mips_expand_scc (rtx operands[])
5666 {
5667   rtx target = operands[0];
5668   enum rtx_code code = GET_CODE (operands[1]);
5669   rtx op0 = operands[2];
5670   rtx op1 = operands[3];
5671 
5672   gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
5673 
5674   if (code == EQ || code == NE)
5675     {
5676       if (ISA_HAS_SEQ_SNE
5677 	  && reg_imm10_operand (op1, GET_MODE (op1)))
5678 	mips_emit_binary (code, target, op0, op1);
5679       else
5680 	{
5681 	  rtx zie = mips_zero_if_equal (op0, op1);
5682 	  mips_emit_binary (code, target, zie, const0_rtx);
5683 	}
5684     }
5685   else
5686     mips_emit_int_order_test (code, 0, target, op0, op1);
5687 }
5688 
5689 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
5690    CODE and jump to OPERANDS[3] if the condition holds.  */
5691 
5692 void
mips_expand_conditional_branch(rtx * operands)5693 mips_expand_conditional_branch (rtx *operands)
5694 {
5695   enum rtx_code code = GET_CODE (operands[0]);
5696   rtx op0 = operands[1];
5697   rtx op1 = operands[2];
5698   rtx condition;
5699 
5700   mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
5701   condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5702   emit_jump_insn (gen_condjump (condition, operands[3]));
5703 }
5704 
5705 /* Implement:
5706 
5707    (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
5708    (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS))  */
5709 
5710 void
mips_expand_vcondv2sf(rtx dest,rtx true_src,rtx false_src,enum rtx_code cond,rtx cmp_op0,rtx cmp_op1)5711 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
5712 		       enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
5713 {
5714   rtx cmp_result;
5715   bool reversed_p;
5716 
5717   reversed_p = mips_reversed_fp_cond (&cond);
5718   cmp_result = mips_allocate_fcc (CCV2mode);
5719   emit_insn (gen_scc_ps (cmp_result,
5720 			 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
5721   if (reversed_p)
5722     emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
5723 					 cmp_result));
5724   else
5725     emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
5726 					 cmp_result));
5727 }
5728 
5729 /* Perform the comparison in OPERANDS[1].  Move OPERANDS[2] into OPERANDS[0]
5730    if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0].  */
5731 
void
mips_expand_conditional_move (rtx *operands)
{
  rtx cond;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  /* Only EQ/NE against zero are possible here (NEED_EQ_NE_P is true).  */
  mips_emit_compare (&code, &op0, &op1, true);
  cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);

  /* There is no direct support for general conditional GP move involving
     two registers using SEL.  */
  if (ISA_HAS_SEL
      && INTEGRAL_MODE_P (GET_MODE (operands[2]))
      && register_operand (operands[2], VOIDmode)
      && register_operand (operands[3], VOIDmode))
    {
      machine_mode mode = GET_MODE (operands[0]);
      rtx temp = gen_reg_rtx (mode);
      rtx temp2 = gen_reg_rtx (mode);

      /* TEMP = OPERANDS[2] if COND else 0.  */
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (mode, cond,
						    operands[2], const0_rtx)));

      /* Flip the test for the second operand.  */
      cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1);

      /* TEMP2 = OPERANDS[3] if !COND else 0.  */
      emit_insn (gen_rtx_SET (temp2,
			      gen_rtx_IF_THEN_ELSE (mode, cond,
						    operands[3], const0_rtx)));

      /* Merge the two results, at least one is guaranteed to be zero.  */
      emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2)));
    }
  else
    {
      /* Without SEL, floating-point conditional moves require both
	 arms to be in registers.  */
      if (FLOAT_MODE_P (GET_MODE (operands[2])) && !ISA_HAS_SEL)
	{
	  operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
	  operands[3] = force_reg (GET_MODE (operands[0]), operands[3]);
	}

      emit_insn (gen_rtx_SET (operands[0],
			      gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
						    operands[2], operands[3])));
    }
}
5781 
5782 /* Perform the comparison in COMPARISON, then trap if the condition holds.  */
5783 
5784 void
mips_expand_conditional_trap(rtx comparison)5785 mips_expand_conditional_trap (rtx comparison)
5786 {
5787   rtx op0, op1;
5788   machine_mode mode;
5789   enum rtx_code code;
5790 
5791   /* MIPS conditional trap instructions don't have GT or LE flavors,
5792      so we must swap the operands and convert to LT and GE respectively.  */
5793   code = GET_CODE (comparison);
5794   switch (code)
5795     {
5796     case GT:
5797     case LE:
5798     case GTU:
5799     case LEU:
5800       code = swap_condition (code);
5801       op0 = XEXP (comparison, 1);
5802       op1 = XEXP (comparison, 0);
5803       break;
5804 
5805     default:
5806       op0 = XEXP (comparison, 0);
5807       op1 = XEXP (comparison, 1);
5808       break;
5809     }
5810 
5811   mode = GET_MODE (XEXP (comparison, 0));
5812   op0 = force_reg (mode, op0);
5813   if (!(ISA_HAS_COND_TRAPI
5814 	? arith_operand (op1, mode)
5815 	: reg_or_0_operand (op1, mode)))
5816     op1 = force_reg (mode, op1);
5817 
5818   emit_insn (gen_rtx_TRAP_IF (VOIDmode,
5819 			      gen_rtx_fmt_ee (code, mode, op0, op1),
5820 			      const0_rtx));
5821 }
5822 
5823 /* Initialize *CUM for a call to a function of type FNTYPE.  */
5824 
5825 void
mips_init_cumulative_args(CUMULATIVE_ARGS * cum,tree fntype)5826 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
5827 {
5828   memset (cum, 0, sizeof (*cum));
5829   cum->prototype = (fntype && prototype_p (fntype));
5830   cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
5831 }
5832 
5833 /* Fill INFO with information about a single argument.  CUM is the
5834    cumulative state for earlier arguments.  MODE is the mode of this
5835    argument and TYPE is its type (if known).  NAMED is true if this
5836    is a named (fixed) argument rather than a variable one.  */
5837 
static void
mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
		   machine_mode mode, const_tree type, bool named)
{
  bool doubleword_aligned_p;
  unsigned int num_bytes, num_words, max_regs;

  /* Work out the size of the argument, rounded up to whole words.  */
  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Decide whether it should go in a floating-point register, assuming
     one is free.  Later code checks for availability.

     The checks against UNITS_PER_FPVALUE handle the soft-float and
     single-float cases.  */
  switch (mips_abi)
    {
    case ABI_EABI:
      /* The EABI conventions have traditionally been defined in terms
	 of TYPE_MODE, regardless of the actual type.  */
      info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
		      || mode == V2SFmode)
		     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_32:
    case ABI_O64:
      /* Only leading floating-point scalars are passed in
	 floating-point registers.  We also handle vector floats the same
	 say, which is OK because they are not covered by the standard ABI.  */
      gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
      info->fpr_p = (!cum->gp_reg_found
		     && cum->arg_number < 2
		     && (type == 0
			 || SCALAR_FLOAT_TYPE_P (type)
			 || VECTOR_FLOAT_TYPE_P (type))
		     && (GET_MODE_CLASS (mode) == MODE_FLOAT
			 || mode == V2SFmode)
		     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_N32:
    case ABI_64:
      /* Scalar, complex and vector floating-point types are passed in
	 floating-point registers, as long as this is a named rather
	 than a variable argument.  */
      gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
      info->fpr_p = (named
		     && (type == 0 || FLOAT_TYPE_P (type))
		     && (GET_MODE_CLASS (mode) == MODE_FLOAT
			 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
			 || mode == V2SFmode)
		     && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);

      /* ??? According to the ABI documentation, the real and imaginary
	 parts of complex floats should be passed in individual registers.
	 The real and imaginary parts of stack arguments are supposed
	 to be contiguous and there should be an extra word of padding
	 at the end.

	 This has two problems.  First, it makes it impossible to use a
	 single "void *" va_list type, since register and stack arguments
	 are passed differently.  (At the time of writing, MIPSpro cannot
	 handle complex float varargs correctly.)  Second, it's unclear
	 what should happen when there is only one register free.

	 For now, we assume that named complex floats should go into FPRs
	 if there are two FPRs free, otherwise they should be passed in the
	 same way as a struct containing two floats.  */
      if (info->fpr_p
	  && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
	  && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
	{
	  if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
	    info->fpr_p = false;
	  else
	    num_words = 2;
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* See whether the argument has doubleword alignment.  */
  doubleword_aligned_p = (mips_function_arg_boundary (mode, type)
			  > BITS_PER_WORD);

  /* Set REG_OFFSET to the register count we're interested in.
     The EABI allocates the floating-point registers separately,
     but the other ABIs allocate them like integer registers.  */
  info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
		      ? cum->num_fprs
		      : cum->num_gprs);

  /* Advance to an even register if the argument is doubleword-aligned.  */
  if (doubleword_aligned_p)
    info->reg_offset += info->reg_offset & 1;

  /* Work out the offset of a stack argument, applying the same
     even-boundary rule to the stack word count.  */
  info->stack_offset = cum->stack_words;
  if (doubleword_aligned_p)
    info->stack_offset += info->stack_offset & 1;

  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;

  /* Partition the argument between registers and stack: leading words
     go into registers while they last, the remainder onto the stack.  */
  info->reg_words = MIN (num_words, max_regs);
  info->stack_words = num_words - info->reg_words;
}
5949 
5950 /* INFO describes a register argument that has the normal format for the
5951    argument's mode.  Return the register it uses, assuming that FPRs are
5952    available if HARD_FLOAT_P.  */
5953 
5954 static unsigned int
mips_arg_regno(const struct mips_arg_info * info,bool hard_float_p)5955 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
5956 {
5957   if (!info->fpr_p || !hard_float_p)
5958     return GP_ARG_FIRST + info->reg_offset;
5959   else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
5960     /* In o32, the second argument is always passed in $f14
5961        for TARGET_DOUBLE_FLOAT, regardless of whether the
5962        first argument was a word or doubleword.  */
5963     return FP_ARG_FIRST + 2;
5964   else
5965     return FP_ARG_FIRST + info->reg_offset;
5966 }
5967 
5968 /* Implement TARGET_STRICT_ARGUMENT_NAMING.  */
5969 
5970 static bool
mips_strict_argument_naming(cumulative_args_t ca ATTRIBUTE_UNUSED)5971 mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5972 {
5973   return !TARGET_OLDABI;
5974 }
5975 
5976 /* Implement TARGET_FUNCTION_ARG.  */
5977 
static rtx
mips_function_arg (cumulative_args_t cum_v, machine_mode mode,
		   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  struct mips_arg_info info;

  /* We will be called with a mode of VOIDmode after the last argument
     has been seen.  Whatever we return will be passed to the call expander.
     If we need a MIPS16 fp_code, return a REG with the code stored as
     the mode.  */
  if (mode == VOIDmode)
    {
      if (TARGET_MIPS16 && cum->fp_code != 0)
	return gen_rtx_REG ((machine_mode) cum->fp_code, 0);
      else
	return NULL;
    }

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Return straight away if the whole argument is passed on the stack.  */
  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
    return NULL;

  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
     contains a double in its entirety, then that 64-bit chunk is passed
     in a floating-point register.  */
  if (TARGET_NEWABI
      && TARGET_HARD_FLOAT
      && named
      && type != 0
      && TREE_CODE (type) == RECORD_TYPE
      && TYPE_SIZE_UNIT (type)
      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
    {
      tree field;

      /* First check to see if there is any such field: a word-sized,
	 word-aligned scalar float member.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL
	    && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
	    && tree_fits_shwi_p (bit_position (field))
	    && int_bit_position (field) % BITS_PER_WORD == 0)
	  break;

      if (field != 0)
	{
	  /* Now handle the special case by returning a PARALLEL
	     indicating where each 64-bit chunk goes.  INFO.REG_WORDS
	     chunks are passed in registers.  */
	  unsigned int i;
	  HOST_WIDE_INT bitpos;
	  rtx ret;

	  /* assign_parms checks the mode of ENTRY_PARM, so we must
	     use the actual mode here.  */
	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));

	  bitpos = 0;
	  field = TYPE_FIELDS (type);
	  for (i = 0; i < info.reg_words; i++)
	    {
	      rtx reg;

	      /* Find the first field at or after the current chunk.  */
	      for (; field; field = DECL_CHAIN (field))
		if (TREE_CODE (field) == FIELD_DECL
		    && int_bit_position (field) >= bitpos)
		  break;

	      /* A chunk that starts exactly at a qualifying double
		 field goes in an FPR; all other chunks go in GPRs.  */
	      if (field
		  && int_bit_position (field) == bitpos
		  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
		reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
	      else
		reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);

	      XVECEXP (ret, 0, i)
		= gen_rtx_EXPR_LIST (VOIDmode, reg,
				     GEN_INT (bitpos / BITS_PER_UNIT));

	      bitpos += BITS_PER_WORD;
	    }
	  return ret;
	}
    }

  /* Handle the n32/n64 conventions for passing complex floating-point
     arguments in FPR pairs.  The real part goes in the lower register
     and the imaginary part goes in the upper register.  */
  if (TARGET_NEWABI
      && info.fpr_p
      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      rtx real, imag;
      machine_mode inner;
      unsigned int regno;

      inner = GET_MODE_INNER (mode);
      regno = FP_ARG_FIRST + info.reg_offset;
      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
	{
	  /* Real part in registers, imaginary part on stack.  */
	  gcc_assert (info.stack_words == info.reg_words);
	  return gen_rtx_REG (inner, regno);
	}
      else
	{
	  /* Both parts fit in registers: describe the split with a
	     two-element PARALLEL.  */
	  gcc_assert (info.stack_words == 0);
	  real = gen_rtx_EXPR_LIST (VOIDmode,
				    gen_rtx_REG (inner, regno),
				    const0_rtx);
	  imag = gen_rtx_EXPR_LIST (VOIDmode,
				    gen_rtx_REG (inner,
						 regno + info.reg_words / 2),
				    GEN_INT (GET_MODE_SIZE (inner)));
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
	}
    }

  /* The common case: a single register of the argument's own mode.  */
  return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
}
6102 
6103 /* Implement TARGET_FUNCTION_ARG_ADVANCE.  */
6104 
static void
mips_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Once a non-FP argument has been seen, later arguments can no
     longer use the leading-FP-argument registers (see ABI_32/ABI_O64
     handling in mips_get_arg_info).  */
  if (!info.fpr_p)
    cum->gp_reg_found = true;

  /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
     an explanation of what this code does.  It assumes that we're using
     either the o32 or the o64 ABI, both of which pass at most 2 arguments
     in FPRs.  Two bits are recorded per argument: 1 for SFmode,
     2 otherwise.  */
  if (cum->arg_number < 2 && info.fpr_p)
    cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);

  /* Advance the register count.  This has the effect of setting
     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
     argument required us to skip the final GPR and pass the whole
     argument on the stack.  */
  if (mips_abi != ABI_EABI || !info.fpr_p)
    cum->num_gprs = info.reg_offset + info.reg_words;
  else if (info.reg_words > 0)
    cum->num_fprs += MAX_FPRS_PER_FMT;

  /* Advance the stack word count.  */
  if (info.stack_words > 0)
    cum->stack_words = info.stack_offset + info.stack_words;

  cum->arg_number++;
}
6139 
6140 /* Implement TARGET_ARG_PARTIAL_BYTES.  */
6141 
6142 static int
mips_arg_partial_bytes(cumulative_args_t cum,machine_mode mode,tree type,bool named)6143 mips_arg_partial_bytes (cumulative_args_t cum,
6144 			machine_mode mode, tree type, bool named)
6145 {
6146   struct mips_arg_info info;
6147 
6148   mips_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
6149   return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
6150 }
6151 
6152 /* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at
6153    least PARM_BOUNDARY bits of alignment, but will be given anything up
6154    to STACK_BOUNDARY bits if the type requires it.  */
6155 
6156 static unsigned int
mips_function_arg_boundary(machine_mode mode,const_tree type)6157 mips_function_arg_boundary (machine_mode mode, const_tree type)
6158 {
6159   unsigned int alignment;
6160 
6161   alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
6162   if (alignment < PARM_BOUNDARY)
6163     alignment = PARM_BOUNDARY;
6164   if (alignment > STACK_BOUNDARY)
6165     alignment = STACK_BOUNDARY;
6166   return alignment;
6167 }
6168 
6169 /* Implement TARGET_GET_RAW_RESULT_MODE and TARGET_GET_RAW_ARG_MODE.  */
6170 
6171 static fixed_size_mode
mips_get_reg_raw_mode(int regno)6172 mips_get_reg_raw_mode (int regno)
6173 {
6174   if (TARGET_FLOATXX && FP_REG_P (regno))
6175     return DFmode;
6176   return default_get_reg_raw_mode (regno);
6177 }
6178 
6179 /* Implement TARGET_FUNCTION_ARG_PADDING; return PAD_UPWARD if the first
6180    byte of the stack slot has useful data, PAD_DOWNWARD if the last byte
6181    does.  */
6182 
6183 static pad_direction
mips_function_arg_padding(machine_mode mode,const_tree type)6184 mips_function_arg_padding (machine_mode mode, const_tree type)
6185 {
6186   /* On little-endian targets, the first byte of every stack argument
6187      is passed in the first byte of the stack slot.  */
6188   if (!BYTES_BIG_ENDIAN)
6189     return PAD_UPWARD;
6190 
6191   /* Otherwise, integral types are padded downward: the last byte of a
6192      stack argument is passed in the last byte of the stack slot.  */
6193   if (type != 0
6194       ? (INTEGRAL_TYPE_P (type)
6195 	 || POINTER_TYPE_P (type)
6196 	 || FIXED_POINT_TYPE_P (type))
6197       : (SCALAR_INT_MODE_P (mode)
6198 	 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
6199     return PAD_DOWNWARD;
6200 
6201   /* Big-endian o64 pads floating-point arguments downward.  */
6202   if (mips_abi == ABI_O64)
6203     if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
6204       return PAD_DOWNWARD;
6205 
6206   /* Other types are padded upward for o32, o64, n32 and n64.  */
6207   if (mips_abi != ABI_EABI)
6208     return PAD_UPWARD;
6209 
6210   /* Arguments smaller than a stack slot are padded downward.  */
6211   if (mode != BLKmode
6212       ? GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY
6213       : int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT))
6214     return PAD_UPWARD;
6215 
6216   return PAD_DOWNWARD;
6217 }
6218 
6219 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN
6220    if the least significant byte of the register has useful data.  Return
6221    the opposite if the most significant byte does.  */
6222 
6223 bool
mips_pad_reg_upward(machine_mode mode,tree type)6224 mips_pad_reg_upward (machine_mode mode, tree type)
6225 {
6226   /* No shifting is required for floating-point arguments.  */
6227   if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
6228     return !BYTES_BIG_ENDIAN;
6229 
6230   /* Otherwise, apply the same padding to register arguments as we do
6231      to stack arguments.  */
6232   return mips_function_arg_padding (mode, type) == PAD_UPWARD;
6233 }
6234 
6235 /* Return nonzero when an argument must be passed by reference.  */
6236 
6237 static bool
mips_pass_by_reference(cumulative_args_t cum ATTRIBUTE_UNUSED,machine_mode mode,const_tree type,bool named ATTRIBUTE_UNUSED)6238 mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6239 			machine_mode mode, const_tree type,
6240 			bool named ATTRIBUTE_UNUSED)
6241 {
6242   if (mips_abi == ABI_EABI)
6243     {
6244       int size;
6245 
6246       /* ??? How should SCmode be handled?  */
6247       if (mode == DImode || mode == DFmode
6248 	  || mode == DQmode || mode == UDQmode
6249 	  || mode == DAmode || mode == UDAmode)
6250 	return 0;
6251 
6252       size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
6253       return size == -1 || size > UNITS_PER_WORD;
6254     }
6255   else
6256     {
6257       /* If we have a variable-sized parameter, we have no choice.  */
6258       return targetm.calls.must_pass_in_stack (mode, type);
6259     }
6260 }
6261 
6262 /* Implement TARGET_CALLEE_COPIES.  */
6263 
6264 static bool
mips_callee_copies(cumulative_args_t cum ATTRIBUTE_UNUSED,machine_mode mode ATTRIBUTE_UNUSED,const_tree type ATTRIBUTE_UNUSED,bool named)6265 mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
6266 		    machine_mode mode ATTRIBUTE_UNUSED,
6267 		    const_tree type ATTRIBUTE_UNUSED, bool named)
6268 {
6269   return mips_abi == ABI_EABI && named;
6270 }
6271 
6272 /* See whether VALTYPE is a record whose fields should be returned in
6273    floating-point registers.  If so, return the number of fields and
6274    list them in FIELDS (which should have two elements).  Return 0
6275    otherwise.
6276 
6277    For n32 & n64, a structure with one or two fields is returned in
6278    floating-point registers as long as every field has a floating-point
6279    type.  */
6280 
6281 static int
mips_fpr_return_fields(const_tree valtype,tree * fields)6282 mips_fpr_return_fields (const_tree valtype, tree *fields)
6283 {
6284   tree field;
6285   int i;
6286 
6287   if (!TARGET_NEWABI)
6288     return 0;
6289 
6290   if (TREE_CODE (valtype) != RECORD_TYPE)
6291     return 0;
6292 
6293   i = 0;
6294   for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
6295     {
6296       if (TREE_CODE (field) != FIELD_DECL)
6297 	continue;
6298 
6299       if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
6300 	return 0;
6301 
6302       if (i == 2)
6303 	return 0;
6304 
6305       fields[i++] = field;
6306     }
6307   return i;
6308 }
6309 
6310 /* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
6311    a value in the most significant part of $2/$3 if:
6312 
6313       - the target is big-endian;
6314 
6315       - the value has a structure or union type (we generalize this to
6316 	cover aggregates from other languages too); and
6317 
6318       - the structure is not returned in floating-point registers.  */
6319 
6320 static bool
mips_return_in_msb(const_tree valtype)6321 mips_return_in_msb (const_tree valtype)
6322 {
6323   tree fields[2];
6324 
6325   return (TARGET_NEWABI
6326 	  && TARGET_BIG_ENDIAN
6327 	  && AGGREGATE_TYPE_P (valtype)
6328 	  && mips_fpr_return_fields (valtype, fields) == 0);
6329 }
6330 
6331 /* Return true if the function return value MODE will get returned in a
6332    floating-point register.  */
6333 
6334 static bool
mips_return_mode_in_fpr_p(machine_mode mode)6335 mips_return_mode_in_fpr_p (machine_mode mode)
6336 {
6337   gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
6338   return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6339 	   || mode == V2SFmode
6340 	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6341 	  && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6342 }
6343 
6344 /* Return the representation of an FPR return register when the
6345    value being returned in FP_RETURN has mode VALUE_MODE and the
6346    return type itself has mode TYPE_MODE.  On NewABI targets,
6347    the two modes may be different for structures like:
6348 
6349        struct __attribute__((packed)) foo { float f; }
6350 
6351    where we return the SFmode value of "f" in FP_RETURN, but where
6352    the structure itself has mode BLKmode.  */
6353 
6354 static rtx
mips_return_fpr_single(machine_mode type_mode,machine_mode value_mode)6355 mips_return_fpr_single (machine_mode type_mode,
6356 			machine_mode value_mode)
6357 {
6358   rtx x;
6359 
6360   x = gen_rtx_REG (value_mode, FP_RETURN);
6361   if (type_mode != value_mode)
6362     {
6363       x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
6364       x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
6365     }
6366   return x;
6367 }
6368 
6369 /* Return a composite value in a pair of floating-point registers.
6370    MODE1 and OFFSET1 are the mode and byte offset for the first value,
6371    likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
6372    complete value.
6373 
6374    For n32 & n64, $f0 always holds the first value and $f2 the second.
6375    Otherwise the values are packed together as closely as possible.  */
6376 
6377 static rtx
mips_return_fpr_pair(machine_mode mode,machine_mode mode1,HOST_WIDE_INT offset1,machine_mode mode2,HOST_WIDE_INT offset2)6378 mips_return_fpr_pair (machine_mode mode,
6379 		      machine_mode mode1, HOST_WIDE_INT offset1,
6380 		      machine_mode mode2, HOST_WIDE_INT offset2)
6381 {
6382   int inc;
6383 
6384   inc = (TARGET_NEWABI || mips_abi == ABI_32 ? 2 : MAX_FPRS_PER_FMT);
6385   return gen_rtx_PARALLEL
6386     (mode,
6387      gen_rtvec (2,
6388 		gen_rtx_EXPR_LIST (VOIDmode,
6389 				   gen_rtx_REG (mode1, FP_RETURN),
6390 				   GEN_INT (offset1)),
6391 		gen_rtx_EXPR_LIST (VOIDmode,
6392 				   gen_rtx_REG (mode2, FP_RETURN + inc),
6393 				   GEN_INT (offset2))));
6394 
6395 }
6396 
6397 /* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
6398    For normal calls, VALTYPE is the return type and MODE is VOIDmode.
6399    For libcalls, VALTYPE is null and MODE is the mode of the return value.  */
6400 
static rtx
mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
		       machine_mode mode)
{
  if (valtype)
    {
      tree fields[2];
      int unsigned_p;
      const_tree func;

      /* FN_DECL_OR_TYPE may be either the called function's decl or
	 its type; promote_function_mode only wants the decl.  */
      if (fn_decl_or_type && DECL_P (fn_decl_or_type))
	func = fn_decl_or_type;
      else
	func = NULL;

      mode = TYPE_MODE (valtype);
      unsigned_p = TYPE_UNSIGNED (valtype);

      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
	 return values, promote the mode here too.  */
      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
	{
	case 1:
	  return mips_return_fpr_single (mode,
					 TYPE_MODE (TREE_TYPE (fields[0])));

	case 2:
	  return mips_return_fpr_pair (mode,
				       TYPE_MODE (TREE_TYPE (fields[0])),
				       int_byte_position (fields[0]),
				       TYPE_MODE (TREE_TYPE (fields[1])),
				       int_byte_position (fields[1]));
	}

      /* If a value is passed in the most significant part of a register, see
	 whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
	{
	  HOST_WIDE_INT size = int_size_in_bytes (valtype);
	  if (size % UNITS_PER_WORD != 0)
	    {
	      size += UNITS_PER_WORD - size % UNITS_PER_WORD;
	      mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
	    }
	}

      /* For EABI, the class of return register depends entirely on MODE.
	 For example, "struct { some_type x; }" and "union { some_type x; }"
	 are returned in the same way as a bare "some_type" would be.
	 Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
	return gen_rtx_REG (mode, GP_RETURN);
    }

  /* MIPS16 never returns values in FPRs.  */
  if (!TARGET_MIPS16)
    {
      /* Handle long doubles for n32 & n64.  */
      if (mode == TFmode)
	return mips_return_fpr_pair (mode,
				     DImode, 0,
				     DImode, GET_MODE_SIZE (mode) / 2);

      if (mips_return_mode_in_fpr_p (mode))
	{
	  /* Complex floats use one FPR (pair) per part.  */
	  if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	    return mips_return_fpr_pair (mode,
					 GET_MODE_INNER (mode), 0,
					 GET_MODE_INNER (mode),
					 GET_MODE_SIZE (mode) / 2);
	  else
	    return gen_rtx_REG (mode, FP_RETURN);
	}
    }

  /* Everything else comes back in the GP return register(s).  */
  return gen_rtx_REG (mode, GP_RETURN);
}
6480 
6481 /* Implement TARGET_FUNCTION_VALUE.  */
6482 
6483 static rtx
mips_function_value(const_tree valtype,const_tree fn_decl_or_type,bool outgoing ATTRIBUTE_UNUSED)6484 mips_function_value (const_tree valtype, const_tree fn_decl_or_type,
6485 		     bool outgoing ATTRIBUTE_UNUSED)
6486 {
6487   return mips_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
6488 }
6489 
6490 /* Implement TARGET_LIBCALL_VALUE.  */
6491 
6492 static rtx
mips_libcall_value(machine_mode mode,const_rtx fun ATTRIBUTE_UNUSED)6493 mips_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
6494 {
6495   return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
6496 }
6497 
/* Implement TARGET_FUNCTION_VALUE_REGNO_P.

   On the MIPS, R2, R3, F0 and F2 are the only registers used this way.  */
6501 
6502 static bool
mips_function_value_regno_p(const unsigned int regno)6503 mips_function_value_regno_p (const unsigned int regno)
6504 {
6505   /* Most types only require one GPR or one FPR for return values but for
6506      hard-float two FPRs can be used for _Complex types (for all ABIs)
6507      and long doubles (for n64).  */
6508   if (regno == GP_RETURN
6509       || regno == FP_RETURN
6510       || (FP_RETURN != GP_RETURN
6511 	  && regno == FP_RETURN + 2))
6512     return true;
6513 
6514   /* For o32 FP32, _Complex double will be returned in four 32-bit registers.
6515      This does not apply to o32 FPXX as floating-point function argument and
6516      return registers are described as 64-bit even though floating-point
6517      registers are primarily described as 32-bit internally.
6518      See: mips_get_reg_raw_mode.  */
6519   if ((mips_abi == ABI_32 && TARGET_FLOAT32)
6520       && FP_RETURN != GP_RETURN
6521       && (regno == FP_RETURN + 1
6522 	  || regno == FP_RETURN + 3))
6523     return true;
6524 
6525   return false;
6526 }
6527 
6528 /* Implement TARGET_RETURN_IN_MEMORY.  Under the o32 and o64 ABIs,
6529    all BLKmode objects are returned in memory.  Under the n32, n64
6530    and embedded ABIs, small structures are returned in a register.
6531    Objects with varying size must still be returned in memory, of
6532    course.  */
6533 
6534 static bool
mips_return_in_memory(const_tree type,const_tree fndecl ATTRIBUTE_UNUSED)6535 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
6536 {
6537   if (TARGET_OLDABI)
6538     /* Ensure that any floating point vector types are returned via memory
6539        even if they are supported through a vector mode with some ASEs.  */
6540     return (VECTOR_FLOAT_TYPE_P (type)
6541 	    || TYPE_MODE (type) == BLKmode);
6542 
6543   return (!IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
6544 }
6545 
6546 /* Implement TARGET_SETUP_INCOMING_VARARGS.  */
6547 
static void
mips_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
			     int no_rtl)
{
  CUMULATIVE_ARGS local_cum;
  int gp_saved, fp_saved;

  /* The caller has advanced CUM up to, but not beyond, the last named
     argument.  Advance a local copy of CUM past the last "real" named
     argument, to find out how many registers are left over.  */
  local_cum = *get_cumulative_args (cum);
  mips_function_arg_advance (pack_cumulative_args (&local_cum), mode, type,
			     true);

  /* Find out how many registers we need to save.  FPRs are only saved
     for EABI with floating-point varargs support.  */
  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
  fp_saved = (EABI_FLOAT_VARARGS_P
	      ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
	      : 0);

  if (!no_rtl)
    {
      if (gp_saved > 0)
	{
	  rtx ptr, mem;

	  /* The leftover GPRs are saved as a block immediately below
	     the incoming argument area.  */
	  ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
			       REG_PARM_STACK_SPACE (cfun->decl)
			       - gp_saved * UNITS_PER_WORD);
	  mem = gen_frame_mem (BLKmode, ptr);
	  set_mem_alias_set (mem, get_varargs_alias_set ());

	  move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
			       mem, gp_saved);
	}
      if (fp_saved > 0)
	{
	  /* We can't use move_block_from_reg, because it will use
	     the wrong mode.  */
	  machine_mode mode;
	  int off, i;

	  /* Set OFF to the offset from virtual_incoming_args_rtx of
	     the first float register.  The FP save area lies below
	     the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
	  off = ROUND_DOWN (-gp_saved * UNITS_PER_WORD, UNITS_PER_FPVALUE);
	  off -= fp_saved * UNITS_PER_FPREG;

	  mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;

	  /* Save each remaining FP argument register in turn; each
	     slot is UNITS_PER_HWFPVALUE bytes wide.  */
	  for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
	       i += MAX_FPRS_PER_FMT)
	    {
	      rtx ptr, mem;

	      ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
	      mem = gen_frame_mem (mode, ptr);
	      set_mem_alias_set (mem, get_varargs_alias_set ());
	      mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
	      off += UNITS_PER_HWFPVALUE;
	    }
	}
    }
  /* Record how much frame space the save areas need, unless the
     caller-allocated register parameter area already covers it.  */
  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
				   + fp_saved * UNITS_PER_FPREG);
}
6616 
6617 /* Implement TARGET_BUILTIN_VA_LIST.  */
6618 
static tree
mips_build_builtin_va_list (void)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      /* We keep 3 pointers, and two offsets.

	 Two pointers are to the overflow area, which starts at the CFA.
	 One of these is constant, for addressing into the GPR save area
	 below it.  The other is advanced up the stack through the
	 overflow region.

	 The third pointer is to the bottom of the GPR save area.
	 Since the FPR save area is just below it, we can address
	 FPR slots off this pointer.

	 We also keep two one-byte offsets, which are to be subtracted
	 from the constant pointers to yield addresses in the GPR and
	 FPR save areas.  These are downcounted as float or non-float
	 arguments are used, and when they get to zero, the argument
	 must be obtained from the overflow region.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
      tree array, index;

      record = lang_hooks.types.make_type (RECORD_TYPE);

      f_ovfl = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__overflow_argptr"),
			   ptr_type_node);
      f_gtop = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__gpr_top"),
			   ptr_type_node);
      f_ftop = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__fpr_top"),
			   ptr_type_node);
      f_goff = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__gpr_offset"),
			   unsigned_char_type_node);
      f_foff = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__fpr_offset"),
			   unsigned_char_type_node);
      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
	 warn on every user file.  */
      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
      array = build_array_type (unsigned_char_type_node,
			        build_index_type (index));
      f_res = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__reserved"), array);

      DECL_FIELD_CONTEXT (f_ovfl) = record;
      DECL_FIELD_CONTEXT (f_gtop) = record;
      DECL_FIELD_CONTEXT (f_ftop) = record;
      DECL_FIELD_CONTEXT (f_goff) = record;
      DECL_FIELD_CONTEXT (f_foff) = record;
      DECL_FIELD_CONTEXT (f_res) = record;

      /* Chain the fields in declaration order.  This order is relied
	 upon by mips_va_start, which walks DECL_CHAIN from the first
	 field to recover each member.  */
      TYPE_FIELDS (record) = f_ovfl;
      DECL_CHAIN (f_ovfl) = f_gtop;
      DECL_CHAIN (f_gtop) = f_ftop;
      DECL_CHAIN (f_ftop) = f_goff;
      DECL_CHAIN (f_goff) = f_foff;
      DECL_CHAIN (f_foff) = f_res;

      layout_type (record);
      return record;
    }
  else
    /* Otherwise, we use 'void *'.  */
    return ptr_type_node;
}
6689 
6690 /* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
6691 
static void
mips_va_start (tree valist, rtx nextarg)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      const CUMULATIVE_ARGS *cum;
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, gtop, ftop, goff, foff;
      tree t;
      int gpr_save_area_size;
      int fpr_save_area_size;
      int fpr_offset;

      cum = &crtl->args.info;
      gpr_save_area_size
	= (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
      fpr_save_area_size
	= (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;

      /* Recover the five va_list fields in the order laid down by
	 mips_build_builtin_va_list.  */
      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = DECL_CHAIN (f_ovfl);
      f_ftop = DECL_CHAIN (f_gtop);
      f_goff = DECL_CHAIN (f_ftop);
      f_foff = DECL_CHAIN (f_goff);

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
		     NULL_TREE);
      gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
		     NULL_TREE);
      ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
		     NULL_TREE);
      goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
		     NULL_TREE);
      foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
		     NULL_TREE);

      /* Emit code to initialize OVFL, which points to the next varargs
	 stack argument.  CUM->STACK_WORDS gives the number of stack
	 words used by named arguments.  */
      t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
      if (cum->stack_words > 0)
	t = fold_build_pointer_plus_hwi (t, cum->stack_words * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GTOP, the top of the GPR save area.  */
      t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
      t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize FTOP, the top of the FPR save area.
	 This address is gpr_save_area_bytes below GTOP, rounded
	 down to the next fp-aligned boundary.  */
      t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
      fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
      fpr_offset &= -UNITS_PER_FPVALUE;
      if (fpr_offset)
	t = fold_build_pointer_plus_hwi (t, -fpr_offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GOFF, the offset from GTOP of the
	 next GPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
		  build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Likewise emit code to initialize FOFF, the offset from FTOP
	 of the next FPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
		  build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      /* NEXTARG points past the named arguments; step back over the
	 register save area so the standard expander starts there.  */
      nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
      std_expand_builtin_va_start (valist, nextarg);
    }
}
6771 
6772 /* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
6773    types as well.  */
6774 
static tree
mips_std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			       gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* By-reference arguments are fetched as a pointer and then
     dereferenced at the end.  */
  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align)
    {
      /* valist_tmp = (valist_tmp + boundary - 1) & -boundary.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
			       valist_tmp,
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);
    }

  /* Compute the rounded size of the type.  */
  type_size = size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
		       rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}
6860 
6861 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  */
6862 
6863 static tree
mips_gimplify_va_arg_expr(tree valist,tree type,gimple_seq * pre_p,gimple_seq * post_p)6864 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6865 			   gimple_seq *post_p)
6866 {
6867   tree addr;
6868   bool indirect_p;
6869 
6870   indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6871   if (indirect_p)
6872     type = build_pointer_type (type);
6873 
6874   if (!EABI_FLOAT_VARARGS_P)
6875     addr = mips_std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6876   else
6877     {
6878       tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
6879       tree ovfl, top, off, align;
6880       HOST_WIDE_INT size, rsize, osize;
6881       tree t, u;
6882 
6883       f_ovfl = TYPE_FIELDS (va_list_type_node);
6884       f_gtop = DECL_CHAIN (f_ovfl);
6885       f_ftop = DECL_CHAIN (f_gtop);
6886       f_goff = DECL_CHAIN (f_ftop);
6887       f_foff = DECL_CHAIN (f_goff);
6888 
6889       /* Let:
6890 
6891 	 TOP be the top of the GPR or FPR save area;
6892 	 OFF be the offset from TOP of the next register;
6893 	 ADDR_RTX be the address of the argument;
6894 	 SIZE be the number of bytes in the argument type;
6895 	 RSIZE be the number of bytes used to store the argument
6896 	   when it's in the register save area; and
6897 	 OSIZE be the number of bytes used to store it when it's
6898 	   in the stack overflow area.
6899 
6900 	 The code we want is:
6901 
6902 	 1: off &= -rsize;	  // round down
6903 	 2: if (off != 0)
6904 	 3:   {
6905 	 4:	addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
6906 	 5:	off -= rsize;
6907 	 6:   }
6908 	 7: else
6909 	 8:   {
6910 	 9:	ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
6911 	 10:	addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
6912 	 11:	ovfl += osize;
6913 	 14:  }
6914 
6915 	 [1] and [9] can sometimes be optimized away.  */
6916 
6917       ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
6918 		     NULL_TREE);
6919       size = int_size_in_bytes (type);
6920 
6921       if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
6922 	  && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
6923 	{
6924 	  top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
6925 			unshare_expr (valist), f_ftop, NULL_TREE);
6926 	  off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
6927 			unshare_expr (valist), f_foff, NULL_TREE);
6928 
6929 	  /* When va_start saves FPR arguments to the stack, each slot
6930 	     takes up UNITS_PER_HWFPVALUE bytes, regardless of the
6931 	     argument's precision.  */
6932 	  rsize = UNITS_PER_HWFPVALUE;
6933 
6934 	  /* Overflow arguments are padded to UNITS_PER_WORD bytes
6935 	     (= PARM_BOUNDARY bits).  This can be different from RSIZE
6936 	     in two cases:
6937 
6938 	     (1) On 32-bit targets when TYPE is a structure such as:
6939 
6940 	     struct s { float f; };
6941 
6942 	     Such structures are passed in paired FPRs, so RSIZE
6943 	     will be 8 bytes.  However, the structure only takes
6944 	     up 4 bytes of memory, so OSIZE will only be 4.
6945 
6946 	     (2) In combinations such as -mgp64 -msingle-float
6947 	     -fshort-double.  Doubles passed in registers will then take
6948 	     up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
6949 	     stack take up UNITS_PER_WORD bytes.  */
6950 	  osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
6951 	}
6952       else
6953 	{
6954 	  top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
6955 			unshare_expr (valist), f_gtop, NULL_TREE);
6956 	  off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
6957 			unshare_expr (valist), f_goff, NULL_TREE);
6958 	  rsize = ROUND_UP (size, UNITS_PER_WORD);
6959 	  if (rsize > UNITS_PER_WORD)
6960 	    {
6961 	      /* [1] Emit code for: off &= -rsize.	*/
6962 	      t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
6963 			  build_int_cst (TREE_TYPE (off), -rsize));
6964 	      gimplify_assign (unshare_expr (off), t, pre_p);
6965 	    }
6966 	  osize = rsize;
6967 	}
6968 
6969       /* [2] Emit code to branch if off == 0.  */
6970       t = build2 (NE_EXPR, boolean_type_node, unshare_expr (off),
6971 		  build_int_cst (TREE_TYPE (off), 0));
6972       addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
6973 
6974       /* [5] Emit code for: off -= rsize.  We do this as a form of
6975 	 post-decrement not available to C.  */
6976       t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
6977       t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
6978 
6979       /* [4] Emit code for:
6980 	 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0).  */
6981       t = fold_convert (sizetype, t);
6982       t = fold_build1 (NEGATE_EXPR, sizetype, t);
6983       t = fold_build_pointer_plus (top, t);
6984       if (BYTES_BIG_ENDIAN && rsize > size)
6985 	t = fold_build_pointer_plus_hwi (t, rsize - size);
6986       COND_EXPR_THEN (addr) = t;
6987 
6988       if (osize > UNITS_PER_WORD)
6989 	{
6990 	  /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize.  */
6991 	  t = fold_build_pointer_plus_hwi (unshare_expr (ovfl), osize - 1);
6992 	  u = build_int_cst (TREE_TYPE (t), -osize);
6993 	  t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6994 	  align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
6995 			  unshare_expr (ovfl), t);
6996 	}
6997       else
6998 	align = NULL;
6999 
7000       /* [10, 11] Emit code for:
7001 	 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
7002 	 ovfl += osize.  */
7003       u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
7004       t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
7005       if (BYTES_BIG_ENDIAN && osize > size)
7006 	t = fold_build_pointer_plus_hwi (t, osize - size);
7007 
7008       /* String [9] and [10, 11] together.  */
7009       if (align)
7010 	t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
7011       COND_EXPR_ELSE (addr) = t;
7012 
7013       addr = fold_convert (build_pointer_type (type), addr);
7014       addr = build_va_arg_indirect_ref (addr);
7015     }
7016 
7017   if (indirect_p)
7018     addr = build_va_arg_indirect_ref (addr);
7019 
7020   return addr;
7021 }
7022 
7023 /* Declare a unique, locally-binding function called NAME, then start
7024    its definition.  */
7025 
static void
mips_start_unique_function (const char *name)
{
  tree decl;

  /* Build a void(void) FUNCTION_DECL for NAME so that the normal
     section and symbol machinery can be applied to it.  */
  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
		     get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  /* Put the function in a COMDAT group keyed off its own assembler name,
     so that identical copies emitted by other object files are merged.  */
  cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));

  /* Emit it into its own unique section.  */
  targetm.asm_out.unique_section (decl, 0);
  switch_to_section (get_named_section (decl, NULL, 0));

  /* Globalize the symbol but mark it hidden, so that the definition
     binds locally within the eventual output object.  */
  targetm.asm_out.globalize_label (asm_out_file, name);
  fputs ("\t.hidden\t", asm_out_file);
  assemble_name (asm_out_file, name);
  putc ('\n', asm_out_file);
}
7049 
7050 /* Start a definition of function NAME.  MIPS16_P indicates whether the
7051    function contains MIPS16 code.  */
7052 
7053 static void
mips_start_function_definition(const char * name,bool mips16_p)7054 mips_start_function_definition (const char *name, bool mips16_p)
7055 {
7056   if (mips16_p)
7057     fprintf (asm_out_file, "\t.set\tmips16\n");
7058   else
7059     fprintf (asm_out_file, "\t.set\tnomips16\n");
7060 
7061   if (TARGET_MICROMIPS)
7062     fprintf (asm_out_file, "\t.set\tmicromips\n");
7063 #ifdef HAVE_GAS_MICROMIPS
7064   else
7065     fprintf (asm_out_file, "\t.set\tnomicromips\n");
7066 #endif
7067 
7068   if (!flag_inhibit_size_directive)
7069     {
7070       fputs ("\t.ent\t", asm_out_file);
7071       assemble_name (asm_out_file, name);
7072       fputs ("\n", asm_out_file);
7073     }
7074 
7075   ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
7076 
7077   /* Start the definition proper.  */
7078   assemble_name (asm_out_file, name);
7079   fputs (":\n", asm_out_file);
7080 }
7081 
7082 /* End a function definition started by mips_start_function_definition.  */
7083 
7084 static void
mips_end_function_definition(const char * name)7085 mips_end_function_definition (const char *name)
7086 {
7087   if (!flag_inhibit_size_directive)
7088     {
7089       fputs ("\t.end\t", asm_out_file);
7090       assemble_name (asm_out_file, name);
7091       fputs ("\n", asm_out_file);
7092     }
7093 }
7094 
7095 /* If *STUB_PTR points to a stub, output a comdat-style definition for it,
7096    then free *STUB_PTR.  */
7097 
7098 static void
mips_finish_stub(mips_one_only_stub ** stub_ptr)7099 mips_finish_stub (mips_one_only_stub **stub_ptr)
7100 {
7101   mips_one_only_stub *stub = *stub_ptr;
7102   if (!stub)
7103     return;
7104 
7105   const char *name = stub->get_name ();
7106   mips_start_unique_function (name);
7107   mips_start_function_definition (name, false);
7108   stub->output_body ();
7109   mips_end_function_definition (name);
7110   delete stub;
7111   *stub_ptr = 0;
7112 }
7113 
7114 /* Return true if calls to X can use R_MIPS_CALL* relocations.  */
7115 
7116 static bool
mips_ok_for_lazy_binding_p(rtx x)7117 mips_ok_for_lazy_binding_p (rtx x)
7118 {
7119   return (TARGET_USE_GOT
7120 	  && GET_CODE (x) == SYMBOL_REF
7121 	  && !SYMBOL_REF_BIND_NOW_P (x)
7122 	  && !mips_symbol_binds_local_p (x));
7123 }
7124 
7125 /* Load function address ADDR into register DEST.  TYPE is as for
7126    mips_expand_call.  Return true if we used an explicit lazy-binding
7127    sequence.  */
7128 
7129 static bool
mips_load_call_address(enum mips_call_type type,rtx dest,rtx addr)7130 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
7131 {
7132   /* If we're generating PIC, and this call is to a global function,
7133      try to allow its address to be resolved lazily.  This isn't
7134      possible for sibcalls when $gp is call-saved because the value
7135      of $gp on entry to the stub would be our caller's gp, not ours.  */
7136   if (TARGET_EXPLICIT_RELOCS
7137       && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
7138       && mips_ok_for_lazy_binding_p (addr))
7139     {
7140       addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
7141       emit_insn (gen_rtx_SET (dest, addr));
7142       return true;
7143     }
7144   else
7145     {
7146       mips_emit_move (dest, addr);
7147       return false;
7148     }
7149 }
7150 
7151 /* Each locally-defined hard-float MIPS16 function has a local symbol
7152    associated with it.  This hash table maps the function symbol (FUNC)
7153    to the local symbol (LOCAL). */
7154 static GTY (()) hash_map<nofree_string_hash, rtx> *mips16_local_aliases;
7155 
7156 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
7157    Return a local alias for it, creating a new one if necessary.  */
7158 
7159 static rtx
mips16_local_alias(rtx func)7160 mips16_local_alias (rtx func)
7161 {
7162   /* Create the hash table if this is the first call.  */
7163   if (mips16_local_aliases == NULL)
7164     mips16_local_aliases = hash_map<nofree_string_hash, rtx>::create_ggc (37);
7165 
7166   /* Look up the function symbol, creating a new entry if need be.  */
7167   bool existed;
7168   const char *func_name = XSTR (func, 0);
7169   rtx *slot = &mips16_local_aliases->get_or_insert (func_name, &existed);
7170   gcc_assert (slot != NULL);
7171 
7172   if (!existed)
7173     {
7174       rtx local;
7175 
7176       /* Create a new SYMBOL_REF for the local symbol.  The choice of
7177 	 __fn_local_* is based on the __fn_stub_* names that we've
7178 	 traditionally used for the non-MIPS16 stub.  */
7179       func_name = targetm.strip_name_encoding (XSTR (func, 0));
7180       const char *local_name = ACONCAT (("__fn_local_", func_name, NULL));
7181       local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
7182       SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
7183 
7184       /* Create a new structure to represent the mapping.  */
7185       *slot = local;
7186     }
7187   return *slot;
7188 }
7189 
7190 /* A chained list of functions for which mips16_build_call_stub has already
7191    generated a stub.  NAME is the name of the function and FP_RET_P is true
7192    if the function returns a value in floating-point registers.  */
struct mips16_stub {
  /* The next stub in the chain, or null.  */
  struct mips16_stub *next;
  /* The stripped name of the target function (xstrdup'd, owned here).  */
  char *name;
  /* True if the function returns its value in floating-point registers.  */
  bool fp_ret_p;
};
/* Head of the chain of stubs generated so far in this file.  */
static struct mips16_stub *mips16_stubs;
7199 
7200 /* Return the two-character string that identifies floating-point
7201    return mode MODE in the name of a MIPS16 function stub.  */
7202 
7203 static const char *
mips16_call_stub_mode_suffix(machine_mode mode)7204 mips16_call_stub_mode_suffix (machine_mode mode)
7205 {
7206   if (mode == SFmode)
7207     return "sf";
7208   else if (mode == DFmode)
7209     return "df";
7210   else if (mode == SCmode)
7211     return "sc";
7212   else if (mode == DCmode)
7213     return "dc";
7214   else if (mode == V2SFmode)
7215     {
7216       gcc_assert (TARGET_PAIRED_SINGLE_FLOAT);
7217       return "df";
7218     }
7219   else
7220     gcc_unreachable ();
7221 }
7222 
7223 /* Write instructions to move a 32-bit value between general register
7224    GPREG and floating-point register FPREG.  DIRECTION is 't' to move
7225    from GPREG to FPREG and 'f' to move in the opposite direction.  */
7226 
static void
mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
{
  /* Emits "mtc1 GPREG,FPREG" when DIRECTION is 't' and
     "mfc1 GPREG,FPREG" when DIRECTION is 'f'.  */
  fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
	   reg_names[gpreg], reg_names[fpreg]);
}
7233 
7234 /* Likewise for 64-bit values.  */
7235 
static void
mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
{
  if (TARGET_64BIT)
    /* A single doubleword move (dmtc1/dmfc1) does the whole transfer.  */
    fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
	     reg_names[gpreg], reg_names[fpreg]);
  else if (ISA_HAS_MXHC1)
    {
      /* Move the two 32-bit halves separately; m[tf]hc1 accesses the
	 high half of the FPR.  Endianness decides which GPR holds
	 which half.  */
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
	       reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
      fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
	       reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
    }
  else if (TARGET_FLOATXX && direction == 't')
    {
      /* Use the argument save area to move via memory.  */
      fprintf (asm_out_file, "\tsw\t%s,0($sp)\n", reg_names[gpreg]);
      fprintf (asm_out_file, "\tsw\t%s,4($sp)\n", reg_names[gpreg + 1]);
      fprintf (asm_out_file, "\tldc1\t%s,0($sp)\n", reg_names[fpreg]);
    }
  else if (TARGET_FLOATXX && direction == 'f')
    {
      /* Use the argument save area to move via memory.  */
      fprintf (asm_out_file, "\tsdc1\t%s,0($sp)\n", reg_names[fpreg]);
      fprintf (asm_out_file, "\tlw\t%s,0($sp)\n", reg_names[gpreg]);
      fprintf (asm_out_file, "\tlw\t%s,4($sp)\n", reg_names[gpreg + 1]);
    }
  else
    {
      /* 32-bit FPRs: the value occupies the even/odd register pair
	 FPREG and FPREG + 1.  Move the least-significant word...  */
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
	       reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
      /* ...then the most significant word.  */
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
	       reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
    }
}
7273 
7274 /* Write out code to move floating-point arguments into or out of
7275    general registers.  FP_CODE is the code describing which arguments
7276    are present (see the comment above the definition of CUMULATIVE_ARGS
7277    in mips.h).  DIRECTION is as for mips_output_32bit_xfer.  */
7278 
7279 static void
mips_output_args_xfer(int fp_code,char direction)7280 mips_output_args_xfer (int fp_code, char direction)
7281 {
7282   unsigned int gparg, fparg, f;
7283   CUMULATIVE_ARGS cum;
7284 
7285   /* This code only works for o32 and o64.  */
7286   gcc_assert (TARGET_OLDABI);
7287 
7288   mips_init_cumulative_args (&cum, NULL);
7289 
7290   for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7291     {
7292       machine_mode mode;
7293       struct mips_arg_info info;
7294 
7295       if ((f & 3) == 1)
7296 	mode = SFmode;
7297       else if ((f & 3) == 2)
7298 	mode = DFmode;
7299       else
7300 	gcc_unreachable ();
7301 
7302       mips_get_arg_info (&info, &cum, mode, NULL, true);
7303       gparg = mips_arg_regno (&info, false);
7304       fparg = mips_arg_regno (&info, true);
7305 
7306       if (mode == SFmode)
7307 	mips_output_32bit_xfer (direction, gparg, fparg);
7308       else
7309 	mips_output_64bit_xfer (direction, gparg, fparg);
7310 
7311       mips_function_arg_advance (pack_cumulative_args (&cum), mode, NULL, true);
7312     }
7313 }
7314 
7315 /* Write a MIPS16 stub for the current function.  This stub is used
7316    for functions which take arguments in the floating-point registers.
7317    It is normal-mode code that moves the floating-point arguments
7318    into the general registers and then jumps to the MIPS16 code.  */
7319 
static void
mips16_build_function_stub (void)
{
  const char *fnname, *alias_name, *separator;
  char *secname, *stubname;
  tree stubdecl;
  unsigned int f;
  rtx symbol, alias;

  /* Create the name of the stub, and its unique section.  */
  symbol = XEXP (DECL_RTL (current_function_decl), 0);
  alias = mips16_local_alias (symbol);

  fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
  alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
  secname = ACONCAT ((".mips16.fn.", fnname, NULL));
  stubname = ACONCAT (("__fn_stub_", fnname, NULL));

  /* Build a decl for the stub.  */
  stubdecl = build_decl (BUILTINS_LOCATION,
			 FUNCTION_DECL, get_identifier (stubname),
			 build_function_type_list (void_type_node, NULL_TREE));
  set_decl_section_name (stubdecl, secname);
  DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
				       RESULT_DECL, NULL_TREE, void_type_node);

  /* Output a comment listing the stubbed argument types; fp_code packs
     one two-bit field per FP argument (1 = float, 2 = double).  */
  fprintf (asm_out_file, "\t# Stub function for %s (",
	   current_function_name ());
  separator = "";
  for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
    {
      fprintf (asm_out_file, "%s%s", separator,
	       (f & 3) == 1 ? "float" : "double");
      separator = ", ";
    }
  fprintf (asm_out_file, ")\n");

  /* Start the function definition.  The stub itself is non-MIPS16 code,
     hence the "false".  */
  assemble_start_function (stubdecl, stubname);
  mips_start_function_definition (stubname, false);

  /* If generating pic2 code, either set up the global pointer or
     switch to pic0.  */
  if (TARGET_ABICALLS_PIC2)
    {
      if (TARGET_ABSOLUTE_ABICALLS)
	fprintf (asm_out_file, "\t.option\tpic0\n");
      else
	{
	  output_asm_insn ("%(.cpload\t%^%)", NULL);
	  /* Emit an R_MIPS_NONE relocation to tell the linker what the
	     target function is.  Use a local GOT access when loading the
	     symbol, to cut down on the number of unnecessary GOT entries
	     for stubs that aren't needed.  */
	  output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
	  symbol = alias;
	}
    }

  /* Load the address of the MIPS16 function into $25.  Do this first so
     that targets with coprocessor interlocks can use an MFC1 to fill the
     delay slot.  */
  output_asm_insn ("la\t%^,%0", &symbol);

  /* Move the arguments from floating-point registers to general registers.  */
  mips_output_args_xfer (crtl->args.info.fp_code, 'f');

  /* Jump to the MIPS16 function.  */
  output_asm_insn ("jr\t%^", NULL);

  /* Restore pic2 mode if it was temporarily disabled above.  */
  if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
    fprintf (asm_out_file, "\t.option\tpic2\n");

  mips_end_function_definition (stubname);

  /* If the linker needs to create a dynamic symbol for the target
     function, it will associate the symbol with the stub (which,
     unlike the target function, follows the proper calling conventions).
     It is therefore useful to have a local alias for the target function,
     so that it can still be identified as MIPS16 code.  As an optimization,
     this symbol can also be used for indirect MIPS16 references from
     within this file.  */
  ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);

  switch_to_section (function_section (current_function_decl));
}
7407 
7408 /* The current function is a MIPS16 function that returns a value in an FPR.
7409    Copy the return value from its soft-float to its hard-float location.
7410    libgcc2 has special non-MIPS16 helper functions for each case.  */
7411 
static void
mips16_copy_fpr_return_value (void)
{
  rtx fn, insn, retval;
  tree return_type;
  machine_mode return_mode;
  const char *name;

  return_type = DECL_RESULT (current_function_decl);
  return_mode = DECL_MODE (return_type);

  /* Pick the libgcc helper that matches the return mode, e.g.
     __mips16_ret_df for DFmode.  */
  name = ACONCAT (("__mips16_ret_",
		   mips16_call_stub_mode_suffix (return_mode),
		   NULL));
  fn = mips16_stub_function (name);

  /* The function takes arguments in $2 (and possibly $3), so calls
     to it cannot be lazily bound.  */
  SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;

  /* Model the call as something that takes the GPR return value as
     argument and returns an "updated" value.  */
  retval = gen_rtx_REG (return_mode, GP_RETURN);
  insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
			   const0_rtx, NULL_RTX, false);
  /* Record that the call reads the soft-float return registers.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
}
7439 
7440 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
7441    RETVAL is the location of the return value, or null if this is
7442    a "call" rather than a "call_value".  ARGS_SIZE is the size of the
7443    arguments and FP_CODE is the code built by mips_function_arg;
7444    see the comment before the fp_code field in CUMULATIVE_ARGS for details.
7445 
7446    There are three alternatives:
7447 
7448    - If a stub was needed, emit the call and return the call insn itself.
7449 
7450    - If we can avoid using a stub by redirecting the call, set *FN_PTR
7451      to the new target and return null.
7452 
7453    - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
7454      unmodified.
7455 
7456    A stub is needed for calls to functions that, in normal mode,
7457    receive arguments in FPRs or return values in FPRs.  The stub
7458    copies the arguments from their soft-float positions to their
7459    hard-float positions, calls the real function, then copies the
7460    return value from its hard-float position to its soft-float
7461    position.
7462 
7463    We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
7464    If *FN_PTR turns out to be to a non-MIPS16 function, the linker
7465    automatically redirects the JAL to the stub, otherwise the JAL
7466    continues to call FN directly.  */
7467 
static rtx_insn *
mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
{
  const char *fnname;
  bool fp_ret_p;
  struct mips16_stub *l;
  rtx_insn *insn;
  rtx pattern, fn;

  /* We don't need to do anything if we aren't in MIPS16 mode, or if
     we were invoked with the -msoft-float option.  */
  if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
    return NULL;

  /* Figure out whether the value might come back in a floating-point
     register.  */
  fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));

  /* We don't need to do anything if there were no floating-point
     arguments and the value will not be returned in a floating-point
     register.  */
  if (fp_code == 0 && !fp_ret_p)
    return NULL;

  /* We don't need to do anything if this is a call to a special
     MIPS16 support function.  */
  fn = *fn_ptr;
  if (mips16_stub_function_p (fn))
    return NULL;

  /* If we're calling a locally-defined MIPS16 function, we know that
     it will return values in both the "soft-float" and "hard-float"
     registers.  There is no need to use a stub to move the latter
     to the former.  */
  if (fp_code == 0 && mips16_local_function_p (fn))
    return NULL;

  /* This code will only work for o32 and o64 abis.  The other ABI's
     require more sophisticated support.  */
  gcc_assert (TARGET_OLDABI);

  /* If we're calling via a function pointer, use one of the magic
     libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
     Each stub expects the function address to arrive in register $2.  */
  if (GET_CODE (fn) != SYMBOL_REF
      || !call_insn_operand (fn, VOIDmode))
    {
      char buf[32];
      rtx stub_fn, addr;
      rtx_insn *insn;
      bool lazy_p;

      /* If this is a locally-defined and locally-binding function,
	 avoid the stub by calling the local alias directly.  */
      if (mips16_local_function_p (fn))
	{
	  *fn_ptr = mips16_local_alias (fn);
	  return NULL;
	}

      /* Create a SYMBOL_REF for the libgcc.a function.  */
      if (fp_ret_p)
	sprintf (buf, "__mips16_call_stub_%s_%d",
		 mips16_call_stub_mode_suffix (GET_MODE (retval)),
		 fp_code);
      else
	sprintf (buf, "__mips16_call_stub_%d", fp_code);
      stub_fn = mips16_stub_function (buf);

      /* The function uses $2 as an argument, so calls to it
	 cannot be lazily bound.  */
      SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;

      /* Load the target function into $2.  */
      addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
      lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);

      /* Emit the call.  */
      insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
			       args_size, NULL_RTX, lazy_p);

      /* Tell GCC that this call does indeed use the value of $2.  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);

      /* If we are handling a floating-point return value, we need to
         save $18 in the function prologue.  Putting a note on the
         call will mean that df_regs_ever_live_p ($18) will be true if the
         call is not eliminated, and we can check that in the prologue
         code.  */
      if (fp_ret_p)
	CALL_INSN_FUNCTION_USAGE (insn) =
	  gen_rtx_EXPR_LIST (VOIDmode,
			     gen_rtx_CLOBBER (VOIDmode,
					      gen_rtx_REG (word_mode, 18)),
			     CALL_INSN_FUNCTION_USAGE (insn));

      return insn;
    }

  /* We know the function we are going to call.  If we have already
     built a stub, we don't need to do anything further.  */
  fnname = targetm.strip_name_encoding (XSTR (fn, 0));
  for (l = mips16_stubs; l != NULL; l = l->next)
    if (strcmp (l->name, fnname) == 0)
      break;

  if (l == NULL)
    {
      const char *separator;
      char *secname, *stubname;
      tree stubid, stubdecl;
      unsigned int f;

      /* If the function does not return in FPRs, the special stub
	 section is named
	     .mips16.call.FNNAME

	 If the function does return in FPRs, the stub section is named
	     .mips16.call.fp.FNNAME

	 Build a decl for the stub.  */
      secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
			  fnname, NULL));
      stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
			   fnname, NULL));
      stubid = get_identifier (stubname);
      stubdecl = build_decl (BUILTINS_LOCATION,
			     FUNCTION_DECL, stubid,
			     build_function_type_list (void_type_node,
						       NULL_TREE));
      set_decl_section_name (stubdecl, secname);
      DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
					   RESULT_DECL, NULL_TREE,
					   void_type_node);

      /* Output a comment naming the return type and argument types;
	 each two-bit field of fp_code is 1 for float, 2 for double.  */
      fprintf (asm_out_file, "\t# Stub function to call %s%s (",
	       (fp_ret_p
		? (GET_MODE (retval) == SFmode ? "float " : "double ")
		: ""),
	       fnname);
      separator = "";
      for (f = (unsigned int) fp_code; f != 0; f >>= 2)
	{
	  fprintf (asm_out_file, "%s%s", separator,
		   (f & 3) == 1 ? "float" : "double");
	  separator = ", ";
	}
      fprintf (asm_out_file, ")\n");

      /* Start the function definition.  The stub is non-MIPS16 code.  */
      assemble_start_function (stubdecl, stubname);
      mips_start_function_definition (stubname, false);

      if (fp_ret_p)
	{
	  fprintf (asm_out_file, "\t.cfi_startproc\n");

	  /* Create a fake CFA 4 bytes below the stack pointer.
	     This works around unwinders (like libgcc's) that expect
	     the CFA for non-signal frames to be unique.  */
	  fprintf (asm_out_file, "\t.cfi_def_cfa 29,-4\n");

	  /* "Save" $sp in itself so we don't use the fake CFA.
	     This is: DW_CFA_val_expression r29, { DW_OP_reg29 }.  */
	  fprintf (asm_out_file, "\t.cfi_escape 0x16,29,1,0x6d\n");

	  /* Save the return address in $18.  The stub's caller knows
	     that $18 might be clobbered, even though $18 is usually
	     a call-saved register.

	     Do it early on in case the last move to a floating-point
	     register can be scheduled into the delay slot of the
	     call we are about to make.  */
	  fprintf (asm_out_file, "\tmove\t%s,%s\n",
		   reg_names[GP_REG_FIRST + 18],
		   reg_names[RETURN_ADDR_REGNUM]);
	}
      else
	{
	  /* Load the address of the MIPS16 function into $25.  Do this
	     first so that targets with coprocessor interlocks can use
	     an MFC1 to fill the delay slot.  */
	  if (TARGET_EXPLICIT_RELOCS)
	    {
	      output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
	      output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
	    }
	  else
	    output_asm_insn ("la\t%^,%0", &fn);
	}

      /* Move the arguments from general registers to floating-point
	 registers.  */
      mips_output_args_xfer (fp_code, 't');

      if (fp_ret_p)
	{
	  /* Now call the non-MIPS16 function.  */
	  output_asm_insn (mips_output_jump (&fn, 0, -1, true), &fn);
	  fprintf (asm_out_file, "\t.cfi_register 31,18\n");

	  /* Move the result from floating-point registers to
	     general registers.  */
	  switch (GET_MODE (retval))
	    {
	    case E_SCmode:
	      /* Real and imaginary parts come back in $f0 and $f2;
		 copy each into the endian-appropriate GPR.  */
	      mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
				      TARGET_BIG_ENDIAN
				      ? FP_REG_FIRST + 2
				      : FP_REG_FIRST);
	      mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
				      TARGET_LITTLE_ENDIAN
				      ? FP_REG_FIRST + 2
				      : FP_REG_FIRST);
	      if (GET_MODE (retval) == SCmode && TARGET_64BIT)
		{
		  /* On 64-bit targets, complex floats are returned in
		     a single GPR, such that "sd" on a suitably-aligned
		     target would store the value correctly.  */
		  fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
			   reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
			   reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
		  fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
			   reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
			   reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
		  fprintf (asm_out_file, "\tdsrl\t%s,%s,32\n",
			   reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
			   reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
		  fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
			   reg_names[GP_RETURN],
			   reg_names[GP_RETURN],
			   reg_names[GP_RETURN + 1]);
		}
	      break;

	    case E_SFmode:
	      mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
	      break;

	    case E_DCmode:
	      /* Copy the imaginary part, then fall through for the
		 real part, which lives in $f0.  */
	      mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
				      FP_REG_FIRST + 2);
	      /* FALLTHRU */
 	    case E_DFmode:
	    case E_V2SFmode:
	      gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
			  || GET_MODE (retval) != V2SFmode);
	      mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  /* Return through the address saved in $18 earlier.  */
	  fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
	  fprintf (asm_out_file, "\t.cfi_endproc\n");
	}
      else
	{
	  /* Jump to the previously-loaded address.  */
	  output_asm_insn ("jr\t%^", NULL);
	}

#ifdef ASM_DECLARE_FUNCTION_SIZE
      ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
#endif

      mips_end_function_definition (stubname);

      /* Record this stub.  */
      l = XNEW (struct mips16_stub);
      l->name = xstrdup (fnname);
      l->fp_ret_p = fp_ret_p;
      l->next = mips16_stubs;
      mips16_stubs = l;
    }

  /* If we expect a floating-point return value, but we've built a
     stub which does not expect one, then we're in trouble.  We can't
     use the existing stub, because it won't handle the floating-point
     value.  We can't build a new stub, because the linker won't know
     which stub to use for the various calls in this object file.
     Fortunately, this case is illegal, since it means that a function
     was declared in two different ways in a single compilation.  */
  if (fp_ret_p && !l->fp_ret_p)
    error ("cannot handle inconsistent calls to %qs", fnname);

  /* Emit the call as a direct JAL; the linker redirects it to the stub
     if the target turns out to be non-MIPS16 (see the function comment).  */
  if (retval == NULL_RTX)
    pattern = gen_call_internal_direct (fn, args_size);
  else
    pattern = gen_call_value_internal_direct (retval, fn, args_size);
  insn = mips_emit_call_insn (pattern, fn, fn, false);

  /* If we are calling a stub which handles a floating-point return
     value, we need to arrange to save $18 in the prologue.  We do this
     by marking the function call as using the register.  The prologue
     will later see that it is used, and emit code to save it.  */
  if (fp_ret_p)
    CALL_INSN_FUNCTION_USAGE (insn) =
      gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_CLOBBER (VOIDmode,
					  gen_rtx_REG (word_mode, 18)),
			 CALL_INSN_FUNCTION_USAGE (insn));

  return insn;
}
7774 
7775 /* Expand a call of type TYPE.  RESULT is where the result will go (null
7776    for "call"s and "sibcall"s), ADDR is the address of the function,
7777    ARGS_SIZE is the size of the arguments and AUX is the value passed
7778    to us by mips_function_arg.  LAZY_P is true if this call already
7779    involves a lazily-bound function address (such as when calling
7780    functions through a MIPS16 hard-float stub).
7781 
7782    Return the call itself.  */
7783 
rtx_insn *
mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
		  rtx args_size, rtx aux, bool lazy_p)
{
  rtx orig_addr, pattern;
  rtx_insn *insn;
  int fp_code;

  /* AUX, when nonnull, encodes the floating-point argument modes as a
     machine mode (see the ", aux" operand conventions); 0 means no
     special floating-point handling is needed.  */
  fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);

  /* A MIPS16 call may need to go through a hard-float stub, in which
     case mips16_build_call_stub emits the whole call itself.  */
  insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
  if (insn)
    {
      /* Stub calls are only generated for plain calls; they are never
	 sibcalls and never involve lazy binding.  */
      gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
      return insn;
    }

  /* Legitimize ADDR if it is not directly usable in a call insn,
     keeping the original address for mips_emit_call_insn.  */
  orig_addr = addr;
  if (!call_insn_operand (addr, VOIDmode))
    {
      if (type == MIPS_CALL_EPILOGUE)
	addr = MIPS_EPILOGUE_TEMP (Pmode);
      else
	addr = gen_reg_rtx (Pmode);
      /* Loading the call address may itself involve lazy binding.  */
      lazy_p |= mips_load_call_address (type, addr, orig_addr);
    }

  /* Select the call pattern based on the shape of RESULT: no value,
     a two-register FPR pair, or a single value.  */
  if (result == 0)
    {
      rtx (*fn) (rtx, rtx);

      if (type == MIPS_CALL_SIBCALL)
	fn = gen_sibcall_internal;
      else
	fn = gen_call_internal;

      pattern = fn (addr, args_size);
    }
  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
    {
      /* Handle return values created by mips_return_fpr_pair.  */
      rtx (*fn) (rtx, rtx, rtx, rtx);
      rtx reg1, reg2;

      if (type == MIPS_CALL_SIBCALL)
	fn = gen_sibcall_value_multiple_internal;
      else
	fn = gen_call_value_multiple_internal;

      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
      pattern = fn (reg1, addr, args_size, reg2);
    }
  else
    {
      rtx (*fn) (rtx, rtx, rtx);

      if (type == MIPS_CALL_SIBCALL)
	fn = gen_sibcall_value_internal;
      else
	fn = gen_call_value_internal;

      /* Handle return values created by mips_return_fpr_single.  */
      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
	result = XEXP (XVECEXP (result, 0, 0), 0);
      pattern = fn (result, addr, args_size);
    }

  return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
}
7853 
7854 /* Split call instruction INSN into a $gp-clobbering call and
7855    (where necessary) an instruction to restore $gp from its save slot.
7856    CALL_PATTERN is the pattern of the new call.  */
7857 
7858 void
mips_split_call(rtx insn,rtx call_pattern)7859 mips_split_call (rtx insn, rtx call_pattern)
7860 {
7861   emit_call_insn (call_pattern);
7862   if (!find_reg_note (insn, REG_NORETURN, 0))
7863     mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode,
7864 						      POST_CALL_TMP_REG));
7865 }
7866 
7867 /* Return true if a call to DECL may need to use JALX.  */
7868 
7869 static bool
mips_call_may_need_jalx_p(tree decl)7870 mips_call_may_need_jalx_p (tree decl)
7871 {
7872   /* If the current translation unit would use a different mode for DECL,
7873      assume that the call needs JALX.  */
7874   if (mips_get_compress_mode (decl) != TARGET_COMPRESSION)
7875     return true;
7876 
7877   /* mips_get_compress_mode is always accurate for locally-binding
7878      functions in the current translation unit.  */
7879   if (!DECL_EXTERNAL (decl) && targetm.binds_local_p (decl))
7880     return false;
7881 
7882   /* When -minterlink-compressed is in effect, assume that functions
7883      could use a different encoding mode unless an attribute explicitly
7884      tells us otherwise.  */
7885   if (TARGET_INTERLINK_COMPRESSED)
7886     {
7887       if (!TARGET_COMPRESSION
7888 	  && mips_get_compress_off_flags (DECL_ATTRIBUTES (decl)) ==0)
7889 	return true;
7890       if (TARGET_COMPRESSION
7891 	  && mips_get_compress_on_flags (DECL_ATTRIBUTES (decl)) == 0)
7892 	return true;
7893     }
7894 
7895   return false;
7896 }
7897 
7898 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
7899 
static bool
mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  if (!TARGET_SIBCALLS)
    return false;

  /* Interrupt handlers need special epilogue code and therefore can't
     use sibcalls.  */
  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
    return false;

  /* Direct Js are only possible to functions that use the same ISA encoding.
     There is no JX counterpart of JALX.  */
  if (decl
      && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode)
      && mips_call_may_need_jalx_p (decl))
    return false;

  /* Sibling calls should not prevent lazy binding.  Lazy-binding stubs
     require $gp to be valid on entry, so sibcalls can only use stubs
     if $gp is call-clobbered.  */
  if (decl
      && TARGET_CALL_SAVED_GP
      && !TARGET_ABICALLS_PIC0
      && !targetm.binds_local_p (decl))
    return false;

  /* Otherwise OK.  */
  return true;
}
7930 
7931 /* Implement TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P.  */
7932 
7933 bool
mips_use_by_pieces_infrastructure_p(unsigned HOST_WIDE_INT size,unsigned int align,enum by_pieces_operation op,bool speed_p)7934 mips_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
7935 				     unsigned int align,
7936 				     enum by_pieces_operation op,
7937 				     bool speed_p)
7938 {
7939   if (op == STORE_BY_PIECES)
7940     return mips_store_by_pieces_p (size, align);
7941   if (op == MOVE_BY_PIECES && HAVE_movmemsi)
7942     {
7943       /* movmemsi is meant to generate code that is at least as good as
7944 	 move_by_pieces.  However, movmemsi effectively uses a by-pieces
7945 	 implementation both for moves smaller than a word and for
7946 	 word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
7947 	 bytes.  We should allow the tree-level optimisers to do such
7948 	 moves by pieces, as it often exposes other optimization
7949 	 opportunities.  We might as well continue to use movmemsi at
7950 	 the rtl level though, as it produces better code when
7951 	 scheduling is disabled (such as at -O).  */
7952       if (currently_expanding_to_rtl)
7953 	return false;
7954       if (align < BITS_PER_WORD)
7955 	return size < UNITS_PER_WORD;
7956       return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
7957     }
7958 
7959   return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
7960 }
7961 
7962 /* Implement a handler for STORE_BY_PIECES operations
7963    for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P.  */
7964 
7965 bool
mips_store_by_pieces_p(unsigned HOST_WIDE_INT size,unsigned int align)7966 mips_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
7967 {
7968   /* Storing by pieces involves moving constants into registers
7969      of size MIN (ALIGN, BITS_PER_WORD), then storing them.
7970      We need to decide whether it is cheaper to load the address of
7971      constant data into a register and use a block move instead.  */
7972 
7973   /* If the data is only byte aligned, then:
7974 
7975      (a1) A block move of less than 4 bytes would involve three 3 LBs and
7976 	  3 SBs.  We might as well use 3 single-instruction LIs and 3 SBs
7977 	  instead.
7978 
7979      (a2) A block move of 4 bytes from aligned source data can use an
7980 	  LW/SWL/SWR sequence.  This is often better than the 4 LIs and
7981 	  4 SBs that we would generate when storing by pieces.  */
7982   if (align <= BITS_PER_UNIT)
7983     return size < 4;
7984 
7985   /* If the data is 2-byte aligned, then:
7986 
7987      (b1) A block move of less than 4 bytes would use a combination of LBs,
7988 	  LHs, SBs and SHs.  We get better code by using single-instruction
7989 	  LIs, SBs and SHs instead.
7990 
7991      (b2) A block move of 4 bytes from aligned source data would again use
7992 	  an LW/SWL/SWR sequence.  In most cases, loading the address of
7993 	  the source data would require at least one extra instruction.
7994 	  It is often more efficient to use 2 single-instruction LIs and
7995 	  2 SHs instead.
7996 
7997      (b3) A block move of up to 3 additional bytes would be like (b1).
7998 
7999      (b4) A block move of 8 bytes from aligned source data can use two
8000 	  LW/SWL/SWR sequences or a single LD/SDL/SDR sequence.  Both
8001 	  sequences are better than the 4 LIs and 4 SHs that we'd generate
8002 	  when storing by pieces.
8003 
8004      The reasoning for higher alignments is similar:
8005 
8006      (c1) A block move of less than 4 bytes would be the same as (b1).
8007 
8008      (c2) A block move of 4 bytes would use an LW/SW sequence.  Again,
8009 	  loading the address of the source data would typically require
8010 	  at least one extra instruction.  It is generally better to use
8011 	  LUI/ORI/SW instead.
8012 
8013      (c3) A block move of up to 3 additional bytes would be like (b1).
8014 
8015      (c4) A block move of 8 bytes can use two LW/SW sequences or a single
8016 	  LD/SD sequence, and in these cases we've traditionally preferred
8017 	  the memory copy over the more bulky constant moves.  */
8018   return size < 8;
8019 }
8020 
8021 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
8022    Assume that the areas do not overlap.  */
8023 
static void
mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
{
  HOST_WIDE_INT offset, delta;
  unsigned HOST_WIDE_INT bits;
  int i;
  machine_mode mode;
  rtx *regs;

  /* Work out how many bits to move at a time.  If both operands have
     half-word alignment, it is usually better to move in half words.
     For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
     and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
     Otherwise move word-sized chunks.

     For ISA_HAS_LWL_LWR we rely on the lwl/lwr & swl/swr load. Otherwise
     picking the minimum of alignment or BITS_PER_WORD gets us the
     desired size for bits.  */

  if (!ISA_HAS_LWL_LWR)
    bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest)));
  else
    {
      if (MEM_ALIGN (src) == BITS_PER_WORD / 2
	  && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
	bits = BITS_PER_WORD / 2;
      else
	bits = BITS_PER_WORD;
    }

  /* MODE is the integer mode that holds one BITS-sized chunk;
     DELTA is the chunk size in bytes.  */
  mode = int_mode_for_size (bits, 0).require ();
  delta = bits / BITS_PER_UNIT;

  /* Allocate a buffer for the temporary registers.  */
  regs = XALLOCAVEC (rtx, length / delta);

  /* Load as many BITS-sized chunks as possible.  Use a normal load if
     the source has enough alignment, otherwise use left/right pairs.
     All loads are emitted before any store so that overlapping regions
     cannot clobber data that has not yet been read.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    {
      regs[i] = gen_reg_rtx (mode);
      if (MEM_ALIGN (src) >= bits)
	mips_emit_move (regs[i], adjust_address (src, mode, offset));
      else
	{
	  /* Give the unaligned-load expander a BLKmode reference of
	     exactly DELTA bytes; it must succeed for these widths.  */
	  rtx part = adjust_address (src, BLKmode, offset);
	  set_mem_size (part, delta);
	  if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0, 0))
	    gcc_unreachable ();
	}
    }

  /* Copy the chunks to the destination.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    if (MEM_ALIGN (dest) >= bits)
      mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
    else
      {
	rtx part = adjust_address (dest, BLKmode, offset);
	set_mem_size (part, delta);
	if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
	  gcc_unreachable ();
      }

  /* Mop up any left-over bytes (fewer than DELTA) with a generic
     by-pieces move.  */
  if (offset < length)
    {
      src = adjust_address (src, BLKmode, offset);
      dest = adjust_address (dest, BLKmode, offset);
      move_by_pieces (dest, src, length - offset,
		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), RETURN_BEGIN);
    }
}
8097 
8098 /* Helper function for doing a loop-based block operation on memory
8099    reference MEM.  Each iteration of the loop will operate on LENGTH
8100    bytes of MEM.
8101 
8102    Create a new base register for use within the loop and point it to
8103    the start of MEM.  Create a new memory reference that uses this
8104    register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
8105 
8106 static void
mips_adjust_block_mem(rtx mem,HOST_WIDE_INT length,rtx * loop_reg,rtx * loop_mem)8107 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
8108 		       rtx *loop_reg, rtx *loop_mem)
8109 {
8110   *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
8111 
8112   /* Although the new mem does not refer to a known location,
8113      it does keep up to LENGTH bytes of alignment.  */
8114   *loop_mem = change_address (mem, BLKmode, *loop_reg);
8115   set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
8116 }
8117 
8118 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
8119    bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that
8120    the memory regions do not overlap.  */
8121 
static void
mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
		      HOST_WIDE_INT bytes_per_iter)
{
  rtx_code_label *label;
  rtx src_reg, dest_reg, final_src, test;
  HOST_WIDE_INT leftover;

  /* The loop handles whole BYTES_PER_ITER blocks; the remainder is
     copied straight-line after the loop.  */
  leftover = length % bytes_per_iter;
  length -= leftover;

  /* Create registers and memory references for use within the loop.  */
  mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
  mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);

  /* Calculate the value that SRC_REG should have after the last iteration
     of the loop.  */
  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
				   0, 0, OPTAB_WIDEN);

  /* Emit the start of the loop.  */
  label = gen_label_rtx ();
  emit_label (label);

  /* Emit the loop body.  */
  mips_block_move_straight (dest, src, bytes_per_iter);

  /* Move on to the next block.  */
  mips_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
  mips_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));

  /* Emit the loop condition: keep iterating until SRC_REG reaches
     FINAL_SRC.  */
  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
  if (Pmode == DImode)
    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
  else
    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));

  /* Mop up any left-over bytes.  */
  if (leftover)
    mips_block_move_straight (dest, src, leftover);
  else
    /* Temporary fix for PR79150.  */
    emit_insn (gen_nop ());
}
8167 
8168 /* Expand a movmemsi instruction, which copies LENGTH bytes from
8169    memory reference SRC to memory reference DEST.  */
8170 
8171 bool
mips_expand_block_move(rtx dest,rtx src,rtx length)8172 mips_expand_block_move (rtx dest, rtx src, rtx length)
8173 {
8174   if (!ISA_HAS_LWL_LWR
8175       && (MEM_ALIGN (src) < MIPS_MIN_MOVE_MEM_ALIGN
8176 	  || MEM_ALIGN (dest) < MIPS_MIN_MOVE_MEM_ALIGN))
8177     return false;
8178 
8179   if (CONST_INT_P (length))
8180     {
8181       if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
8182 	{
8183 	  mips_block_move_straight (dest, src, INTVAL (length));
8184 	  return true;
8185 	}
8186       else if (optimize)
8187 	{
8188 	  mips_block_move_loop (dest, src, INTVAL (length),
8189 				MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
8190 	  return true;
8191 	}
8192     }
8193   return false;
8194 }
8195 
8196 /* Expand a loop of synci insns for the address range [BEGIN, END).  */
8197 
8198 void
mips_expand_synci_loop(rtx begin,rtx end)8199 mips_expand_synci_loop (rtx begin, rtx end)
8200 {
8201   rtx inc, cmp_result, mask, length;
8202   rtx_code_label *label, *end_label;
8203 
8204   /* Create end_label.  */
8205   end_label = gen_label_rtx ();
8206 
8207   /* Check if begin equals end.  */
8208   cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
8209   emit_jump_insn (gen_condjump (cmp_result, end_label));
8210 
8211   /* Load INC with the cache line size (rdhwr INC,$1).  */
8212   inc = gen_reg_rtx (Pmode);
8213   emit_insn (PMODE_INSN (gen_rdhwr_synci_step, (inc)));
8214 
8215   /* Check if inc is 0.  */
8216   cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
8217   emit_jump_insn (gen_condjump (cmp_result, end_label));
8218 
8219   /* Calculate mask.  */
8220   mask = mips_force_unary (Pmode, NEG, inc);
8221 
8222   /* Mask out begin by mask.  */
8223   begin = mips_force_binary (Pmode, AND, begin, mask);
8224 
8225   /* Calculate length.  */
8226   length = mips_force_binary (Pmode, MINUS, end, begin);
8227 
8228   /* Loop back to here.  */
8229     label = gen_label_rtx ();
8230   emit_label (label);
8231 
8232   emit_insn (gen_synci (begin));
8233 
8234   /* Update length.  */
8235   mips_emit_binary (MINUS, length, length, inc);
8236 
8237   /* Update begin.  */
8238   mips_emit_binary (PLUS, begin, begin, inc);
8239 
8240   /* Check if length is greater than 0.  */
8241   cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
8242   emit_jump_insn (gen_condjump (cmp_result, label));
8243 
8244   emit_label (end_label);
8245 }
8246 
8247 /* Expand a QI or HI mode atomic memory operation.
8248 
8249    GENERATOR contains a pointer to the gen_* function that generates
8250    the SI mode underlying atomic operation using masks that we
8251    calculate.
8252 
8253    RESULT is the return register for the operation.  Its value is NULL
8254    if unused.
8255 
8256    MEM is the location of the atomic access.
8257 
8258    OLDVAL is the first operand for the operation.
8259 
8260    NEWVAL is the optional second operand for the operation.  Its value
8261    is NULL if unused.  */
8262 
void
mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
                         rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
  rtx unshifted_mask_reg, mask, inverted_mask, si_op;
  rtx res = NULL;
  machine_mode mode;

  /* MODE is QImode or HImode; the access is widened to SImode and
     done under a mask.  */
  mode = GET_MODE (mem);

  /* Compute the address of the containing SImode value by clearing
     the low two address bits.  */
  orig_addr = force_reg (Pmode, XEXP (mem, 0));
  memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
				  force_reg (Pmode, GEN_INT (-4)));

  /* Create a memory reference for it.  */
  memsi = gen_rtx_MEM (SImode, memsi_addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  /* Work out the byte offset of the QImode or HImode value,
     counting from the least significant byte.  */
  shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
  if (TARGET_BIG_ENDIAN)
    mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));

  /* Multiply by eight to convert the shift value from bytes to bits.  */
  mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));

  /* Make the final shift an SImode value, so that it can be used in
     SImode operations.  */
  shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));

  /* Set MASK to an inclusive mask of the QImode or HImode value.  */
  unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
  unshifted_mask_reg = force_reg (SImode, unshifted_mask);
  mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);

  /* Compute the equivalent exclusive mask.  */
  inverted_mask = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (inverted_mask, gen_rtx_NOT (SImode, mask)));

  /* Shift the old value into place.  A literal zero needs no widening
     or shifting.  */
  if (oldval != const0_rtx)
    {
      oldval = convert_modes (SImode, mode, oldval, true);
      oldval = force_reg (SImode, oldval);
      oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
    }

  /* Do the same for the new value.  */
  if (newval && newval != const0_rtx)
    {
      newval = convert_modes (SImode, mode, newval, true);
      newval = force_reg (SImode, newval);
      newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
    }

  /* Do the SImode atomic access, picking the generator arity that
     matches the operands we have.  */
  if (result)
    res = gen_reg_rtx (SImode);
  if (newval)
    si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
  else if (result)
    si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
  else
    si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);

  emit_insn (si_op);

  if (result)
    {
      /* Extract the field from the SImode result, shift it back down
	 and convert it to the caller's mode.  */
      mips_emit_binary (AND, res, res, mask);
      mips_emit_binary (LSHIFTRT, res, res, shiftsi);
      mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
    }
}
8342 
8343 /* Return true if it is possible to use left/right accesses for a
8344    bitfield of WIDTH bits starting BITPOS bits into BLKmode memory OP.
8345    When returning true, update *LEFT and *RIGHT as follows:
8346 
8347    *LEFT is a QImode reference to the first byte if big endian or
8348    the last byte if little endian.  This address can be used in the
8349    left-side instructions (LWL, SWL, LDL, SDL).
8350 
8351    *RIGHT is a QImode reference to the opposite end of the field and
8352    can be used in the patterning right-side instruction.  */
8353 
8354 static bool
mips_get_unaligned_mem(rtx op,HOST_WIDE_INT width,HOST_WIDE_INT bitpos,rtx * left,rtx * right)8355 mips_get_unaligned_mem (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
8356 			rtx *left, rtx *right)
8357 {
8358   rtx first, last;
8359 
8360   /* Check that the size is valid.  */
8361   if (width != 32 && (!TARGET_64BIT || width != 64))
8362     return false;
8363 
8364   /* We can only access byte-aligned values.  Since we are always passed
8365      a reference to the first byte of the field, it is not necessary to
8366      do anything with BITPOS after this check.  */
8367   if (bitpos % BITS_PER_UNIT != 0)
8368     return false;
8369 
8370   /* Reject aligned bitfields: we want to use a normal load or store
8371      instead of a left/right pair.  */
8372   if (MEM_ALIGN (op) >= width)
8373     return false;
8374 
8375   /* Get references to both ends of the field.  */
8376   first = adjust_address (op, QImode, 0);
8377   last = adjust_address (op, QImode, width / BITS_PER_UNIT - 1);
8378 
8379   /* Allocate to LEFT and RIGHT according to endianness.  LEFT should
8380      correspond to the MSB and RIGHT to the LSB.  */
8381   if (TARGET_BIG_ENDIAN)
8382     *left = first, *right = last;
8383   else
8384     *left = last, *right = first;
8385 
8386   return true;
8387 }
8388 
8389 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
8390    DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
8391    the operation is the equivalent of:
8392 
8393       (set DEST (*_extract SRC WIDTH BITPOS))
8394 
8395    Return true on success.  */
8396 
bool
mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
				   HOST_WIDE_INT bitpos, bool unsigned_p)
{
  rtx left, right, temp;
  rtx dest1 = NULL_RTX;

  /* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
     be a DImode; create a new SImode temp and emit a sign/zero extend
     into the original destination at the end.  */
  if (GET_MODE (dest) == DImode
      && REG_P (dest)
      && GET_MODE_BITSIZE (SImode) == width)
    {
      dest1 = dest;
      dest = gen_reg_rtx (SImode);
    }

  /* Get QImode references to both ends of the field.  */
  if (!mips_get_unaligned_mem (src, width, bitpos, &left, &right))
    return false;

  /* Emit the left-then-right load pair; ldl/ldr for doubleword
     fields, lwl/lwr otherwise.  */
  temp = gen_reg_rtx (GET_MODE (dest));
  if (GET_MODE (dest) == DImode)
    {
      emit_insn (gen_mov_ldl (temp, src, left));
      emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
    }
  else
    {
      emit_insn (gen_mov_lwl (temp, src, left));
      emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
    }

  /* If we were loading 32 bits and the original register was DImode
     then sign/zero extend into the original dest.  */
  if (dest1)
    {
      if (unsigned_p)
        emit_insn (gen_zero_extendsidi2 (dest1, dest));
      else
        emit_insn (gen_extendsidi2 (dest1, dest));
    }
  return true;
}
8440 
8441 /* Try to use left/right stores to expand an "ins" pattern.  DEST, WIDTH,
8442    BITPOS and SRC are the operands passed to the expander; the operation
8443    is the equivalent of:
8444 
8445        (set (zero_extract DEST WIDTH BITPOS) SRC)
8446 
8447    Return true on success.  */
8448 
8449 bool
mips_expand_ins_as_unaligned_store(rtx dest,rtx src,HOST_WIDE_INT width,HOST_WIDE_INT bitpos)8450 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
8451 				    HOST_WIDE_INT bitpos)
8452 {
8453   rtx left, right;
8454   machine_mode mode;
8455 
8456   if (!mips_get_unaligned_mem (dest, width, bitpos, &left, &right))
8457     return false;
8458 
8459   mode = int_mode_for_size (width, 0).require ();
8460   src = gen_lowpart (mode, src);
8461   if (mode == DImode)
8462     {
8463       emit_insn (gen_mov_sdl (dest, src, left));
8464       emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
8465     }
8466   else
8467     {
8468       emit_insn (gen_mov_swl (dest, src, left));
8469       emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
8470     }
8471   return true;
8472 }
8473 
8474 /* Return true if X is a MEM with the same size as MODE.  */
8475 
8476 bool
mips_mem_fits_mode_p(machine_mode mode,rtx x)8477 mips_mem_fits_mode_p (machine_mode mode, rtx x)
8478 {
8479   return (MEM_P (x)
8480 	  && MEM_SIZE_KNOWN_P (x)
8481 	  && MEM_SIZE (x) == GET_MODE_SIZE (mode));
8482 }
8483 
8484 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
8485    source of an "ext" instruction or the destination of an "ins"
8486    instruction.  OP must be a register operand and the following
8487    conditions must hold:
8488 
8489      0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
8490      0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
8491      0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
8492 
8493    Also reject lengths equal to a word as they are better handled
8494    by the move patterns.  */
8495 
8496 bool
mips_use_ins_ext_p(rtx op,HOST_WIDE_INT width,HOST_WIDE_INT bitpos)8497 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
8498 {
8499   if (!ISA_HAS_EXT_INS
8500       || !register_operand (op, VOIDmode)
8501       || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
8502     return false;
8503 
8504   if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
8505     return false;
8506 
8507   if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
8508     return false;
8509 
8510   return true;
8511 }
8512 
8513 /* Check if MASK and SHIFT are valid in mask-low-and-shift-left
8514    operation if MAXLEN is the maxium length of consecutive bits that
8515    can make up MASK.  MODE is the mode of the operation.  See
8516    mask_low_and_shift_len for the actual definition.  */
8517 
8518 bool
mask_low_and_shift_p(machine_mode mode,rtx mask,rtx shift,int maxlen)8519 mask_low_and_shift_p (machine_mode mode, rtx mask, rtx shift, int maxlen)
8520 {
8521   return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
8522 }
8523 
8524 /* Return true iff OP1 and OP2 are valid operands together for the
8525    *and<MODE>3 and *and<MODE>3_mips16 patterns.  For the cases to consider,
8526    see the table in the comment before the pattern.  */
8527 
8528 bool
and_operands_ok(machine_mode mode,rtx op1,rtx op2)8529 and_operands_ok (machine_mode mode, rtx op1, rtx op2)
8530 {
8531 
8532   if (memory_operand (op1, mode))
8533     {
8534       if (TARGET_MIPS16) {
8535 	struct mips_address_info addr;
8536 	if (!mips_classify_address (&addr, op1, mode, false))
8537 	  return false;
8538       }
8539       return and_load_operand (op2, mode);
8540     }
8541   else
8542     return and_reg_operand (op2, mode);
8543 }
8544 
8545 /* The canonical form of a mask-low-and-shift-left operation is
8546    (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
8547    cleared.  Thus we need to shift MASK to the right before checking if it
8548    is a valid mask value.  MODE is the mode of the operation.  If true
8549    return the length of the mask, otherwise return -1.  */
8550 
8551 int
mask_low_and_shift_len(machine_mode mode,rtx mask,rtx shift)8552 mask_low_and_shift_len (machine_mode mode, rtx mask, rtx shift)
8553 {
8554   HOST_WIDE_INT shval;
8555 
8556   shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
8557   return exact_log2 ((UINTVAL (mask) >> shval) + 1);
8558 }
8559 
8560 /* Return true if -msplit-addresses is selected and should be honored.
8561 
8562    -msplit-addresses is a half-way house between explicit relocations
8563    and the traditional assembler macros.  It can split absolute 32-bit
8564    symbolic constants into a high/lo_sum pair but uses macros for other
8565    sorts of access.
8566 
8567    Like explicit relocation support for REL targets, it relies
8568    on GNU extensions in the assembler and the linker.
8569 
8570    Although this code should work for -O0, it has traditionally
8571    been treated as an optimization.  */
8572 
8573 static bool
mips_split_addresses_p(void)8574 mips_split_addresses_p (void)
8575 {
8576   return (TARGET_SPLIT_ADDRESSES
8577 	  && optimize
8578 	  && !TARGET_MIPS16
8579 	  && !flag_pic
8580 	  && !ABI_HAS_64BIT_SYMBOLS);
8581 }
8582 
/* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs.

   These arrays are indexed by mips_symbol_type:

   - mips_split_p[X] says whether symbols of type X can be split into
     a high part and a LO_SUM.
   - mips_split_hi_p[X] says whether the high part must be exposed
     early (before reload).
   - mips_use_pcrel_pool_p[X] says whether symbols of type X should be
     loaded from a PC-relative constant pool instead.
   - mips_hi_relocs[X] / mips_lo_relocs[X] are the assembler relocation
     operators used to print the high and low parts; each entry is an
     opening string such as "%hi(" whose closing parens are emitted by
     mips_print_operand_reloc.  */

static void
mips_init_relocs (void)
{
  /* Start from a clean slate: this function may be re-run when the
     target options change.  */
  memset (mips_split_p, '\0', sizeof (mips_split_p));
  memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
  memset (mips_use_pcrel_pool_p, '\0', sizeof (mips_use_pcrel_pool_p));
  memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
  memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));

  if (TARGET_MIPS16_PCREL_LOADS)
    mips_use_pcrel_pool_p[SYMBOL_ABSOLUTE] = true;
  else
    {
      if (ABI_HAS_64BIT_SYMBOLS)
	{
	  if (TARGET_EXPLICIT_RELOCS)
	    {
	      /* 64-bit addresses are built from four 16-bit pieces:
		 %highest/%higher/%hi/%lo.  Each SYMBOL_64_* type
		 produces one neighbouring pair of those pieces.  */
	      mips_split_p[SYMBOL_64_HIGH] = true;
	      mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
	      mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";

	      mips_split_p[SYMBOL_64_MID] = true;
	      mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
	      mips_lo_relocs[SYMBOL_64_MID] = "%hi(";

	      mips_split_p[SYMBOL_64_LOW] = true;
	      mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
	      mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";

	      /* Note: no %hi entry here; the high part of a split
		 SYMBOL_ABSOLUTE comes via the SYMBOL_64_* types.  */
	      mips_split_p[SYMBOL_ABSOLUTE] = true;
	      mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
	    }
	}
      else
	{
	  if (TARGET_EXPLICIT_RELOCS
	      || mips_split_addresses_p ()
	      || TARGET_MIPS16)
	    {
	      /* Plain 32-bit %hi/%lo splitting.  */
	      mips_split_p[SYMBOL_ABSOLUTE] = true;
	      mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
	      mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
	    }
	}
    }

  if (TARGET_MIPS16)
    {
      /* The high part is provided by a pseudo copy of $gp.  */
      mips_split_p[SYMBOL_GP_RELATIVE] = true;
      mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
    }
  else if (TARGET_EXPLICIT_RELOCS)
    /* Small data constants are kept whole until after reload,
       then lowered by mips_rewrite_small_data.  */
    mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";

  if (TARGET_EXPLICIT_RELOCS)
    {
      mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
      /* The GOT relocation operators differ between the o32/o64 ABIs
	 and the NewABI (n32/n64).  */
      if (TARGET_NEWABI)
	{
	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
	  mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
	}
      else
	{
	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
	  mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
	}
      if (TARGET_MIPS16)
	/* Expose the use of $28 as soon as possible.  */
	mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;

      if (TARGET_XGOT)
	{
	  /* The HIGH and LO_SUM are matched by special .md patterns.  */
	  mips_split_p[SYMBOL_GOT_DISP] = true;

	  mips_split_p[SYMBOL_GOTOFF_DISP] = true;
	  mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
	  mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";

	  mips_split_p[SYMBOL_GOTOFF_CALL] = true;
	  mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
	}
      else
	{
	  if (TARGET_NEWABI)
	    mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
	  else
	    mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
	  if (TARGET_MIPS16)
	    /* Expose the use of $28 as soon as possible.  */
	    mips_split_p[SYMBOL_GOT_DISP] = true;
	}
    }

  if (TARGET_NEWABI)
    {
      /* Nested operators: mips_print_operand_reloc emits one closing
	 paren for every '(' in these strings.  */
      mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
      mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
      mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
    }

  /* Thread-local storage relocations.  */
  mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
  mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";

  if (TARGET_MIPS16_PCREL_LOADS)
    {
      mips_use_pcrel_pool_p[SYMBOL_DTPREL] = true;
      mips_use_pcrel_pool_p[SYMBOL_TPREL] = true;
    }
  else
    {
      mips_split_p[SYMBOL_DTPREL] = true;
      mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
      mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";

      mips_split_p[SYMBOL_TPREL] = true;
      mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
      mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
    }

  mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
  mips_lo_relocs[SYMBOL_HALF] = "%half(";
}
8714 
8715 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
8716    in context CONTEXT.  RELOCS is the array of relocations to use.  */
8717 
8718 static void
mips_print_operand_reloc(FILE * file,rtx op,enum mips_symbol_context context,const char ** relocs)8719 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
8720 			  const char **relocs)
8721 {
8722   enum mips_symbol_type symbol_type;
8723   const char *p;
8724 
8725   symbol_type = mips_classify_symbolic_expression (op, context);
8726   gcc_assert (relocs[symbol_type]);
8727 
8728   fputs (relocs[symbol_type], file);
8729   output_addr_const (file, mips_strip_unspec_address (op));
8730   for (p = relocs[symbol_type]; *p != 0; p++)
8731     if (*p == '(')
8732       fputc (')', file);
8733 }
8734 
8735 /* Start a new block with the given asm switch enabled.  If we need
8736    to print a directive, emit PREFIX before it and SUFFIX after it.  */
8737 
8738 static void
mips_push_asm_switch_1(struct mips_asm_switch * asm_switch,const char * prefix,const char * suffix)8739 mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
8740 			const char *prefix, const char *suffix)
8741 {
8742   if (asm_switch->nesting_level == 0)
8743     fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
8744   asm_switch->nesting_level++;
8745 }
8746 
8747 /* Likewise, but end a block.  */
8748 
8749 static void
mips_pop_asm_switch_1(struct mips_asm_switch * asm_switch,const char * prefix,const char * suffix)8750 mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
8751 		       const char *prefix, const char *suffix)
8752 {
8753   gcc_assert (asm_switch->nesting_level);
8754   asm_switch->nesting_level--;
8755   if (asm_switch->nesting_level == 0)
8756     fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
8757 }
8758 
/* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
   that either print a complete line or print nothing.  */

void
mips_push_asm_switch (struct mips_asm_switch *asm_switch)
{
  /* A leading tab indents the directive; the trailing newline makes
     it a self-contained line.  */
  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
}
8767 
/* Counterpart of mips_push_asm_switch: end a block, printing a
   complete ".set" line if the outermost level is popped.  */

void
mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
}
8773 
/* Print the text for PRINT_OPERAND punctation character CH to FILE.
   The punctuation characters are:

   '('	Start a nested ".set noreorder" block.
   ')'	End a nested ".set noreorder" block.
   '['	Start a nested ".set noat" block.
   ']'	End a nested ".set noat" block.
   '<'	Start a nested ".set nomacro" block.
   '>'	End a nested ".set nomacro" block.
   '*'	Behave like %(%< if generating a delayed-branch sequence.
   '#'	Print a nop if in a ".set noreorder" block.
   '/'	Like '#', but do nothing within a delayed-branch sequence.
   '?'	Print "l" if mips_branch_likely is true
   '~'	Print a nop if mips_branch_likely is true
   '.'	Print the name of the register with a hard-wired zero (zero or $0).
   '@'	Print the name of the assembler temporary register (at or $1).
   '^'	Print the name of the pic call-through register (t9 or $25).
   '+'	Print the name of the gp register (usually gp or $28).
   '$'	Print the name of the stack pointer register (sp or $29).
   ':'  Print "c" to use the compact version if the delay slot is a nop.
   '!'  Print "s" to use the short version if the delay slot contains a
	16-bit instruction.

   See also mips_init_print_operand_punct.  */

static void
mips_print_operand_punctuation (FILE *file, int ch)
{
  switch (ch)
    {
    case '(':
      mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
      break;

    case ')':
      mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
      break;

    case '[':
      mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
      break;

    case ']':
      mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
      break;

    case '<':
      mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
      break;

    case '>':
      mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
      break;

    case '*':
      /* final_sequence is nonnull when outputting a delayed-branch
	 sequence; in that case act like "%(%<".  */
      if (final_sequence != 0)
	{
	  mips_print_operand_punctuation (file, '(');
	  mips_print_operand_punctuation (file, '<');
	}
      break;

    case '#':
      /* Inside ".set noreorder" the assembler will not fill delay
	 slots, so emit the nop explicitly.  */
      if (mips_noreorder.nesting_level > 0)
	fputs ("\n\tnop", file);
      break;

    case '/':
      /* Print an extra newline so that the delayed insn is separated
	 from the following ones.  This looks neater and is consistent
	 with non-nop delayed sequences.  */
      if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
	fputs ("\n\tnop\n", file);
      break;

    case '?':
      /* Branch-likely variants append "l" to the mnemonic.  */
      if (mips_branch_likely)
	putc ('l', file);
      break;

    case '~':
      if (mips_branch_likely)
	fputs ("\n\tnop", file);
      break;

    case '.':
      fputs (reg_names[GP_REG_FIRST + 0], file);
      break;

    case '@':
      fputs (reg_names[AT_REGNUM], file);
      break;

    case '^':
      fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
      break;

    case '+':
      fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
      break;

    case '$':
      fputs (reg_names[STACK_POINTER_REGNUM], file);
      break;

    case ':':
      /* When final_sequence is 0, the delay slot will be a nop.  We can
	 use the compact version where available.  The %: formatter will
	 only be present if a compact form of the branch is available.  */
      if (final_sequence == 0)
	putc ('c', file);
      break;

    case '!':
      /* If the delay slot instruction is short, then use the
	 compact version.  */
      if (TARGET_MICROMIPS && !TARGET_INTERLINK_COMPRESSED && mips_isa_rev <= 5
	  && (final_sequence == 0
	      || get_attr_length (final_sequence->insn (1)) == 2))
	putc ('s', file);
      break;

    default:
      /* mips_print_operand only dispatches characters registered in
	 mips_init_print_operand_punct, so this is unreachable.  */
      gcc_unreachable ();
      break;
    }
}
8901 
8902 /* Initialize mips_print_operand_punct.  */
8903 
8904 static void
mips_init_print_operand_punct(void)8905 mips_init_print_operand_punct (void)
8906 {
8907   const char *p;
8908 
8909   for (p = "()[]<>*#/?~.@^+$:!"; *p; p++)
8910     mips_print_operand_punct[(unsigned char) *p] = true;
8911 }
8912 
8913 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
8914    associated with condition CODE.  Print the condition part of the
8915    opcode to FILE.  */
8916 
8917 static void
mips_print_int_branch_condition(FILE * file,enum rtx_code code,int letter)8918 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
8919 {
8920   switch (code)
8921     {
8922     case EQ:
8923     case NE:
8924     case GT:
8925     case GE:
8926     case LT:
8927     case LE:
8928     case GTU:
8929     case GEU:
8930     case LTU:
8931     case LEU:
8932       /* Conveniently, the MIPS names for these conditions are the same
8933 	 as their RTL equivalents.  */
8934       fputs (GET_RTX_NAME (code), file);
8935       break;
8936 
8937     default:
8938       output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
8939       break;
8940     }
8941 }
8942 
8943 /* Likewise floating-point branches.  */
8944 
8945 static void
mips_print_float_branch_condition(FILE * file,enum rtx_code code,int letter)8946 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
8947 {
8948   switch (code)
8949     {
8950     case EQ:
8951       if (ISA_HAS_CCF)
8952 	fputs ("c1eqz", file);
8953       else
8954 	fputs ("c1f", file);
8955       break;
8956 
8957     case NE:
8958       if (ISA_HAS_CCF)
8959 	fputs ("c1nez", file);
8960       else
8961 	fputs ("c1t", file);
8962       break;
8963 
8964     default:
8965       output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
8966       break;
8967     }
8968 }
8969 
8970 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */
8971 
8972 static bool
mips_print_operand_punct_valid_p(unsigned char code)8973 mips_print_operand_punct_valid_p (unsigned char code)
8974 {
8975   return mips_print_operand_punct[code];
8976 }
8977 
/* Implement TARGET_PRINT_OPERAND.  The MIPS-specific operand codes are:

   'E'	Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal.
   'X'	Print CONST_INT OP in hexadecimal format.
   'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
   'd'	Print CONST_INT OP in decimal.
   'B'	Print CONST_INT OP element 0 of a replicated CONST_VECTOR
	  as an unsigned byte [0..255].
   'm'	Print one less than CONST_INT OP in decimal.
   'y'	Print exact log2 of CONST_INT OP in decimal.
   'h'	Print the high-part relocation associated with OP, after stripping
	  any outermost HIGH.
   'R'	Print the low-part relocation associated with OP.
   'C'	Print the integer branch condition for comparison OP.
   'N'	Print the inverse of the integer branch condition for comparison OP.
   'F'	Print the FPU branch condition for comparison OP.
   'W'	Print the inverse of the FPU branch condition for comparison OP.
   'w'	Print a MSA register.
   'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
   't'	Like 'T', but with the EQ/NE cases reversed
   'Y'	Print mips_fp_conditions[INTVAL (OP)]
   'Z'	Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
   'q'	Print a DSP accumulator register.
   'D'	Print the second part of a double-word register or memory operand.
   'L'	Print the low-order register in a double-word register operand.
   'M'	Print high-order register in a double-word register operand.
   'z'	Print $0 if OP is zero, otherwise print OP normally.
   'b'	Print the address of a memory operand, without offset.
   'v'	Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI,
	  V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively.
   'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
	  CONST_VECTOR in decimal.  */

static void
mips_print_operand (FILE *file, rtx op, int letter)
{
  enum rtx_code code;

  /* Punctuation codes (e.g. %(, %<) take no operand; dispatch them
     before looking at OP, which may be null in that case.  */
  if (mips_print_operand_punct_valid_p (letter))
    {
      mips_print_operand_punctuation (file, letter);
      return;
    }

  gcc_assert (op);
  code = GET_CODE (op);

  switch (letter)
    {
    case 'E':
      if (GET_CODE (op) == CONST_VECTOR)
	{
	  /* Only replicated vectors are valid here: every element
	     equals element 0, which must be a CONST_INT.  */
	  gcc_assert (mips_const_vector_same_val_p (op, GET_MODE (op)));
	  op = CONST_VECTOR_ELT (op, 0);
	  gcc_assert (CONST_INT_P (op));
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
	}
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'X':
      if (CONST_INT_P (op))
	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'x':
      if (CONST_INT_P (op))
	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'd':
      if (CONST_INT_P (op))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'B':
      if (GET_CODE (op) == CONST_VECTOR)
	{
	  gcc_assert (mips_const_vector_same_val_p (op, GET_MODE (op)));
	  op = CONST_VECTOR_ELT (op, 0);
	  gcc_assert (CONST_INT_P (op));
	  /* Truncate to the low byte and print as unsigned [0..255].  */
	  unsigned HOST_WIDE_INT val8 = UINTVAL (op) & GET_MODE_MASK (QImode);
	  fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, val8);
	}
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'm':
      if (CONST_INT_P (op))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'y':
      if (CONST_INT_P (op))
	{
	  /* exact_log2 returns -1 when OP is not a power of 2.  */
	  int val = exact_log2 (INTVAL (op));
	  if (val != -1)
	    fprintf (file, "%d", val);
	  else
	    output_operand_lossage ("invalid use of '%%%c'", letter);
	}
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'V':
      if (GET_CODE (op) == CONST_VECTOR)
	{
	  /* Mask element 0 to the element mode before taking the log,
	     so that sign-extended values still qualify.  */
	  machine_mode mode = GET_MODE_INNER (GET_MODE (op));
	  unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
	  int vlog2 = exact_log2 (val & GET_MODE_MASK (mode));
	  if (vlog2 != -1)
	    fprintf (file, "%d", vlog2);
	  else
	    output_operand_lossage ("invalid use of '%%%c'", letter);
	}
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'h':
      /* Strip an outermost HIGH before printing the %hi-style reloc.  */
      if (code == HIGH)
	op = XEXP (op, 0);
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
      break;

    case 'R':
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
      break;

    case 'C':
      mips_print_int_branch_condition (file, code, letter);
      break;

    case 'N':
      mips_print_int_branch_condition (file, reverse_condition (code), letter);
      break;

    case 'F':
      mips_print_float_branch_condition (file, code, letter);
      break;

    case 'W':
      mips_print_float_branch_condition (file, reverse_condition (code),
					 letter);
      break;

    case 'T':
    case 't':
      {
	/* Index into "zfnt": bit 1 is the truth sense, bit 0 is
	   whether the compared register is a condition-code register
	   (giving f/t) or a GPR (giving z/n).  */
	int truth = (code == NE) == (letter == 'T');
	fputc ("zfnt"[truth * 2 + ST_REG_P (REGNO (XEXP (op, 0)))], file);
      }
      break;

    case 'Y':
      if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
	fputs (mips_fp_conditions[UINTVAL (op)], file);
      else
	output_operand_lossage ("'%%%c' is not a valid operand prefix",
				letter);
      break;

    case 'Z':
      if (ISA_HAS_8CC || ISA_HAS_CCF)
	{
	  mips_print_operand (file, op, 0);
	  fputc (',', file);
	}
      break;

    case 'q':
      /* hi/lo are printed as the traditional $ac0 accumulator.  */
      if (code == REG && MD_REG_P (REGNO (op)))
	fprintf (file, "$ac0");
      else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
	fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'w':
      /* Print the MSA overlay name ($wN) of an FP register.  */
      if (code == REG && MSA_REG_P (REGNO (op)))
	fprintf (file, "$w%s", &reg_names[REGNO (op)][2]);
      else
	output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'v':
      switch (GET_MODE (op))
	{
	case E_V16QImode:
	  fprintf (file, "b");
	  break;
	case E_V8HImode:
	  fprintf (file, "h");
	  break;
	case E_V4SImode:
	case E_V4SFmode:
	  fprintf (file, "w");
	  break;
	case E_V2DImode:
	case E_V2DFmode:
	  fprintf (file, "d");
	  break;
	default:
	  output_operand_lossage ("invalid use of '%%%c'", letter);
	}
      break;

    default:
      /* No (or generic) prefix: dispatch on the operand itself.  */
      switch (code)
	{
	case REG:
	  {
	    unsigned int regno = REGNO (op);
	    /* 'D'/'M'/'L' select one half of a double-word register
	       pair; which half is the "second" one depends on
	       endianness.  */
	    if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
		|| (letter == 'L' && TARGET_BIG_ENDIAN)
		|| letter == 'D')
	      regno++;
	    else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
	      output_operand_lossage ("invalid use of '%%%c'", letter);
	    /* We need to print $0 .. $31 for COP0 registers.  */
	    if (COP0_REG_P (regno))
	      fprintf (file, "$%s", &reg_names[regno][4]);
	    else
	      fprintf (file, "%s", reg_names[regno]);
	  }
	  break;

	case MEM:
	  if (letter == 'D')
	    /* Second word of a double-word memory operand: 4 bytes on.  */
	    output_address (GET_MODE (op), plus_constant (Pmode,
							  XEXP (op, 0), 4));
	  else if (letter == 'b')
	    {
	      /* Base register only, without the offset.  */
	      gcc_assert (REG_P (XEXP (op, 0)));
	      mips_print_operand (file, XEXP (op, 0), 0);
	    }
	  else if (letter && letter != 'z')
	    output_operand_lossage ("invalid use of '%%%c'", letter);
	  else
	    output_address (GET_MODE (op), XEXP (op, 0));
	  break;

	default:
	  /* 'z' prints the hard-wired zero register for a zero operand.  */
	  if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
	    fputs (reg_names[GP_REG_FIRST], file);
	  else if (letter && letter != 'z')
	    output_operand_lossage ("invalid use of '%%%c'", letter);
	  else if (CONST_GP_P (op))
	    fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
	  else
	    output_addr_const (file, mips_strip_unspec_address (op));
	  break;
	}
    }
}
9246 
9247 /* Implement TARGET_PRINT_OPERAND_ADDRESS.  */
9248 
9249 static void
mips_print_operand_address(FILE * file,machine_mode,rtx x)9250 mips_print_operand_address (FILE *file, machine_mode /*mode*/, rtx x)
9251 {
9252   struct mips_address_info addr;
9253 
9254   if (mips_classify_address (&addr, x, word_mode, true))
9255     switch (addr.type)
9256       {
9257       case ADDRESS_REG:
9258 	mips_print_operand (file, addr.offset, 0);
9259 	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
9260 	return;
9261 
9262       case ADDRESS_LO_SUM:
9263 	mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
9264 				  mips_lo_relocs);
9265 	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
9266 	return;
9267 
9268       case ADDRESS_CONST_INT:
9269 	output_addr_const (file, x);
9270 	fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
9271 	return;
9272 
9273       case ADDRESS_SYMBOLIC:
9274 	output_addr_const (file, mips_strip_unspec_address (x));
9275 	return;
9276       }
9277   gcc_unreachable ();
9278 }
9279 
9280 /* Implement TARGET_ENCODE_SECTION_INFO.  */
9281 
9282 static void
mips_encode_section_info(tree decl,rtx rtl,int first)9283 mips_encode_section_info (tree decl, rtx rtl, int first)
9284 {
9285   default_encode_section_info (decl, rtl, first);
9286 
9287   if (TREE_CODE (decl) == FUNCTION_DECL)
9288     {
9289       rtx symbol = XEXP (rtl, 0);
9290       tree type = TREE_TYPE (decl);
9291 
9292       /* Encode whether the symbol is short or long.  */
9293       if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
9294 	  || mips_far_type_p (type))
9295 	SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
9296     }
9297 }
9298 
9299 /* Implement TARGET_SELECT_RTX_SECTION.  */
9300 
9301 static section *
mips_select_rtx_section(machine_mode mode,rtx x,unsigned HOST_WIDE_INT align)9302 mips_select_rtx_section (machine_mode mode, rtx x,
9303 			 unsigned HOST_WIDE_INT align)
9304 {
9305   /* ??? Consider using mergeable small data sections.  */
9306   if (mips_rtx_constant_in_small_data_p (mode))
9307     return get_named_section (NULL, ".sdata", 0);
9308 
9309   return default_elf_select_rtx_section (mode, x, align);
9310 }
9311 
/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.

   The complication here is that, with the combination TARGET_ABICALLS
   && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
   absolute addresses, and should therefore not be included in the
   read-only part of a DSO.  Handle such cases by selecting a normal
   data section instead of a read-only one.  The logic apes that in
   default_function_rodata_section.  */

static section *
mips_function_rodata_section (tree decl)
{
  /* Outside the problematic configuration the generic hook is fine.  */
  if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
    return default_function_rodata_section (decl);

  if (decl && DECL_SECTION_NAME (decl))
    {
      const char *name = DECL_SECTION_NAME (decl);
      if (DECL_COMDAT_GROUP (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
	{
	  /* Rewrite ".gnu.linkonce.t.FOO" to ".gnu.linkonce.d.FOO":
	     index 14 is the 't' in the 16-character prefix.  */
	  char *rname = ASTRDUP (name);
	  rname[14] = 'd';
	  return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
	}
      else if (flag_function_sections
	       && flag_data_sections
	       && strncmp (name, ".text.", 6) == 0)
	{
	  /* Rewrite ".text.FOO" to ".data.FOO" by overwriting the
	     four characters after the leading '.'.  */
	  char *rname = ASTRDUP (name);
	  memcpy (rname + 1, "data", 4);
	  return get_section (rname, SECTION_WRITE, decl);
	}
    }
  /* No per-function section: fall back to plain .data.  */
  return data_section;
}
9347 
/* Implement TARGET_IN_SMALL_DATA_P.  Return true if DECL should be
   placed in the small data area and accessed via $gp-relative
   addressing.  */

static bool
mips_in_small_data_p (const_tree decl)
{
  unsigned HOST_WIDE_INT size;

  /* Strings and functions are never small data.  */
  if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  /* We don't yet generate small-data references for -mabicalls
     or VxWorks RTP code.  See the related -G handling in
     mips_option_override.  */
  if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
    {
      const char *name;

      /* Reject anything that isn't in a known small-data section.  */
      name = DECL_SECTION_NAME (decl);
      if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
	return false;

      /* If a symbol is defined externally, the assembler will use the
	 usual -G rules when deciding how to implement macros.  */
      if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
	return true;
    }
  else if (TARGET_EMBEDDED_DATA)
    {
      /* Don't put constants into the small data section: we want them
	 to be in ROM rather than RAM.  */
      if (TREE_CODE (decl) != VAR_DECL)
	return false;

      if (TREE_READONLY (decl)
	  && !TREE_SIDE_EFFECTS (decl)
	  && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
	return false;
    }

  /* Enforce -mlocal-sdata.  */
  if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
    return false;

  /* Enforce -mextern-sdata.  */
  if (!TARGET_EXTERN_SDATA && DECL_P (decl))
    {
      if (DECL_EXTERNAL (decl))
	return false;
      /* A common symbol with no initializer behaves like an extern.  */
      if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
	return false;
    }

  /* We have traditionally not treated zero-sized objects as small data,
     so this is now effectively part of the ABI.  */
  size = int_size_in_bytes (TREE_TYPE (decl));
  return size > 0 && size <= mips_small_data_threshold;
}
9409 
9410 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
9411    anchors for small data: the GP register acts as an anchor in that
9412    case.  We also don't want to use them for PC-relative accesses,
9413    where the PC acts as an anchor.  */
9414 
9415 static bool
mips_use_anchors_for_symbol_p(const_rtx symbol)9416 mips_use_anchors_for_symbol_p (const_rtx symbol)
9417 {
9418   switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
9419     {
9420     case SYMBOL_PC_RELATIVE:
9421     case SYMBOL_GP_RELATIVE:
9422       return false;
9423 
9424     default:
9425       return default_use_anchors_for_symbol_p (symbol);
9426     }
9427 }
9428 
9429 /* The MIPS debug format wants all automatic variables and arguments
9430    to be in terms of the virtual frame pointer (stack pointer before
9431    any adjustment in the function), while the MIPS 3.0 linker wants
9432    the frame pointer to be the stack pointer after the initial
9433    adjustment.  So, we do the adjustment here.  The arg pointer (which
9434    is eliminated) points to the virtual frame pointer, while the frame
9435    pointer (which may be eliminated) points to the stack pointer after
9436    the initial adjustments.  */
9437 
9438 HOST_WIDE_INT
mips_debugger_offset(rtx addr,HOST_WIDE_INT offset)9439 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
9440 {
9441   rtx offset2 = const0_rtx;
9442   rtx reg = eliminate_constant_term (addr, &offset2);
9443 
9444   if (offset == 0)
9445     offset = INTVAL (offset2);
9446 
9447   if (reg == stack_pointer_rtx
9448       || reg == frame_pointer_rtx
9449       || reg == hard_frame_pointer_rtx)
9450     {
9451       offset -= cfun->machine->frame.total_size;
9452       if (reg == hard_frame_pointer_rtx)
9453 	offset += cfun->machine->frame.hard_frame_pointer_offset;
9454     }
9455 
9456   return offset;
9457 }
9458 
9459 /* Implement ASM_OUTPUT_EXTERNAL.  */
9460 
9461 void
mips_output_external(FILE * file,tree decl,const char * name)9462 mips_output_external (FILE *file, tree decl, const char *name)
9463 {
9464   default_elf_asm_output_external (file, decl, name);
9465 
9466   /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9467      set in order to avoid putting out names that are never really
9468      used. */
9469   if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9470     {
9471       if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
9472 	{
9473 	  /* When using assembler macros, emit .extern directives for
9474 	     all small-data externs so that the assembler knows how
9475 	     big they are.
9476 
9477 	     In most cases it would be safe (though pointless) to emit
9478 	     .externs for other symbols too.  One exception is when an
9479 	     object is within the -G limit but declared by the user to
9480 	     be in a section other than .sbss or .sdata.  */
9481 	  fputs ("\t.extern\t", file);
9482 	  assemble_name (file, name);
9483 	  fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
9484 		   int_size_in_bytes (TREE_TYPE (decl)));
9485 	}
9486     }
9487 }
9488 
/* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME.  Emit a ".file"
   directive for NAME on STREAM, numbering source files as they are
   first seen.  */

static void
mips_output_filename (FILE *stream, const char *name)
{
  /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
     directives.  */
  if (write_symbols == DWARF2_DEBUG)
    return;
  else if (mips_output_filename_first_time)
    {
      /* First file seen: always emit, even under stabs.  */
      mips_output_filename_first_time = 0;
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
  /* If we are emitting stabs, let dbxout.c handle this (except for
     the mips_output_filename_first_time case).  */
  else if (write_symbols == DBX_DEBUG)
    return;
  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      /* The pointer comparison is a cheap fast path before the
	 string comparison; either way, a new file name gets a new
	 file number.  */
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
}
9521 
9522 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
9523 
9524 static void ATTRIBUTE_UNUSED
mips_output_dwarf_dtprel(FILE * file,int size,rtx x)9525 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
9526 {
9527   switch (size)
9528     {
9529     case 4:
9530       fputs ("\t.dtprelword\t", file);
9531       break;
9532 
9533     case 8:
9534       fputs ("\t.dtpreldword\t", file);
9535       break;
9536 
9537     default:
9538       gcc_unreachable ();
9539     }
9540   output_addr_const (file, x);
9541   fputs ("+0x8000", file);
9542 }
9543 
9544 /* Implement TARGET_DWARF_REGISTER_SPAN.  */
9545 
9546 static rtx
mips_dwarf_register_span(rtx reg)9547 mips_dwarf_register_span (rtx reg)
9548 {
9549   rtx high, low;
9550   machine_mode mode;
9551 
9552   /* TARGET_FLOATXX is implemented as 32-bit floating-point registers but
9553      ensures that double-precision registers are treated as if they were
9554      64-bit physical registers.  The code will run correctly with 32-bit or
9555      64-bit registers which means that dwarf information cannot be precise
9556      for all scenarios.  We choose to state that the 64-bit values are stored
9557      in a single 64-bit 'piece'.  This slightly unusual construct can then be
9558      interpreted as either a pair of registers if the registers are 32-bit or
9559      a single 64-bit register depending on hardware.  */
9560   mode = GET_MODE (reg);
9561   if (FP_REG_P (REGNO (reg))
9562       && TARGET_FLOATXX
9563       && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
9564     {
9565       return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, reg));
9566     }
9567   /* By default, GCC maps increasing register numbers to increasing
9568      memory locations, but paired FPRs are always little-endian,
9569      regardless of the prevailing endianness.  */
9570   else if (FP_REG_P (REGNO (reg))
9571 	   && TARGET_BIG_ENDIAN
9572 	   && MAX_FPRS_PER_FMT > 1
9573 	   && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
9574     {
9575       gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
9576       high = mips_subword (reg, true);
9577       low = mips_subword (reg, false);
9578       return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
9579     }
9580 
9581   return NULL_RTX;
9582 }
9583 
9584 /* Implement TARGET_DWARF_FRAME_REG_MODE.  */
9585 
9586 static machine_mode
mips_dwarf_frame_reg_mode(int regno)9587 mips_dwarf_frame_reg_mode (int regno)
9588 {
9589   machine_mode mode = default_dwarf_frame_reg_mode (regno);
9590 
9591   if (FP_REG_P (regno) && mips_abi == ABI_32 && !TARGET_FLOAT32)
9592     mode = SImode;
9593 
9594   return mode;
9595 }
9596 
/* DSP ALU can bypass data with no delays for the following pairs.
   Each entry is { producer insn code, consumer insn code }; see
   mips_dspalu_bypass_p below.  */
enum insn_code dspalu_bypass_table[][2] =
{
  {CODE_FOR_mips_addsc, CODE_FOR_mips_addwc},
  {CODE_FOR_mips_cmpu_eq_qb, CODE_FOR_mips_pick_qb},
  {CODE_FOR_mips_cmpu_lt_qb, CODE_FOR_mips_pick_qb},
  {CODE_FOR_mips_cmpu_le_qb, CODE_FOR_mips_pick_qb},
  {CODE_FOR_mips_cmp_eq_ph, CODE_FOR_mips_pick_ph},
  {CODE_FOR_mips_cmp_lt_ph, CODE_FOR_mips_pick_ph},
  {CODE_FOR_mips_cmp_le_ph, CODE_FOR_mips_pick_ph},
  {CODE_FOR_mips_wrdsp, CODE_FOR_mips_insv}
};
9609 
9610 int
mips_dspalu_bypass_p(rtx out_insn,rtx in_insn)9611 mips_dspalu_bypass_p (rtx out_insn, rtx in_insn)
9612 {
9613   int i;
9614   int num_bypass = ARRAY_SIZE (dspalu_bypass_table);
9615   enum insn_code out_icode = (enum insn_code) INSN_CODE (out_insn);
9616   enum insn_code in_icode = (enum insn_code) INSN_CODE (in_insn);
9617 
9618   for (i = 0; i < num_bypass; i++)
9619     {
9620       if (out_icode == dspalu_bypass_table[i][0]
9621 	  && in_icode == dspalu_bypass_table[i][1])
9622        return true;
9623     }
9624 
9625   return false;
9626 }
9627 /* Implement ASM_OUTPUT_ASCII.  */
9628 
void
mips_output_ascii (FILE *stream, const char *string, size_t len)
{
  /* Current output column; 17 approximates the width of the
     "\t.ascii\t\"" prefix with 8-column tabs.  */
  int col = 17;
  size_t i;

  fprintf (stream, "\t.ascii\t\"");
  for (i = 0; i < len; i++)
    {
      int c = (unsigned char) string[i];

      if (!ISPRINT (c))
	{
	  /* Emit nonprintable characters as three-digit octal escapes.  */
	  fprintf (stream, "\\%03o", c);
	  col += 4;
	}
      else
	{
	  /* Backslashes and double quotes must themselves be escaped.  */
	  if (c == '\\' || c == '\"')
	    {
	      putc ('\\', stream);
	      col++;
	    }
	  putc (c, stream);
	  col++;
	}

      /* Break overlong strings into multiple .ascii directives.  */
      if (col > 72 && i + 1 < len)
	{
	  fprintf (stream, "\"\n\t.ascii\t\"");
	  col = 17;
	}
    }
  fprintf (stream, "\"\n");
}
9666 
9667 /* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
9668    Update *ADDR with the operand that should be printed.  */
9669 
9670 const char *
mips_output_tls_reloc_directive(rtx * addr)9671 mips_output_tls_reloc_directive (rtx *addr)
9672 {
9673   enum mips_symbol_type type;
9674 
9675   type = mips_classify_symbolic_expression (*addr, SYMBOL_CONTEXT_LEA);
9676   *addr = mips_strip_unspec_address (*addr);
9677   switch (type)
9678     {
9679     case SYMBOL_DTPREL:
9680       return Pmode == SImode ? ".dtprelword\t%0" : ".dtpreldword\t%0";
9681 
9682     case SYMBOL_TPREL:
9683       return Pmode == SImode ? ".tprelword\t%0" : ".tpreldword\t%0";
9684 
9685     default:
9686       gcc_unreachable ();
9687     }
9688 }
9689 
9690 /* Emit either a label, .comm, or .lcomm directive.  When using assembler
9691    macros, mark the symbol as written so that mips_asm_output_external
9692    won't emit an .extern for it.  STREAM is the output file, NAME is the
9693    name of the symbol, INIT_STRING is the string that should be written
9694    before the symbol and FINAL_STRING is the string that should be
9695    written after it.  FINAL_STRING is a printf format that consumes the
9696    remaining arguments.  */
9697 
9698 void
mips_declare_object(FILE * stream,const char * name,const char * init_string,const char * final_string,...)9699 mips_declare_object (FILE *stream, const char *name, const char *init_string,
9700 		     const char *final_string, ...)
9701 {
9702   va_list ap;
9703 
9704   fputs (init_string, stream);
9705   assemble_name (stream, name);
9706   va_start (ap, final_string);
9707   vfprintf (stream, final_string, ap);
9708   va_end (ap);
9709 
9710   if (!TARGET_EXPLICIT_RELOCS)
9711     {
9712       tree name_tree = get_identifier (name);
9713       TREE_ASM_WRITTEN (name_tree) = 1;
9714     }
9715 }
9716 
9717 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
9718    NAME is the name of the object and ALIGN is the required alignment
9719    in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
9720    alignment argument.  */
9721 
9722 void
mips_declare_common_object(FILE * stream,const char * name,const char * init_string,unsigned HOST_WIDE_INT size,unsigned int align,bool takes_alignment_p)9723 mips_declare_common_object (FILE *stream, const char *name,
9724 			    const char *init_string,
9725 			    unsigned HOST_WIDE_INT size,
9726 			    unsigned int align, bool takes_alignment_p)
9727 {
9728   if (!takes_alignment_p)
9729     {
9730       size += (align / BITS_PER_UNIT) - 1;
9731       size -= size % (align / BITS_PER_UNIT);
9732       mips_declare_object (stream, name, init_string,
9733 			   "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
9734     }
9735   else
9736     mips_declare_object (stream, name, init_string,
9737 			 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
9738 			 size, align / BITS_PER_UNIT);
9739 }
9740 
9741 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
9742    elfos.h version, but we also need to handle -muninit-const-in-rodata.  */
9743 
9744 void
mips_output_aligned_decl_common(FILE * stream,tree decl,const char * name,unsigned HOST_WIDE_INT size,unsigned int align)9745 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
9746 				 unsigned HOST_WIDE_INT size,
9747 				 unsigned int align)
9748 {
9749   /* If the target wants uninitialized const declarations in
9750      .rdata then don't put them in .comm.  */
9751   if (TARGET_EMBEDDED_DATA
9752       && TARGET_UNINIT_CONST_IN_RODATA
9753       && TREE_CODE (decl) == VAR_DECL
9754       && TREE_READONLY (decl)
9755       && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
9756     {
9757       if (TREE_PUBLIC (decl) && DECL_NAME (decl))
9758 	targetm.asm_out.globalize_label (stream, name);
9759 
9760       switch_to_section (readonly_data_section);
9761       ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
9762       mips_declare_object (stream, name, "",
9763 			   ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
9764 			   size);
9765     }
9766   else
9767     mips_declare_common_object (stream, name, "\n\t.comm\t",
9768 				size, align, true);
9769 }
9770 
9771 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
9772 extern int size_directive_output;
9773 
9774 /* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
9775    definitions except that it uses mips_declare_object to emit the label.  */
9776 
9777 void
mips_declare_object_name(FILE * stream,const char * name,tree decl ATTRIBUTE_UNUSED)9778 mips_declare_object_name (FILE *stream, const char *name,
9779 			  tree decl ATTRIBUTE_UNUSED)
9780 {
9781 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
9782   ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
9783 #endif
9784 
9785   size_directive_output = 0;
9786   if (!flag_inhibit_size_directive && DECL_SIZE (decl))
9787     {
9788       HOST_WIDE_INT size;
9789 
9790       size_directive_output = 1;
9791       size = int_size_in_bytes (TREE_TYPE (decl));
9792       ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
9793     }
9794 
9795   mips_declare_object (stream, name, "", ":\n");
9796 }
9797 
9798 /* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */
9799 
9800 void
mips_finish_declare_object(FILE * stream,tree decl,int top_level,int at_end)9801 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
9802 {
9803   const char *name;
9804 
9805   name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
9806   if (!flag_inhibit_size_directive
9807       && DECL_SIZE (decl) != 0
9808       && !at_end
9809       && top_level
9810       && DECL_INITIAL (decl) == error_mark_node
9811       && !size_directive_output)
9812     {
9813       HOST_WIDE_INT size;
9814 
9815       size_directive_output = 1;
9816       size = int_size_in_bytes (TREE_TYPE (decl));
9817       ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
9818     }
9819 }
9820 #endif
9821 
9822 /* Mark text contents as code or data, mainly for the purpose of correct
9823    disassembly.  Emit a local symbol and set its type appropriately for
9824    that purpose.  Also emit `.insn' if marking contents as code so that
9825    the ISA mode is recorded and any padding that follows is disassembled
9826    as correct instructions.  */
9827 
9828 void
mips_set_text_contents_type(FILE * file ATTRIBUTE_UNUSED,const char * prefix ATTRIBUTE_UNUSED,unsigned long num ATTRIBUTE_UNUSED,bool function_p ATTRIBUTE_UNUSED)9829 mips_set_text_contents_type (FILE *file ATTRIBUTE_UNUSED,
9830 			     const char *prefix ATTRIBUTE_UNUSED,
9831 			     unsigned long num ATTRIBUTE_UNUSED,
9832 			     bool function_p ATTRIBUTE_UNUSED)
9833 {
9834 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
9835   char buf[(sizeof (num) * 10) / 4 + 2];
9836   const char *fnname;
9837   char *sname;
9838   rtx symbol;
9839 
9840   sprintf (buf, "%lu", num);
9841   symbol = XEXP (DECL_RTL (current_function_decl), 0);
9842   fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
9843   sname = ACONCAT ((prefix, fnname, "_", buf, NULL));
9844 
9845   ASM_OUTPUT_TYPE_DIRECTIVE (file, sname, function_p ? "function" : "object");
9846   assemble_name (file, sname);
9847   fputs (":\n", file);
9848   if (function_p)
9849     fputs ("\t.insn\n", file);
9850 #endif
9851 }
9852 
9853 /* Return the FOO in the name of the ".mdebug.FOO" section associated
9854    with the current ABI.  */
9855 
9856 static const char *
mips_mdebug_abi_name(void)9857 mips_mdebug_abi_name (void)
9858 {
9859   switch (mips_abi)
9860     {
9861     case ABI_32:
9862       return "abi32";
9863     case ABI_O64:
9864       return "abiO64";
9865     case ABI_N32:
9866       return "abiN32";
9867     case ABI_64:
9868       return "abi64";
9869     case ABI_EABI:
9870       return TARGET_64BIT ? "eabi64" : "eabi32";
9871     default:
9872       gcc_unreachable ();
9873     }
9874 }
9875 
9876 /* Implement TARGET_ASM_FILE_START.  */
9877 
static void
mips_file_start (void)
{
  default_file_start ();

  /* Generate a special section to describe the ABI switches used to
     produce the resultant binary.  */

  /* Record the ABI itself.  Modern versions of binutils encode
     this information in the ELF header flags, but GDB needs the
     information in order to correctly debug binaries produced by
     older binutils.  See the function mips_gdbarch_init in
     gdb/mips-tdep.c.  */
  fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
	   mips_mdebug_abi_name ());

  /* There is no ELF header flag to distinguish long32 forms of the
     EABI from long64 forms.  Emit a special section to help tools
     such as GDB.  Do the same for o64, which is sometimes used with
     -mlong64.  */
  if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
    fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
	     "\t.previous\n", TARGET_LONG64 ? 64 : 32);

  /* Record the NaN encoding.  */
  if (HAVE_AS_NAN || mips_nan != MIPS_IEEE_754_DEFAULT)
    fprintf (asm_out_file, "\t.nan\t%s\n",
	     mips_nan == MIPS_IEEE_754_2008 ? "2008" : "legacy");

#ifdef HAVE_AS_DOT_MODULE
  /* When the assembler supports .module, use it to record the FP ABI;
     otherwise fall back on .gnu_attribute in the #else branch below.  */
  /* Record the FP ABI.  See below for comments.  */
  if (TARGET_NO_FLOAT)
#ifdef HAVE_AS_GNU_ATTRIBUTE
    fputs ("\t.gnu_attribute 4, 0\n", asm_out_file);
#else
    ;
#endif
  else if (!TARGET_HARD_FLOAT_ABI)
    fputs ("\t.module\tsoftfloat\n", asm_out_file);
  else if (!TARGET_DOUBLE_FLOAT)
    fputs ("\t.module\tsinglefloat\n", asm_out_file);
  else if (TARGET_FLOATXX)
    fputs ("\t.module\tfp=xx\n", asm_out_file);
  else if (TARGET_FLOAT64)
    fputs ("\t.module\tfp=64\n", asm_out_file);
  else
    fputs ("\t.module\tfp=32\n", asm_out_file);

  if (TARGET_ODD_SPREG)
    fputs ("\t.module\toddspreg\n", asm_out_file);
  else
    fputs ("\t.module\tnooddspreg\n", asm_out_file);

#else
#ifdef HAVE_AS_GNU_ATTRIBUTE
  {
    /* Object attribute Tag_GNU_MIPS_ABI_FP (4); the values below follow
       the order of the if-chain, one per FP ABI variant.  */
    int attr;

    /* No floating-point operations, -mno-float.  */
    if (TARGET_NO_FLOAT)
      attr = 0;
    /* Soft-float code, -msoft-float.  */
    else if (!TARGET_HARD_FLOAT_ABI)
      attr = 3;
    /* Single-float code, -msingle-float.  */
    else if (!TARGET_DOUBLE_FLOAT)
      attr = 2;
    /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.
       Reserved attr=4.
       This case used 12 callee-saved double-precision registers
       and is deprecated.  */
    /* 64-bit or 32-bit FP registers on a 32-bit target, -mfpxx.  */
    else if (TARGET_FLOATXX)
      attr = 5;
    /* 64-bit FP registers on a 32-bit target, -mfp64 -modd-spreg.  */
    else if (mips_abi == ABI_32 && TARGET_FLOAT64 && TARGET_ODD_SPREG)
      attr = 6;
    /* 64-bit FP registers on a 32-bit target, -mfp64 -mno-odd-spreg.  */
    else if (mips_abi == ABI_32 && TARGET_FLOAT64)
      attr = 7;
    /* Regular FP code, FP regs same size as GP regs, -mdouble-float.  */
    else
      attr = 1;

    fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);

    /* 128-bit MSA.  */
    if (ISA_HAS_MSA)
      fprintf (asm_out_file, "\t.gnu_attribute 8, 1\n");
  }
#endif
#endif

  /* If TARGET_ABICALLS, tell GAS to generate -KPIC code.  */
  if (TARGET_ABICALLS)
    {
      fprintf (asm_out_file, "\t.abicalls\n");
      if (TARGET_ABICALLS_PIC0)
	fprintf (asm_out_file, "\t.option\tpic0\n");
    }

  /* Leave a breadcrumb comment describing the configuration used.  */
  if (flag_verbose_asm)
    fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
	     ASM_COMMENT_START,
	     mips_small_data_threshold, mips_arch_info->name, mips_isa);
}
9984 
9985 /* Implement TARGET_ASM_CODE_END.  */
9986 
static void
mips_code_end (void)
{
  /* Emit any MIPS16 helper stubs that were requested during code
     generation but deferred until end of compilation.  */
  mips_finish_stub (&mips16_rdhwr_stub);
  mips_finish_stub (&mips16_get_fcsr_stub);
  mips_finish_stub (&mips16_set_fcsr_stub);
}
9994 
9995 /* Make the last instruction frame-related and note that it performs
9996    the operation described by FRAME_PATTERN.  */
9997 
9998 static void
mips_set_frame_expr(rtx frame_pattern)9999 mips_set_frame_expr (rtx frame_pattern)
10000 {
10001   rtx_insn *insn;
10002 
10003   insn = get_last_insn ();
10004   RTX_FRAME_RELATED_P (insn) = 1;
10005   REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
10006 				      frame_pattern,
10007 				      REG_NOTES (insn));
10008 }
10009 
10010 /* Return a frame-related rtx that stores REG at MEM.
10011    REG must be a single register.  */
10012 
10013 static rtx
mips_frame_set(rtx mem,rtx reg)10014 mips_frame_set (rtx mem, rtx reg)
10015 {
10016   rtx set;
10017 
10018   set = gen_rtx_SET (mem, reg);
10019   RTX_FRAME_RELATED_P (set) = 1;
10020 
10021   return set;
10022 }
10023 
10024 /* Record that the epilogue has restored call-saved register REG.  */
10025 
static void
mips_add_cfa_restore (rtx reg)
{
  /* Prepend a REG_CFA_RESTORE note for REG to the queue in
     mips_epilogue.cfa_restores; the accumulated notes are attached to
     an epilogue instruction later.  */
  mips_epilogue.cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       mips_epilogue.cfa_restores);
}
10032 
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.
   (Register 30 is $s8/$fp and 18-23 are $s2-$s7 in the standard MIPS
   numbering; 4-7 are the argument registers $a0-$a3.)  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
static const unsigned char mips16e_a0_a3_regs[] = {
  4, 5, 6, 7
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
10048 
10049 /* Return the index of the lowest X in the range [0, SIZE) for which
10050    bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */
10051 
static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
			     unsigned int size)
{
  unsigned int index = 0;

  while (index < size && !BITSET_P (mask, regs[index]))
    index++;

  return index;
}
10064 
10065 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
10066    is the number of set bits.  If *MASK_PTR contains REGS[X] for some X
10067    in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
10068    is true for all indexes (X, SIZE).  */
10069 
static void
mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
			unsigned int size, unsigned int *num_regs_ptr)
{
  unsigned int first = mips16e_find_first_register (*mask_ptr, regs, size);
  unsigned int i;

  /* Everything after the first selected register must be selected too.  */
  for (i = first + 1; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
      {
	*mask_ptr |= 1 << regs[i];
	*num_regs_ptr += 1;
      }
}
10084 
10085 /* Return a simplified form of X using the register values in REG_VALUES.
10086    REG_VALUES[R] is the last value assigned to hard register R, or null
10087    if R has not been modified.
10088 
10089    This function is rather limited, but is good enough for our purposes.  */
10090 
10091 static rtx
mips16e_collect_propagate_value(rtx x,rtx * reg_values)10092 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
10093 {
10094   x = avoid_constant_pool_reference (x);
10095 
10096   if (UNARY_P (x))
10097     {
10098       rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
10099       return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
10100 				 x0, GET_MODE (XEXP (x, 0)));
10101     }
10102 
10103   if (ARITHMETIC_P (x))
10104     {
10105       rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
10106       rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
10107       return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
10108     }
10109 
10110   if (REG_P (x)
10111       && reg_values[REGNO (x)]
10112       && !rtx_unstable_p (reg_values[REGNO (x)]))
10113     return reg_values[REGNO (x)];
10114 
10115   return x;
10116 }
10117 
10118 /* Return true if (set DEST SRC) stores an argument register into its
10119    caller-allocated save slot, storing the number of that argument
10120    register in *REGNO_PTR if so.  REG_VALUES is as for
10121    mips16e_collect_propagate_value.  */
10122 
10123 static bool
mips16e_collect_argument_save_p(rtx dest,rtx src,rtx * reg_values,unsigned int * regno_ptr)10124 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
10125 				 unsigned int *regno_ptr)
10126 {
10127   unsigned int argno, regno;
10128   HOST_WIDE_INT offset, required_offset;
10129   rtx addr, base;
10130 
10131   /* Check that this is a word-mode store.  */
10132   if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
10133     return false;
10134 
10135   /* Check that the register being saved is an unmodified argument
10136      register.  */
10137   regno = REGNO (src);
10138   if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
10139     return false;
10140   argno = regno - GP_ARG_FIRST;
10141 
10142   /* Check whether the address is an appropriate stack-pointer or
10143      frame-pointer access.  */
10144   addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
10145   mips_split_plus (addr, &base, &offset);
10146   required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
10147   if (base == hard_frame_pointer_rtx)
10148     required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
10149   else if (base != stack_pointer_rtx)
10150     return false;
10151   if (offset != required_offset)
10152     return false;
10153 
10154   *regno_ptr = regno;
10155   return true;
10156 }
10157 
10158 /* A subroutine of mips_expand_prologue, called only when generating
10159    MIPS16e SAVE instructions.  Search the start of the function for any
10160    instructions that save argument registers into their caller-allocated
10161    save slots.  Delete such instructions and return a value N such that
10162    saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
10163    instructions redundant.  */
10164 
10165 static unsigned int
mips16e_collect_argument_saves(void)10166 mips16e_collect_argument_saves (void)
10167 {
10168   rtx reg_values[FIRST_PSEUDO_REGISTER];
10169   rtx_insn *insn, *next;
10170   rtx set, dest, src;
10171   unsigned int nargs, regno;
10172 
10173   push_topmost_sequence ();
10174   nargs = 0;
10175   memset (reg_values, 0, sizeof (reg_values));
10176   for (insn = get_insns (); insn; insn = next)
10177     {
10178       next = NEXT_INSN (insn);
10179       if (NOTE_P (insn) || DEBUG_INSN_P (insn))
10180 	continue;
10181 
10182       if (!INSN_P (insn))
10183 	break;
10184 
10185       set = PATTERN (insn);
10186       if (GET_CODE (set) != SET)
10187 	break;
10188 
10189       dest = SET_DEST (set);
10190       src = SET_SRC (set);
10191       if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
10192 	{
10193 	  if (!BITSET_P (cfun->machine->frame.mask, regno))
10194 	    {
10195 	      delete_insn (insn);
10196 	      nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
10197 	    }
10198 	}
10199       else if (REG_P (dest) && GET_MODE (dest) == word_mode)
10200 	reg_values[REGNO (dest)]
10201 	  = mips16e_collect_propagate_value (src, reg_values);
10202       else
10203 	break;
10204     }
10205   pop_topmost_sequence ();
10206 
10207   return nargs;
10208 }
10209 
10210 /* Return a move between register REGNO and memory location SP + OFFSET.
10211    REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
10212    Make the move a load if RESTORE_P, otherwise make it a store.  */
10213 
10214 static rtx
mips16e_save_restore_reg(bool restore_p,bool reg_parm_p,HOST_WIDE_INT offset,unsigned int regno)10215 mips16e_save_restore_reg (bool restore_p, bool reg_parm_p,
10216 			  HOST_WIDE_INT offset, unsigned int regno)
10217 {
10218   rtx reg, mem;
10219 
10220   mem = gen_frame_mem (SImode, plus_constant (Pmode, stack_pointer_rtx,
10221 					      offset));
10222   reg = gen_rtx_REG (SImode, regno);
10223   if (restore_p)
10224     {
10225       mips_add_cfa_restore (reg);
10226       return gen_rtx_SET (reg, mem);
10227     }
10228   if (reg_parm_p)
10229     return gen_rtx_SET (mem, reg);
10230   return mips_frame_set (mem, reg);
10231 }
10232 
10233 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
10234    The instruction must:
10235 
10236      - Allocate or deallocate SIZE bytes in total; SIZE is known
10237        to be nonzero.
10238 
10239      - Save or restore as many registers in *MASK_PTR as possible.
10240        The instruction saves the first registers at the top of the
10241        allocated area, with the other registers below it.
10242 
10243      - Save NARGS argument registers above the allocated area.
10244 
10245    (NARGS is always zero if RESTORE_P.)
10246 
10247    The SAVE and RESTORE instructions cannot save and restore all general
10248    registers, so there may be some registers left over for the caller to
10249    handle.  Destructively modify *MASK_PTR so that it contains the registers
10250    that still need to be saved or restored.  The caller can save these
10251    registers in the memory immediately below *OFFSET_PTR, which is a
10252    byte offset from the bottom of the allocated stack area.  */
10253 
10254 static rtx
mips16e_build_save_restore(bool restore_p,unsigned int * mask_ptr,HOST_WIDE_INT * offset_ptr,unsigned int nargs,HOST_WIDE_INT size)10255 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
10256 			    HOST_WIDE_INT *offset_ptr, unsigned int nargs,
10257 			    HOST_WIDE_INT size)
10258 {
10259   rtx pattern, set;
10260   HOST_WIDE_INT offset, top_offset;
10261   unsigned int i, regno;
10262   int n;
10263 
10264   gcc_assert (cfun->machine->frame.num_fp == 0);
10265 
10266   /* Calculate the number of elements in the PARALLEL.  We need one element
10267      for the stack adjustment, one for each argument register save, and one
10268      for each additional register move.  */
10269   n = 1 + nargs;
10270   for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
10271     if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
10272       n++;
10273 
10274   /* Create the final PARALLEL.  */
10275   pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
10276   n = 0;
10277 
10278   /* Add the stack pointer adjustment.  */
10279   set = gen_rtx_SET (stack_pointer_rtx,
10280 		     plus_constant (Pmode, stack_pointer_rtx,
10281 				    restore_p ? size : -size));
10282   RTX_FRAME_RELATED_P (set) = 1;
10283   XVECEXP (pattern, 0, n++) = set;
10284 
10285   /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
10286   top_offset = restore_p ? size : 0;
10287 
10288   /* Save the arguments.  */
10289   for (i = 0; i < nargs; i++)
10290     {
10291       offset = top_offset + i * UNITS_PER_WORD;
10292       set = mips16e_save_restore_reg (restore_p, true, offset,
10293 				      GP_ARG_FIRST + i);
10294       XVECEXP (pattern, 0, n++) = set;
10295     }
10296 
10297   /* Then fill in the other register moves.  */
10298   offset = top_offset;
10299   for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
10300     {
10301       regno = mips16e_save_restore_regs[i];
10302       if (BITSET_P (*mask_ptr, regno))
10303 	{
10304 	  offset -= UNITS_PER_WORD;
10305 	  set = mips16e_save_restore_reg (restore_p, false, offset, regno);
10306 	  XVECEXP (pattern, 0, n++) = set;
10307 	  *mask_ptr &= ~(1 << regno);
10308 	}
10309     }
10310 
10311   /* Tell the caller what offset it should use for the remaining registers.  */
10312   *offset_ptr = size + (offset - top_offset);
10313 
10314   gcc_assert (n == XVECLEN (pattern, 0));
10315 
10316   return pattern;
10317 }
10318 
10319 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
10320    pointer.  Return true if PATTERN matches the kind of instruction
10321    generated by mips16e_build_save_restore.  If INFO is nonnull,
10322    initialize it when returning true.  */
10323 
10324 bool
mips16e_save_restore_pattern_p(rtx pattern,HOST_WIDE_INT adjust,struct mips16e_save_restore_info * info)10325 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
10326 				struct mips16e_save_restore_info *info)
10327 {
10328   unsigned int i, nargs, mask, extra;
10329   HOST_WIDE_INT top_offset, save_offset, offset;
10330   rtx set, reg, mem, base;
10331   int n;
10332 
10333   if (!GENERATE_MIPS16E_SAVE_RESTORE)
10334     return false;
10335 
10336   /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
10337   top_offset = adjust > 0 ? adjust : 0;
10338 
10339   /* Interpret all other members of the PARALLEL.  */
10340   save_offset = top_offset - UNITS_PER_WORD;
10341   mask = 0;
10342   nargs = 0;
10343   i = 0;
10344   for (n = 1; n < XVECLEN (pattern, 0); n++)
10345     {
10346       /* Check that we have a SET.  */
10347       set = XVECEXP (pattern, 0, n);
10348       if (GET_CODE (set) != SET)
10349 	return false;
10350 
10351       /* Check that the SET is a load (if restoring) or a store
10352 	 (if saving).  */
10353       mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
10354       if (!MEM_P (mem))
10355 	return false;
10356 
10357       /* Check that the address is the sum of the stack pointer and a
10358 	 possibly-zero constant offset.  */
10359       mips_split_plus (XEXP (mem, 0), &base, &offset);
10360       if (base != stack_pointer_rtx)
10361 	return false;
10362 
10363       /* Check that SET's other operand is a register.  */
10364       reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
10365       if (!REG_P (reg))
10366 	return false;
10367 
10368       /* Check for argument saves.  */
10369       if (offset == top_offset + nargs * UNITS_PER_WORD
10370 	  && REGNO (reg) == GP_ARG_FIRST + nargs)
10371 	nargs++;
10372       else if (offset == save_offset)
10373 	{
10374 	  while (mips16e_save_restore_regs[i++] != REGNO (reg))
10375 	    if (i == ARRAY_SIZE (mips16e_save_restore_regs))
10376 	      return false;
10377 
10378 	  mask |= 1 << REGNO (reg);
10379 	  save_offset -= UNITS_PER_WORD;
10380 	}
10381       else
10382 	return false;
10383     }
10384 
10385   /* Check that the restrictions on register ranges are met.  */
10386   extra = 0;
10387   mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
10388 			  ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
10389   mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
10390 			  ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
10391   if (extra != 0)
10392     return false;
10393 
10394   /* Make sure that the topmost argument register is not saved twice.
10395      The checks above ensure that the same is then true for the other
10396      argument registers.  */
10397   if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
10398     return false;
10399 
10400   /* Pass back information, if requested.  */
10401   if (info)
10402     {
10403       info->nargs = nargs;
10404       info->mask = mask;
10405       info->size = (adjust > 0 ? adjust : -adjust);
10406     }
10407 
10408   return true;
10409 }
10410 
10411 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
10412    for the register range [MIN_REG, MAX_REG].  Return a pointer to
10413    the null terminator.  */
10414 
10415 static char *
mips16e_add_register_range(char * s,unsigned int min_reg,unsigned int max_reg)10416 mips16e_add_register_range (char *s, unsigned int min_reg,
10417 			    unsigned int max_reg)
10418 {
10419   if (min_reg != max_reg)
10420     s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
10421   else
10422     s += sprintf (s, ",%s", reg_names[min_reg]);
10423   return s;
10424 }
10425 
10426 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
10427    PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */
10428 
const char *
mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
{
  /* The returned string lives in this static buffer; it is only valid
     until the next call, which is fine for an output template.  */
  static char buffer[300];

  struct mips16e_save_restore_info info;
  unsigned int i, end;
  char *s;

  /* Parse the pattern.  It must match, since the caller recognized it.  */
  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
    gcc_unreachable ();

  /* Add the mnemonic.  A positive ADJUST deallocates stack, so the
     instruction is a RESTORE; otherwise it is a SAVE.  */
  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
  s += strlen (s);

  /* Save the arguments.  */
  if (info.nargs > 1)
    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
		  reg_names[GP_ARG_FIRST + info.nargs - 1]);
  else if (info.nargs == 1)
    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);

  /* Emit the amount of stack space to allocate or deallocate.  */
  s += sprintf (s, "%d", (int) info.size);

  /* Save or restore $16.  */
  if (BITSET_P (info.mask, 16))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);

  /* Save or restore $17.  */
  if (BITSET_P (info.mask, 17))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);

  /* Save or restore registers in the range $s2...$s8, which
     mips16e_s2_s8_regs lists in decreasing order.  Note that this
     is a software register range; the hardware registers are not
     numbered consecutively.  */
  end = ARRAY_SIZE (mips16e_s2_s8_regs);
  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
				    mips16e_s2_s8_regs[i]);

  /* Save or restore registers in the range $a0...$a3.  */
  end = ARRAY_SIZE (mips16e_a0_a3_regs);
  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
				    mips16e_a0_a3_regs[end - 1]);

  /* Save or restore $31.  */
  if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
    s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);

  return buffer;
}
10487 
10488 /* Return true if the current function returns its value in a floating-point
10489    register in MIPS16 mode.  */
10490 
10491 static bool
mips16_cfun_returns_in_fpr_p(void)10492 mips16_cfun_returns_in_fpr_p (void)
10493 {
10494   tree return_type = DECL_RESULT (current_function_decl);
10495   return (TARGET_MIPS16
10496 	  && TARGET_HARD_FLOAT_ABI
10497 	  && !aggregate_value_p (return_type, current_function_decl)
10498  	  && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
10499 }
10500 
10501 /* Return true if predicate PRED is true for at least one instruction.
10502    Cache the result in *CACHE, and assume that the result is true
10503    if *CACHE is already true.  */
10504 
static bool
mips_find_gp_ref (bool *cache, bool (*pred) (rtx_insn *))
{
  rtx_insn *insn, *subinsn;

  if (!*cache)
    {
      /* Scan the function's entire insn stream, including any
	 sequences currently pushed on top of it.  */
      push_topmost_sequence ();
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	FOR_EACH_SUBINSN (subinsn, insn)
	  if (USEFUL_INSN_P (subinsn) && pred (subinsn))
	    {
	      *cache = true;
	      /* This break only exits the FOR_EACH_SUBINSN loop; the
		 outer loop keeps scanning, which is harmless since
		 *CACHE is already true.  */
	      break;
	    }
      pop_topmost_sequence ();
    }
  return *cache;
}
10524 
10525 /* Return true if INSN refers to the global pointer in an "inflexible" way.
10526    See mips_cfun_has_inflexible_gp_ref_p for details.  */
10527 
10528 static bool
mips_insn_has_inflexible_gp_ref_p(rtx_insn * insn)10529 mips_insn_has_inflexible_gp_ref_p (rtx_insn *insn)
10530 {
10531   /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
10532      indicate that the target could be a traditional MIPS
10533      lazily-binding stub.  */
10534   return find_reg_fusage (insn, USE, pic_offset_table_rtx);
10535 }
10536 
10537 /* Return true if the current function refers to the global pointer
10538    in a way that forces $28 to be valid.  This means that we can't
10539    change the choice of global pointer, even for NewABI code.
10540 
10541    One example of this (and one which needs several checks) is that
10542    $28 must be valid when calling traditional MIPS lazy-binding stubs.
10543    (This restriction does not apply to PLTs.)  */
10544 
static bool
mips_cfun_has_inflexible_gp_ref_p (void)
{
  /* If the function has a nonlocal goto, $28 must hold the correct
     global pointer for the target function.  That is, the target
     of the goto implicitly uses $28.  */
  if (crtl->has_nonlocal_goto)
    return true;

  if (TARGET_ABICALLS_PIC2)
    {
      /* Symbolic accesses implicitly use the global pointer unless
	 -mexplicit-relocs is in effect.  JAL macros to symbolic addresses
	 might go to traditional MIPS lazy-binding stubs.  */
      if (!TARGET_EXPLICIT_RELOCS)
	return true;

      /* FUNCTION_PROFILER includes a JAL to _mcount, which again
	 can be lazily-bound.  */
      if (crtl->profile)
	return true;

      /* MIPS16 functions that return in FPRs need to call an
	 external libgcc routine.  This call is only made explicit
	 during mips_expand_epilogue, and it too might be lazily bound.  */
      if (mips16_cfun_returns_in_fpr_p ())
	return true;
    }

  /* Fall back to scanning the insn stream, caching the answer in
     cfun->machine so the scan happens at most once per function.  */
  return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
			   mips_insn_has_inflexible_gp_ref_p);
}
10577 
10578 /* Return true if INSN refers to the global pointer in a "flexible" way.
10579    See mips_cfun_has_flexible_gp_ref_p for details.  */
10580 
10581 static bool
mips_insn_has_flexible_gp_ref_p(rtx_insn * insn)10582 mips_insn_has_flexible_gp_ref_p (rtx_insn *insn)
10583 {
10584   return (get_attr_got (insn) != GOT_UNSET
10585 	  || mips_small_data_pattern_p (PATTERN (insn))
10586 	  || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
10587 }
10588 
10589 /* Return true if the current function references the global pointer,
10590    but if those references do not inherently require the global pointer
10591    to be $28.  Assume !mips_cfun_has_inflexible_gp_ref_p ().  */
10592 
static bool
mips_cfun_has_flexible_gp_ref_p (void)
{
  /* Reload can sometimes introduce constant pool references
     into a function that otherwise didn't need them.  For example,
     suppose we have an instruction like:

	(set (reg:DF R1) (float:DF (reg:SI R2)))

     If R2 turns out to be a constant such as 1, the instruction may
     have a REG_EQUAL note saying that R1 == 1.0.  Reload then has
     the option of using this constant if R2 doesn't get allocated
     to a register.

     In cases like these, reload will have added the constant to the
     pool but no instruction will yet refer to it.  */
  if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
    return true;

  /* Otherwise scan the insn stream, caching the answer in cfun->machine
     so that the scan happens at most once per function.  */
  return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
			   mips_insn_has_flexible_gp_ref_p);
}
10615 
10616 /* Return the register that should be used as the global pointer
10617    within this function.  Return INVALID_REGNUM if the function
10618    doesn't need a global pointer.  */
10619 
static unsigned int
mips_global_pointer (void)
{
  unsigned int regno;

  /* $gp is always available unless we're using a GOT.  */
  if (!TARGET_USE_GOT)
    return GLOBAL_POINTER_REGNUM;

  /* If there are inflexible references to $gp, we must use the
     standard register.  */
  if (mips_cfun_has_inflexible_gp_ref_p ())
    return GLOBAL_POINTER_REGNUM;

  /* If there are no current references to $gp, then the only uses
     we can introduce later are those involved in long branches.
     With absolute jumps, long branches do not need $gp at all.  */
  if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
    return INVALID_REGNUM;

  /* If the global pointer is call-saved, try to use a call-clobbered
     alternative.  Restrict this to leaf functions, where no call can
     clobber the chosen register.  $25 is excluded because it must hold
     the called function's address in abicalls code.  */
  if (TARGET_CALL_SAVED_GP && crtl->is_leaf)
    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
      if (!df_regs_ever_live_p (regno)
	  && call_really_used_regs[regno]
	  && !fixed_regs[regno]
	  && regno != PIC_FUNCTION_ADDR_REGNUM)
	return regno;

  return GLOBAL_POINTER_REGNUM;
}
10651 
10652 /* Return true if the current function's prologue must load the global
10653    pointer value into pic_offset_table_rtx and store the same value in
10654    the function's cprestore slot (if any).
10655 
10656    One problem we have to deal with is that, when emitting GOT-based
10657    position independent code, long-branch sequences will need to load
10658    the address of the branch target from the GOT.  We don't know until
10659    the very end of compilation whether (and where) the function needs
10660    long branches, so we must ensure that _any_ branch can access the
10661    global pointer in some form.  However, we do not want to pessimize
10662    the usual case in which all branches are short.
10663 
10664    We handle this as follows:
10665 
10666    (1) During reload, we set cfun->machine->global_pointer to
10667        INVALID_REGNUM if we _know_ that the current function
10668        doesn't need a global pointer.  This is only valid if
10669        long branches don't need the GOT.
10670 
10671        Otherwise, we assume that we might need a global pointer
10672        and pick an appropriate register.
10673 
10674    (2) If cfun->machine->global_pointer != INVALID_REGNUM,
10675        we ensure that the global pointer is available at every
10676        block boundary bar entry and exit.  We do this in one of two ways:
10677 
10678        - If the function has a cprestore slot, we ensure that this
10679 	 slot is valid at every branch.  However, as explained in
10680 	 point (6) below, there is no guarantee that pic_offset_table_rtx
10681 	 itself is valid if new uses of the global pointer are introduced
10682 	 after the first post-epilogue split.
10683 
10684 	 We guarantee that the cprestore slot is valid by loading it
10685 	 into a fake register, CPRESTORE_SLOT_REGNUM.  We then make
10686 	 this register live at every block boundary bar function entry
10687 	 and exit.  It is then invalid to move the load (and thus the
10688 	 preceding store) across a block boundary.
10689 
10690        - If the function has no cprestore slot, we guarantee that
10691 	 pic_offset_table_rtx itself is valid at every branch.
10692 
10693        See mips_eh_uses for the handling of the register liveness.
10694 
10695    (3) During prologue and epilogue generation, we emit "ghost"
10696        placeholder instructions to manipulate the global pointer.
10697 
10698    (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
10699        and cfun->machine->must_restore_gp_when_clobbered_p if we already know
10700        that the function needs a global pointer.  (There is no need to set
10701        them earlier than this, and doing it as late as possible leads to
10702        fewer false positives.)
10703 
10704    (5) If cfun->machine->must_initialize_gp_p is true during a
10705        split_insns pass, we split the ghost instructions into real
10706        instructions.  These split instructions can then be optimized in
10707        the usual way.  Otherwise, we keep the ghost instructions intact,
10708        and optimize for the case where they aren't needed.  We still
10709        have the option of splitting them later, if we need to introduce
10710        new uses of the global pointer.
10711 
10712        For example, the scheduler ignores a ghost instruction that
10713        stores $28 to the stack, but it handles the split form of
10714        the ghost instruction as an ordinary store.
10715 
10716    (6) [OldABI only.]  If cfun->machine->must_restore_gp_when_clobbered_p
10717        is true during the first post-epilogue split_insns pass, we split
10718        calls and restore_gp patterns into instructions that explicitly
10719        load pic_offset_table_rtx from the cprestore slot.  Otherwise,
10720        we split these patterns into instructions that _don't_ load from
10721        the cprestore slot.
10722 
10723        If cfun->machine->must_restore_gp_when_clobbered_p is true at the
10724        time of the split, then any instructions that exist at that time
10725        can make free use of pic_offset_table_rtx.  However, if we want
10726        to introduce new uses of the global pointer after the split,
10727        we must explicitly load the value from the cprestore slot, since
10728        pic_offset_table_rtx itself might not be valid at a given point
10729        in the function.
10730 
10731        The idea is that we want to be able to delete redundant
10732        loads from the cprestore slot in the usual case where no
10733        long branches are needed.
10734 
10735    (7) If cfun->machine->must_initialize_gp_p is still false at the end
10736        of md_reorg, we decide whether the global pointer is needed for
10737        long branches.  If so, we set cfun->machine->must_initialize_gp_p
10738        to true and split the ghost instructions into real instructions
10739        at that stage.
10740 
10741    Note that the ghost instructions must have a zero length for three reasons:
10742 
10743    - Giving the length of the underlying $gp sequence might cause
10744      us to use long branches in cases where they aren't really needed.
10745 
10746    - They would perturb things like alignment calculations.
10747 
10748    - More importantly, the hazard detection in md_reorg relies on
10749      empty instructions having a zero length.
10750 
10751    If we find a long branch and split the ghost instructions at the
10752    end of md_reorg, the split could introduce more long branches.
10753    That isn't a problem though, because we still do the split before
10754    the final shorten_branches pass.
10755 
10756    This is extremely ugly, but it seems like the best compromise between
10757    correctness and efficiency.  */
10758 
10759 bool
mips_must_initialize_gp_p(void)10760 mips_must_initialize_gp_p (void)
10761 {
10762   return cfun->machine->must_initialize_gp_p;
10763 }
10764 
10765 /* Return true if REGNO is a register that is ordinarily call-clobbered
10766    but must nevertheless be preserved by an interrupt handler.  */
10767 
10768 static bool
mips_interrupt_extra_call_saved_reg_p(unsigned int regno)10769 mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
10770 {
10771   if ((ISA_HAS_HILO || TARGET_DSP)
10772       && MD_REG_P (regno))
10773     return true;
10774 
10775   if (TARGET_DSP && DSP_ACC_REG_P (regno))
10776     return true;
10777 
10778   if (GP_REG_P (regno)
10779       && cfun->machine->use_shadow_register_set == SHADOW_SET_NO)
10780     {
10781       /* $0 is hard-wired.  */
10782       if (regno == GP_REG_FIRST)
10783 	return false;
10784 
10785       /* The interrupt handler can treat kernel registers as
10786 	 scratch registers.  */
10787       if (KERNEL_REG_P (regno))
10788 	return false;
10789 
10790       /* The function will return the stack pointer to its original value
10791 	 anyway.  */
10792       if (regno == STACK_POINTER_REGNUM)
10793 	return false;
10794 
10795       /* Otherwise, return true for registers that aren't ordinarily
10796 	 call-clobbered.  */
10797       return call_really_used_regs[regno];
10798     }
10799 
10800   return false;
10801 }
10802 
10803 /* Return true if the current function should treat register REGNO
10804    as call-saved.  */
10805 
10806 static bool
mips_cfun_call_saved_reg_p(unsigned int regno)10807 mips_cfun_call_saved_reg_p (unsigned int regno)
10808 {
10809   /* If the user makes an ordinarily-call-saved register global,
10810      that register is no longer call-saved.  */
10811   if (global_regs[regno])
10812     return false;
10813 
10814   /* Interrupt handlers need to save extra registers.  */
10815   if (cfun->machine->interrupt_handler_p
10816       && mips_interrupt_extra_call_saved_reg_p (regno))
10817     return true;
10818 
10819   /* call_insns preserve $28 unless they explicitly say otherwise,
10820      so call_really_used_regs[] treats $28 as call-saved.  However,
10821      we want the ABI property rather than the default call_insn
10822      property here.  */
10823   return (regno == GLOBAL_POINTER_REGNUM
10824 	  ? TARGET_CALL_SAVED_GP
10825 	  : !call_really_used_regs[regno]);
10826 }
10827 
10828 /* Return true if the function body might clobber register REGNO.
10829    We know that REGNO is call-saved.  */
10830 
static bool
mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
{
  /* Some functions should be treated as clobbering all call-saved
     registers (e.g. those using __builtin_setjmp or nonlocal labels).  */
  if (crtl->saves_all_registers)
    return true;

  /* DF handles cases where a register is explicitly referenced in
     the rtl.  Incoming values are passed in call-clobbered registers,
     so we can assume that any live call-saved register is set within
     the function.  */
  if (df_regs_ever_live_p (regno))
    return true;

  /* Check for registers that are clobbered by FUNCTION_PROFILER.
     These clobbers are not explicit in the rtl.  */
  if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
    return true;

  /* If we're using a call-saved global pointer, the function's
     prologue will need to set it up.  */
  if (cfun->machine->global_pointer == regno)
    return true;

  /* The function's prologue will need to set the frame pointer if
     frame_pointer_needed.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* If a MIPS16 function returns a value in FPRs, its epilogue
     will need to call an external libgcc routine.  This yet-to-be
     generated call_insn will clobber $31.  */
  if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
    return true;

  /* If REGNO is ordinarily call-clobbered, we must assume that any
     called function could modify it.  This only matters in interrupt
     handlers, where REGNO can be call-saved despite being in the
     ordinarily-clobbered set (see mips_interrupt_extra_call_saved_reg_p).  */
  if (cfun->machine->interrupt_handler_p
      && !crtl->is_leaf
      && mips_interrupt_extra_call_saved_reg_p (regno))
    return true;

  return false;
}
10876 
10877 /* Return true if the current function must save register REGNO.  */
10878 
10879 static bool
mips_save_reg_p(unsigned int regno)10880 mips_save_reg_p (unsigned int regno)
10881 {
10882   if (mips_cfun_call_saved_reg_p (regno))
10883     {
10884       if (mips_cfun_might_clobber_call_saved_reg_p (regno))
10885 	return true;
10886 
10887       /* Save both registers in an FPR pair if either one is used.  This is
10888 	 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
10889 	 register to be used without the even register.  */
10890       if (FP_REG_P (regno)
10891 	  && MAX_FPRS_PER_FMT == 2
10892 	  && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
10893 	return true;
10894     }
10895 
10896   /* We need to save the incoming return address if __builtin_eh_return
10897      is being used to set a different return address.  */
10898   if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
10899     return true;
10900 
10901   return false;
10902 }
10903 
10904 /* Populate the current function's mips_frame_info structure.
10905 
10906    MIPS stack frames look like:
10907 
10908 	+-------------------------------+
10909 	|                               |
10910 	|  incoming stack arguments     |
10911 	|                               |
10912 	+-------------------------------+
10913 	|                               |
10914 	|  caller-allocated save area   |
10915       A |  for register arguments       |
10916 	|                               |
10917 	+-------------------------------+ <-- incoming stack pointer
10918 	|                               |
10919 	|  callee-allocated save area   |
10920       B |  for arguments that are       |
10921 	|  split between registers and  |
10922 	|  the stack                    |
10923 	|                               |
10924 	+-------------------------------+ <-- arg_pointer_rtx
10925 	|                               |
10926       C |  callee-allocated save area   |
10927 	|  for register varargs         |
10928 	|                               |
10929 	+-------------------------------+ <-- frame_pointer_rtx
10930 	|                               |       + cop0_sp_offset
10931 	|  COP0 reg save area           |	+ UNITS_PER_WORD
10932 	|                               |
10933 	+-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
10934 	|                               |       + UNITS_PER_WORD
10935 	|  accumulator save area        |
10936 	|                               |
10937 	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
10938 	|                               |       + UNITS_PER_HWFPVALUE
10939 	|  FPR save area                |
10940 	|                               |
10941 	+-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
10942 	|                               |       + UNITS_PER_WORD
10943 	|  GPR save area                |
10944 	|                               |
10945 	+-------------------------------+ <-- frame_pointer_rtx with
10946 	|                               | \     -fstack-protector
10947 	|  local variables              |  | var_size
10948 	|                               | /
10949 	+-------------------------------+
10950 	|                               | \
10951 	|  $gp save area                |  | cprestore_size
10952 	|                               | /
10953       P +-------------------------------+ <-- hard_frame_pointer_rtx for
10954 	|                               | \     MIPS16 code
10955 	|  outgoing stack arguments     |  |
10956 	|                               |  |
10957 	+-------------------------------+  | args_size
10958 	|                               |  |
10959 	|  caller-allocated save area   |  |
10960 	|  for register arguments       |  |
10961 	|                               | /
10962 	+-------------------------------+ <-- stack_pointer_rtx
10963 					      frame_pointer_rtx without
10964 					        -fstack-protector
10965 					      hard_frame_pointer_rtx for
10966 						non-MIPS16 code.
10967 
10968    At least two of A, B and C will be empty.
10969 
10970    Dynamic stack allocations such as alloca insert data at point P.
10971    They decrease stack_pointer_rtx but leave frame_pointer_rtx and
10972    hard_frame_pointer_rtx unchanged.  */
10973 
10974 static void
mips_compute_frame_info(void)10975 mips_compute_frame_info (void)
10976 {
10977   struct mips_frame_info *frame;
10978   HOST_WIDE_INT offset, size;
10979   unsigned int regno, i;
10980 
10981   /* Skip re-computing the frame info after reload completed.  */
10982   if (reload_completed)
10983     return;
10984 
10985   /* Set this function's interrupt properties.  */
10986   if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
10987     {
10988       if (mips_isa_rev < 2)
10989 	error ("the %<interrupt%> attribute requires a MIPS32r2 processor or greater");
10990       else if (TARGET_MIPS16)
10991 	error ("interrupt handlers cannot be MIPS16 functions");
10992       else
10993 	{
10994 	  cfun->machine->interrupt_handler_p = true;
10995 	  cfun->machine->int_mask =
10996 	    mips_interrupt_mask (TREE_TYPE (current_function_decl));
10997 	  cfun->machine->use_shadow_register_set =
10998 	    mips_use_shadow_register_set (TREE_TYPE (current_function_decl));
10999 	  cfun->machine->keep_interrupts_masked_p =
11000 	    mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
11001 	  cfun->machine->use_debug_exception_return_p =
11002 	    mips_use_debug_exception_return_p (TREE_TYPE
11003 					       (current_function_decl));
11004 	}
11005     }
11006 
11007   frame = &cfun->machine->frame;
11008   memset (frame, 0, sizeof (*frame));
11009   size = get_frame_size ();
11010 
11011   /* The first two blocks contain the outgoing argument area and the $gp save
11012      slot.  This area isn't needed in leaf functions.  We can also skip it
11013      if we know that none of the called functions will use this space.
11014 
11015      But if the target-independent frame size is nonzero, we have already
11016      committed to allocating these in TARGET_STARTING_FRAME_OFFSET for
11017      !FRAME_GROWS_DOWNWARD.  */
11018 
11019   if ((size == 0 || FRAME_GROWS_DOWNWARD)
11020       && (crtl->is_leaf || (cfun->machine->optimize_call_stack && !flag_pic)))
11021     {
11022       /* The MIPS 3.0 linker does not like functions that dynamically
11023 	 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
11024 	 looks like we are trying to create a second frame pointer to the
11025 	 function, so allocate some stack space to make it happy.  */
11026       if (cfun->calls_alloca)
11027 	frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
11028       else
11029 	frame->args_size = 0;
11030       frame->cprestore_size = 0;
11031     }
11032   else
11033     {
11034       frame->args_size = crtl->outgoing_args_size;
11035       frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
11036     }
11037 
11038   /* MIPS16 code offsets the frame pointer by the size of the outgoing
11039      arguments.  This tends to increase the chances of using unextended
11040      instructions for local variables and incoming arguments.  */
11041   if (TARGET_MIPS16)
11042     frame->hard_frame_pointer_offset = frame->args_size;
11043 
11044   /* PR 69129 / 69012: Beware of a possible race condition.  mips_global_pointer
11045      might call mips_cfun_has_inflexible_gp_ref_p which in turn can call
11046      mips_find_gp_ref which will iterate over the current insn sequence.
11047      If any of these insns use the cprestore_save_slot_operand or
11048      cprestore_load_slot_operand predicates in order to be recognised then
11049      they will call mips_cprestore_address_p which calls
11050      mips_get_cprestore_base_and_offset which expects the frame information
11051      to be filled in...  In fact mips_get_cprestore_base_and_offset only
11052      needs the args_size and hard_frame_pointer_offset fields to be filled
11053      in, which is why the global_pointer field is initialised here and not
11054      earlier.  */
11055   cfun->machine->global_pointer = mips_global_pointer ();
11056 
11057   offset = frame->args_size + frame->cprestore_size;
11058 
11059   /* Move above the local variables.  */
11060   frame->var_size = MIPS_STACK_ALIGN (size);
11061   offset += frame->var_size;
11062 
11063   /* Find out which GPRs we need to save.  */
11064   for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
11065     if (mips_save_reg_p (regno))
11066       {
11067 	frame->num_gp++;
11068 	frame->mask |= 1 << (regno - GP_REG_FIRST);
11069       }
11070 
11071   /* If this function calls eh_return, we must also save and restore the
11072      EH data registers.  */
11073   if (crtl->calls_eh_return)
11074     for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
11075       {
11076 	frame->num_gp++;
11077 	frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
11078       }
11079 
11080   /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
11081      $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
11082      save all later registers too.  */
11083   if (GENERATE_MIPS16E_SAVE_RESTORE)
11084     {
11085       mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
11086  			      ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
11087       mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
11088  			      ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
11089     }
11090 
11091   /* Move above the GPR save area.  */
11092   if (frame->num_gp > 0)
11093     {
11094       offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
11095       frame->gp_sp_offset = offset - UNITS_PER_WORD;
11096     }
11097 
11098   /* Find out which FPRs we need to save.  This loop must iterate over
11099      the same space as its companion in mips_for_each_saved_gpr_and_fpr.  */
11100   if (TARGET_HARD_FLOAT)
11101     for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
11102       if (mips_save_reg_p (regno))
11103 	{
11104 	  frame->num_fp += MAX_FPRS_PER_FMT;
11105 	  frame->fmask |= ~(~0U << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
11106 	}
11107 
11108   /* Move above the FPR save area.  */
11109   if (frame->num_fp > 0)
11110     {
11111       offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
11112       frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
11113     }
11114 
11115   /* Add in space for the interrupt context information.  */
11116   if (cfun->machine->interrupt_handler_p)
11117     {
11118       /* Check HI/LO.  */
11119       if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
11120 	{
11121 	  frame->num_acc++;
11122 	  frame->acc_mask |= (1 << 0);
11123 	}
11124 
11125       /* Check accumulators 1, 2, 3.  */
11126       for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
11127 	if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
11128 	  {
11129 	    frame->num_acc++;
11130 	    frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
11131 	  }
11132 
11133       /* All interrupt context functions need space to preserve STATUS.  */
11134       frame->num_cop0_regs++;
11135 
11136       /* We need to save EPC regardless of whether interrupts remain masked
11137 	 as exceptions will corrupt EPC.  */
11138       frame->num_cop0_regs++;
11139     }
11140 
11141   /* Move above the accumulator save area.  */
11142   if (frame->num_acc > 0)
11143     {
11144       /* Each accumulator needs 2 words.  */
11145       offset += frame->num_acc * 2 * UNITS_PER_WORD;
11146       frame->acc_sp_offset = offset - UNITS_PER_WORD;
11147     }
11148 
11149   /* Move above the COP0 register save area.  */
11150   if (frame->num_cop0_regs > 0)
11151     {
11152       offset += frame->num_cop0_regs * UNITS_PER_WORD;
11153       frame->cop0_sp_offset = offset - UNITS_PER_WORD;
11154     }
11155 
11156   /* Determine if we can save the callee-saved registers in the frame
11157      header.  Restrict this to functions where there is no other reason
11158      to allocate stack space so that we can eliminate the instructions
11159      that modify the stack pointer.  */
11160 
11161   if (TARGET_OLDABI
11162       && optimize > 0
11163       && flag_frame_header_optimization
11164       && !MAIN_NAME_P (DECL_NAME (current_function_decl))
11165       && cfun->machine->varargs_size == 0
11166       && crtl->args.pretend_args_size == 0
11167       && frame->var_size == 0
11168       && frame->num_acc == 0
11169       && frame->num_cop0_regs == 0
11170       && frame->num_fp == 0
11171       && frame->num_gp > 0
11172       && frame->num_gp <= MAX_ARGS_IN_REGISTERS
11173       && !GENERATE_MIPS16E_SAVE_RESTORE
11174       && !cfun->machine->interrupt_handler_p
11175       && cfun->machine->does_not_use_frame_header
11176       && cfun->machine->optimize_call_stack
11177       && !cfun->machine->callers_may_not_allocate_frame
11178       && !mips_cfun_has_cprestore_slot_p ())
11179     {
11180       offset = 0;
11181       frame->gp_sp_offset = REG_PARM_STACK_SPACE(cfun) - UNITS_PER_WORD;
11182       cfun->machine->use_frame_header_for_callee_saved_regs = true;
11183     }
11184 
11185   /* Move above the callee-allocated varargs save area.  */
11186   offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
11187   frame->arg_pointer_offset = offset;
11188 
11189   /* Move above the callee-allocated area for pretend stack arguments.  */
11190   offset += crtl->args.pretend_args_size;
11191   frame->total_size = offset;
11192 
11193   /* Work out the offsets of the save areas from the top of the frame.  */
11194   if (frame->gp_sp_offset > 0)
11195     frame->gp_save_offset = frame->gp_sp_offset - offset;
11196   if (frame->fp_sp_offset > 0)
11197     frame->fp_save_offset = frame->fp_sp_offset - offset;
11198   if (frame->acc_sp_offset > 0)
11199     frame->acc_save_offset = frame->acc_sp_offset - offset;
11200   if (frame->num_cop0_regs > 0)
11201     frame->cop0_save_offset = frame->cop0_sp_offset - offset;
11202 }
11203 
11204 /* Return the style of GP load sequence that is being used for the
11205    current function.  */
11206 
11207 enum mips_loadgp_style
mips_current_loadgp_style(void)11208 mips_current_loadgp_style (void)
11209 {
11210   if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
11211     return LOADGP_NONE;
11212 
11213   if (TARGET_RTP_PIC)
11214     return LOADGP_RTP;
11215 
11216   if (TARGET_ABSOLUTE_ABICALLS)
11217     return LOADGP_ABSOLUTE;
11218 
11219   return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
11220 }
11221 
11222 /* Implement TARGET_FRAME_POINTER_REQUIRED.  */
11223 
11224 static bool
mips_frame_pointer_required(void)11225 mips_frame_pointer_required (void)
11226 {
11227   /* If the function contains dynamic stack allocations, we need to
11228      use the frame pointer to access the static parts of the frame.  */
11229   if (cfun->calls_alloca)
11230     return true;
11231 
11232   /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
11233      reload may be unable to compute the address of a local variable,
11234      since there is no way to add a large constant to the stack pointer
11235      without using a second temporary register.  */
11236   if (TARGET_MIPS16)
11237     {
11238       mips_compute_frame_info ();
11239       if (!SMALL_OPERAND (cfun->machine->frame.total_size))
11240 	return true;
11241     }
11242 
11243   return false;
11244 }
11245 
11246 /* Make sure that we're not trying to eliminate to the wrong hard frame
11247    pointer.  */
11248 
11249 static bool
mips_can_eliminate(const int from ATTRIBUTE_UNUSED,const int to)11250 mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
11251 {
11252   return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
11253 }
11254 
11255 /* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
11256    or argument pointer.  TO is either the stack pointer or hard frame
11257    pointer.  */
11258 
11259 HOST_WIDE_INT
mips_initial_elimination_offset(int from,int to)11260 mips_initial_elimination_offset (int from, int to)
11261 {
11262   HOST_WIDE_INT offset;
11263 
11264   mips_compute_frame_info ();
11265 
11266   /* Set OFFSET to the offset from the end-of-prologue stack pointer.  */
11267   switch (from)
11268     {
11269     case FRAME_POINTER_REGNUM:
11270       if (FRAME_GROWS_DOWNWARD)
11271 	offset = (cfun->machine->frame.args_size
11272 		  + cfun->machine->frame.cprestore_size
11273 		  + cfun->machine->frame.var_size);
11274       else
11275 	offset = 0;
11276       break;
11277 
11278     case ARG_POINTER_REGNUM:
11279       offset = cfun->machine->frame.arg_pointer_offset;
11280       break;
11281 
11282     default:
11283       gcc_unreachable ();
11284     }
11285 
11286   if (to == HARD_FRAME_POINTER_REGNUM)
11287     offset -= cfun->machine->frame.hard_frame_pointer_offset;
11288 
11289   return offset;
11290 }
11291 
11292 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */
11293 
11294 static void
mips_extra_live_on_entry(bitmap regs)11295 mips_extra_live_on_entry (bitmap regs)
11296 {
11297   if (TARGET_USE_GOT)
11298     {
11299       /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
11300 	 the global pointer.   */
11301       if (!TARGET_ABSOLUTE_ABICALLS)
11302 	bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
11303 
11304       /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
11305 	 the global pointer.  */
11306       if (TARGET_MIPS16)
11307 	bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
11308 
11309       /* See the comment above load_call<mode> for details.  */
11310       bitmap_set_bit (regs, GOT_VERSION_REGNUM);
11311     }
11312 }
11313 
11314 /* Implement RETURN_ADDR_RTX.  We do not support moving back to a
11315    previous frame.  */
11316 
11317 rtx
mips_return_addr(int count,rtx frame ATTRIBUTE_UNUSED)11318 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
11319 {
11320   if (count != 0)
11321     return const0_rtx;
11322 
11323   return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
11324 }
11325 
11326 /* Emit code to change the current function's return address to
11327    ADDRESS.  SCRATCH is available as a scratch register, if needed.
11328    ADDRESS and SCRATCH are both word-mode GPRs.  */
11329 
11330 void
mips_set_return_address(rtx address,rtx scratch)11331 mips_set_return_address (rtx address, rtx scratch)
11332 {
11333   rtx slot_address;
11334 
11335   gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
11336   slot_address = mips_add_offset (scratch, stack_pointer_rtx,
11337 				  cfun->machine->frame.gp_sp_offset);
11338   mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
11339 }
11340 
11341 /* Return true if the current function has a cprestore slot.  */
11342 
11343 bool
mips_cfun_has_cprestore_slot_p(void)11344 mips_cfun_has_cprestore_slot_p (void)
11345 {
11346   return (cfun->machine->global_pointer != INVALID_REGNUM
11347 	  && cfun->machine->frame.cprestore_size > 0);
11348 }
11349 
11350 /* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
11351    cprestore slot.  LOAD_P is true if the caller wants to load from
11352    the cprestore slot; it is false if the caller wants to store to
11353    the slot.  */
11354 
11355 static void
mips_get_cprestore_base_and_offset(rtx * base,HOST_WIDE_INT * offset,bool load_p)11356 mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
11357 				    bool load_p)
11358 {
11359   const struct mips_frame_info *frame;
11360 
11361   frame = &cfun->machine->frame;
11362   /* .cprestore always uses the stack pointer instead of the frame pointer.
11363      We have a free choice for direct stores for non-MIPS16 functions,
11364      and for MIPS16 functions whose cprestore slot is in range of the
11365      stack pointer.  Using the stack pointer would sometimes give more
11366      (early) scheduling freedom, but using the frame pointer would
11367      sometimes give more (late) scheduling freedom.  It's hard to
11368      predict which applies to a given function, so let's keep things
11369      simple.
11370 
11371      Loads must always use the frame pointer in functions that call
11372      alloca, and there's little benefit to using the stack pointer
11373      otherwise.  */
11374   if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
11375     {
11376       *base = hard_frame_pointer_rtx;
11377       *offset = frame->args_size - frame->hard_frame_pointer_offset;
11378     }
11379   else
11380     {
11381       *base = stack_pointer_rtx;
11382       *offset = frame->args_size;
11383     }
11384 }
11385 
11386 /* Return true if X is the load or store address of the cprestore slot;
11387    LOAD_P says which.  */
11388 
11389 bool
mips_cprestore_address_p(rtx x,bool load_p)11390 mips_cprestore_address_p (rtx x, bool load_p)
11391 {
11392   rtx given_base, required_base;
11393   HOST_WIDE_INT given_offset, required_offset;
11394 
11395   mips_split_plus (x, &given_base, &given_offset);
11396   mips_get_cprestore_base_and_offset (&required_base, &required_offset, load_p);
11397   return given_base == required_base && given_offset == required_offset;
11398 }
11399 
11400 /* Return a MEM rtx for the cprestore slot.  LOAD_P is true if we are
11401    going to load from it, false if we are going to store to it.
11402    Use TEMP as a temporary register if need be.  */
11403 
11404 static rtx
mips_cprestore_slot(rtx temp,bool load_p)11405 mips_cprestore_slot (rtx temp, bool load_p)
11406 {
11407   rtx base;
11408   HOST_WIDE_INT offset;
11409 
11410   mips_get_cprestore_base_and_offset (&base, &offset, load_p);
11411   return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
11412 }
11413 
11414 /* Emit instructions to save global pointer value GP into cprestore
11415    slot MEM.  OFFSET is the offset that MEM applies to the base register.
11416 
11417    MEM may not be a legitimate address.  If it isn't, TEMP is a
11418    temporary register that can be used, otherwise it is a SCRATCH.  */
11419 
11420 void
mips_save_gp_to_cprestore_slot(rtx mem,rtx offset,rtx gp,rtx temp)11421 mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
11422 {
11423   if (TARGET_CPRESTORE_DIRECTIVE)
11424     {
11425       gcc_assert (gp == pic_offset_table_rtx);
11426       emit_insn (PMODE_INSN (gen_cprestore, (mem, offset)));
11427     }
11428   else
11429     mips_emit_move (mips_cprestore_slot (temp, false), gp);
11430 }
11431 
11432 /* Restore $gp from its save slot, using TEMP as a temporary base register
11433    if need be.  This function is for o32 and o64 abicalls only.
11434 
11435    See mips_must_initialize_gp_p for details about how we manage the
11436    global pointer.  */
11437 
11438 void
mips_restore_gp_from_cprestore_slot(rtx temp)11439 mips_restore_gp_from_cprestore_slot (rtx temp)
11440 {
11441   gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);
11442 
11443   if (!cfun->machine->must_restore_gp_when_clobbered_p)
11444     {
11445       emit_note (NOTE_INSN_DELETED);
11446       return;
11447     }
11448 
11449   if (TARGET_MIPS16)
11450     {
11451       mips_emit_move (temp, mips_cprestore_slot (temp, true));
11452       mips_emit_move (pic_offset_table_rtx, temp);
11453     }
11454   else
11455     mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
11456   if (!TARGET_EXPLICIT_RELOCS)
11457     emit_insn (gen_blockage ());
11458 }
11459 
/* A function to save or store a register (e.g. mips_save_reg).  The
   first argument is the register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
11463 
11464 /* Use FN to save or restore register REGNO.  MODE is the register's
11465    mode and OFFSET is the offset of its save slot from the current
11466    stack pointer.  */
11467 
11468 static void
mips_save_restore_reg(machine_mode mode,int regno,HOST_WIDE_INT offset,mips_save_restore_fn fn)11469 mips_save_restore_reg (machine_mode mode, int regno,
11470 		       HOST_WIDE_INT offset, mips_save_restore_fn fn)
11471 {
11472   rtx mem;
11473 
11474   mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx,
11475 					    offset));
11476   fn (gen_rtx_REG (mode, regno), mem);
11477 }
11478 
11479 /* Call FN for each accumulator that is saved by the current function.
11480    SP_OFFSET is the offset of the current stack pointer from the start
11481    of the frame.  */
11482 
11483 static void
mips_for_each_saved_acc(HOST_WIDE_INT sp_offset,mips_save_restore_fn fn)11484 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
11485 {
11486   HOST_WIDE_INT offset;
11487   int regno;
11488 
11489   offset = cfun->machine->frame.acc_sp_offset - sp_offset;
11490   if (BITSET_P (cfun->machine->frame.acc_mask, 0))
11491     {
11492       mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
11493       offset -= UNITS_PER_WORD;
11494       mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
11495       offset -= UNITS_PER_WORD;
11496     }
11497 
11498   for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
11499     if (BITSET_P (cfun->machine->frame.acc_mask,
11500 		  ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
11501       {
11502 	mips_save_restore_reg (word_mode, regno, offset, fn);
11503 	offset -= UNITS_PER_WORD;
11504       }
11505 }
11506 
11507 /* Save register REG to MEM.  Make the instruction frame-related.  */
11508 
11509 static void
mips_save_reg(rtx reg,rtx mem)11510 mips_save_reg (rtx reg, rtx mem)
11511 {
11512   if (GET_MODE (reg) == DFmode
11513       && (!TARGET_FLOAT64
11514 	  || mips_abi == ABI_32))
11515     {
11516       rtx x1, x2;
11517 
11518       mips_emit_move_or_split (mem, reg, SPLIT_IF_NECESSARY);
11519 
11520       x1 = mips_frame_set (mips_subword (mem, false),
11521 			   mips_subword (reg, false));
11522       x2 = mips_frame_set (mips_subword (mem, true),
11523 			   mips_subword (reg, true));
11524       mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
11525     }
11526   else
11527     mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
11528 }
11529 
/* Capture the register combinations that are allowed in a SWM or LWM
   instruction.  The entries are ordered by number of registers set in
   the mask.  We also ignore the single register encodings because a
   normal SW/LW is preferred.  */

static const unsigned int umips_swm_mask[17] = {
  0xc0ff0000, 0x80ff0000, 0x40ff0000, 0x807f0000,
  0x00ff0000, 0x803f0000, 0x007f0000, 0x801f0000,
  0x003f0000, 0x800f0000, 0x001f0000, 0x80070000,
  0x000f0000, 0x80030000, 0x00070000, 0x80010000,
  0x00030000
};

/* For each entry of umips_swm_mask, the matching LWM/SWM encoding.
   umips_build_save_restore decodes each value as: the low nibble is the
   number of registers in the $16-$23/$30 range, and the value shifted
   right by 4 says whether $31 (ra) is included.  */
static const unsigned int umips_swm_encoding[17] = {
  25, 24, 9, 23, 8, 22, 7, 21, 6, 20, 5, 19, 4, 18, 3, 17, 2
};
11546 
/* Try to use a microMIPS LWM or SWM instruction to save or restore
   as many GPRs in *MASK as possible.  *OFFSET is the offset from the
   stack pointer of the topmost save slot.

   Remove from *MASK all registers that were handled using LWM and SWM.
   Update *OFFSET so that it points to the first unused save slot.  */

static bool
umips_build_save_restore (mips_save_restore_fn fn,
			  unsigned *mask, HOST_WIDE_INT *offset)
{
  int nregs;
  unsigned int i, j;
  rtx pattern, set, reg, mem;
  HOST_WIDE_INT this_offset;
  rtx this_base;

  /* Try matching $16 to $31 (s0 to ra).  */
  for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
    if ((*mask & 0xffff0000) == umips_swm_mask[i])
      break;

  if (i == ARRAY_SIZE (umips_swm_mask))
    return false;

  /* Get the offset of the lowest save slot.  The encoding's low nibble
     is the number of $16-$23/$30 registers; the value shifted right by
     4 says whether $31 is also included.  */
  nregs = (umips_swm_encoding[i] & 0xf) + (umips_swm_encoding[i] >> 4);
  this_offset = *offset - UNITS_PER_WORD * (nregs - 1);

  /* LWM/SWM can only support offsets from -2048 to 2047.  */
  if (!UMIPS_12BIT_OFFSET_P (this_offset))
    return false;

  /* Create the final PARALLEL.  */
  pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
  this_base = stack_pointer_rtx;

  /* For registers $16-$23 and $30.  */
  for (j = 0; j < (umips_swm_encoding[i] & 0xf); j++)
    {
      HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
      mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
      /* Slot 8 (when present) holds $30; slots 0-7 hold $16-$23.  */
      unsigned int regno = (j != 8) ? 16 + j : 30;
      *mask &= ~(1 << regno);
      reg = gen_rtx_REG (SImode, regno);
      if (fn == mips_save_reg)
	set = mips_frame_set (mem, reg);
      else
	{
	  /* A restore: record the CFA restore alongside the load.  */
	  set = gen_rtx_SET (reg, mem);
	  mips_add_cfa_restore (reg);
	}
      XVECEXP (pattern, 0, j) = set;
    }

  /* For register $31.  */
  if (umips_swm_encoding[i] >> 4)
    {
      HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
      *mask &= ~(1 << 31);
      mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
      reg = gen_rtx_REG (SImode, 31);
      if (fn == mips_save_reg)
	set = mips_frame_set (mem, reg);
      else
	{
	  set = gen_rtx_SET (reg, mem);
	  mips_add_cfa_restore (reg);
	}
      XVECEXP (pattern, 0, j) = set;
    }

  /* Saves (but not restores) are frame-related for unwind info.  */
  pattern = emit_insn (pattern);
  if (fn == mips_save_reg)
    RTX_FRAME_RELATED_P (pattern) = 1;

  /* Adjust the last offset.  */
  *offset -= UNITS_PER_WORD * nregs;

  return true;
}
11628 
/* Call FN for each register that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
				 mips_save_restore_fn fn)
{
  machine_mode fpr_mode;
  int regno;
  const struct mips_frame_info *frame = &cfun->machine->frame;
  HOST_WIDE_INT offset;
  unsigned int mask;

  /* Save registers starting from high to low.  The debuggers prefer at least
     the return register be stored at func+4, and also it allows us not to
     need a nop in the epilogue if at least one register is reloaded in
     addition to return address.  */
  offset = frame->gp_sp_offset - sp_offset;
  mask = frame->mask;

  /* On microMIPS, try to cover as many GPRs as possible with one
     LWM/SWM first; any registers it handles are cleared from MASK
     and OFFSET is advanced past their slots.  */
  if (TARGET_MICROMIPS)
    umips_build_save_restore (fn, &mask, &offset);

  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
    if (BITSET_P (mask, regno - GP_REG_FIRST))
      {
	/* Record the ra offset for use by mips_function_profiler.  */
	if (regno == RETURN_ADDR_REGNUM)
	  cfun->machine->frame.ra_fp_offset = offset + sp_offset;
	mips_save_restore_reg (word_mode, regno, offset, fn);
	offset -= UNITS_PER_WORD;
      }

  /* This loop must iterate over the same space as its companion in
     mips_compute_frame_info.  */
  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
  for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
      {
	if (!TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT
	    && (fixed_regs[regno] || fixed_regs[regno + 1]))
	  {
	    /* One half of the double-precision pair is fixed: handle
	       only the non-fixed half as a single float.  */
	    if (fixed_regs[regno])
	      mips_save_restore_reg (SFmode, regno + 1, offset, fn);
	    else
	      mips_save_restore_reg (SFmode, regno, offset, fn);
	  }
	else
	  mips_save_restore_reg (fpr_mode, regno, offset, fn);
	offset -= GET_MODE_SIZE (fpr_mode);
      }
}
11685 
11686 /* Return true if a move between register REGNO and its save slot (MEM)
11687    can be done in a single move.  LOAD_P is true if we are loading
11688    from the slot, false if we are storing to it.  */
11689 
11690 static bool
mips_direct_save_slot_move_p(unsigned int regno,rtx mem,bool load_p)11691 mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
11692 {
11693   /* There is a specific MIPS16 instruction for saving $31 to the stack.  */
11694   if (TARGET_MIPS16 && !load_p && regno == RETURN_ADDR_REGNUM)
11695     return false;
11696 
11697   return mips_secondary_reload_class (REGNO_REG_CLASS (regno),
11698 				      GET_MODE (mem), mem, load_p) == NO_REGS;
11699 }
11700 
/* Emit a move from SRC to DEST, given that one of them is a register
   save slot and that the other is a register.  TEMP is a temporary
   GPR of the same mode that is available if need be.  */

void
mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
{
  unsigned int regno;
  rtx mem;

  /* Work out which operand is the register and which is the slot.  */
  if (REG_P (src))
    {
      regno = REGNO (src);
      mem = dest;
    }
  else
    {
      regno = REGNO (dest);
      mem = src;
    }

  if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
    {
      /* We don't yet know whether we'll need this instruction or not.
	 Postpone the decision by emitting a ghost move.  This move
	 is specifically not frame-related; only the split version is.  */
      if (TARGET_64BIT)
	emit_insn (gen_move_gpdi (dest, src));
      else
	emit_insn (gen_move_gpsi (dest, src));
      return;
    }

  if (regno == HI_REGNUM)
    {
      /* Moves involving HI go through TEMP, using MTHI/MFHI to transfer
	 the value between TEMP and the HI register.  */
      if (REG_P (dest))
	{
	  mips_emit_move (temp, src);
	  if (TARGET_64BIT)
	    emit_insn (gen_mthidi_ti (gen_rtx_REG (TImode, MD_REG_FIRST),
				      temp, gen_rtx_REG (DImode, LO_REGNUM)));
	  else
	    emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
				      temp, gen_rtx_REG (SImode, LO_REGNUM)));
	}
      else
	{
	  if (TARGET_64BIT)
	    emit_insn (gen_mfhidi_ti (temp,
				      gen_rtx_REG (TImode, MD_REG_FIRST)));
	  else
	    emit_insn (gen_mfhisi_di (temp,
				      gen_rtx_REG (DImode, MD_REG_FIRST)));
	  mips_emit_move (dest, temp);
	}
    }
  else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
    mips_emit_move (dest, src);
  else
    {
      /* An intermediate register is needed; TEMP must not overlap the
	 destination or the second move would clobber the first.  */
      gcc_assert (!reg_overlap_mentioned_p (dest, temp));
      mips_emit_move (temp, src);
      mips_emit_move (dest, temp);
    }
  /* Stores into the frame carry frame-related CFI information.  */
  if (MEM_P (dest))
    mips_set_frame_expr (mips_frame_set (dest, src));
}
11768 
11769 /* If we're generating n32 or n64 abicalls, and the current function
11770    does not use $28 as its global pointer, emit a cplocal directive.
11771    Use pic_offset_table_rtx as the argument to the directive.  */
11772 
11773 static void
mips_output_cplocal(void)11774 mips_output_cplocal (void)
11775 {
11776   if (!TARGET_EXPLICIT_RELOCS
11777       && mips_must_initialize_gp_p ()
11778       && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
11779     output_asm_insn (".cplocal %+", 0);
11780 }
11781 
/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE.  */

static void
mips_output_function_prologue (FILE *file)
{
  const char *fnname;

  /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
     floating-point arguments.  */
  if (TARGET_MIPS16
      && TARGET_HARD_FLOAT_ABI
      && crtl->args.info.fp_code != 0)
    mips16_build_function_stub ();

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_start_function_definition (fnname, TARGET_MIPS16);

  /* Output MIPS-specific frame information.  */
  if (!flag_inhibit_size_directive)
    {
      const struct mips_frame_info *frame;

      frame = &cfun->machine->frame;

      /* .frame FRAMEREG, FRAMESIZE, RETREG.  */
      fprintf (file,
	       "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
	       "# vars= " HOST_WIDE_INT_PRINT_DEC
	       ", regs= %d/%d"
	       ", args= " HOST_WIDE_INT_PRINT_DEC
	       ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
	       reg_names[frame_pointer_needed
			 ? HARD_FRAME_POINTER_REGNUM
			 : STACK_POINTER_REGNUM],
	       (frame_pointer_needed
		? frame->total_size - frame->hard_frame_pointer_offset
		: frame->total_size),
	       reg_names[RETURN_ADDR_REGNUM],
	       frame->var_size,
	       frame->num_gp, frame->num_fp,
	       frame->args_size,
	       frame->cprestore_size);

      /* .mask MASK, OFFSET.  */
      fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
	       frame->mask, frame->gp_save_offset);

      /* .fmask MASK, OFFSET.  */
      fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
	       frame->fmask, frame->fp_save_offset);
    }

  /* Handle the initialization of $gp for SVR4 PIC, if applicable.
     Also emit the ".set noreorder; .set nomacro" sequence for functions
     that need it.  */
  if (mips_must_initialize_gp_p ()
      && mips_current_loadgp_style () == LOADGP_OLDABI)
    {
      if (TARGET_MIPS16)
	{
	  /* This is a fixed-form sequence.  The position of the
	     first two instructions is important because of the
	     way _gp_disp is defined.  */
	  output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
	  output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
	  output_asm_insn ("sll\t$2,16", 0);
	  output_asm_insn ("addu\t$2,$3", 0);
	}
      else
	{
	  /* .cpload must be in a .set noreorder but not a
	     .set nomacro block.  */
	  mips_push_asm_switch (&mips_noreorder);
	  output_asm_insn (".cpload\t%^", 0);
	  if (!cfun->machine->all_noreorder_p)
	    mips_pop_asm_switch (&mips_noreorder);
	  else
	    /* Leave noreorder active; mips_output_function_epilogue
	       pops both switches.  */
	    mips_push_asm_switch (&mips_nomacro);
	}
    }
  else if (cfun->machine->all_noreorder_p)
    {
      mips_push_asm_switch (&mips_noreorder);
      mips_push_asm_switch (&mips_nomacro);
    }

  /* Tell the assembler which register we're using as the global
     pointer.  This is needed for thunks, since they can use either
     explicit relocs or assembler macros.  */
  mips_output_cplocal ();
}
11876 
11877 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE.  */
11878 
11879 static void
mips_output_function_epilogue(FILE *)11880 mips_output_function_epilogue (FILE *)
11881 {
11882   const char *fnname;
11883 
11884   /* Reinstate the normal $gp.  */
11885   SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
11886   mips_output_cplocal ();
11887 
11888   if (cfun->machine->all_noreorder_p)
11889     {
11890       mips_pop_asm_switch (&mips_nomacro);
11891       mips_pop_asm_switch (&mips_noreorder);
11892     }
11893 
11894   /* Get the function name the same way that toplev.c does before calling
11895      assemble_start_function.  This is needed so that the name used here
11896      exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
11897   fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
11898   mips_end_function_definition (fnname);
11899 }
11900 
11901 /* Emit an optimisation barrier for accesses to the current frame.  */
11902 
11903 static void
mips_frame_barrier(void)11904 mips_frame_barrier (void)
11905 {
11906   emit_clobber (gen_frame_mem (BLKmode, stack_pointer_rtx));
11907 }
11908 
11909 
/* The __gnu_local_gp symbol.  Created lazily by mips_emit_loadgp when
   the LOADGP_ABSOLUTE style is in use.  */

static GTY(()) rtx mips_gnu_local_gp;
11913 
/* If we're generating n32 or n64 abicalls, emit instructions
   to set up the global pointer.  */

static void
mips_emit_loadgp (void)
{
  rtx addr, offset, incoming_address, base, index, pic_reg;

  /* In MIPS16 mode the sequence targets a temporary register and the
     result is copied into $gp afterwards.  */
  pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
  switch (mips_current_loadgp_style ())
    {
    case LOADGP_ABSOLUTE:
      /* Create the local __gnu_local_gp symbol on first use.  */
      if (mips_gnu_local_gp == NULL)
	{
	  mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
	  SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
	}
      emit_insn (PMODE_INSN (gen_loadgp_absolute,
			     (pic_reg, mips_gnu_local_gp)));
      break;

    case LOADGP_OLDABI:
      /* Added by mips_output_function_prologue.  */
      break;

    case LOADGP_NEWABI:
      /* Derive $gp from the function's own address and the incoming
	 address register.  */
      addr = XEXP (DECL_RTL (current_function_decl), 0);
      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn (PMODE_INSN (gen_loadgp_newabi,
			     (pic_reg, offset, incoming_address)));
      break;

    case LOADGP_RTP:
      /* VxWorks RTP: load $gp via the GOTT base and index symbols.  */
      base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
      index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
      emit_insn (PMODE_INSN (gen_loadgp_rtp, (pic_reg, base, index)));
      break;

    default:
      return;
    }

  if (TARGET_MIPS16)
    emit_insn (PMODE_INSN (gen_copygp_mips16,
			   (pic_offset_table_rtx, pic_reg)));

  /* Emit a blockage if there are implicit uses of the GP register.
     This includes profiled functions, because FUNCTION_PROFILE uses
     a jal macro.  */
  if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
    emit_insn (gen_loadgp_blockage ());
}
11967 
/* Interval, in bytes, between consecutive stack probes; controlled by
   STACK_CHECK_PROBE_INTERVAL_EXP.  */
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

/* The probe sequences below address each probe as a 16-bit signed
   offset from a base register, so larger intervals cannot be encoded.  */
#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
11973 
/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
mips_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* There is no MIPS16 implementation of this probing scheme; diagnose
     rather than silently emit nothing.  */
  if (TARGET_MIPS16)
    sorry ("%<-fstack-check=specific%> not implemented for MIPS16");

  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  The 32768 bound keeps every probe offset
     within a 16-bit signed displacement from $sp.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
        emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      /* R3/R12 are just the prologue scratch registers; the names echo
	 the registers used by other ports' versions of this code.  */
      rtx r3 = MIPS_PROLOGUE_TEMP (Pmode);
      rtx r12 = MIPS_PROLOGUE_TEMP2 (Pmode);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);


      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);


      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r3, plus_constant (Pmode, stack_pointer_rtx,
						 -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  When the rounded size
	 does not fit in a 16-bit immediate, materialize it in R12 and
	 subtract; otherwise fold it into a single add-immediate.  */
      if (rounded_size > 32768)
	{
          emit_move_insn (r12, GEN_INT (rounded_size));
	  emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r3, r12)));
	}
      else
	emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, r3,
						    -rounded_size)));


      /* Step 3: the loop

	do
	  {
	    TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	    probe at TEST_ADDR
	  }
	while (TEST_ADDR != LAST_ADDR)

	probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	until it is equal to ROUNDED_SIZE.  */

      emit_insn (PMODE_INSN (gen_probe_stack_range, (r3, r3, r12)));


      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }

  /* Make sure nothing is scheduled before we are done.  */
  emit_insn (gen_blockage ());
}
12062 
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  Emits the assembly for the probe loop generated by
   the probe_stack_range pattern and returns an empty template string.  */

const char *
mips_output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;	/* Makes each emitted loop label unique.  */
  char loop_lab[32], tmp[64];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  Use the doubleword add
     immediate when pointers are 64 bits wide.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  if (TARGET_64BIT && TARGET_LONG64)
    output_asm_insn ("daddiu\t%0,%0,%1", xops);
  else
    output_asm_insn ("addiu\t%0,%0,%1", xops);

  /* Probe at TEST_ADDR, test if TEST_ADDR == LAST_ADDR and branch.
     The %(/%< punctuation switches the assembler into
     noreorder/nomacro mode (closed again by %)) so that the store
     below fills the branch delay slot -- see the port's operand
     punctuation handling.  &loop_lab[1] skips the leading character
     that ASM_GENERATE_INTERNAL_LABEL prefixes to the label name.  */
  xops[1] = reg2;
  strcpy (tmp, "%(%<bne\t%0,%1,");
  output_asm_insn (strcat (tmp, &loop_lab[1]), xops);
  if (TARGET_64BIT)
    output_asm_insn ("sd\t$0,0(%0)%)", xops);
  else
    output_asm_insn ("sw\t$0,0(%0)%)", xops);

  return "";
}
12097 
12098 /* Return true if X contains a kernel register.  */
12099 
12100 static bool
mips_refers_to_kernel_reg_p(const_rtx x)12101 mips_refers_to_kernel_reg_p (const_rtx x)
12102 {
12103   subrtx_iterator::array_type array;
12104   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
12105     if (REG_P (*iter) && KERNEL_REG_P (REGNO (*iter)))
12106       return true;
12107   return false;
12108 }
12109 
/* Expand the "prologue" pattern.  Allocates the stack frame, saves the
   call-saved registers, sets up the frame pointer and $gp where needed,
   and performs the extra state save required for interrupt handlers.  */

void
mips_expand_prologue (void)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT size;
  unsigned int nargs;

  if (cfun->machine->global_pointer != INVALID_REGNUM)
    {
      /* Check whether an insn uses pic_offset_table_rtx, either explicitly
	 or implicitly.  If so, we can commit to using a global pointer
	 straight away, otherwise we need to defer the decision.  */
      if (mips_cfun_has_inflexible_gp_ref_p ()
	  || mips_cfun_has_flexible_gp_ref_p ())
	{
	  cfun->machine->must_initialize_gp_p = true;
	  cfun->machine->must_restore_gp_when_clobbered_p = true;
	}

      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
    }

  frame = &cfun->machine->frame;
  size = frame->total_size;

  if (flag_stack_usage_info)
    current_function_static_stack_size = size;

  /* Emit stack probes if stack checking or stack-clash protection is
     enabled.  A leaf function that doesn't call alloca only needs to
     probe the part of its frame beyond the protection area.  */
  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
      || flag_stack_clash_protection)
    {
      if (crtl->is_leaf && !cfun->calls_alloca)
	{
	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
	    mips_emit_probe_stack_range (get_stack_check_protect (),
					 size - get_stack_check_protect ());
	}
      else if (size > 0)
	mips_emit_probe_stack_range (get_stack_check_protect (), size);
    }

  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
     bytes beforehand; this is enough to cover the register save area
     without going out of range.  */
  if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
      || frame->num_cop0_regs > 0)
    {
      HOST_WIDE_INT step1;

      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
      if (GENERATE_MIPS16E_SAVE_RESTORE)
 	{
 	  HOST_WIDE_INT offset;
 	  unsigned int mask, regno;

	  /* Try to merge argument stores into the save instruction.  */
	  nargs = mips16e_collect_argument_saves ();

	  /* Build the save instruction.  MASK is updated to leave only
	     the registers the SAVE instruction could not handle.  */
	  mask = frame->mask;
	  rtx insn = mips16e_build_save_restore (false, &mask, &offset,
						 nargs, step1);
	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
	  mips_frame_barrier ();
 	  size -= step1;

 	  /* Check if we need to save other registers.  */
 	  for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
 	    if (BITSET_P (mask, regno - GP_REG_FIRST))
 	      {
		offset -= UNITS_PER_WORD;
		mips_save_restore_reg (word_mode, regno,
				       offset, mips_save_reg);
 	      }
 	}
      else
 	{
	  if (cfun->machine->interrupt_handler_p)
	    {
	      HOST_WIDE_INT offset;
	      rtx mem;

	      /* If this interrupt is using a shadow register set, we need to
		 get the stack pointer from the previous register set.  */
	      if (cfun->machine->use_shadow_register_set == SHADOW_SET_YES)
		emit_insn (PMODE_INSN (gen_mips_rdpgpr, (stack_pointer_rtx,
							 stack_pointer_rtx)));

	      if (!cfun->machine->keep_interrupts_masked_p)
		{
		  if (cfun->machine->int_mask == INT_MASK_EIC)
		    /* Move from COP0 Cause to K0.  */
		    emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
			gen_rtx_REG (SImode, COP0_CAUSE_REG_NUM)));
		}
	      /* Move from COP0 EPC to K1.  */
	      emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
					gen_rtx_REG (SImode,
						     COP0_EPC_REG_NUM)));

	      /* Allocate the first part of the frame.  */
	      rtx insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
					GEN_INT (-step1));
	      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
	      mips_frame_barrier ();
	      size -= step1;

	      /* Start at the uppermost location for saving.  */
	      offset = frame->cop0_sp_offset - size;

	      /* Push EPC into its stack slot.  */
	      mem = gen_frame_mem (word_mode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offset));
	      mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
	      offset -= UNITS_PER_WORD;

	      /* Move from COP0 Status to K1.  */
	      emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
					gen_rtx_REG (SImode,
						     COP0_STATUS_REG_NUM)));

	      /* Right justify the RIPL in k0.  */
	      if (!cfun->machine->keep_interrupts_masked_p
		  && cfun->machine->int_mask == INT_MASK_EIC)
		emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
					gen_rtx_REG (SImode, K0_REG_NUM),
					GEN_INT (CAUSE_IPL)));

	      /* Push Status into its stack slot.  */
	      mem = gen_frame_mem (word_mode,
				   plus_constant (Pmode, stack_pointer_rtx,
						  offset));
	      mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
	      offset -= UNITS_PER_WORD;

	      /* Insert the RIPL into our copy of SR (k1) as the new IPL.  */
	      if (!cfun->machine->keep_interrupts_masked_p
		  && cfun->machine->int_mask == INT_MASK_EIC)
		emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
				       GEN_INT (6),
				       GEN_INT (SR_IPL),
				       gen_rtx_REG (SImode, K0_REG_NUM)));

	      /* Clear all interrupt mask bits up to and including the
		 handler's interrupt line.  */
	      if (!cfun->machine->keep_interrupts_masked_p
		  && cfun->machine->int_mask != INT_MASK_EIC)
		emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
				       GEN_INT (cfun->machine->int_mask + 1),
				       GEN_INT (SR_IM0),
				       gen_rtx_REG (SImode, GP_REG_FIRST)));

	      if (!cfun->machine->keep_interrupts_masked_p)
		/* Enable interrupts by clearing the KSU ERL and EXL bits.
		   IE is already the correct value, so we don't have to do
		   anything explicit.  */
		emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
				       GEN_INT (4),
				       GEN_INT (SR_EXL),
				       gen_rtx_REG (SImode, GP_REG_FIRST)));
	      else
		/* Disable interrupts by clearing the KSU, ERL, EXL,
		   and IE bits.  */
		emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
				       GEN_INT (5),
				       GEN_INT (SR_IE),
				       gen_rtx_REG (SImode, GP_REG_FIRST)));

	      if (TARGET_HARD_FLOAT)
		/* Disable COP1 for hard-float.  This will lead to an exception
		   if floating-point code is executed in an ISR.  */
		emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
				       GEN_INT (1),
				       GEN_INT (SR_COP1),
				       gen_rtx_REG (SImode, GP_REG_FIRST)));
	    }
	  else
	    {
	      /* Ordinary (non-interrupt) function: just allocate the
		 first chunk of the frame.  */
	      if (step1 != 0)
		{
		  rtx insn = gen_add3_insn (stack_pointer_rtx,
					    stack_pointer_rtx,
					    GEN_INT (-step1));
		  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
		  mips_frame_barrier ();
		  size -= step1;
		}
	    }
	  mips_for_each_saved_acc (size, mips_save_reg);
	  mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
	}
    }

  /* Allocate the rest of the frame.  */
  if (size > 0)
    {
      if (SMALL_OPERAND (-size))
	RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
						       stack_pointer_rtx,
						       GEN_INT (-size)))) = 1;
      else
	{
	  /* SIZE doesn't fit in an add immediate; load it into the
	     prologue temporary and subtract.  */
	  mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
	  if (TARGET_MIPS16)
	    {
	      /* There are no instructions to add or subtract registers
		 from the stack pointer, so use the frame pointer as a
		 temporary.  We should always be using a frame pointer
		 in this case anyway.  */
	      gcc_assert (frame_pointer_needed);
	      mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
	      emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
					hard_frame_pointer_rtx,
					MIPS_PROLOGUE_TEMP (Pmode)));
	      mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
	    }
	  else
	    emit_insn (gen_sub3_insn (stack_pointer_rtx,
				      stack_pointer_rtx,
				      MIPS_PROLOGUE_TEMP (Pmode)));

	  /* Describe the combined effect of the previous instructions.  */
	  mips_set_frame_expr
	    (gen_rtx_SET (stack_pointer_rtx,
			  plus_constant (Pmode, stack_pointer_rtx, -size)));
	}
      mips_frame_barrier ();
    }

  /* Set up the frame pointer, if we're using one.  */
  if (frame_pointer_needed)
    {
      HOST_WIDE_INT offset;

      offset = frame->hard_frame_pointer_offset;
      if (offset == 0)
	{
	  rtx insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else if (SMALL_OPERAND (offset))
	{
	  rtx insn = gen_add3_insn (hard_frame_pointer_rtx,
				    stack_pointer_rtx, GEN_INT (offset));
	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
	}
      else
	{
	  /* Large offset: materialize it in the prologue temporary and
	     record the net effect with a frame expression.  */
	  mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
	  mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
	  emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
				    hard_frame_pointer_rtx,
				    MIPS_PROLOGUE_TEMP (Pmode)));
	  mips_set_frame_expr
	    (gen_rtx_SET (hard_frame_pointer_rtx,
			  plus_constant (Pmode, stack_pointer_rtx, offset)));
	}
    }

  mips_emit_loadgp ();

  /* Initialize the $gp save slot.  */
  if (mips_cfun_has_cprestore_slot_p ())
    {
      rtx base, mem, gp, temp;
      HOST_WIDE_INT offset;

      mips_get_cprestore_base_and_offset (&base, &offset, false);
      mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
      gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
      /* Only provide a real temporary if the offset needs one; a
	 SCRATCH tells the pattern no temporary is required.  */
      temp = (SMALL_OPERAND (offset)
	      ? gen_rtx_SCRATCH (Pmode)
	      : MIPS_PROLOGUE_TEMP (Pmode));
      emit_insn (PMODE_INSN (gen_potential_cprestore,
			     (mem, GEN_INT (offset), gp, temp)));

      mips_get_cprestore_base_and_offset (&base, &offset, true);
      mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
      emit_insn (PMODE_INSN (gen_use_cprestore, (mem)));
    }

  /* We need to search back to the last use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p)
    {
      rtx_insn *insn;
      for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
	if (INSN_P (insn)
	    && mips_refers_to_kernel_reg_p (PATTERN (insn)))
	  break;
      /* Emit a move from K1 to COP0 Status after insn.  */
      gcc_assert (insn != NULL_RTX);
      emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
				      gen_rtx_REG (SImode, K1_REG_NUM)),
		       insn);
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (crtl->profile)
    emit_insn (gen_blockage ());
}
12414 
12415 /* Attach all pending register saves to the previous instruction.
12416    Return that instruction.  */
12417 
12418 static rtx_insn *
mips_epilogue_emit_cfa_restores(void)12419 mips_epilogue_emit_cfa_restores (void)
12420 {
12421   rtx_insn *insn;
12422 
12423   insn = get_last_insn ();
12424   if (mips_epilogue.cfa_restores)
12425     {
12426       gcc_assert (insn && !REG_NOTES (insn));
12427       RTX_FRAME_RELATED_P (insn) = 1;
12428       REG_NOTES (insn) = mips_epilogue.cfa_restores;
12429       mips_epilogue.cfa_restores = 0;
12430     }
12431   return insn;
12432 }
12433 
12434 /* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
12435    now at REG + OFFSET.  */
12436 
12437 static void
mips_epilogue_set_cfa(rtx reg,HOST_WIDE_INT offset)12438 mips_epilogue_set_cfa (rtx reg, HOST_WIDE_INT offset)
12439 {
12440   rtx_insn *insn;
12441 
12442   insn = mips_epilogue_emit_cfa_restores ();
12443   if (reg != mips_epilogue.cfa_reg || offset != mips_epilogue.cfa_offset)
12444     {
12445       RTX_FRAME_RELATED_P (insn) = 1;
12446       REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA,
12447 					 plus_constant (Pmode, reg, offset),
12448 					 REG_NOTES (insn));
12449       mips_epilogue.cfa_reg = reg;
12450       mips_epilogue.cfa_offset = offset;
12451     }
12452 }
12453 
/* Emit instructions to restore register REG from slot MEM.  Also update
   the cfa_restores list.  */

static void
mips_restore_reg (rtx reg, rtx mem)
{
  /* There's no MIPS16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
    reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
  else if (GET_MODE (reg) == DFmode
	   && (!TARGET_FLOAT64
	       || mips_abi == ABI_32))
    {
      /* The double occupies a register pair here, so queue a separate
	 CFA restore note for each half.  */
      mips_add_cfa_restore (mips_subword (reg, true));
      mips_add_cfa_restore (mips_subword (reg, false));
    }
  else
    mips_add_cfa_restore (reg);

  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
  if (REGNO (reg) == REGNO (mips_epilogue.cfa_reg))
    /* The CFA is currently defined in terms of the register whose
       value we have just restored.  Redefine the CFA in terms of
       the stack pointer.  */
    mips_epilogue_set_cfa (stack_pointer_rtx,
			   mips_epilogue.cfa_restore_sp_offset);
}
12482 
/* Emit code to set the stack pointer to BASE + OFFSET, given that
   BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
   BASE, if not the stack pointer, is available as a temporary.  */

static void
mips_deallocate_stack (rtx base, rtx offset, HOST_WIDE_INT new_frame_size)
{
  /* Nothing to do if the stack pointer already has the right value.  */
  if (base == stack_pointer_rtx && offset == const0_rtx)
    return;

  mips_frame_barrier ();
  if (offset == const0_rtx)
    {
      /* Plain copy: $sp = BASE.  */
      emit_move_insn (stack_pointer_rtx, base);
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
  else if (TARGET_MIPS16 && base != stack_pointer_rtx)
    {
      /* MIPS16 cannot add a register into $sp directly, so do the
	 addition in BASE (which is free as a temporary) and then copy
	 the result into $sp.  The CFA is redefined in terms of BASE
	 while it still holds the new value.  */
      emit_insn (gen_add3_insn (base, base, offset));
      mips_epilogue_set_cfa (base, new_frame_size);
      emit_move_insn (stack_pointer_rtx, base);
    }
  else
    {
      emit_insn (gen_add3_insn (stack_pointer_rtx, base, offset));
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
}
12511 
12512 /* Emit any instructions needed before a return.  */
12513 
12514 void
mips_expand_before_return(void)12515 mips_expand_before_return (void)
12516 {
12517   /* When using a call-clobbered gp, we start out with unified call
12518      insns that include instructions to restore the gp.  We then split
12519      these unified calls after reload.  These split calls explicitly
12520      clobber gp, so there is no need to define
12521      PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
12522 
12523      For consistency, we should also insert an explicit clobber of $28
12524      before return insns, so that the post-reload optimizers know that
12525      the register is not live on exit.  */
12526   if (TARGET_CALL_CLOBBERED_GP)
12527     emit_clobber (pic_offset_table_rtx);
12528 }
12529 
/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
   says which.  Deallocates the frame, restores the call-saved
   registers (tracking DWARF CFA state as it goes), undoes the
   interrupt-handler state save, and emits the return instruction
   for the non-sibcall case.  */

void
mips_expand_epilogue (bool sibcall_p)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT step1, step2;
  rtx base, adjust;
  rtx_insn *insn;
  bool use_jraddiusp_p = false;

  /* Fast path: a function with a null epilogue can use a bare
     return insn.  */
  if (!sibcall_p && mips_can_use_return_insn ())
    {
      emit_jump_insn (gen_return ());
      return;
    }

  /* In MIPS16 mode, if the return value should go into a floating-point
     register, we need to call a helper routine to copy it over.  */
  if (mips16_cfun_returns_in_fpr_p ())
    mips16_copy_fpr_return_value ();

  /* Split the frame into two.  STEP1 is the amount of stack we should
     deallocate before restoring the registers.  STEP2 is the amount we
     should deallocate afterwards.

     Start off by assuming that no registers need to be restored.  */
  frame = &cfun->machine->frame;
  step1 = frame->total_size;
  step2 = 0;

  /* Work out which register holds the frame address.  */
  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
  else
    {
      base = hard_frame_pointer_rtx;
      step1 -= frame->hard_frame_pointer_offset;
    }
  /* Seed the CFA-tracking state used by mips_epilogue_set_cfa and
     mips_restore_reg.  */
  mips_epilogue.cfa_reg = base;
  mips_epilogue.cfa_offset = step1;
  mips_epilogue.cfa_restores = NULL_RTX;

  /* If we need to restore registers, deallocate as much stack as
     possible in the second step without going out of range.  */
  if ((frame->mask | frame->fmask | frame->acc_mask) != 0
      || frame->num_cop0_regs > 0)
    {
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
      step1 -= step2;
    }

  /* Get an rtx for STEP1 that we can add to BASE.  Large constants
     need to go through the epilogue temporary first.  */
  adjust = GEN_INT (step1);
  if (!SMALL_OPERAND (step1))
    {
      mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
      adjust = MIPS_EPILOGUE_TEMP (Pmode);
    }
  mips_deallocate_stack (base, adjust, step2);

  /* If we're using addressing macros, $gp is implicitly used by all
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
     from the stack.  */
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());

  mips_epilogue.cfa_restore_sp_offset = step2;
  if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
    {
      unsigned int regno, mask;
      HOST_WIDE_INT offset;
      rtx restore;

      /* Generate the restore instruction.  MASK is updated to leave
	 only the registers the RESTORE instruction could not handle.  */
      mask = frame->mask;
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);

      /* Restore any other registers manually.  */
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
 	if (BITSET_P (mask, regno - GP_REG_FIRST))
 	  {
 	    offset -= UNITS_PER_WORD;
 	    mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
 	  }

      /* Restore the remaining registers and deallocate the final bit
	 of the frame.  */
      mips_frame_barrier ();
      emit_insn (restore);
      mips_epilogue_set_cfa (stack_pointer_rtx, 0);
    }
  else
    {
      /* Restore the registers.  */
      mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
      mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
				       mips_restore_reg);

      if (cfun->machine->interrupt_handler_p)
	{
	  HOST_WIDE_INT offset;
	  rtx mem;

	  /* Undo the COP0 state save performed by the prologue,
	     working from the uppermost slot downwards.  */
	  offset = frame->cop0_sp_offset - (frame->total_size - step2);

	  /* Restore the original EPC.  */
	  mem = gen_frame_mem (word_mode,
			       plus_constant (Pmode, stack_pointer_rtx,
					      offset));
	  mips_emit_move (gen_rtx_REG (word_mode, K1_REG_NUM), mem);
	  offset -= UNITS_PER_WORD;

	  /* Move to COP0 EPC.  */
	  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
				    gen_rtx_REG (SImode, K1_REG_NUM)));

	  /* Restore the original Status.  */
	  mem = gen_frame_mem (word_mode,
			       plus_constant (Pmode, stack_pointer_rtx,
					      offset));
	  mips_emit_move (gen_rtx_REG (word_mode, K1_REG_NUM), mem);
	  offset -= UNITS_PER_WORD;

	  /* If we don't use shadow register set, we need to update SP.  */
	  if (cfun->machine->use_shadow_register_set == SHADOW_SET_NO)
	    mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
	  else
	    /* The choice of position is somewhat arbitrary in this case.  */
	    mips_epilogue_emit_cfa_restores ();

	  /* Move to COP0 Status.  */
	  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
				    gen_rtx_REG (SImode, K1_REG_NUM)));
	}
      else if (TARGET_MICROMIPS
	       && !crtl->calls_eh_return
	       && !sibcall_p
	       && step2 > 0
	       && mips_unsigned_immediate_p (step2, 5, 2))
	/* microMIPS can fold the final deallocation into the return
	   via JRADDIUSP when STEP2 fits its immediate encoding; the
	   actual insn is emitted below.  */
	use_jraddiusp_p = true;
      else
	/* Deallocate the final bit of the frame.  */
	mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
    }

  if (cfun->machine->use_frame_header_for_callee_saved_regs)
    mips_epilogue_emit_cfa_restores ();
  else if (!use_jraddiusp_p)
    /* All restore notes should have been attached by now; the
       JRADDIUSP case attaches them after the return insn instead.  */
    gcc_assert (!mips_epilogue.cfa_restores);

  /* Add in the __builtin_eh_return stack adjustment.  We need to
     use a temporary in MIPS16 code.  */
  if (crtl->calls_eh_return)
    {
      if (TARGET_MIPS16)
	{
	  mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
	  emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
				    MIPS_EPILOGUE_TEMP (Pmode),
				    EH_RETURN_STACKADJ_RTX));
	  mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
	}
      else
	emit_insn (gen_add3_insn (stack_pointer_rtx,
				  stack_pointer_rtx,
				  EH_RETURN_STACKADJ_RTX));
    }

  if (!sibcall_p)
    {
      mips_expand_before_return ();
      if (cfun->machine->interrupt_handler_p)
	{
	  /* Interrupt handlers generate eret or deret.  */
	  if (cfun->machine->use_debug_exception_return_p)
	    emit_jump_insn (gen_mips_deret ())
;
	  else
	    emit_jump_insn (gen_mips_eret ());
	}
      else
	{
	  rtx pat;

	  /* When generating MIPS16 code, the normal
	     mips_for_each_saved_gpr_and_fpr path will restore the return
	     address into $7 rather than $31.  */
	  if (TARGET_MIPS16
	      && !GENERATE_MIPS16E_SAVE_RESTORE
	      && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
	    {
	      /* simple_returns cannot rely on values that are only available
		 on paths through the epilogue (because return paths that do
		 not pass through the epilogue may nevertheless reuse a
		 simple_return that occurs at the end of the epilogue).
		 Use a normal return here instead.  */
	      rtx reg = gen_rtx_REG (Pmode, GP_REG_FIRST + 7);
	      pat = gen_return_internal (reg);
	    }
	  else if (use_jraddiusp_p)
	    pat = gen_jraddiusp (GEN_INT (step2));
	  else
	    {
	      rtx reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
	      pat = gen_simple_return_internal (reg);
	    }
	  emit_jump_insn (pat);
	  /* JRADDIUSP deallocates as it returns, so the CFA change is
	     recorded only now.  */
	  if (use_jraddiusp_p)
	    mips_epilogue_set_cfa (stack_pointer_rtx, step2);
	}
    }

  /* Search from the beginning to the first use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p
      && !cfun->machine->keep_interrupts_masked_p)
    {
      for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
	if (INSN_P (insn)
	    && mips_refers_to_kernel_reg_p (PATTERN (insn)))
	  break;
      gcc_assert (insn != NULL_RTX);
      /* Insert disable interrupts before the first use of K0 or K1.  */
      emit_insn_before (gen_mips_di (), insn);
      emit_insn_before (gen_mips_ehb (), insn);
    }
}
12757 
12758 /* Return nonzero if this function is known to have a null epilogue.
12759    This allows the optimizer to omit jumps to jumps if no stack
12760    was created.  */
12761 
12762 bool
mips_can_use_return_insn(void)12763 mips_can_use_return_insn (void)
12764 {
12765   /* Interrupt handlers need to go through the epilogue.  */
12766   if (cfun->machine->interrupt_handler_p)
12767     return false;
12768 
12769   if (!reload_completed)
12770     return false;
12771 
12772   if (crtl->profile)
12773     return false;
12774 
12775   /* In MIPS16 mode, a function that returns a floating-point value
12776      needs to arrange to copy the return value into the floating-point
12777      registers.  */
12778   if (mips16_cfun_returns_in_fpr_p ())
12779     return false;
12780 
12781   return (cfun->machine->frame.total_size == 0
12782 	  && !cfun->machine->use_frame_header_for_callee_saved_regs);
12783 }
12784 
12785 /* Return true if register REGNO can store a value of mode MODE.
12786    The result of this function is cached in mips_hard_regno_mode_ok.  */
12787 
static bool
mips_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
{
  unsigned int size;
  enum mode_class mclass;

  /* A CCV2mode value occupies an aligned pair of condition-code
     registers, so it needs ISA_HAS_8CC and an even ST register.  */
  if (mode == CCV2mode)
    return (ISA_HAS_8CC
	    && ST_REG_P (regno)
	    && (regno - ST_REG_FIRST) % 2 == 0);

  /* Likewise CCV4mode needs a four-register-aligned ST register.  */
  if (mode == CCV4mode)
    return (ISA_HAS_8CC
	    && ST_REG_P (regno)
	    && (regno - ST_REG_FIRST) % 4 == 0);

  /* Plain CCmode lives in any ST register when 8CC is available,
     otherwise only in the FP status word.  */
  if (mode == CCmode)
    return ISA_HAS_8CC ? ST_REG_P (regno) : regno == FPSW_REGNUM;

  size = GET_MODE_SIZE (mode);
  mclass = GET_MODE_CLASS (mode);

  /* GPRs hold anything except CCFmode and MSA values, provided the
     value either fits in one word or starts on an even register.  */
  if (GP_REG_P (regno) && mode != CCFmode && !MSA_SUPPORTED_MODE_P (mode))
    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;

  /* For MSA, allow TImode and 128-bit vector modes in all FPR.  */
  if (FP_REG_P (regno) && MSA_SUPPORTED_MODE_P (mode))
    return true;

  /* Non-MSA FPR use requires format-aligned registers (or any register
     when single registers can hold every format).  */
  if (FP_REG_P (regno)
      && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
	  || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
    {
      /* Deny use of odd-numbered registers for 32-bit data for
	 the o32 FP64A ABI.  */
      if (TARGET_O32_FP64A_ABI && size <= 4 && (regno & 1) != 0)
	return false;

      /* The FPXX ABI requires double-precision values to be placed in
	 even-numbered registers.  Disallow odd-numbered registers with
	 CCFmode because CCFmode double-precision compares will write a
	 64-bit value to a register.  */
      if (mode == CCFmode)
	return !(TARGET_FLOATXX && (regno & 1) != 0);

      /* Allow 64-bit vector modes for Loongson MultiMedia extensions
	 Instructions (MMI).  */
      if (TARGET_LOONGSON_MMI
	  && (mode == V2SImode
	      || mode == V4HImode
	      || mode == V8QImode
	      || mode == DImode))
	return true;

      /* Floating-point values of any class are fine if they fit in
	 UNITS_PER_FPVALUE.  */
      if (mclass == MODE_FLOAT
	  || mclass == MODE_COMPLEX_FLOAT
	  || mclass == MODE_VECTOR_FLOAT)
	return size <= UNITS_PER_FPVALUE;

      /* Allow integer modes that fit into a single register.  We need
	 to put integers into FPRs when using instructions like CVT
	 and TRUNC.  There's no point allowing sizes smaller than a word,
	 because the FPU has no appropriate load/store instructions.  */
      if (mclass == MODE_INT)
	return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
    }

  /* Accumulators take integer and fixed-point values, but never
     vector modes.  */
  if (ACC_REG_P (regno)
      && !VECTOR_MODE_P (mode)
      && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
    {
      if (MD_REG_P (regno))
	{
	  /* After a multiplication or division, clobbering HI makes
	     the value of LO unpredictable, and vice versa.  This means
	     that, for all interesting cases, HI and LO are effectively
	     a single register.

	     We model this by requiring that any value that uses HI
	     also uses LO.  */
	  if (size <= UNITS_PER_WORD * 2)
	    return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
	}
      else
	{
	  /* DSP accumulators do not have the same restrictions as
	     HI and LO, so we can treat them as normal doubleword
	     registers.  */
	  if (size <= UNITS_PER_WORD)
	    return true;

	  if (size <= UNITS_PER_WORD * 2
	      && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
	    return true;
	}
    }

  /* Coprocessor registers hold single-word integer values only.  */
  if (ALL_COP_REG_P (regno))
    return mclass == MODE_INT && size <= UNITS_PER_WORD;

  /* The GOT version register only ever holds an SImode value.  */
  if (regno == GOT_VERSION_REGNUM)
    return mode == SImode;

  return false;
}
12894 
12895 /* Implement TARGET_HARD_REGNO_MODE_OK.  */
12896 
12897 static bool
mips_hard_regno_mode_ok(unsigned int regno,machine_mode mode)12898 mips_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
12899 {
12900   return mips_hard_regno_mode_ok_p[mode][regno];
12901 }
12902 
12903 /* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */
12904 
12905 bool
mips_hard_regno_rename_ok(unsigned int old_reg ATTRIBUTE_UNUSED,unsigned int new_reg)12906 mips_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
12907 			   unsigned int new_reg)
12908 {
12909   /* Interrupt functions can only use registers that have already been
12910      saved by the prologue, even if they would normally be call-clobbered.  */
12911   if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (new_reg))
12912     return false;
12913 
12914   return true;
12915 }
12916 
12917 /* Return nonzero if register REGNO can be used as a scratch register
12918    in peephole2.  */
12919 
12920 bool
mips_hard_regno_scratch_ok(unsigned int regno)12921 mips_hard_regno_scratch_ok (unsigned int regno)
12922 {
12923   /* See mips_hard_regno_rename_ok.  */
12924   if (cfun->machine->interrupt_handler_p && !df_regs_ever_live_p (regno))
12925     return false;
12926 
12927   return true;
12928 }
12929 
12930 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  Odd-numbered
12931    single-precision registers are not considered callee-saved for o32
12932    FPXX as they will be clobbered when run on an FR=1 FPU.  MSA vector
12933    registers with MODE > 64 bits are part clobbered too.  */
12934 
12935 static bool
mips_hard_regno_call_part_clobbered(rtx_insn * insn ATTRIBUTE_UNUSED,unsigned int regno,machine_mode mode)12936 mips_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
12937 				     unsigned int regno, machine_mode mode)
12938 {
12939   if (TARGET_FLOATXX
12940       && hard_regno_nregs (regno, mode) == 1
12941       && FP_REG_P (regno)
12942       && (regno & 1) != 0)
12943     return true;
12944 
12945   if (ISA_HAS_MSA && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8)
12946     return true;
12947 
12948   return false;
12949 }
12950 
12951 /* Implement TARGET_HARD_REGNO_NREGS.  */
12952 
12953 static unsigned int
mips_hard_regno_nregs(unsigned int regno,machine_mode mode)12954 mips_hard_regno_nregs (unsigned int regno, machine_mode mode)
12955 {
12956   if (ST_REG_P (regno))
12957     /* The size of FP status registers is always 4, because they only hold
12958        CCmode values, and CCmode is always considered to be 4 bytes wide.  */
12959     return (GET_MODE_SIZE (mode) + 3) / 4;
12960 
12961   if (FP_REG_P (regno))
12962     {
12963       if (MSA_SUPPORTED_MODE_P (mode))
12964 	return 1;
12965 
12966       return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
12967     }
12968 
12969   /* All other registers are word-sized.  */
12970   return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
12971 }
12972 
12973 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
12974    in mips_hard_regno_nregs.  */
12975 
int
mips_class_max_nregs (enum reg_class rclass, machine_mode mode)
{
  int size;
  HARD_REG_SET left;

  /* SIZE starts impossibly large and is reduced to the smallest
     per-register capacity of any register in RCLASS that can hold
     MODE.  LEFT tracks the registers not yet accounted for.  */
  size = 0x8000;
  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
    {
      /* ST registers hold 4-byte CCmode values.  */
      if (mips_hard_regno_mode_ok (ST_REG_FIRST, mode))
	size = MIN (size, 4);

      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
    }
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
    {
      if (mips_hard_regno_mode_ok (FP_REG_FIRST, mode))
	{
	  /* MSA modes occupy a full MSA register; everything else in
	     an FPR is counted in UNITS_PER_FPREG chunks.  */
	  if (MSA_SUPPORTED_MODE_P (mode))
	    size = MIN (size, UNITS_PER_MSA_REG);
	  else
	    size = MIN (size, UNITS_PER_FPREG);
	}

      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
    }
  /* Any remaining registers are word-sized.  */
  if (!hard_reg_set_empty_p (left))
    size = MIN (size, UNITS_PER_WORD);
  return (GET_MODE_SIZE (mode) + size - 1) / size;
}
13007 
13008 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */
13009 
13010 static bool
mips_can_change_mode_class(machine_mode from,machine_mode to,reg_class_t rclass)13011 mips_can_change_mode_class (machine_mode from,
13012 			    machine_mode to, reg_class_t rclass)
13013 {
13014   /* Allow conversions between different Loongson integer vectors,
13015      and between those vectors and DImode.  */
13016   if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8
13017       && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to))
13018     return true;
13019 
13020   /* Allow conversions between different MSA vector modes.  */
13021   if (MSA_SUPPORTED_MODE_P (from) && MSA_SUPPORTED_MODE_P (to))
13022     return true;
13023 
13024   /* Otherwise, there are several problems with changing the modes of
13025      values in floating-point registers:
13026 
13027      - When a multi-word value is stored in paired floating-point
13028        registers, the first register always holds the low word.  We
13029        therefore can't allow FPRs to change between single-word and
13030        multi-word modes on big-endian targets.
13031 
13032      - GCC assumes that each word of a multiword register can be
13033        accessed individually using SUBREGs.  This is not true for
13034        floating-point registers if they are bigger than a word.
13035 
13036      - Loading a 32-bit value into a 64-bit floating-point register
13037        will not sign-extend the value, despite what LOAD_EXTEND_OP
13038        says.  We can't allow FPRs to change from SImode to a wider
13039        mode on 64-bit targets.
13040 
13041      - If the FPU has already interpreted a value in one format, we
13042        must not ask it to treat the value as having a different
13043        format.
13044 
13045      We therefore disallow all mode changes involving FPRs.  */
13046 
13047   return !reg_classes_intersect_p (FP_REGS, rclass);
13048 }
13049 
13050 /* Implement target hook small_register_classes_for_mode_p.  */
13051 
static bool
mips_small_register_classes_for_mode_p (machine_mode mode
					ATTRIBUTE_UNUSED)
{
  /* MIPS16 instructions can only address a small subset of the GPRs
     (M16_REGS), so the register classes count as "small" in that mode
     regardless of MODE.  */
  return TARGET_MIPS16;
}
13058 
13059 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction,
13060    or use the MSA's move.v instruction.  */
13061 
13062 static bool
mips_mode_ok_for_mov_fmt_p(machine_mode mode)13063 mips_mode_ok_for_mov_fmt_p (machine_mode mode)
13064 {
13065   switch (mode)
13066     {
13067     case E_CCFmode:
13068     case E_SFmode:
13069       return TARGET_HARD_FLOAT;
13070 
13071     case E_DFmode:
13072       return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
13073 
13074     case E_V2SFmode:
13075       return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
13076 
13077     default:
13078       return MSA_SUPPORTED_MODE_P (mode);
13079     }
13080 }
13081 
13082 /* Implement TARGET_MODES_TIEABLE_P.  */
13083 
13084 static bool
mips_modes_tieable_p(machine_mode mode1,machine_mode mode2)13085 mips_modes_tieable_p (machine_mode mode1, machine_mode mode2)
13086 {
13087   /* FPRs allow no mode punning, so it's not worth tying modes if we'd
13088      prefer to put one of them in FPRs.  */
13089   return (mode1 == mode2
13090 	  || (!mips_mode_ok_for_mov_fmt_p (mode1)
13091 	      && !mips_mode_ok_for_mov_fmt_p (mode2)));
13092 }
13093 
13094 /* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
13095 
13096 static reg_class_t
mips_preferred_reload_class(rtx x,reg_class_t rclass)13097 mips_preferred_reload_class (rtx x, reg_class_t rclass)
13098 {
13099   if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
13100     return LEA_REGS;
13101 
13102   if (reg_class_subset_p (FP_REGS, rclass)
13103       && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
13104     return FP_REGS;
13105 
13106   if (reg_class_subset_p (GR_REGS, rclass))
13107     rclass = GR_REGS;
13108 
13109   if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
13110     rclass = M16_REGS;
13111 
13112   return rclass;
13113 }
13114 
13115 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
13116    Return a "canonical" class to represent it in later calculations.  */
13117 
13118 static reg_class_t
mips_canonicalize_move_class(reg_class_t rclass)13119 mips_canonicalize_move_class (reg_class_t rclass)
13120 {
13121   /* All moves involving accumulator registers have the same cost.  */
13122   if (reg_class_subset_p (rclass, ACC_REGS))
13123     rclass = ACC_REGS;
13124 
13125   /* Likewise promote subclasses of general registers to the most
13126      interesting containing class.  */
13127   if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
13128     rclass = M16_REGS;
13129   else if (reg_class_subset_p (rclass, GENERAL_REGS))
13130     rclass = GENERAL_REGS;
13131 
13132   return rclass;
13133 }
13134 
13135 /* Return the cost of moving a value from a register of class FROM to a GPR.
13136    Return 0 for classes that are unions of other classes handled by this
13137    function.  */
13138 
13139 static int
mips_move_to_gpr_cost(reg_class_t from)13140 mips_move_to_gpr_cost (reg_class_t from)
13141 {
13142   switch (from)
13143     {
13144     case M16_REGS:
13145     case GENERAL_REGS:
13146       /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
13147       return 2;
13148 
13149     case ACC_REGS:
13150       /* MFLO and MFHI.  */
13151       return 6;
13152 
13153     case FP_REGS:
13154       /* MFC1, etc.  */
13155       return 4;
13156 
13157     case COP0_REGS:
13158     case COP2_REGS:
13159     case COP3_REGS:
13160       /* This choice of value is historical.  */
13161       return 5;
13162 
13163     default:
13164       return 0;
13165     }
13166 }
13167 
13168 /* Return the cost of moving a value from a GPR to a register of class TO.
13169    Return 0 for classes that are unions of other classes handled by this
13170    function.  */
13171 
13172 static int
mips_move_from_gpr_cost(reg_class_t to)13173 mips_move_from_gpr_cost (reg_class_t to)
13174 {
13175   switch (to)
13176     {
13177     case M16_REGS:
13178     case GENERAL_REGS:
13179       /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
13180       return 2;
13181 
13182     case ACC_REGS:
13183       /* MTLO and MTHI.  */
13184       return 6;
13185 
13186     case FP_REGS:
13187       /* MTC1, etc.  */
13188       return 4;
13189 
13190     case COP0_REGS:
13191     case COP2_REGS:
13192     case COP3_REGS:
13193       /* This choice of value is historical.  */
13194       return 5;
13195 
13196     default:
13197       return 0;
13198     }
13199 }
13200 
13201 /* Implement TARGET_REGISTER_MOVE_COST.  Return 0 for classes that are the
13202    maximum of the move costs for subclasses; regclass will work out
13203    the maximum for us.  */
13204 
13205 static int
mips_register_move_cost(machine_mode mode,reg_class_t from,reg_class_t to)13206 mips_register_move_cost (machine_mode mode,
13207 			 reg_class_t from, reg_class_t to)
13208 {
13209   reg_class_t dregs;
13210   int cost1, cost2;
13211 
13212   from = mips_canonicalize_move_class (from);
13213   to = mips_canonicalize_move_class (to);
13214 
13215   /* Handle moves that can be done without using general-purpose registers.  */
13216   if (from == FP_REGS)
13217     {
13218       if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
13219 	/* MOV.FMT.  */
13220 	return 4;
13221     }
13222 
13223   /* Handle cases in which only one class deviates from the ideal.  */
13224   dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
13225   if (from == dregs)
13226     return mips_move_from_gpr_cost (to);
13227   if (to == dregs)
13228     return mips_move_to_gpr_cost (from);
13229 
13230   /* Handles cases that require a GPR temporary.  */
13231   cost1 = mips_move_to_gpr_cost (from);
13232   if (cost1 != 0)
13233     {
13234       cost2 = mips_move_from_gpr_cost (to);
13235       if (cost2 != 0)
13236 	return cost1 + cost2;
13237     }
13238 
13239   return 0;
13240 }
13241 
13242 /* Implement TARGET_REGISTER_PRIORITY.  */
13243 
13244 static int
mips_register_priority(int hard_regno)13245 mips_register_priority (int hard_regno)
13246 {
13247   /* Treat MIPS16 registers with higher priority than other regs.  */
13248   if (TARGET_MIPS16
13249       && TEST_HARD_REG_BIT (reg_class_contents[M16_REGS], hard_regno))
13250     return 1;
13251   return 0;
13252 }
13253 
13254 /* Implement TARGET_MEMORY_MOVE_COST.  */
13255 
13256 static int
mips_memory_move_cost(machine_mode mode,reg_class_t rclass,bool in)13257 mips_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
13258 {
13259   return (mips_cost->memory_latency
13260 	  + memory_move_secondary_cost (mode, rclass, in));
13261 }
13262 
13263 /* Implement TARGET_SECONDARY_MEMORY_NEEDED.
13264 
13265    When targeting the o32 FPXX ABI, all moves with a length of doubleword
13266    or greater must be performed by FR-mode-aware instructions.
13267    This can be achieved using MFHC1/MTHC1 when these instructions are
13268    available but otherwise moves must go via memory.
13269    For the o32 FP64A ABI, all odd-numbered moves with a length of
13270    doubleword or greater are required to use memory.  Using MTC1/MFC1
13271    to access the lower-half of these registers would require a forbidden
13272    single-precision access.  We require all double-word moves to use
13273    memory because adding even and odd floating-point registers classes
13274    would have a significant impact on the backend.  */
13275 
13276 static bool
mips_secondary_memory_needed(machine_mode mode,reg_class_t class1,reg_class_t class2)13277 mips_secondary_memory_needed (machine_mode mode, reg_class_t class1,
13278 			      reg_class_t class2)
13279 {
13280   /* Ignore spilled pseudos.  */
13281   if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS))
13282     return false;
13283 
13284   if (((class1 == FP_REGS) != (class2 == FP_REGS))
13285       && ((TARGET_FLOATXX && !ISA_HAS_MXHC1)
13286 	  || TARGET_O32_FP64A_ABI)
13287       && GET_MODE_SIZE (mode) >= 8)
13288     return true;
13289 
13290   return false;
13291 }
13292 
13293 /* Return the register class required for a secondary register when
13294    copying between one of the registers in RCLASS and value X, which
13295    has mode MODE.  X is the source of the move if IN_P, otherwise it
13296    is the destination.  Return NO_REGS if no secondary register is
13297    needed.  */
13298 
enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
			     machine_mode mode, rtx x, bool)
{
  int regno;

  /* If X is a constant that cannot be loaded into $25, it must be loaded
     into some other GPR.  No other register class allows a direct move.  */
  if (mips_dangerous_for_la25_p (x))
    return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;

  /* NOTE: when X is not (a subreg of) a register, REGNO is negative,
     so all the *_REG_P tests below are false in that case.  */
  regno = true_regnum (x);
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, every move must involve a member of M16_REGS.  */
      if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
	return M16_REGS;

      return NO_REGS;
    }

  /* Copying from accumulator registers to anywhere other than a general
     register requires a temporary general register.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    return GP_REG_P (regno) ? NO_REGS : GR_REGS;
  if (ACC_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  if (reg_class_subset_p (rclass, FP_REGS))
    {
      if (regno < 0
	  || (MEM_P (x)
	      && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)))
	/* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
	   pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
	return NO_REGS;

      if (MEM_P (x) && MSA_SUPPORTED_MODE_P (mode))
	/* In this case we can use MSA LD.* and ST.*.  */
	return NO_REGS;

      if (GP_REG_P (regno) || x == CONST0_RTX (mode))
	/* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
	return NO_REGS;

      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
	/* We can force the constant to memory and use lwc1
	   and ldc1.  As above, we will use pairs of lwc1s if
	   ldc1 is not supported.  */
	return NO_REGS;

      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
	/* In this case we can use mov.fmt.  */
	return NO_REGS;

      /* Otherwise, we need to reload through an integer register.  */
      return GR_REGS;
    }
  /* Copying an FPR into a non-FPR class likewise needs a GPR step
     unless the destination already is a general register.  */
  if (FP_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  return NO_REGS;
}
13362 
13363 /* Implement TARGET_MODE_REP_EXTENDED.  */
13364 
13365 static int
mips_mode_rep_extended(scalar_int_mode mode,scalar_int_mode mode_rep)13366 mips_mode_rep_extended (scalar_int_mode mode, scalar_int_mode mode_rep)
13367 {
13368   /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
13369   if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
13370     return SIGN_EXTEND;
13371 
13372   return UNKNOWN;
13373 }
13374 
13375 /* Implement TARGET_VALID_POINTER_MODE.  */
13376 
13377 static bool
mips_valid_pointer_mode(scalar_int_mode mode)13378 mips_valid_pointer_mode (scalar_int_mode mode)
13379 {
13380   return mode == SImode || (TARGET_64BIT && mode == DImode);
13381 }
13382 
13383 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
13384 
13385 static bool
mips_vector_mode_supported_p(machine_mode mode)13386 mips_vector_mode_supported_p (machine_mode mode)
13387 {
13388   switch (mode)
13389     {
13390     case E_V2SFmode:
13391       return TARGET_PAIRED_SINGLE_FLOAT;
13392 
13393     case E_V2HImode:
13394     case E_V4QImode:
13395     case E_V2HQmode:
13396     case E_V2UHQmode:
13397     case E_V2HAmode:
13398     case E_V2UHAmode:
13399     case E_V4QQmode:
13400     case E_V4UQQmode:
13401       return TARGET_DSP;
13402 
13403     case E_V2SImode:
13404     case E_V4HImode:
13405     case E_V8QImode:
13406       return TARGET_LOONGSON_MMI;
13407 
13408     default:
13409       return MSA_SUPPORTED_MODE_P (mode);
13410     }
13411 }
13412 
13413 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
13414 
13415 static bool
mips_scalar_mode_supported_p(scalar_mode mode)13416 mips_scalar_mode_supported_p (scalar_mode mode)
13417 {
13418   if (ALL_FIXED_POINT_MODE_P (mode)
13419       && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
13420     return true;
13421 
13422   return default_scalar_mode_supported_p (mode);
13423 }
13424 
13425 /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */
13426 
13427 static machine_mode
mips_preferred_simd_mode(scalar_mode mode)13428 mips_preferred_simd_mode (scalar_mode mode)
13429 {
13430   if (TARGET_PAIRED_SINGLE_FLOAT
13431       && mode == SFmode)
13432     return V2SFmode;
13433 
13434   if (!ISA_HAS_MSA)
13435     return word_mode;
13436 
13437   switch (mode)
13438     {
13439     case E_QImode:
13440       return V16QImode;
13441     case E_HImode:
13442       return V8HImode;
13443     case E_SImode:
13444       return V4SImode;
13445     case E_DImode:
13446       return V2DImode;
13447 
13448     case E_SFmode:
13449       return V4SFmode;
13450 
13451     case E_DFmode:
13452       return V2DFmode;
13453 
13454     default:
13455       break;
13456     }
13457   return word_mode;
13458 }
13459 
13460 /* Implement TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES.  */
13461 
13462 static void
mips_autovectorize_vector_sizes(vector_sizes * sizes)13463 mips_autovectorize_vector_sizes (vector_sizes *sizes)
13464 {
13465   if (ISA_HAS_MSA)
13466     sizes->safe_push (16);
13467 }
13468 
13469 /* Implement TARGET_INIT_LIBFUNCS.  */
13470 
static void
mips_init_libfuncs (void)
{
  if (TARGET_FIX_VR4120)
    {
      /* Register the special divsi3 and modsi3 functions needed to work
	 around VR4120 division errata.  */
      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
    }

  if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
    {
      /* Register the MIPS16 -mhard-float stubs.  */
      /* Single-precision arithmetic.  */
      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");

      /* Single-precision comparisons.  */
      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
      set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");

      /* Single-precision conversions.  */
      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
      set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");

      if (TARGET_DOUBLE_FLOAT)
	{
	  /* Double-precision arithmetic.  */
	  set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
	  set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
	  set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
	  set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");

	  /* Double-precision comparisons.  */
	  set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
	  set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
	  set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
	  set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
	  set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
	  set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
	  set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");

	  /* Double-precision conversions.  */
	  set_conv_libfunc (sext_optab, DFmode, SFmode,
			    "__mips16_extendsfdf2");
	  set_conv_libfunc (trunc_optab, SFmode, DFmode,
			    "__mips16_truncdfsf2");
	  set_conv_libfunc (sfix_optab, SImode, DFmode,
			    "__mips16_fix_truncdfsi");
	  set_conv_libfunc (sfloat_optab, DFmode, SImode,
			    "__mips16_floatsidf");
	  set_conv_libfunc (ufloat_optab, DFmode, SImode,
			    "__mips16_floatunsidf");
	}
    }

  /* The MIPS16 ISA does not have an encoding for "sync", so we rely
     on an external non-MIPS16 routine to implement __sync_synchronize.
     Similarly for the rest of the ll/sc libfuncs.  */
  if (TARGET_MIPS16)
    {
      synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
      init_sync_libfuncs (UNITS_PER_WORD);
    }
}
13539 
13540 /* Build up a multi-insn sequence that loads label TARGET into $AT.  */
13541 
static void
mips_process_load_label (rtx target)
{
  rtx base, gp, intop;
  HOST_WIDE_INT offset;

  mips_multi_start ();
  switch (mips_abi)
    {
    case ABI_N32:
      /* n32: load the GOT page entry for TARGET, then add the offset
	 of TARGET within that page.  */
      mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    case ABI_64:
      /* n64: as for n32, but with doubleword loads and adds.  */
      mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    default:
      /* o32: go through the GOT.  If this function keeps $gp in a
	 cprestore slot, first reload the GOT pointer from that slot
	 into $1 ($AT) rather than trusting $gp itself.  */
      gp = pic_offset_table_rtx;
      if (mips_cfun_has_cprestore_slot_p ())
	{
	  gp = gen_rtx_REG (Pmode, AT_REGNUM);
	  mips_get_cprestore_base_and_offset (&base, &offset, true);
	  if (!SMALL_OPERAND (offset))
	    {
	      /* The slot offset does not fit in a 16-bit immediate:
		 form BASE plus the high part in GP and keep only the
		 low part for the load below.  */
	      intop = GEN_INT (CONST_HIGH_PART (offset));
	      mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
	      mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);

	      base = gp;
	      offset = CONST_LOW_PART (offset);
	    }
	  intop = GEN_INT (offset);
	  /* NOTE(review): the "%#" variant appears to fill the load
	     delay slot, matching the ISA_HAS_LOAD_DELAY guard.  */
	  if (ISA_HAS_LOAD_DELAY)
	    mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
	  else
	    mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
	}
      /* Load the GOT entry for TARGET and add its low-part offset.  */
      if (ISA_HAS_LOAD_DELAY)
	mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
      else
	mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
      break;
    }
}
13590 
13591 /* Return the number of instructions needed to load a label into $AT.  */
13592 
13593 static unsigned int
mips_load_label_num_insns(void)13594 mips_load_label_num_insns (void)
13595 {
13596   if (cfun->machine->load_label_num_insns == 0)
13597     {
13598       mips_process_load_label (pc_rtx);
13599       cfun->machine->load_label_num_insns = mips_multi_num_insns;
13600     }
13601   return cfun->machine->load_label_num_insns;
13602 }
13603 
13604 /* Emit an asm sequence to start a noat block and load the address
13605    of a label into $1.  */
13606 
13607 void
mips_output_load_label(rtx target)13608 mips_output_load_label (rtx target)
13609 {
13610   mips_push_asm_switch (&mips_noat);
13611   if (TARGET_EXPLICIT_RELOCS)
13612     {
13613       mips_process_load_label (target);
13614       mips_multi_write ();
13615     }
13616   else
13617     {
13618       if (Pmode == DImode)
13619 	output_asm_insn ("dla\t%@,%0", &target);
13620       else
13621 	output_asm_insn ("la\t%@,%0", &target);
13622     }
13623 }
13624 
13625 /* Return the length of INSN.  LENGTH is the initial length computed by
13626    attributes in the machine-description file.  */
13627 
int
mips_adjust_insn_length (rtx_insn *insn, int length)
{
  /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
     of a PIC long-branch sequence.  Substitute the correct value.  */
  if (length == MAX_PIC_BRANCH_LENGTH
      && JUMP_P (insn)
      && INSN_CODE (insn) >= 0
      && get_attr_type (insn) == TYPE_BRANCH)
    {
      /* Add the branch-over instruction and its delay slot, if this
	 is a conditional branch.  */
      length = simplejump_p (insn) ? 0 : 8;

      /* Add the size of a load into $AT.  */
      length += BASE_INSN_LENGTH * mips_load_label_num_insns ();

      /* Add the length of an indirect jump, ignoring the delay slot.  */
      length += TARGET_COMPRESSION ? 2 : 4;
    }

  /* An unconditional jump has an unfilled delay slot if it is not part
     of a sequence.  A conditional jump normally has a delay slot, but
     does not on MIPS16.  */
  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
    length += TARGET_MIPS16 ? 2 : 4;

  /* See how many nops might be needed to avoid hardware hazards.  */
  if (!cfun->machine->ignore_hazard_length_p
      && INSN_P (insn)
      && INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
	break;

      /* Load-delay and forbidden-slot hazards need one nop.  */
      case HAZARD_DELAY:
      case HAZARD_FORBIDDEN_SLOT:
	length += NOP_INSN_LENGTH;
	break;

      /* HI/LO hazards need two nops.  */
      case HAZARD_HILO:
	length += NOP_INSN_LENGTH * 2;
	break;
      }

  return length;
}
13676 
13677 /* Return the asm template for a call.  OPERANDS are the operands, TARGET_OPNO
13678    is the operand number of the target.  SIZE_OPNO is the operand number of
13679    the argument size operand that can optionally hold the call attributes.  If
13680    SIZE_OPNO is not -1 and the call is indirect, use the function symbol from
13681    the call attributes to attach a R_MIPS_JALR relocation to the call.  LINK_P
13682    indicates whether the jump is a call and needs to set the link register.
13683 
13684    When generating GOT code without explicit relocation operators, all calls
13685    should use assembly macros.  Otherwise, all indirect calls should use "jr"
13686    or "jalr"; we will arrange to restore $gp afterwards if necessary.  Finally,
13687    we can only generate direct calls for -mabicalls by temporarily switching
13688    to non-PIC mode.
13689 
13690    For microMIPS jal(r), we try to generate jal(r)s when a 16-bit
13691    instruction is in the delay slot of jal(r).
13692 
13693    Where compact branches are available, we try to use them if the delay slot
13694    has a NOP (or equivalently delay slots were not enabled for the instruction
13695    anyway).  */
13696 
13697 const char *
mips_output_jump(rtx * operands,int target_opno,int size_opno,bool link_p)13698 mips_output_jump (rtx *operands, int target_opno, int size_opno, bool link_p)
13699 {
13700   static char buffer[300];
13701   char *s = buffer;
13702   bool reg_p = REG_P (operands[target_opno]);
13703 
13704   const char *and_link = link_p ? "al" : "";
13705   const char *reg = reg_p ? "r" : "";
13706   const char *compact = "";
13707   const char *nop = "%/";
13708   const char *short_delay = link_p ? "%!" : "";
13709   const char *insn_name = TARGET_CB_NEVER || reg_p ? "j" : "b";
13710 
13711   /* Compact branches can only be described when the ISA has support for them
13712      as both the compact formatter '%:' and the delay slot NOP formatter '%/'
13713      work as a mutually exclusive pair.  I.e. a NOP is never required if a
13714      compact form is available.  */
13715   if (!final_sequence
13716       && (TARGET_CB_MAYBE
13717 	  || (ISA_HAS_JRC && !link_p && reg_p)))
13718     {
13719       compact = "c";
13720       nop = "";
13721     }
13722 
13723   if (TARGET_USE_GOT && !TARGET_EXPLICIT_RELOCS)
13724     sprintf (s, "%%*%s%s\t%%%d%%/", insn_name, and_link, target_opno);
13725   else
13726     {
13727       if (!reg_p && TARGET_ABICALLS_PIC2)
13728 	s += sprintf (s, ".option\tpic0\n\t");
13729 
13730       if (reg_p && mips_get_pic_call_symbol (operands, size_opno))
13731 	s += sprintf (s, "%%*.reloc\t1f,%s,%%%d\n1:\t",
13732 		      TARGET_MICROMIPS ? "R_MICROMIPS_JALR" : "R_MIPS_JALR",
13733 		      size_opno);
13734       else
13735 	s += sprintf (s, "%%*");
13736 
13737       s += sprintf (s, "%s%s%s%s%s\t%%%d%s",
13738 		    insn_name, and_link, reg, compact, short_delay,
13739 		    target_opno, nop);
13740 
13741       if (!reg_p && TARGET_ABICALLS_PIC2)
13742 	s += sprintf (s, "\n\t.option\tpic2");
13743     }
13744   return buffer;
13745 }
13746 
/* Return the assembly code for INSN, which has the operands given by
   OPERANDS, and which branches to OPERANDS[0] if some condition is true.
   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
   version of BRANCH_IF_TRUE.  */

const char *
mips_output_conditional_branch (rtx_insn *insn, rtx *operands,
				const char *branch_if_true,
				const char *branch_if_false)
{
  unsigned int length;
  rtx taken;

  gcc_assert (LABEL_P (operands[0]));

  /* Lengths of 8 bytes or less indicate that the target is within range
     of a single direct branch.  */
  length = get_attr_length (insn);
  if (length <= 8)
    {
      /* Just a simple conditional branch.  */
      mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
      return branch_if_true;
    }

  /* Generate a reversed branch around a direct jump.  This fallback does
     not use branch-likely instructions.  */
  mips_branch_likely = false;
  rtx_code_label *not_taken = gen_label_rtx ();
  taken = operands[0];

  /* Generate the reversed branch to NOT_TAKEN.  */
  operands[0] = not_taken;
  output_asm_insn (branch_if_false, operands);

  /* If INSN has a delay slot, we must provide delay slots for both the
     branch to NOT_TAKEN and the conditional jump.  We must also ensure
     that INSN's delay slot is executed in the appropriate cases.  */
  if (final_sequence)
    {
      /* This first delay slot will always be executed, so use INSN's
	 delay slot if it is not annulled.  */
      if (!INSN_ANNULLED_BRANCH_P (insn))
	{
	  final_scan_insn (final_sequence->insn (1),
			   asm_out_file, optimize, 1, NULL);
	  /* Mark the delay-slot insn as emitted so that it is not
	     output a second time.  */
	  final_sequence->insn (1)->set_deleted ();
	}
      else
	output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output the unconditional branch to TAKEN.  */
  if (TARGET_ABSOLUTE_JUMPS && TARGET_CB_MAYBE)
    {
      /* Add a hazard nop.  */
      if (!final_sequence)
	{
	  output_asm_insn ("nop\t\t# hazard nop", 0);
	  fprintf (asm_out_file, "\n");
	}
      output_asm_insn (MIPS_ABSOLUTE_JUMP ("bc\t%0"), &taken);
    }
  else if (TARGET_ABSOLUTE_JUMPS)
    output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
  else
    {
      /* Out-of-range target: load its address and jump indirectly.  */
      mips_output_load_label (taken);
      if (TARGET_CB_MAYBE)
	output_asm_insn ("jrc\t%@%]", 0);
      else
	output_asm_insn ("jr\t%@%]%/", 0);
    }

  /* Now deal with its delay slot; see above.  */
  if (final_sequence)
    {
      /* This delay slot will only be executed if the branch is taken.
	 Use INSN's delay slot if it is annulled.  */
      if (INSN_ANNULLED_BRANCH_P (insn))
	{
	  final_scan_insn (final_sequence->insn (1),
			   asm_out_file, optimize, 1, NULL);
	  final_sequence->insn (1)->set_deleted ();
	}
      else if (TARGET_CB_NEVER)
	output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output NOT_TAKEN.  */
  targetm.asm_out.internal_label (asm_out_file, "L",
				  CODE_LABEL_NUMBER (not_taken));
  return "";
}
13842 
/* Return the assembly code for INSN, which branches to OPERANDS[0]
   if some equality condition is true.  The condition is given by
   OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
   OPERANDS[1].  OPERANDS[2] is the comparison's first operand;
   OPERANDS[3] is the second operand and may be zero or a register.  */

const char *
mips_output_equal_conditional_branch (rtx_insn* insn, rtx *operands,
				      bool inverted_p)
{
  /* BRANCH[1] is the template that branches when the condition holds and
     BRANCH[0] its inverse; assigning through !INVERTED_P/INVERTED_P
     swaps the two templates when the caller asks for the inverse.  */
  const char *branch[2];
  /* For a simple BNEZ or BEQZ microMIPSr3 branch.  */
  if (TARGET_MICROMIPS
      && mips_isa_rev <= 5
      && operands[3] == const0_rtx
      && get_attr_length (insn) <= 8)
    {
      if (mips_cb == MIPS_CB_OPTIMAL)
	{
	  branch[!inverted_p] = "%*b%C1z%:\t%2,%0";
	  branch[inverted_p] = "%*b%N1z%:\t%2,%0";
	}
      else
	{
	  branch[!inverted_p] = "%*b%C1z\t%2,%0%/";
	  branch[inverted_p] = "%*b%N1z\t%2,%0%/";
	}
    }
  else if (TARGET_CB_MAYBE)
    {
      if (operands[3] == const0_rtx)
	{
	  branch[!inverted_p] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
	  branch[inverted_p] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
	}
      else if (REGNO (operands[2]) != REGNO (operands[3]))
	{
	  branch[!inverted_p] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
	  branch[inverted_p] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
	}
      else
	{
	  /* This case is degenerate.  It should not happen, but does.
	     A register compared with itself makes EQ always true and NE
	     always false, so the branch is unconditional or dead.  */
	  if (GET_CODE (operands[1]) == NE)
	    inverted_p = !inverted_p;

	  branch[!inverted_p] = MIPS_BRANCH_C ("b", "%0");
	  branch[inverted_p] = "%*\t\t# branch never";
	}
    }
  else
    {
      /* Conventional delay-slot branch forms.  */
      branch[!inverted_p] = MIPS_BRANCH ("b%C1", "%2,%z3,%0");
      branch[inverted_p] = MIPS_BRANCH ("b%N1", "%2,%z3,%0");
    }

  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
}
13901 
/* Return the assembly code for INSN, which branches to OPERANDS[0]
   if some ordering condition is true.  The condition is given by
   OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
   OPERANDS[1].  OPERANDS[2] is the comparison's first operand;
   OPERANDS[3] is the second operand and may be zero or a register.  */

const char *
mips_output_order_conditional_branch (rtx_insn *insn, rtx *operands,
				      bool inverted_p)
{
  const char *branch[2];

  /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
     Make BRANCH[0] branch on the inverse condition.  */
  if (operands[3] != const0_rtx)
    {
      /* Handle degenerate cases that should not, but do, occur.
	 A register compared with itself makes LT/LTU always false and
	 GE/GEU always true.  */
      if (REGNO (operands[2]) == REGNO (operands[3]))
	{
	  switch (GET_CODE (operands[1]))
	    {
	    case LT:
	    case LTU:
	      inverted_p = !inverted_p;
	      /* Fall through.  */
	    case GE:
	    case GEU:
	      branch[!inverted_p] = MIPS_BRANCH_C ("b", "%0");
	      branch[inverted_p] = "%*\t\t# branch never";
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else
	{
	  branch[!inverted_p] = MIPS_BRANCH_C ("b%C1", "%2,%3,%0");
	  branch[inverted_p] = MIPS_BRANCH_C ("b%N1", "%2,%3,%0");
	}
    }
  else
    {
      switch (GET_CODE (operands[1]))
	{
	  /* These cases are equivalent to comparisons against zero.  */
	case LEU:
	  inverted_p = !inverted_p;
	  /* Fall through.  */
	case GTU:
	  /* Unsigned "x > 0" is simply "x != 0".  */
	  if (TARGET_CB_MAYBE)
	    {
	      branch[!inverted_p] = MIPS_BRANCH_C ("bnez", "%2,%0");
	      branch[inverted_p] = MIPS_BRANCH_C ("beqz", "%2,%0");
	    }
	  else
	    {
	      branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
	      branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
	    }
	  break;

	  /* These cases are always true or always false.  */
	case LTU:
	  inverted_p = !inverted_p;
	  /* Fall through.  */
	case GEU:
	  /* Unsigned "x >= 0" always holds.  */
	  if (TARGET_CB_MAYBE)
	    {
	      branch[!inverted_p] = MIPS_BRANCH_C ("b", "%0");
	      branch[inverted_p] = "%*\t\t# branch never";
	    }
	  else
	    {
	      branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
	      branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
	    }
	  break;

	default:
	  /* Signed orderings against zero use the compare-with-zero
	     branch forms.  */
	  if (TARGET_CB_MAYBE)
	    {
	      branch[!inverted_p] = MIPS_BRANCH_C ("b%C1z", "%2,%0");
	      branch[inverted_p] = MIPS_BRANCH_C ("b%N1z", "%2,%0");
	    }
	  else
	    {
	      branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
	      branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
	    }
	  break;
	}
    }
  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
}
13996 
13997 /* Start a block of code that needs access to the LL, SC and SYNC
13998    instructions.  */
13999 
14000 static void
mips_start_ll_sc_sync_block(void)14001 mips_start_ll_sc_sync_block (void)
14002 {
14003   if (!ISA_HAS_LL_SC)
14004     {
14005       output_asm_insn (".set\tpush", 0);
14006       if (TARGET_64BIT)
14007 	output_asm_insn (".set\tmips3", 0);
14008       else
14009 	output_asm_insn (".set\tmips2", 0);
14010     }
14011 }
14012 
14013 /* End a block started by mips_start_ll_sc_sync_block.  */
14014 
14015 static void
mips_end_ll_sc_sync_block(void)14016 mips_end_ll_sc_sync_block (void)
14017 {
14018   if (!ISA_HAS_LL_SC)
14019     output_asm_insn (".set\tpop", 0);
14020 }
14021 
/* Output and/or return the asm template for a sync instruction.  */

const char *
mips_output_sync (void)
{
  /* SYNC may require a higher assembler ISA level; see
     mips_start_ll_sc_sync_block.  */
  mips_start_ll_sc_sync_block ();
  output_asm_insn ("sync", 0);
  mips_end_ll_sc_sync_block ();
  return "";
}
14032 
/* Return the asm template associated with sync_insn1 value TYPE.
   IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation.
   In the templates, operand 0 is the destination, operand 1 the first
   source and operand 2 the second source.  */

static const char *
mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
{
  switch (type)
    {
    case SYNC_INSN1_MOVE:
      return "move\t%0,%z2";
    case SYNC_INSN1_LI:
      return "li\t%0,%2";
    /* For the arithmetic operations the "d" forms are the 64-bit ones.  */
    case SYNC_INSN1_ADDU:
      return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
    case SYNC_INSN1_ADDIU:
      return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
    case SYNC_INSN1_SUBU:
      return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
    /* The logical operations operate bitwise and need no 64-bit form.  */
    case SYNC_INSN1_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN1_ANDI:
      return "andi\t%0,%1,%2";
    case SYNC_INSN1_OR:
      return "or\t%0,%1,%z2";
    case SYNC_INSN1_ORI:
      return "ori\t%0,%1,%2";
    case SYNC_INSN1_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN1_XORI:
      return "xori\t%0,%1,%2";
    }
  /* All enumeration values are handled above.  */
  gcc_unreachable ();
}
14066 
/* Return the asm template associated with sync_insn2 value TYPE.  */

static const char *
mips_sync_insn2_template (enum attr_sync_insn2 type)
{
  switch (type)
    {
    case SYNC_INSN2_NOP:
      /* Callers test for SYNC_INSN2_NOP and skip the instruction
	 entirely, so there is no template for it.  */
      gcc_unreachable ();
    case SYNC_INSN2_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN2_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN2_NOT:
      /* NOR against the always-zero register implements bitwise NOT.  */
      return "nor\t%0,%1,%.";
    }
  gcc_unreachable ();
}
14085 
14086 /* OPERANDS are the operands to a sync loop instruction and INDEX is
14087    the value of the one of the sync_* attributes.  Return the operand
14088    referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
14089    have the associated attribute.  */
14090 
14091 static rtx
mips_get_sync_operand(rtx * operands,int index,rtx default_value)14092 mips_get_sync_operand (rtx *operands, int index, rtx default_value)
14093 {
14094   if (index > 0)
14095     default_value = operands[index - 1];
14096   return default_value;
14097 }
14098 
/* INSN is a sync loop with operands OPERANDS.  Build up a multi-insn
   sequence for it in the mips_multi buffer; the caller is responsible
   for writing the sequence out (see mips_output_sync_loop).  */

static void
mips_process_sync_loop (rtx_insn *insn, rtx *operands)
{
  rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
  rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3, cmp;
  unsigned int tmp3_insn;
  enum attr_sync_insn1 insn1;
  enum attr_sync_insn2 insn2;
  bool is_64bit_p;
  int memmodel_attr;
  enum memmodel model;

  /* Read an operand from the sync_WHAT attribute and store it in
     variable WHAT.  DEFAULT is the default value if no attribute
     is specified.  */
#define READ_OPERAND(WHAT, DEFAULT) \
  WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
  				DEFAULT)

  /* Read the memory.  */
  READ_OPERAND (mem, 0);
  gcc_assert (mem);
  is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);

  /* Read the other attributes.  */
  at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
  READ_OPERAND (oldval, at);
  READ_OPERAND (cmp, 0);
  READ_OPERAND (newval, at);
  READ_OPERAND (inclusive_mask, 0);
  READ_OPERAND (exclusive_mask, 0);
  READ_OPERAND (required_oldval, 0);
  READ_OPERAND (insn1_op2, 0);
  insn1 = get_attr_sync_insn1 (insn);
  insn2 = get_attr_sync_insn2 (insn);

  /* Don't bother setting CMP result that is never used.  */
  if (cmp && find_reg_note (insn, REG_UNUSED, cmp))
    cmp = 0;

  /* Values 10 and 11 are special encodings of the sync_memmodel
     attribute; any other value is the number of the operand that holds
     the C11 memory model as an integer constant.  */
  memmodel_attr = get_attr_sync_memmodel (insn);
  switch (memmodel_attr)
    {
    case 10:
      model = MEMMODEL_ACQ_REL;
      break;
    case 11:
      model = MEMMODEL_ACQUIRE;
      break;
    default:
      model = memmodel_from_int (INTVAL (operands[memmodel_attr]));
    }

  mips_multi_start ();

  /* Output the release side of the memory barrier.  */
  if (need_atomic_barrier_p (model, true))
    {
      if (required_oldval == 0 && TARGET_OCTEON)
	{
	  /* Octeon doesn't reorder reads, so a full barrier can be
	     created by using SYNCW to order writes combined with the
	     write from the following SC.  When the SC successfully
	     completes, we know that all preceding writes are also
	     committed to the coherent memory system.  It is possible
	     for a single SYNCW to fail, but a pair of them will never
	     fail, so we use two.  */
	  mips_multi_add_insn ("syncw", NULL);
	  mips_multi_add_insn ("syncw", NULL);
	}
      else
	mips_multi_add_insn ("sync", NULL);
    }

  /* Output the branch-back label.  */
  mips_multi_add_label ("1:");

  /* OLDVAL = *MEM.  */
  mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
		       oldval, mem, NULL);

  /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2.  */
  if (required_oldval)
    {
      if (inclusive_mask == 0)
	tmp1 = oldval;
      else
	{
	  gcc_assert (oldval != at);
	  mips_multi_add_insn ("and\t%0,%1,%2",
			       at, oldval, inclusive_mask, NULL);
	  tmp1 = at;
	}
      if (TARGET_CB_NEVER)
	mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);

      /* CMP = 0 [delay slot].  */
      if (cmp)
        mips_multi_add_insn ("li\t%0,0", cmp, NULL);

      /* Compact branches have no delay slot, so emit them after the
	 CMP = 0 above; it then executes whether or not we exit.  */
      if (TARGET_CB_MAYBE && required_oldval == const0_rtx)
	mips_multi_add_insn ("bnezc\t%0,2f", tmp1, NULL);
      else if (TARGET_CB_MAYBE)
	mips_multi_add_insn ("bnec\t%0,%1,2f", tmp1, required_oldval, NULL);

    }

  /* $TMP1 = OLDVAL & EXCLUSIVE_MASK.  */
  if (exclusive_mask == 0)
    tmp1 = const0_rtx;
  else
    {
      gcc_assert (oldval != at);
      mips_multi_add_insn ("and\t%0,%1,%z2",
			   at, oldval, exclusive_mask, NULL);
      tmp1 = at;
    }

  /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).

     A plain move can be skipped when a later step ($TMP3's INSN2 or the
     OR below) will still combine INSN1_OP2 into the stored value; in
     that case INSN1_OP2 itself serves as $TMP2.  */
  if (insn1 == SYNC_INSN1_MOVE
      && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
    tmp2 = insn1_op2;
  else
    {
      mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
			   newval, oldval, insn1_op2, NULL);
      tmp2 = newval;
    }

  /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK).  */
  if (insn2 == SYNC_INSN2_NOP)
    tmp3 = tmp2;
  else
    {
      mips_multi_add_insn (mips_sync_insn2_template (insn2),
			   newval, tmp2, inclusive_mask, NULL);
      tmp3 = newval;
    }
  /* Remember the instruction that produced $TMP3; it may be retargeted
     or duplicated below.  */
  tmp3_insn = mips_multi_last_index ();

  /* $AT = $TMP1 | $TMP3.  */
  if (tmp1 == const0_rtx || tmp3 == const0_rtx)
    {
      /* One side is zero, so the OR degenerates to a move; make the
	 $TMP3 instruction write straight into $AT instead.  */
      mips_multi_set_operand (tmp3_insn, 0, at);
      tmp3 = at;
    }
  else
    {
      gcc_assert (tmp1 != tmp3);
      mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
    }

  /* if (!commit (*MEM = $AT)) goto 1.

     This will sometimes be a delayed branch; see the write code below
     for details.  */
  mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);

  /* When using branch likely (-mfix-r10000), the delay slot instruction
     will be annulled on false.  The normal delay slot instructions
     calculate the overall result of the atomic operation and must not
     be annulled.  To ensure this behavior unconditionally use a NOP
     in the delay slot for the branch likely case.  */

  if (TARGET_CB_MAYBE)
    mips_multi_add_insn ("beqzc\t%0,1b", at, NULL);
  else
    mips_multi_add_insn ("beq%?\t%0,%.,1b%~", at, NULL);

  /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot].  */
  if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
    {
      mips_multi_copy_insn (tmp3_insn);
      mips_multi_set_operand (mips_multi_last_index (), 0, newval);
    }
  else if (!(required_oldval && cmp) && !mips_branch_likely)
    mips_multi_add_insn ("nop", NULL);

  /* CMP = 1 -- either standalone or in a delay slot.  */
  if (required_oldval && cmp)
    mips_multi_add_insn ("li\t%0,1", cmp, NULL);

  /* Output the acquire side of the memory barrier.  */
  if (TARGET_SYNC_AFTER_SC && need_atomic_barrier_p (model, false))
    mips_multi_add_insn ("sync", NULL);

  /* Output the exit label, if needed.  */
  if (required_oldval)
    mips_multi_add_label ("2:");

#undef READ_OPERAND
}
14297 
/* Output and/or return the asm template for sync loop INSN, which has
   the operands given by OPERANDS.  */

const char *
mips_output_sync_loop (rtx_insn *insn, rtx *operands)
{
  /* Use branch-likely instructions to work around the LL/SC R10000
     errata.  */
  mips_branch_likely = TARGET_FIX_R10000;

  /* Build the instruction sequence into the mips_multi buffer.  */
  mips_process_sync_loop (insn, operands);

  /* Emit the sequence exactly as built: no assembler reordering or
     macro expansion, and with $AT reserved for the loop's temporaries.  */
  mips_push_asm_switch (&mips_noreorder);
  mips_push_asm_switch (&mips_nomacro);
  mips_push_asm_switch (&mips_noat);
  mips_start_ll_sc_sync_block ();

  mips_multi_write ();

  mips_end_ll_sc_sync_block ();
  mips_pop_asm_switch (&mips_noat);
  mips_pop_asm_switch (&mips_nomacro);
  mips_pop_asm_switch (&mips_noreorder);

  return "";
}
14324 
/* Return the number of individual instructions in sync loop INSN,
   which has the operands given by OPERANDS.  */

unsigned int
mips_sync_loop_insns (rtx_insn *insn, rtx *operands)
{
  /* Use branch-likely instructions to work around the LL/SC R10000
     errata.  */
  mips_branch_likely = TARGET_FIX_R10000;
  /* Build the same sequence that mips_output_sync_loop would emit,
     then count its instructions.  */
  mips_process_sync_loop (insn, operands);
  return mips_multi_num_insns;
}
14337 
14338 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
14339    the operands given by OPERANDS.  Add in a divide-by-zero check if needed.
14340 
14341    When working around R4000 and R4400 errata, we need to make sure that
14342    the division is not immediately followed by a shift[1][2].  We also
14343    need to stop the division from being put into a branch delay slot[3].
14344    The easiest way to avoid both problems is to add a nop after the
14345    division.  When a divide-by-zero check is needed, this nop can be
14346    used to fill the branch delay slot.
14347 
14348    [1] If a double-word or a variable shift executes immediately
14349        after starting an integer division, the shift may give an
14350        incorrect result.  See quotations of errata #16 and #28 from
14351        "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
14352        in mips.md for details.
14353 
14354    [2] A similar bug to [1] exists for all revisions of the
14355        R4000 and the R4400 when run in an MC configuration.
14356        From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
14357 
14358        "19. In this following sequence:
14359 
14360 		    ddiv		(or ddivu or div or divu)
14361 		    dsll32		(or dsrl32, dsra32)
14362 
14363 	    if an MPT stall occurs, while the divide is slipping the cpu
14364 	    pipeline, then the following double shift would end up with an
14365 	    incorrect result.
14366 
14367 	    Workaround: The compiler needs to avoid generating any
14368 	    sequence with divide followed by extended double shift."
14369 
14370        This erratum is also present in "MIPS R4400MC Errata, Processor
14371        Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
14372        & 3.0" as errata #10 and #4, respectively.
14373 
14374    [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
14375        (also valid for MIPS R4000MC processors):
14376 
14377        "52. R4000SC: This bug does not apply for the R4000PC.
14378 
14379 	    There are two flavors of this bug:
14380 
14381 	    1) If the instruction just after divide takes an RF exception
14382 	       (tlb-refill, tlb-invalid) and gets an instruction cache
14383 	       miss (both primary and secondary) and the line which is
14384 	       currently in secondary cache at this index had the first
14385 	       data word, where the bits 5..2 are set, then R4000 would
14386 	       get a wrong result for the div.
14387 
14388 	    ##1
14389 		    nop
14390 		    div	r8, r9
14391 		    -------------------		# end-of page. -tlb-refill
14392 		    nop
14393 	    ##2
14394 		    nop
14395 		    div	r8, r9
14396 		    -------------------		# end-of page. -tlb-invalid
14397 		    nop
14398 
14399 	    2) If the divide is in the taken branch delay slot, where the
14400 	       target takes RF exception and gets an I-cache miss for the
14401 	       exception vector or where I-cache miss occurs for the
14402 	       target address, under the above mentioned scenarios, the
14403 	       div would get wrong results.
14404 
14405 	    ##1
14406 		    j	r2		# to next page mapped or unmapped
14407 		    div	r8,r9		# this bug would be there as long
14408 					# as there is an ICache miss and
14409 		    nop			# the "data pattern" is present
14410 
14411 	    ##2
14412 		    beq	r0, r0, NextPage	# to Next page
14413 		    div	r8,r9
14414 		    nop
14415 
14416 	    This bug is present for div, divu, ddiv, and ddivu
14417 	    instructions.
14418 
14419 	    Workaround: For item 1), OS could make sure that the next page
14420 	    after the divide instruction is also mapped.  For item 2), the
14421 	    compiler could make sure that the divide instruction is not in
14422 	    the branch delay slot."
14423 
14424        These processors have PRId values of 0x00004220 and 0x00004300 for
14425        the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400.  */
14426 
14427 const char *
mips_output_division(const char * division,rtx * operands)14428 mips_output_division (const char *division, rtx *operands)
14429 {
14430   const char *s;
14431 
14432   s = division;
14433   if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
14434     {
14435       output_asm_insn (s, operands);
14436       s = "nop";
14437     }
14438   if (TARGET_CHECK_ZERO_DIV)
14439     {
14440       if (TARGET_MIPS16)
14441 	{
14442 	  output_asm_insn (s, operands);
14443 	  s = "bnez\t%2,1f\n\tbreak\t7\n1:";
14444 	}
14445       else if (GENERATE_DIVIDE_TRAPS)
14446 	{
14447 	  /* Avoid long replay penalty on load miss by putting the trap before
14448 	     the divide.  */
14449 	  if (TUNE_74K)
14450 	    output_asm_insn ("teq\t%2,%.,7", operands);
14451 	  else
14452 	    {
14453 	      output_asm_insn (s, operands);
14454 	      s = "teq\t%2,%.,7";
14455 	    }
14456 	}
14457       else
14458 	{
14459 	  if (flag_delayed_branch)
14460 	    {
14461 	      output_asm_insn ("%(bne\t%2,%.,1f", operands);
14462 	      output_asm_insn (s, operands);
14463 	      s = "break\t7%)\n1:";
14464 	    }
14465 	  else
14466 	    {
14467 	      output_asm_insn (s, operands);
14468 	      s = "bne\t%2,%.,1f\n\tnop\n\tbreak\t7\n1:";
14469 	    }
14470 	}
14471     }
14472   return s;
14473 }
14474 
14475 /* Return the assembly code for MSA DIV_{S,U}.DF or MOD_{S,U}.DF instructions,
14476    which has the operands given by OPERANDS.  Add in a divide-by-zero check
14477    if needed.  */
14478 
14479 const char *
mips_msa_output_division(const char * division,rtx * operands)14480 mips_msa_output_division (const char *division, rtx *operands)
14481 {
14482   const char *s;
14483 
14484   s = division;
14485   if (TARGET_CHECK_ZERO_DIV)
14486     {
14487       output_asm_insn ("%(bnz.%v0\t%w2,1f", operands);
14488       output_asm_insn (s, operands);
14489       s = "break\t7%)\n1:";
14490     }
14491   return s;
14492 }
14493 
14494 /* Return true if destination of IN_INSN is used as add source in
14495    OUT_INSN. Both IN_INSN and OUT_INSN are of type fmadd. Example:
14496    madd.s dst, x, y, z
14497    madd.s a, dst, b, c  */
14498 
14499 bool
mips_fmadd_bypass(rtx_insn * out_insn,rtx_insn * in_insn)14500 mips_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn)
14501 {
14502   int dst_reg, src_reg;
14503 
14504   gcc_assert (get_attr_type (in_insn) == TYPE_FMADD);
14505   gcc_assert (get_attr_type (out_insn) == TYPE_FMADD);
14506 
14507   extract_insn (in_insn);
14508   dst_reg = REG_P (recog_data.operand[0]);
14509 
14510   extract_insn (out_insn);
14511   src_reg = REG_P (recog_data.operand[1]);
14512 
14513   if (dst_reg == src_reg)
14514     return true;
14515 
14516   return false;
14517 }
14518 
14519 /* Return true if IN_INSN is a multiply-add or multiply-subtract
14520    instruction and if OUT_INSN assigns to the accumulator operand.  */
14521 
14522 bool
mips_linked_madd_p(rtx_insn * out_insn,rtx_insn * in_insn)14523 mips_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn)
14524 {
14525   enum attr_accum_in accum_in;
14526   int accum_in_opnum;
14527   rtx accum_in_op;
14528 
14529   if (recog_memoized (in_insn) < 0)
14530     return false;
14531 
14532   accum_in = get_attr_accum_in (in_insn);
14533   if (accum_in == ACCUM_IN_NONE)
14534     return false;
14535 
14536   accum_in_opnum = accum_in - ACCUM_IN_0;
14537 
14538   extract_insn (in_insn);
14539   gcc_assert (accum_in_opnum < recog_data.n_operands);
14540   accum_in_op = recog_data.operand[accum_in_opnum];
14541 
14542   return reg_set_p (accum_in_op, out_insn);
14543 }
14544 
14545 /* True if the dependency between OUT_INSN and IN_INSN is on the store
14546    data rather than the address.  We need this because the cprestore
14547    pattern is type "store", but is defined using an UNSPEC_VOLATILE,
14548    which causes the default routine to abort.  We just return false
14549    for that case.  */
14550 
14551 bool
mips_store_data_bypass_p(rtx_insn * out_insn,rtx_insn * in_insn)14552 mips_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
14553 {
14554   if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
14555     return false;
14556 
14557   return store_data_bypass_p (out_insn, in_insn);
14558 }
14559 
14560 
/* Variables and flags used in scheduler hooks when tuning for
   Loongson 2E/2F.  All state here is per-scheduling-region and is
   reset in mips_sched_init.  */
static struct
{
  /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
     strategy.  */

  /* If true, then next ALU1/2 instruction will go to ALU1.  */
  bool alu1_turn_p;

  /* If true, then next FALU1/2 instruction will go to FALU1.  */
  bool falu1_turn_p;

  /* Codes to query if [f]alu{1,2}_core units are subscribed or not.
     Initialized from the DFA by mips_ls2_init_dfa_post_cycle_insn.  */
  int alu1_core_unit_code;
  int alu2_core_unit_code;
  int falu1_core_unit_code;
  int falu2_core_unit_code;

  /* True if current cycle has a multi instruction.
     This flag is used in mips_ls2_dfa_post_advance_cycle.  */
  bool cycle_has_multi_p;

  /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
     These are used in mips_ls2_dfa_post_advance_cycle to initialize
     DFA state.
     E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
     instruction to go ALU1.  */
  rtx_insn *alu1_turn_enabled_insn;
  rtx_insn *alu2_turn_enabled_insn;
  rtx_insn *falu1_turn_enabled_insn;
  rtx_insn *falu2_turn_enabled_insn;
} mips_ls2;
14594 
14595 /* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
14596    dependencies have no cost, except on the 20Kc where output-dependence
14597    is treated like input-dependence.  */
14598 
14599 static int
mips_adjust_cost(rtx_insn *,int dep_type,rtx_insn *,int cost,unsigned int)14600 mips_adjust_cost (rtx_insn *, int dep_type, rtx_insn *, int cost, unsigned int)
14601 {
14602   if (dep_type != 0 && (dep_type != REG_DEP_OUTPUT || !TUNE_20KC))
14603     return 0;
14604   return cost;
14605 }
14606 
/* Return the number of instructions that can be issued per cycle.  */

static int
mips_issue_rate (void)
{
  switch (mips_tune)
    {
    case PROCESSOR_74KC:
    case PROCESSOR_74KF2_1:
    case PROCESSOR_74KF1_1:
    case PROCESSOR_74KF3_2:
      /* The 74k is not strictly quad-issue cpu, but can be seen as one
	 by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
	 but in reality only a maximum of 3 insns can be issued as
	 floating-point loads and stores also require a slot in the
	 AGEN pipe.  */
      /* The 74k cases deliberately share the R10000 "return 4" below.  */
    case PROCESSOR_R10000:
      /* All R10K Processors are quad-issue (being the first MIPS
         processors to support this feature). */
      return 4;

    case PROCESSOR_20KC:
    case PROCESSOR_R4130:
    case PROCESSOR_R5400:
    case PROCESSOR_R5500:
    case PROCESSOR_R5900:
    case PROCESSOR_R7000:
    case PROCESSOR_R9000:
    case PROCESSOR_OCTEON:
    case PROCESSOR_OCTEON2:
    case PROCESSOR_OCTEON3:
    case PROCESSOR_I6400:
    case PROCESSOR_GS264E:
      return 2;

    case PROCESSOR_SB1:
    case PROCESSOR_SB1A:
      /* This is actually 4, but we get better performance if we claim 3.
	 This is partly because of unwanted speculative code motion with the
	 larger number, and partly because in most common cases we can't
	 reach the theoretical max of 4.  */
      return 3;

    case PROCESSOR_LOONGSON_2E:
    case PROCESSOR_LOONGSON_2F:
    case PROCESSOR_GS464:
    case PROCESSOR_GS464E:
    case PROCESSOR_P5600:
    case PROCESSOR_P6600:
      return 4;

    case PROCESSOR_XLP:
      /* 4 after register allocation, 3 before — presumably to limit
	 pre-RA speculation; confirm against the XLP pipeline model.  */
      return (reload_completed ? 4 : 3);

    default:
      /* Conservative single-issue default for processors not listed.  */
      return 1;
    }
}
14665 
14666 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2.  */
14667 
14668 static void
mips_ls2_init_dfa_post_cycle_insn(void)14669 mips_ls2_init_dfa_post_cycle_insn (void)
14670 {
14671   start_sequence ();
14672   emit_insn (gen_ls2_alu1_turn_enabled_insn ());
14673   mips_ls2.alu1_turn_enabled_insn = get_insns ();
14674   end_sequence ();
14675 
14676   start_sequence ();
14677   emit_insn (gen_ls2_alu2_turn_enabled_insn ());
14678   mips_ls2.alu2_turn_enabled_insn = get_insns ();
14679   end_sequence ();
14680 
14681   start_sequence ();
14682   emit_insn (gen_ls2_falu1_turn_enabled_insn ());
14683   mips_ls2.falu1_turn_enabled_insn = get_insns ();
14684   end_sequence ();
14685 
14686   start_sequence ();
14687   emit_insn (gen_ls2_falu2_turn_enabled_insn ());
14688   mips_ls2.falu2_turn_enabled_insn = get_insns ();
14689   end_sequence ();
14690 
14691   mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
14692   mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
14693   mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
14694   mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
14695 }
14696 
14697 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
14698    Init data used in mips_dfa_post_advance_cycle.  */
14699 
14700 static void
mips_init_dfa_post_cycle_insn(void)14701 mips_init_dfa_post_cycle_insn (void)
14702 {
14703   if (TUNE_LOONGSON_2EF)
14704     mips_ls2_init_dfa_post_cycle_insn ();
14705 }
14706 
/* Initialize STATE when scheduling for Loongson 2E/2F.
   Support round-robin dispatch scheme by enabling only one of
   ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
   respectively.  */

static void
mips_ls2_dfa_post_advance_cycle (state_t state)
{
  if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
    {
      /* Though there are no non-pipelined ALU1 insns,
	 we can get an instruction of type 'multi' before reload.  */
      gcc_assert (mips_ls2.cycle_has_multi_p);
      mips_ls2.alu1_turn_p = false;
    }

  /* Reset the per-cycle flag; mips_ls2_variable_issue sets it again
     when a 'multi' instruction is issued.  */
  mips_ls2.cycle_has_multi_p = false;

  if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
    /* We have a non-pipelined alu instruction in the core,
       adjust round-robin counter.  */
    mips_ls2.alu1_turn_p = true;

  /* Issue the turn-enabled pseudo-insn for whichever ALU has the turn.
     state_transition must succeed (return a negative value) here.  */
  if (mips_ls2.alu1_turn_p)
    {
      if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
	gcc_unreachable ();
    }
  else
    {
      if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
	gcc_unreachable ();
    }

  if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
    {
      /* There are no non-pipelined FALU1 insns.  */
      gcc_unreachable ();
      /* NOTE(review): this assignment is dead — it follows
	 gcc_unreachable () and can never execute.  */
      mips_ls2.falu1_turn_p = false;
    }

  if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
    /* We have a non-pipelined falu instruction in the core,
       adjust round-robin counter.  */
    mips_ls2.falu1_turn_p = true;

  /* Likewise for the FALU pair.  */
  if (mips_ls2.falu1_turn_p)
    {
      if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
	gcc_unreachable ();
    }
  else
    {
      if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
	gcc_unreachable ();
    }
}
14764 
14765 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
14766    This hook is being called at the start of each cycle.  */
14767 
14768 static void
mips_dfa_post_advance_cycle(void)14769 mips_dfa_post_advance_cycle (void)
14770 {
14771   if (TUNE_LOONGSON_2EF)
14772     mips_ls2_dfa_post_advance_cycle (curr_state);
14773 }
14774 
14775 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
14776    be as wide as the scheduling freedom in the DFA.  */
14777 
14778 static int
mips_multipass_dfa_lookahead(void)14779 mips_multipass_dfa_lookahead (void)
14780 {
14781   /* Can schedule up to 4 of the 6 function units in any one cycle.  */
14782   if (TUNE_SB1)
14783     return 4;
14784 
14785   if (TUNE_LOONGSON_2EF || TUNE_GS464 || TUNE_GS464E)
14786     return 4;
14787 
14788   if (TUNE_OCTEON || TUNE_GS264E)
14789     return 2;
14790 
14791   if (TUNE_P5600 || TUNE_P6600 || TUNE_I6400)
14792     return 4;
14793 
14794   return 0;
14795 }
14796 
14797 /* Remove the instruction at index LOWER from ready queue READY and
14798    reinsert it in front of the instruction at index HIGHER.  LOWER must
14799    be <= HIGHER.  */
14800 
14801 static void
mips_promote_ready(rtx_insn ** ready,int lower,int higher)14802 mips_promote_ready (rtx_insn **ready, int lower, int higher)
14803 {
14804   rtx_insn *new_head;
14805   int i;
14806 
14807   new_head = ready[lower];
14808   for (i = lower; i < higher; i++)
14809     ready[i] = ready[i + 1];
14810   ready[i] = new_head;
14811 }
14812 
14813 /* If the priority of the instruction at POS2 in the ready queue READY
14814    is within LIMIT units of that of the instruction at POS1, swap the
14815    instructions if POS2 is not already less than POS1.  */
14816 
14817 static void
mips_maybe_swap_ready(rtx_insn ** ready,int pos1,int pos2,int limit)14818 mips_maybe_swap_ready (rtx_insn **ready, int pos1, int pos2, int limit)
14819 {
14820   if (pos1 < pos2
14821       && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
14822     {
14823       rtx_insn *temp;
14824 
14825       temp = ready[pos1];
14826       ready[pos1] = ready[pos2];
14827       ready[pos2] = temp;
14828     }
14829 }
14830 
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
   that may clobber hi or lo, or null if none has been recorded in the
   current scheduling region (reset by mips_sched_init).  */
static rtx_insn *mips_macc_chains_last_hilo;
14834 
14835 /* A TUNE_MACC_CHAINS helper function.  Record that instruction INSN has
14836    been scheduled, updating mips_macc_chains_last_hilo appropriately.  */
14837 
14838 static void
mips_macc_chains_record(rtx_insn * insn)14839 mips_macc_chains_record (rtx_insn *insn)
14840 {
14841   if (get_attr_may_clobber_hilo (insn))
14842     mips_macc_chains_last_hilo = insn;
14843 }
14844 
14845 /* A TUNE_MACC_CHAINS helper function.  Search ready queue READY, which
14846    has NREADY elements, looking for a multiply-add or multiply-subtract
14847    instruction that is cumulative with mips_macc_chains_last_hilo.
14848    If there is one, promote it ahead of anything else that might
14849    clobber hi or lo.  */
14850 
14851 static void
mips_macc_chains_reorder(rtx_insn ** ready,int nready)14852 mips_macc_chains_reorder (rtx_insn **ready, int nready)
14853 {
14854   int i, j;
14855 
14856   if (mips_macc_chains_last_hilo != 0)
14857     for (i = nready - 1; i >= 0; i--)
14858       if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
14859 	{
14860 	  for (j = nready - 1; j > i; j--)
14861 	    if (recog_memoized (ready[j]) >= 0
14862 		&& get_attr_may_clobber_hilo (ready[j]))
14863 	      {
14864 		mips_promote_ready (ready, i, j);
14865 		break;
14866 	      }
14867 	  break;
14868 	}
14869 }
14870 
/* The last instruction to be scheduled, or null at the start of a
   scheduling region (see mips_sched_init).  */
static rtx_insn *vr4130_last_insn;
14873 
14874 /* A note_stores callback used by vr4130_true_reg_dependence_p.  DATA
14875    points to an rtx that is initially an instruction.  Nullify the rtx
14876    if the instruction uses the value of register X.  */
14877 
14878 static void
vr4130_true_reg_dependence_p_1(rtx x,const_rtx pat ATTRIBUTE_UNUSED,void * data)14879 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
14880 				void *data)
14881 {
14882   rtx *insn_ptr;
14883 
14884   insn_ptr = (rtx *) data;
14885   if (REG_P (x)
14886       && *insn_ptr != 0
14887       && reg_referenced_p (x, PATTERN (*insn_ptr)))
14888     *insn_ptr = 0;
14889 }
14890 
14891 /* Return true if there is true register dependence between vr4130_last_insn
14892    and INSN.  */
14893 
14894 static bool
vr4130_true_reg_dependence_p(rtx insn)14895 vr4130_true_reg_dependence_p (rtx insn)
14896 {
14897   note_stores (PATTERN (vr4130_last_insn),
14898 	       vr4130_true_reg_dependence_p_1, &insn);
14899   return insn == 0;
14900 }
14901 
/* A TUNE_MIPS4130 helper function.  Given that INSN1 is at the head of
   the ready queue and that INSN2 is the instruction after it, return
   true if it is worth promoting INSN2 ahead of INSN1.  Look for cases
   in which INSN1 and INSN2 can probably issue in parallel, but for
   which (INSN2, INSN1) should be less sensitive to instruction
   alignment than (INSN1, INSN2).  See 4130.md for more details.  */

static bool
vr4130_swap_insns_p (rtx_insn *insn1, rtx_insn *insn2)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Check for the following case:

     1) there is some other instruction X with an anti dependence on INSN1;
     2) X has a higher priority than INSN2; and
     3) X is an arithmetic instruction (and thus has no unit restrictions).

     If INSN1 is the last instruction blocking X, it would better to
     choose (INSN1, X) over (INSN2, INSN1).  */
  FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
    /* recog_memoized >= 0 means the consumer is recognized, so its
       vr4130_class attribute can be queried safely.  */
    if (DEP_TYPE (dep) == REG_DEP_ANTI
	&& INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
	&& recog_memoized (DEP_CON (dep)) >= 0
	&& get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
      return false;

  if (vr4130_last_insn != 0
      && recog_memoized (insn1) >= 0
      && recog_memoized (insn2) >= 0)
    {
      /* See whether INSN1 and INSN2 use different execution units,
	 or if they are both ALU-type instructions.  If so, they can
	 probably execute in parallel.  */
      enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
      enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
      if (class1 != class2 || class1 == VR4130_CLASS_ALU)
	{
	  /* If only one of the instructions has a dependence on
	     vr4130_last_insn, prefer to schedule the other one first.  */
	  bool dep1_p = vr4130_true_reg_dependence_p (insn1);
	  bool dep2_p = vr4130_true_reg_dependence_p (insn2);
	  if (dep1_p != dep2_p)
	    return dep1_p;

	  /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
	     is not an ALU-type instruction and if INSN1 uses the same
	     execution unit.  (Note that if this condition holds, we already
	     know that INSN2 uses a different execution unit.)  */
	  if (class1 != VR4130_CLASS_ALU
	      && recog_memoized (vr4130_last_insn) >= 0
	      && class1 == get_attr_vr4130_class (vr4130_last_insn))
	    return true;
	}
    }
  return false;
}
14960 
14961 /* A TUNE_MIPS4130 helper function.  (READY, NREADY) describes a ready
14962    queue with at least two instructions.  Swap the first two if
14963    vr4130_swap_insns_p says that it could be worthwhile.  */
14964 
14965 static void
vr4130_reorder(rtx_insn ** ready,int nready)14966 vr4130_reorder (rtx_insn **ready, int nready)
14967 {
14968   if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
14969     mips_promote_ready (ready, nready - 2, nready - 1);
14970 }
14971 
/* Record whether last 74k AGEN instruction was a load or store.
   TYPE_UNKNOWN means "no history"; see mips_74k_agen_init.  */
static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
14974 
14975 /* Initialize mips_last_74k_agen_insn from INSN.  A null argument
14976    resets to TYPE_UNKNOWN state.  */
14977 
14978 static void
mips_74k_agen_init(rtx_insn * insn)14979 mips_74k_agen_init (rtx_insn *insn)
14980 {
14981   if (!insn || CALL_P (insn) || JUMP_P (insn))
14982     mips_last_74k_agen_insn = TYPE_UNKNOWN;
14983   else
14984     {
14985       enum attr_type type = get_attr_type (insn);
14986       if (type == TYPE_LOAD || type == TYPE_STORE)
14987 	mips_last_74k_agen_insn = type;
14988     }
14989 }
14990 
14991 /* A TUNE_74K helper function.  The 74K AGEN pipeline likes multiple
14992    loads to be grouped together, and multiple stores to be grouped
14993    together.  Swap things around in the ready queue to make this happen.  */
14994 
14995 static void
mips_74k_agen_reorder(rtx_insn ** ready,int nready)14996 mips_74k_agen_reorder (rtx_insn **ready, int nready)
14997 {
14998   int i;
14999   int store_pos, load_pos;
15000 
15001   store_pos = -1;
15002   load_pos = -1;
15003 
15004   for (i = nready - 1; i >= 0; i--)
15005     {
15006       rtx_insn *insn = ready[i];
15007       if (USEFUL_INSN_P (insn))
15008 	switch (get_attr_type (insn))
15009 	  {
15010 	  case TYPE_STORE:
15011 	    if (store_pos == -1)
15012 	      store_pos = i;
15013 	    break;
15014 
15015 	  case TYPE_LOAD:
15016 	    if (load_pos == -1)
15017 	      load_pos = i;
15018 	    break;
15019 
15020 	  default:
15021 	    break;
15022 	  }
15023     }
15024 
15025   if (load_pos == -1 || store_pos == -1)
15026     return;
15027 
15028   switch (mips_last_74k_agen_insn)
15029     {
15030     case TYPE_UNKNOWN:
15031       /* Prefer to schedule loads since they have a higher latency.  */
15032     case TYPE_LOAD:
15033       /* Swap loads to the front of the queue.  */
15034       mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
15035       break;
15036     case TYPE_STORE:
15037       /* Swap stores to the front of the queue.  */
15038       mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
15039       break;
15040     default:
15041       break;
15042     }
15043 }
15044 
/* Implement TARGET_SCHED_INIT.  Reset all per-region scheduler state
   before a new scheduling region is processed.  */

static void
mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
		 int max_ready ATTRIBUTE_UNUSED)
{
  /* Forget any history left over from the previous region.  */
  mips_macc_chains_last_hilo = 0;
  vr4130_last_insn = 0;
  mips_74k_agen_init (NULL);

  /* When scheduling for Loongson2, branch instructions go to ALU1,
     therefore basic block is most likely to start with round-robin counter
     pointed to ALU2.  */
  mips_ls2.alu1_turn_p = false;
  mips_ls2.falu1_turn_p = true;
}
15061 
15062 /* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2.  */
15063 
15064 static void
mips_sched_reorder_1(FILE * file ATTRIBUTE_UNUSED,int verbose ATTRIBUTE_UNUSED,rtx_insn ** ready,int * nreadyp,int cycle ATTRIBUTE_UNUSED)15065 mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
15066 		      rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
15067 {
15068   if (!reload_completed
15069       && TUNE_MACC_CHAINS
15070       && *nreadyp > 0)
15071     mips_macc_chains_reorder (ready, *nreadyp);
15072 
15073   if (reload_completed
15074       && TUNE_MIPS4130
15075       && !TARGET_VR4130_ALIGN
15076       && *nreadyp > 1)
15077     vr4130_reorder (ready, *nreadyp);
15078 
15079   if (TUNE_74K)
15080     mips_74k_agen_reorder (ready, *nreadyp);
15081 }
15082 
15083 /* Implement TARGET_SCHED_REORDER.  */
15084 
15085 static int
mips_sched_reorder(FILE * file ATTRIBUTE_UNUSED,int verbose ATTRIBUTE_UNUSED,rtx_insn ** ready,int * nreadyp,int cycle ATTRIBUTE_UNUSED)15086 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
15087 		    rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
15088 {
15089   mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
15090   return mips_issue_rate ();
15091 }
15092 
15093 /* Implement TARGET_SCHED_REORDER2.  */
15094 
15095 static int
mips_sched_reorder2(FILE * file ATTRIBUTE_UNUSED,int verbose ATTRIBUTE_UNUSED,rtx_insn ** ready,int * nreadyp,int cycle ATTRIBUTE_UNUSED)15096 mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
15097 		     rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
15098 {
15099   mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
15100   return cached_can_issue_more;
15101 }
15102 
15103 /* Update round-robin counters for ALU1/2 and FALU1/2.  */
15104 
15105 static void
mips_ls2_variable_issue(rtx_insn * insn)15106 mips_ls2_variable_issue (rtx_insn *insn)
15107 {
15108   if (mips_ls2.alu1_turn_p)
15109     {
15110       if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
15111 	mips_ls2.alu1_turn_p = false;
15112     }
15113   else
15114     {
15115       if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
15116 	mips_ls2.alu1_turn_p = true;
15117     }
15118 
15119   if (mips_ls2.falu1_turn_p)
15120     {
15121       if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
15122 	mips_ls2.falu1_turn_p = false;
15123     }
15124   else
15125     {
15126       if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
15127 	mips_ls2.falu1_turn_p = true;
15128     }
15129 
15130   if (recog_memoized (insn) >= 0)
15131     mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
15132 }
15133 
15134 /* Implement TARGET_SCHED_VARIABLE_ISSUE.  */
15135 
15136 static int
mips_variable_issue(FILE * file ATTRIBUTE_UNUSED,int verbose ATTRIBUTE_UNUSED,rtx_insn * insn,int more)15137 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
15138 		     rtx_insn *insn, int more)
15139 {
15140   /* Ignore USEs and CLOBBERs; don't count them against the issue rate.  */
15141   if (USEFUL_INSN_P (insn))
15142     {
15143       if (get_attr_type (insn) != TYPE_GHOST)
15144 	more--;
15145       if (!reload_completed && TUNE_MACC_CHAINS)
15146 	mips_macc_chains_record (insn);
15147       vr4130_last_insn = insn;
15148       if (TUNE_74K)
15149 	mips_74k_agen_init (insn);
15150       else if (TUNE_LOONGSON_2EF)
15151 	mips_ls2_variable_issue (insn);
15152     }
15153 
15154   /* Instructions of type 'multi' should all be split before
15155      the second scheduling pass.  */
15156   gcc_assert (!reload_completed
15157 	      || recog_memoized (insn) < 0
15158 	      || get_attr_type (insn) != TYPE_MULTI);
15159 
15160   cached_can_issue_more = more;
15161   return more;
15162 }
15163 
15164 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
15165    return the first operand of the associated PREF or PREFX insn.  */
15166 
15167 rtx
mips_prefetch_cookie(rtx write,rtx locality)15168 mips_prefetch_cookie (rtx write, rtx locality)
15169 {
15170   /* store_streamed / load_streamed.  */
15171   if (INTVAL (locality) <= 0)
15172     return GEN_INT (INTVAL (write) + 4);
15173 
15174   /* store / load.  */
15175   if (INTVAL (locality) <= 2)
15176     return write;
15177 
15178   /* store_retained / load_retained.  */
15179   return GEN_INT (INTVAL (write) + 6);
15180 }
15181 
15182 /* Loongson EXT2 only implements pref hint=0 (prefetch for load) and hint=1
15183    (prefetch for store), other hint just scale to hint = 0 and hint = 1.  */
15184 
15185 rtx
mips_loongson_ext2_prefetch_cookie(rtx write,rtx)15186 mips_loongson_ext2_prefetch_cookie (rtx write, rtx)
15187 {
15188   /* store.  */
15189   if (INTVAL (write) == 1)
15190     return GEN_INT (INTVAL (write));
15191 
15192   /* load.  */
15193   if (INTVAL (write) == 0)
15194     return GEN_INT (INTVAL (write));
15195 
15196   gcc_unreachable ();
15197 }
15198 
15199 
/* Flags that indicate when a built-in function is available.
   The two flags form a bitmask, combined by AVAIL_ALL below.

   BUILTIN_AVAIL_NON_MIPS16
	The function is available on the current target if !TARGET_MIPS16.

   BUILTIN_AVAIL_MIPS16
	The function is available on the current target if TARGET_MIPS16.  */
#define BUILTIN_AVAIL_NON_MIPS16 1
#define BUILTIN_AVAIL_MIPS16 2

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  */
#define AVAIL_NON_MIPS16(NAME, COND)					\
 static unsigned int							\
 mips_builtin_avail_##NAME (void)					\
 {									\
   return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0;			\
 }

/* Declare an availability predicate for built-in functions that
   support both MIPS16 and non-MIPS16 code and also require COND
   to be true.  NAME is the main part of the predicate's name.  */
#define AVAIL_ALL(NAME, COND)						\
 static unsigned int							\
 mips_builtin_avail_##NAME (void)					\
 {									\
   return (COND) ? BUILTIN_AVAIL_NON_MIPS16 | BUILTIN_AVAIL_MIPS16 : 0;	\
 }

/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available.  */
  unsigned int (*avail) (void);
};
15251 
/* Instantiate the availability predicates used by the built-in
   function tables below.  */
AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_MMI)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
AVAIL_NON_MIPS16 (msa, TARGET_MSA)
15264 
/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   CODE is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE,			\
		     FUNCTION_TYPE, AVAIL)				\
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND,			\
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE,		\
    mips_builtin_avail_ ## AVAIL }

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
   mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and AVAIL
   are as for MIPS_BUILTIN.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
   are subject to mips_builtin_avail_<AVAIL>.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL),	\
  MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
   while the any and all forms are subject to mips_builtin_avail_mips3d.  */
#define CMP_PS_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL)
15316 
/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   are subject to mips_builtin_avail_mips3d.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ANY,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d),		\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ALL,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires mips_builtin_avail_<AVAIL>.  */
#define MOVTF_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,	\
		AVAIL)

/* Define all the built-in functions related to C.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, paired_single),				\
  MOVTF_BUILTINS (cabs, COND, mips3d),					\
  CMP_SCALAR_BUILTINS (cabs, COND, mips3d),				\
  CMP_PS_BUILTINS (c, COND, paired_single),				\
  CMP_PS_BUILTINS (cabs, COND, mips3d),					\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
   function mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE
   and AVAIL are as for MIPS_BUILTIN.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
  MIPS_BUILTIN (INSN, f, #INSN,	MIPS_BUILTIN_DIRECT_NO_TARGET,		\
		FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  AVAIL is as for MIPS_BUILTIN.  */
#define BPOSGE_BUILTIN(VALUE, AVAIL)					\
  MIPS_BUILTIN (bposge, f, "bposge" #VALUE,				\
		MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
15359 
15360 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
15361    for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
15362    builtin_description field.  */
15363 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE)		\
15364   { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f,				\
15365     "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT,		\
15366     FUNCTION_TYPE, mips_builtin_avail_loongson }
15367 
/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  This is the common case in which the builtin
   name and the instruction name coincide.  */
#define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE)				\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
15373 
/* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
   We use functions of this form when the same insn can be usefully applied
   to more than one datatype (e.g. a "u" and an "s" builtin sharing one
   instruction).  */
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE)		\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
15379 
/* Define an MSA MIPS_BUILTIN_DIRECT function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  MIPS_FP_COND_f fills the unused FP-condition slot.  */
#define MSA_BUILTIN(INSN, FUNCTION_TYPE)				\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT,			\
    FUNCTION_TYPE, mips_builtin_avail_msa }
15387 
/* Define a remapped MSA MIPS_BUILTIN_DIRECT function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN2>.  FUNCTION_TYPE is
   a builtin_description field.  Used when the builtin's name differs from
   the insn that implements it.  */
#define MSA_BUILTIN_REMAP(INSN, INSN2, FUNCTION_TYPE)	\
    { CODE_FOR_msa_ ## INSN2, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT,			\
    FUNCTION_TYPE, mips_builtin_avail_msa }
15395 
/* Define an MSA MIPS_BUILTIN_MSA_TEST_BRANCH function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  Used for the vector test-and-branch operations.  */
#define MSA_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE)			\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN, MIPS_BUILTIN_MSA_TEST_BRANCH,		\
    FUNCTION_TYPE, mips_builtin_avail_msa }
15403 
/* Define an MSA MIPS_BUILTIN_DIRECT_NO_TARGET function __builtin_msa_<INSN>
   for instruction CODE_FOR_msa_<INSN>.  FUNCTION_TYPE is a builtin_description
   field.  Used for builtins that produce no result value.  */
#define MSA_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE)			\
    { CODE_FOR_msa_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_msa_" #INSN,  MIPS_BUILTIN_DIRECT_NO_TARGET,		\
    FUNCTION_TYPE, mips_builtin_avail_msa }
15411 
/* Redirect selected CODE_FOR_mips_* names onto the standard pattern names
   used in the machine description, so the builtin macros above can refer
   to them uniformly by builtin name.  */
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
#define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
#define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit
15420 
/* Redirect CODE_FOR_loongson_* names onto the generic vector pattern names
   (saturating pack, add/sub, min/max, multiply high/low, shifts) that the
   machine description provides.  */
#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
#define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
#define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
#define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
#define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
#define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
#define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
15451 
/* Redirect CODE_FOR_msa_* names onto the generic vector pattern names where
   the machine description provides them.  Immediate-operand variants (e.g.
   addvi, maxi) share the insn code of the corresponding register form.  */
#define CODE_FOR_msa_adds_s_b CODE_FOR_ssaddv16qi3
#define CODE_FOR_msa_adds_s_h CODE_FOR_ssaddv8hi3
#define CODE_FOR_msa_adds_s_w CODE_FOR_ssaddv4si3
#define CODE_FOR_msa_adds_s_d CODE_FOR_ssaddv2di3
#define CODE_FOR_msa_adds_u_b CODE_FOR_usaddv16qi3
#define CODE_FOR_msa_adds_u_h CODE_FOR_usaddv8hi3
#define CODE_FOR_msa_adds_u_w CODE_FOR_usaddv4si3
#define CODE_FOR_msa_adds_u_d CODE_FOR_usaddv2di3
#define CODE_FOR_msa_addv_b CODE_FOR_addv16qi3
#define CODE_FOR_msa_addv_h CODE_FOR_addv8hi3
#define CODE_FOR_msa_addv_w CODE_FOR_addv4si3
#define CODE_FOR_msa_addv_d CODE_FOR_addv2di3
#define CODE_FOR_msa_addvi_b CODE_FOR_addv16qi3
#define CODE_FOR_msa_addvi_h CODE_FOR_addv8hi3
#define CODE_FOR_msa_addvi_w CODE_FOR_addv4si3
#define CODE_FOR_msa_addvi_d CODE_FOR_addv2di3
#define CODE_FOR_msa_and_v CODE_FOR_andv16qi3
#define CODE_FOR_msa_andi_b CODE_FOR_andv16qi3
/* Bitwise select and test insns keep MSA-specific patterns; the _v forms
   reuse the byte-element pattern.  */
#define CODE_FOR_msa_bmnz_v CODE_FOR_msa_bmnz_b
#define CODE_FOR_msa_bmnzi_b CODE_FOR_msa_bmnz_b
#define CODE_FOR_msa_bmz_v CODE_FOR_msa_bmz_b
#define CODE_FOR_msa_bmzi_b CODE_FOR_msa_bmz_b
#define CODE_FOR_msa_bnz_v CODE_FOR_msa_bnz_v_b
#define CODE_FOR_msa_bz_v CODE_FOR_msa_bz_v_b
#define CODE_FOR_msa_bsel_v CODE_FOR_msa_bsel_b
#define CODE_FOR_msa_bseli_b CODE_FOR_msa_bsel_b
#define CODE_FOR_msa_ceqi_b CODE_FOR_msa_ceq_b
#define CODE_FOR_msa_ceqi_h CODE_FOR_msa_ceq_h
#define CODE_FOR_msa_ceqi_w CODE_FOR_msa_ceq_w
#define CODE_FOR_msa_ceqi_d CODE_FOR_msa_ceq_d
#define CODE_FOR_msa_clti_s_b CODE_FOR_msa_clt_s_b
#define CODE_FOR_msa_clti_s_h CODE_FOR_msa_clt_s_h
#define CODE_FOR_msa_clti_s_w CODE_FOR_msa_clt_s_w
#define CODE_FOR_msa_clti_s_d CODE_FOR_msa_clt_s_d
#define CODE_FOR_msa_clti_u_b CODE_FOR_msa_clt_u_b
#define CODE_FOR_msa_clti_u_h CODE_FOR_msa_clt_u_h
#define CODE_FOR_msa_clti_u_w CODE_FOR_msa_clt_u_w
#define CODE_FOR_msa_clti_u_d CODE_FOR_msa_clt_u_d
#define CODE_FOR_msa_clei_s_b CODE_FOR_msa_cle_s_b
#define CODE_FOR_msa_clei_s_h CODE_FOR_msa_cle_s_h
#define CODE_FOR_msa_clei_s_w CODE_FOR_msa_cle_s_w
#define CODE_FOR_msa_clei_s_d CODE_FOR_msa_cle_s_d
#define CODE_FOR_msa_clei_u_b CODE_FOR_msa_cle_u_b
#define CODE_FOR_msa_clei_u_h CODE_FOR_msa_cle_u_h
#define CODE_FOR_msa_clei_u_w CODE_FOR_msa_cle_u_w
#define CODE_FOR_msa_clei_u_d CODE_FOR_msa_cle_u_d
#define CODE_FOR_msa_div_s_b CODE_FOR_divv16qi3
#define CODE_FOR_msa_div_s_h CODE_FOR_divv8hi3
#define CODE_FOR_msa_div_s_w CODE_FOR_divv4si3
#define CODE_FOR_msa_div_s_d CODE_FOR_divv2di3
#define CODE_FOR_msa_div_u_b CODE_FOR_udivv16qi3
#define CODE_FOR_msa_div_u_h CODE_FOR_udivv8hi3
#define CODE_FOR_msa_div_u_w CODE_FOR_udivv4si3
#define CODE_FOR_msa_div_u_d CODE_FOR_udivv2di3
/* Floating-point operations map onto the standard FP vector patterns.  */
#define CODE_FOR_msa_fadd_w CODE_FOR_addv4sf3
#define CODE_FOR_msa_fadd_d CODE_FOR_addv2df3
#define CODE_FOR_msa_fexdo_w CODE_FOR_vec_pack_trunc_v2df
#define CODE_FOR_msa_ftrunc_s_w CODE_FOR_fix_truncv4sfv4si2
#define CODE_FOR_msa_ftrunc_s_d CODE_FOR_fix_truncv2dfv2di2
#define CODE_FOR_msa_ftrunc_u_w CODE_FOR_fixuns_truncv4sfv4si2
#define CODE_FOR_msa_ftrunc_u_d CODE_FOR_fixuns_truncv2dfv2di2
#define CODE_FOR_msa_ffint_s_w CODE_FOR_floatv4siv4sf2
#define CODE_FOR_msa_ffint_s_d CODE_FOR_floatv2div2df2
#define CODE_FOR_msa_ffint_u_w CODE_FOR_floatunsv4siv4sf2
#define CODE_FOR_msa_ffint_u_d CODE_FOR_floatunsv2div2df2
#define CODE_FOR_msa_fsub_w CODE_FOR_subv4sf3
#define CODE_FOR_msa_fsub_d CODE_FOR_subv2df3
#define CODE_FOR_msa_fmadd_w CODE_FOR_fmav4sf4
#define CODE_FOR_msa_fmadd_d CODE_FOR_fmav2df4
#define CODE_FOR_msa_fmsub_w CODE_FOR_fnmav4sf4
#define CODE_FOR_msa_fmsub_d CODE_FOR_fnmav2df4
#define CODE_FOR_msa_fmul_w CODE_FOR_mulv4sf3
#define CODE_FOR_msa_fmul_d CODE_FOR_mulv2df3
#define CODE_FOR_msa_fdiv_w CODE_FOR_divv4sf3
#define CODE_FOR_msa_fdiv_d CODE_FOR_divv2df3
#define CODE_FOR_msa_fmax_w CODE_FOR_smaxv4sf3
#define CODE_FOR_msa_fmax_d CODE_FOR_smaxv2df3
#define CODE_FOR_msa_fmin_w CODE_FOR_sminv4sf3
#define CODE_FOR_msa_fmin_d CODE_FOR_sminv2df3
#define CODE_FOR_msa_fsqrt_w CODE_FOR_sqrtv4sf2
#define CODE_FOR_msa_fsqrt_d CODE_FOR_sqrtv2df2
#define CODE_FOR_msa_max_s_b CODE_FOR_smaxv16qi3
#define CODE_FOR_msa_max_s_h CODE_FOR_smaxv8hi3
#define CODE_FOR_msa_max_s_w CODE_FOR_smaxv4si3
#define CODE_FOR_msa_max_s_d CODE_FOR_smaxv2di3
#define CODE_FOR_msa_maxi_s_b CODE_FOR_smaxv16qi3
#define CODE_FOR_msa_maxi_s_h CODE_FOR_smaxv8hi3
#define CODE_FOR_msa_maxi_s_w CODE_FOR_smaxv4si3
#define CODE_FOR_msa_maxi_s_d CODE_FOR_smaxv2di3
#define CODE_FOR_msa_max_u_b CODE_FOR_umaxv16qi3
#define CODE_FOR_msa_max_u_h CODE_FOR_umaxv8hi3
#define CODE_FOR_msa_max_u_w CODE_FOR_umaxv4si3
#define CODE_FOR_msa_max_u_d CODE_FOR_umaxv2di3
#define CODE_FOR_msa_maxi_u_b CODE_FOR_umaxv16qi3
#define CODE_FOR_msa_maxi_u_h CODE_FOR_umaxv8hi3
#define CODE_FOR_msa_maxi_u_w CODE_FOR_umaxv4si3
#define CODE_FOR_msa_maxi_u_d CODE_FOR_umaxv2di3
#define CODE_FOR_msa_min_s_b CODE_FOR_sminv16qi3
#define CODE_FOR_msa_min_s_h CODE_FOR_sminv8hi3
#define CODE_FOR_msa_min_s_w CODE_FOR_sminv4si3
#define CODE_FOR_msa_min_s_d CODE_FOR_sminv2di3
#define CODE_FOR_msa_mini_s_b CODE_FOR_sminv16qi3
#define CODE_FOR_msa_mini_s_h CODE_FOR_sminv8hi3
#define CODE_FOR_msa_mini_s_w CODE_FOR_sminv4si3
#define CODE_FOR_msa_mini_s_d CODE_FOR_sminv2di3
#define CODE_FOR_msa_min_u_b CODE_FOR_uminv16qi3
#define CODE_FOR_msa_min_u_h CODE_FOR_uminv8hi3
#define CODE_FOR_msa_min_u_w CODE_FOR_uminv4si3
#define CODE_FOR_msa_min_u_d CODE_FOR_uminv2di3
#define CODE_FOR_msa_mini_u_b CODE_FOR_uminv16qi3
#define CODE_FOR_msa_mini_u_h CODE_FOR_uminv8hi3
#define CODE_FOR_msa_mini_u_w CODE_FOR_uminv4si3
#define CODE_FOR_msa_mini_u_d CODE_FOR_uminv2di3
/* Map the MSA modulo builtins onto the generic mod/umod patterns.
   (This set was previously defined twice back-to-back; the redundant
   duplicate definitions have been removed.)  */
#define CODE_FOR_msa_mod_s_b CODE_FOR_modv16qi3
#define CODE_FOR_msa_mod_s_h CODE_FOR_modv8hi3
#define CODE_FOR_msa_mod_s_w CODE_FOR_modv4si3
#define CODE_FOR_msa_mod_s_d CODE_FOR_modv2di3
#define CODE_FOR_msa_mod_u_b CODE_FOR_umodv16qi3
#define CODE_FOR_msa_mod_u_h CODE_FOR_umodv8hi3
#define CODE_FOR_msa_mod_u_w CODE_FOR_umodv4si3
#define CODE_FOR_msa_mod_u_d CODE_FOR_umodv2di3
/* Continue the CODE_FOR_msa_* redirection: multiply, count-leading-zeros,
   logical ops, population count, vector shifts (by-vector patterns also
   serve the immediate-shift builtins), and subtraction.  */
#define CODE_FOR_msa_mulv_b CODE_FOR_mulv16qi3
#define CODE_FOR_msa_mulv_h CODE_FOR_mulv8hi3
#define CODE_FOR_msa_mulv_w CODE_FOR_mulv4si3
#define CODE_FOR_msa_mulv_d CODE_FOR_mulv2di3
#define CODE_FOR_msa_nlzc_b CODE_FOR_clzv16qi2
#define CODE_FOR_msa_nlzc_h CODE_FOR_clzv8hi2
#define CODE_FOR_msa_nlzc_w CODE_FOR_clzv4si2
#define CODE_FOR_msa_nlzc_d CODE_FOR_clzv2di2
#define CODE_FOR_msa_nor_v CODE_FOR_msa_nor_b
#define CODE_FOR_msa_or_v CODE_FOR_iorv16qi3
#define CODE_FOR_msa_ori_b CODE_FOR_iorv16qi3
#define CODE_FOR_msa_nori_b CODE_FOR_msa_nor_b
#define CODE_FOR_msa_pcnt_b CODE_FOR_popcountv16qi2
#define CODE_FOR_msa_pcnt_h CODE_FOR_popcountv8hi2
#define CODE_FOR_msa_pcnt_w CODE_FOR_popcountv4si2
#define CODE_FOR_msa_pcnt_d CODE_FOR_popcountv2di2
#define CODE_FOR_msa_xor_v CODE_FOR_xorv16qi3
#define CODE_FOR_msa_xori_b CODE_FOR_xorv16qi3
#define CODE_FOR_msa_sll_b CODE_FOR_vashlv16qi3
#define CODE_FOR_msa_sll_h CODE_FOR_vashlv8hi3
#define CODE_FOR_msa_sll_w CODE_FOR_vashlv4si3
#define CODE_FOR_msa_sll_d CODE_FOR_vashlv2di3
#define CODE_FOR_msa_slli_b CODE_FOR_vashlv16qi3
#define CODE_FOR_msa_slli_h CODE_FOR_vashlv8hi3
#define CODE_FOR_msa_slli_w CODE_FOR_vashlv4si3
#define CODE_FOR_msa_slli_d CODE_FOR_vashlv2di3
#define CODE_FOR_msa_sra_b CODE_FOR_vashrv16qi3
#define CODE_FOR_msa_sra_h CODE_FOR_vashrv8hi3
#define CODE_FOR_msa_sra_w CODE_FOR_vashrv4si3
#define CODE_FOR_msa_sra_d CODE_FOR_vashrv2di3
#define CODE_FOR_msa_srai_b CODE_FOR_vashrv16qi3
#define CODE_FOR_msa_srai_h CODE_FOR_vashrv8hi3
#define CODE_FOR_msa_srai_w CODE_FOR_vashrv4si3
#define CODE_FOR_msa_srai_d CODE_FOR_vashrv2di3
#define CODE_FOR_msa_srl_b CODE_FOR_vlshrv16qi3
#define CODE_FOR_msa_srl_h CODE_FOR_vlshrv8hi3
#define CODE_FOR_msa_srl_w CODE_FOR_vlshrv4si3
#define CODE_FOR_msa_srl_d CODE_FOR_vlshrv2di3
#define CODE_FOR_msa_srli_b CODE_FOR_vlshrv16qi3
#define CODE_FOR_msa_srli_h CODE_FOR_vlshrv8hi3
#define CODE_FOR_msa_srli_w CODE_FOR_vlshrv4si3
#define CODE_FOR_msa_srli_d CODE_FOR_vlshrv2di3
#define CODE_FOR_msa_subv_b CODE_FOR_subv16qi3
#define CODE_FOR_msa_subv_h CODE_FOR_subv8hi3
#define CODE_FOR_msa_subv_w CODE_FOR_subv4si3
#define CODE_FOR_msa_subv_d CODE_FOR_subv2di3
#define CODE_FOR_msa_subvi_b CODE_FOR_subv16qi3
#define CODE_FOR_msa_subvi_h CODE_FOR_subv8hi3
#define CODE_FOR_msa_subvi_w CODE_FOR_subv4si3
#define CODE_FOR_msa_subvi_d CODE_FOR_subv2di3
15631 
/* Whole-register move maps onto the V16QI move pattern.  */
#define CODE_FOR_msa_move_v CODE_FOR_movv16qi

/* Vector shuffles map onto the generic vec_perm patterns.  */
#define CODE_FOR_msa_vshf_b CODE_FOR_vec_permv16qi
#define CODE_FOR_msa_vshf_h CODE_FOR_vec_permv8hi
#define CODE_FOR_msa_vshf_w CODE_FOR_vec_permv4si
#define CODE_FOR_msa_vshf_d CODE_FOR_vec_permv2di

/* For doubleword (two-element) vectors, the odd/even interleave and pack
   operations presumably coincide with left/right interleave, so they share
   insn codes — NOTE(review): verify against the MSA architecture spec.  */
#define CODE_FOR_msa_ilvod_d CODE_FOR_msa_ilvl_d
#define CODE_FOR_msa_ilvev_d CODE_FOR_msa_ilvr_d
#define CODE_FOR_msa_pckod_d CODE_FOR_msa_ilvl_d
#define CODE_FOR_msa_pckev_d CODE_FOR_msa_ilvr_d

/* ldi builtins map onto the per-mode ldi expanders.  */
#define CODE_FOR_msa_ldi_b CODE_FOR_msa_ldiv16qi
#define CODE_FOR_msa_ldi_h CODE_FOR_msa_ldiv8hi
#define CODE_FOR_msa_ldi_w CODE_FOR_msa_ldiv4si
#define CODE_FOR_msa_ldi_d CODE_FOR_msa_ldiv2di
15648 
15649 static const struct mips_builtin_description mips_builtins[] = {
15650 #define MIPS_GET_FCSR 0
15651   DIRECT_BUILTIN (get_fcsr, MIPS_USI_FTYPE_VOID, hard_float),
15652 #define MIPS_SET_FCSR 1
15653   DIRECT_NO_TARGET_BUILTIN (set_fcsr, MIPS_VOID_FTYPE_USI, hard_float),
15654 
15655   DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
15656   DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
15657   DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
15658   DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
15659   DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
15660   DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
15661   DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
15662   DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
15663 
15664   DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
15665   DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
15666   DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
15667   DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
15668   DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
15669 
15670   DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
15671   DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
15672   DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
15673   DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
15674   DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
15675   DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
15676 
15677   DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
15678   DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
15679   DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
15680   DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
15681   DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
15682   DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
15683 
15684   MIPS_FP_CONDITIONS (CMP_BUILTINS),
15685 
15686   /* Built-in functions for the SB-1 processor.  */
15687   DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
15688 
15689   /* Built-in functions for the DSP ASE (32-bit and 64-bit).  */
15690   DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15691   DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15692   DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
15693   DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
15694   DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
15695   DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15696   DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15697   DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
15698   DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
15699   DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
15700   DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
15701   DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
15702   DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
15703   DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
15704   DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
15705   DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
15706   DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
15707   DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
15708   DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
15709   DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
15710   DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
15711   DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
15712   DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
15713   DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
15714   DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
15715   DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
15716   DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
15717   DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
15718   DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
15719   DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
15720   DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
15721   DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
15722   DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
15723   DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
15724   DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
15725   DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
15726   DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
15727   DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
15728   DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
15729   DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
15730   DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15731   DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
15732   DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
15733   DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
15734   DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
15735   DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
15736   DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
15737   DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
15738   DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
15739   DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
15740   DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
15741   DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
15742   DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
15743   DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
15744   DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
15745   DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
15746   DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
15747   DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15748   DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
15749   DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
15750   DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
15751   DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
15752   DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
15753   DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
15754   BPOSGE_BUILTIN (32, dsp),
15755 
15756   /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit).  */
15757   DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
15758   DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15759   DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15760   DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
15761   DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
15762   DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
15763   DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
15764   DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
15765   DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
15766   DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
15767   DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15768   DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15769   DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15770   DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15771   DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15772   DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
15773   DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
15774   DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
15775   DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
15776   DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
15777   DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
15778   DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
15779   DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15780   DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15781   DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
15782   DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
15783   DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15784   DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15785   DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15786   DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15787   DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15788   DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
15789   DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15790   DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
15791 
15792   /* Built-in functions for the DSP ASE (32-bit only).  */
15793   DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
15794   DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
15795   DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
15796   DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
15797   DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15798   DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15799   DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15800   DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
15801   DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
15802   DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15803   DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15804   DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15805   DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
15806   DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
15807   DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
15808   DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
15809   DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
15810   DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
15811   DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
15812   DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
15813   DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
15814   DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
15815   DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
15816   DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
15817   DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
15818   DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dsp_32),
15819   DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dsp_32),
15820 
15821   /* Built-in functions for the DSP ASE (64-bit only).  */
15822   DIRECT_BUILTIN (ldx, MIPS_DI_FTYPE_POINTER_SI, dsp_64),
15823 
15824   /* The following are for the MIPS DSP ASE REV 2 (32-bit only).  */
15825   DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15826   DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15827   DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15828   DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15829   DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15830   DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15831   DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15832   DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15833   DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
15834 
15835   /* Builtin functions for ST Microelectronics Loongson-2E/2F cores.  */
15836   LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
15837   LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
15838   LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
15839   LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15840   LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15841   LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15842   LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15843   LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15844   LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15845   LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
15846   LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
15847   LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15848   LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
15849   LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15850   LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15851   LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
15852   LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15853   LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15854   LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15855   LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
15856   LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
15857   LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15858   LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
15859   LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15860   LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15861   LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15862   LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15863   LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15864   LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15865   LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15866   LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15867   LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15868   LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15869   LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15870   LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15871   LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15872   LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15873   LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
15874   LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
15875   LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15876   LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15877   LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15878   LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15879   LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15880   LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15881   LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15882   LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15883   LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
15884   LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15885   LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15886   LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15887   LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15888   LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
15889   LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
15890   LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15891   LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15892   LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15893   LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
15894   LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15895   LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
15896   LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
15897   LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
15898   LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
15899   LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
15900   LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
15901   LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
15902   LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
15903   LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
15904   LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
15905   LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
15906   LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
15907   LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
15908   LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
15909   LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
15910   LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
15911   LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15912   LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15913   LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15914   LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15915   LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15916   LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15917   LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
15918   LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
15919   LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
15920   LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
15921   LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15922   LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15923   LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15924   LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15925   LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15926   LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15927   LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15928   LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15929   LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
15930   LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
15931   LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
15932   LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
15933   LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
15934   LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
15935 
15936   /* Sundry other built-in functions.  */
15937   DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache),
15938 
15939   /* Built-in functions for MSA.  */
15940   MSA_BUILTIN (sll_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
15941   MSA_BUILTIN (sll_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
15942   MSA_BUILTIN (sll_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
15943   MSA_BUILTIN (sll_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
15944   MSA_BUILTIN (slli_b, MIPS_V16QI_FTYPE_V16QI_UQI),
15945   MSA_BUILTIN (slli_h, MIPS_V8HI_FTYPE_V8HI_UQI),
15946   MSA_BUILTIN (slli_w, MIPS_V4SI_FTYPE_V4SI_UQI),
15947   MSA_BUILTIN (slli_d, MIPS_V2DI_FTYPE_V2DI_UQI),
15948   MSA_BUILTIN (sra_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
15949   MSA_BUILTIN (sra_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
15950   MSA_BUILTIN (sra_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
15951   MSA_BUILTIN (sra_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
15952   MSA_BUILTIN (srai_b, MIPS_V16QI_FTYPE_V16QI_UQI),
15953   MSA_BUILTIN (srai_h, MIPS_V8HI_FTYPE_V8HI_UQI),
15954   MSA_BUILTIN (srai_w, MIPS_V4SI_FTYPE_V4SI_UQI),
15955   MSA_BUILTIN (srai_d, MIPS_V2DI_FTYPE_V2DI_UQI),
15956   MSA_BUILTIN (srar_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
15957   MSA_BUILTIN (srar_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
15958   MSA_BUILTIN (srar_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
15959   MSA_BUILTIN (srar_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
15960   MSA_BUILTIN (srari_b, MIPS_V16QI_FTYPE_V16QI_UQI),
15961   MSA_BUILTIN (srari_h, MIPS_V8HI_FTYPE_V8HI_UQI),
15962   MSA_BUILTIN (srari_w, MIPS_V4SI_FTYPE_V4SI_UQI),
15963   MSA_BUILTIN (srari_d, MIPS_V2DI_FTYPE_V2DI_UQI),
15964   MSA_BUILTIN (srl_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
15965   MSA_BUILTIN (srl_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
15966   MSA_BUILTIN (srl_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
15967   MSA_BUILTIN (srl_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
15968   MSA_BUILTIN (srli_b, MIPS_V16QI_FTYPE_V16QI_UQI),
15969   MSA_BUILTIN (srli_h, MIPS_V8HI_FTYPE_V8HI_UQI),
15970   MSA_BUILTIN (srli_w, MIPS_V4SI_FTYPE_V4SI_UQI),
15971   MSA_BUILTIN (srli_d, MIPS_V2DI_FTYPE_V2DI_UQI),
15972   MSA_BUILTIN (srlr_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
15973   MSA_BUILTIN (srlr_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
15974   MSA_BUILTIN (srlr_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
15975   MSA_BUILTIN (srlr_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
15976   MSA_BUILTIN (srlri_b, MIPS_V16QI_FTYPE_V16QI_UQI),
15977   MSA_BUILTIN (srlri_h, MIPS_V8HI_FTYPE_V8HI_UQI),
15978   MSA_BUILTIN (srlri_w, MIPS_V4SI_FTYPE_V4SI_UQI),
15979   MSA_BUILTIN (srlri_d, MIPS_V2DI_FTYPE_V2DI_UQI),
15980   MSA_BUILTIN (bclr_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
15981   MSA_BUILTIN (bclr_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
15982   MSA_BUILTIN (bclr_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
15983   MSA_BUILTIN (bclr_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
15984   MSA_BUILTIN (bclri_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
15985   MSA_BUILTIN (bclri_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
15986   MSA_BUILTIN (bclri_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
15987   MSA_BUILTIN (bclri_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
15988   MSA_BUILTIN (bset_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
15989   MSA_BUILTIN (bset_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
15990   MSA_BUILTIN (bset_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
15991   MSA_BUILTIN (bset_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
15992   MSA_BUILTIN (bseti_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
15993   MSA_BUILTIN (bseti_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
15994   MSA_BUILTIN (bseti_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
15995   MSA_BUILTIN (bseti_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
15996   MSA_BUILTIN (bneg_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
15997   MSA_BUILTIN (bneg_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
15998   MSA_BUILTIN (bneg_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
15999   MSA_BUILTIN (bneg_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16000   MSA_BUILTIN (bnegi_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16001   MSA_BUILTIN (bnegi_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
16002   MSA_BUILTIN (bnegi_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
16003   MSA_BUILTIN (bnegi_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
16004   MSA_BUILTIN (binsl_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
16005   MSA_BUILTIN (binsl_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI),
16006   MSA_BUILTIN (binsl_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI),
16007   MSA_BUILTIN (binsl_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
16008   MSA_BUILTIN (binsli_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
16009   MSA_BUILTIN (binsli_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI),
16010   MSA_BUILTIN (binsli_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI),
16011   MSA_BUILTIN (binsli_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI),
16012   MSA_BUILTIN (binsr_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
16013   MSA_BUILTIN (binsr_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UV8HI),
16014   MSA_BUILTIN (binsr_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UV4SI),
16015   MSA_BUILTIN (binsr_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
16016   MSA_BUILTIN (binsri_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
16017   MSA_BUILTIN (binsri_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI_UQI),
16018   MSA_BUILTIN (binsri_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI_UQI),
16019   MSA_BUILTIN (binsri_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI_UQI),
16020   MSA_BUILTIN (addv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16021   MSA_BUILTIN (addv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16022   MSA_BUILTIN (addv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16023   MSA_BUILTIN (addv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16024   MSA_BUILTIN (addvi_b, MIPS_V16QI_FTYPE_V16QI_UQI),
16025   MSA_BUILTIN (addvi_h, MIPS_V8HI_FTYPE_V8HI_UQI),
16026   MSA_BUILTIN (addvi_w, MIPS_V4SI_FTYPE_V4SI_UQI),
16027   MSA_BUILTIN (addvi_d, MIPS_V2DI_FTYPE_V2DI_UQI),
16028   MSA_BUILTIN (subv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16029   MSA_BUILTIN (subv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16030   MSA_BUILTIN (subv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16031   MSA_BUILTIN (subv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16032   MSA_BUILTIN (subvi_b, MIPS_V16QI_FTYPE_V16QI_UQI),
16033   MSA_BUILTIN (subvi_h, MIPS_V8HI_FTYPE_V8HI_UQI),
16034   MSA_BUILTIN (subvi_w, MIPS_V4SI_FTYPE_V4SI_UQI),
16035   MSA_BUILTIN (subvi_d, MIPS_V2DI_FTYPE_V2DI_UQI),
16036   MSA_BUILTIN (max_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16037   MSA_BUILTIN (max_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16038   MSA_BUILTIN (max_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16039   MSA_BUILTIN (max_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16040   MSA_BUILTIN (maxi_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
16041   MSA_BUILTIN (maxi_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
16042   MSA_BUILTIN (maxi_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
16043   MSA_BUILTIN (maxi_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
16044   MSA_BUILTIN (max_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16045   MSA_BUILTIN (max_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16046   MSA_BUILTIN (max_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16047   MSA_BUILTIN (max_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16048   MSA_BUILTIN (maxi_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16049   MSA_BUILTIN (maxi_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
16050   MSA_BUILTIN (maxi_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
16051   MSA_BUILTIN (maxi_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
16052   MSA_BUILTIN (min_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16053   MSA_BUILTIN (min_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16054   MSA_BUILTIN (min_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16055   MSA_BUILTIN (min_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16056   MSA_BUILTIN (mini_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
16057   MSA_BUILTIN (mini_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
16058   MSA_BUILTIN (mini_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
16059   MSA_BUILTIN (mini_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
16060   MSA_BUILTIN (min_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16061   MSA_BUILTIN (min_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16062   MSA_BUILTIN (min_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16063   MSA_BUILTIN (min_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16064   MSA_BUILTIN (mini_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16065   MSA_BUILTIN (mini_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
16066   MSA_BUILTIN (mini_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
16067   MSA_BUILTIN (mini_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
16068   MSA_BUILTIN (max_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16069   MSA_BUILTIN (max_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16070   MSA_BUILTIN (max_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16071   MSA_BUILTIN (max_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16072   MSA_BUILTIN (min_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16073   MSA_BUILTIN (min_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16074   MSA_BUILTIN (min_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16075   MSA_BUILTIN (min_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16076   MSA_BUILTIN (ceq_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16077   MSA_BUILTIN (ceq_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16078   MSA_BUILTIN (ceq_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16079   MSA_BUILTIN (ceq_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16080   MSA_BUILTIN (ceqi_b, MIPS_V16QI_FTYPE_V16QI_QI),
16081   MSA_BUILTIN (ceqi_h, MIPS_V8HI_FTYPE_V8HI_QI),
16082   MSA_BUILTIN (ceqi_w, MIPS_V4SI_FTYPE_V4SI_QI),
16083   MSA_BUILTIN (ceqi_d, MIPS_V2DI_FTYPE_V2DI_QI),
16084   MSA_BUILTIN (clt_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16085   MSA_BUILTIN (clt_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16086   MSA_BUILTIN (clt_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16087   MSA_BUILTIN (clt_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16088   MSA_BUILTIN (clti_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
16089   MSA_BUILTIN (clti_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
16090   MSA_BUILTIN (clti_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
16091   MSA_BUILTIN (clti_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
16092   MSA_BUILTIN (clt_u_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
16093   MSA_BUILTIN (clt_u_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
16094   MSA_BUILTIN (clt_u_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
16095   MSA_BUILTIN (clt_u_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
16096   MSA_BUILTIN (clti_u_b, MIPS_V16QI_FTYPE_UV16QI_UQI),
16097   MSA_BUILTIN (clti_u_h, MIPS_V8HI_FTYPE_UV8HI_UQI),
16098   MSA_BUILTIN (clti_u_w, MIPS_V4SI_FTYPE_UV4SI_UQI),
16099   MSA_BUILTIN (clti_u_d, MIPS_V2DI_FTYPE_UV2DI_UQI),
16100   MSA_BUILTIN (cle_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16101   MSA_BUILTIN (cle_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16102   MSA_BUILTIN (cle_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16103   MSA_BUILTIN (cle_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16104   MSA_BUILTIN (clei_s_b, MIPS_V16QI_FTYPE_V16QI_QI),
16105   MSA_BUILTIN (clei_s_h, MIPS_V8HI_FTYPE_V8HI_QI),
16106   MSA_BUILTIN (clei_s_w, MIPS_V4SI_FTYPE_V4SI_QI),
16107   MSA_BUILTIN (clei_s_d, MIPS_V2DI_FTYPE_V2DI_QI),
16108   MSA_BUILTIN (cle_u_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
16109   MSA_BUILTIN (cle_u_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
16110   MSA_BUILTIN (cle_u_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
16111   MSA_BUILTIN (cle_u_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
16112   MSA_BUILTIN (clei_u_b, MIPS_V16QI_FTYPE_UV16QI_UQI),
16113   MSA_BUILTIN (clei_u_h, MIPS_V8HI_FTYPE_UV8HI_UQI),
16114   MSA_BUILTIN (clei_u_w, MIPS_V4SI_FTYPE_UV4SI_UQI),
16115   MSA_BUILTIN (clei_u_d, MIPS_V2DI_FTYPE_UV2DI_UQI),
16116   MSA_BUILTIN (ld_b, MIPS_V16QI_FTYPE_CVPOINTER_SI),
16117   MSA_BUILTIN (ld_h, MIPS_V8HI_FTYPE_CVPOINTER_SI),
16118   MSA_BUILTIN (ld_w, MIPS_V4SI_FTYPE_CVPOINTER_SI),
16119   MSA_BUILTIN (ld_d, MIPS_V2DI_FTYPE_CVPOINTER_SI),
16120   MSA_NO_TARGET_BUILTIN (st_b, MIPS_VOID_FTYPE_V16QI_CVPOINTER_SI),
16121   MSA_NO_TARGET_BUILTIN (st_h, MIPS_VOID_FTYPE_V8HI_CVPOINTER_SI),
16122   MSA_NO_TARGET_BUILTIN (st_w, MIPS_VOID_FTYPE_V4SI_CVPOINTER_SI),
16123   MSA_NO_TARGET_BUILTIN (st_d, MIPS_VOID_FTYPE_V2DI_CVPOINTER_SI),
16124   MSA_BUILTIN (sat_s_b, MIPS_V16QI_FTYPE_V16QI_UQI),
16125   MSA_BUILTIN (sat_s_h, MIPS_V8HI_FTYPE_V8HI_UQI),
16126   MSA_BUILTIN (sat_s_w, MIPS_V4SI_FTYPE_V4SI_UQI),
16127   MSA_BUILTIN (sat_s_d, MIPS_V2DI_FTYPE_V2DI_UQI),
16128   MSA_BUILTIN (sat_u_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16129   MSA_BUILTIN (sat_u_h, MIPS_UV8HI_FTYPE_UV8HI_UQI),
16130   MSA_BUILTIN (sat_u_w, MIPS_UV4SI_FTYPE_UV4SI_UQI),
16131   MSA_BUILTIN (sat_u_d, MIPS_UV2DI_FTYPE_UV2DI_UQI),
16132   MSA_BUILTIN (add_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16133   MSA_BUILTIN (add_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16134   MSA_BUILTIN (add_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16135   MSA_BUILTIN (add_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16136   MSA_BUILTIN (adds_a_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16137   MSA_BUILTIN (adds_a_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16138   MSA_BUILTIN (adds_a_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16139   MSA_BUILTIN (adds_a_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16140   MSA_BUILTIN (adds_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16141   MSA_BUILTIN (adds_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16142   MSA_BUILTIN (adds_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16143   MSA_BUILTIN (adds_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16144   MSA_BUILTIN (adds_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16145   MSA_BUILTIN (adds_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16146   MSA_BUILTIN (adds_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16147   MSA_BUILTIN (adds_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16148   MSA_BUILTIN (ave_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16149   MSA_BUILTIN (ave_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16150   MSA_BUILTIN (ave_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16151   MSA_BUILTIN (ave_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16152   MSA_BUILTIN (ave_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16153   MSA_BUILTIN (ave_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16154   MSA_BUILTIN (ave_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16155   MSA_BUILTIN (ave_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16156   MSA_BUILTIN (aver_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16157   MSA_BUILTIN (aver_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16158   MSA_BUILTIN (aver_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16159   MSA_BUILTIN (aver_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16160   MSA_BUILTIN (aver_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16161   MSA_BUILTIN (aver_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16162   MSA_BUILTIN (aver_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16163   MSA_BUILTIN (aver_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16164   MSA_BUILTIN (subs_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16165   MSA_BUILTIN (subs_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16166   MSA_BUILTIN (subs_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16167   MSA_BUILTIN (subs_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16168   MSA_BUILTIN (subs_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16169   MSA_BUILTIN (subs_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16170   MSA_BUILTIN (subs_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16171   MSA_BUILTIN (subs_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16172   MSA_BUILTIN (subsuu_s_b, MIPS_V16QI_FTYPE_UV16QI_UV16QI),
16173   MSA_BUILTIN (subsuu_s_h, MIPS_V8HI_FTYPE_UV8HI_UV8HI),
16174   MSA_BUILTIN (subsuu_s_w, MIPS_V4SI_FTYPE_UV4SI_UV4SI),
16175   MSA_BUILTIN (subsuu_s_d, MIPS_V2DI_FTYPE_UV2DI_UV2DI),
16176   MSA_BUILTIN (subsus_u_b, MIPS_UV16QI_FTYPE_UV16QI_V16QI),
16177   MSA_BUILTIN (subsus_u_h, MIPS_UV8HI_FTYPE_UV8HI_V8HI),
16178   MSA_BUILTIN (subsus_u_w, MIPS_UV4SI_FTYPE_UV4SI_V4SI),
16179   MSA_BUILTIN (subsus_u_d, MIPS_UV2DI_FTYPE_UV2DI_V2DI),
16180   MSA_BUILTIN (asub_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16181   MSA_BUILTIN (asub_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16182   MSA_BUILTIN (asub_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16183   MSA_BUILTIN (asub_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16184   MSA_BUILTIN (asub_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16185   MSA_BUILTIN (asub_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16186   MSA_BUILTIN (asub_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16187   MSA_BUILTIN (asub_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16188   MSA_BUILTIN (mulv_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16189   MSA_BUILTIN (mulv_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16190   MSA_BUILTIN (mulv_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16191   MSA_BUILTIN (mulv_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16192   MSA_BUILTIN (maddv_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
16193   MSA_BUILTIN (maddv_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16194   MSA_BUILTIN (maddv_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16195   MSA_BUILTIN (maddv_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
16196   MSA_BUILTIN (msubv_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
16197   MSA_BUILTIN (msubv_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16198   MSA_BUILTIN (msubv_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16199   MSA_BUILTIN (msubv_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
16200   MSA_BUILTIN (div_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16201   MSA_BUILTIN (div_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16202   MSA_BUILTIN (div_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16203   MSA_BUILTIN (div_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16204   MSA_BUILTIN (div_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16205   MSA_BUILTIN (div_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16206   MSA_BUILTIN (div_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16207   MSA_BUILTIN (div_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16208   MSA_BUILTIN (hadd_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
16209   MSA_BUILTIN (hadd_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
16210   MSA_BUILTIN (hadd_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
16211   MSA_BUILTIN (hadd_u_h, MIPS_UV8HI_FTYPE_UV16QI_UV16QI),
16212   MSA_BUILTIN (hadd_u_w, MIPS_UV4SI_FTYPE_UV8HI_UV8HI),
16213   MSA_BUILTIN (hadd_u_d, MIPS_UV2DI_FTYPE_UV4SI_UV4SI),
16214   MSA_BUILTIN (hsub_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
16215   MSA_BUILTIN (hsub_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
16216   MSA_BUILTIN (hsub_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
16217   MSA_BUILTIN (hsub_u_h, MIPS_V8HI_FTYPE_UV16QI_UV16QI),
16218   MSA_BUILTIN (hsub_u_w, MIPS_V4SI_FTYPE_UV8HI_UV8HI),
16219   MSA_BUILTIN (hsub_u_d, MIPS_V2DI_FTYPE_UV4SI_UV4SI),
16220   MSA_BUILTIN (mod_s_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16221   MSA_BUILTIN (mod_s_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16222   MSA_BUILTIN (mod_s_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16223   MSA_BUILTIN (mod_s_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16224   MSA_BUILTIN (mod_u_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16225   MSA_BUILTIN (mod_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV8HI),
16226   MSA_BUILTIN (mod_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV4SI),
16227   MSA_BUILTIN (mod_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV2DI),
16228   MSA_BUILTIN (dotp_s_h, MIPS_V8HI_FTYPE_V16QI_V16QI),
16229   MSA_BUILTIN (dotp_s_w, MIPS_V4SI_FTYPE_V8HI_V8HI),
16230   MSA_BUILTIN (dotp_s_d, MIPS_V2DI_FTYPE_V4SI_V4SI),
16231   MSA_BUILTIN (dotp_u_h, MIPS_UV8HI_FTYPE_UV16QI_UV16QI),
16232   MSA_BUILTIN (dotp_u_w, MIPS_UV4SI_FTYPE_UV8HI_UV8HI),
16233   MSA_BUILTIN (dotp_u_d, MIPS_UV2DI_FTYPE_UV4SI_UV4SI),
16234   MSA_BUILTIN (dpadd_s_h, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI),
16235   MSA_BUILTIN (dpadd_s_w, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI),
16236   MSA_BUILTIN (dpadd_s_d, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI),
16237   MSA_BUILTIN (dpadd_u_h, MIPS_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI),
16238   MSA_BUILTIN (dpadd_u_w, MIPS_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI),
16239   MSA_BUILTIN (dpadd_u_d, MIPS_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI),
16240   MSA_BUILTIN (dpsub_s_h, MIPS_V8HI_FTYPE_V8HI_V16QI_V16QI),
16241   MSA_BUILTIN (dpsub_s_w, MIPS_V4SI_FTYPE_V4SI_V8HI_V8HI),
16242   MSA_BUILTIN (dpsub_s_d, MIPS_V2DI_FTYPE_V2DI_V4SI_V4SI),
16243   MSA_BUILTIN (dpsub_u_h, MIPS_V8HI_FTYPE_V8HI_UV16QI_UV16QI),
16244   MSA_BUILTIN (dpsub_u_w, MIPS_V4SI_FTYPE_V4SI_UV8HI_UV8HI),
16245   MSA_BUILTIN (dpsub_u_d, MIPS_V2DI_FTYPE_V2DI_UV4SI_UV4SI),
16246   MSA_BUILTIN (sld_b, MIPS_V16QI_FTYPE_V16QI_V16QI_SI),
16247   MSA_BUILTIN (sld_h, MIPS_V8HI_FTYPE_V8HI_V8HI_SI),
16248   MSA_BUILTIN (sld_w, MIPS_V4SI_FTYPE_V4SI_V4SI_SI),
16249   MSA_BUILTIN (sld_d, MIPS_V2DI_FTYPE_V2DI_V2DI_SI),
16250   MSA_BUILTIN (sldi_b, MIPS_V16QI_FTYPE_V16QI_V16QI_UQI),
16251   MSA_BUILTIN (sldi_h, MIPS_V8HI_FTYPE_V8HI_V8HI_UQI),
16252   MSA_BUILTIN (sldi_w, MIPS_V4SI_FTYPE_V4SI_V4SI_UQI),
16253   MSA_BUILTIN (sldi_d, MIPS_V2DI_FTYPE_V2DI_V2DI_UQI),
16254   MSA_BUILTIN (splat_b, MIPS_V16QI_FTYPE_V16QI_SI),
16255   MSA_BUILTIN (splat_h, MIPS_V8HI_FTYPE_V8HI_SI),
16256   MSA_BUILTIN (splat_w, MIPS_V4SI_FTYPE_V4SI_SI),
16257   MSA_BUILTIN (splat_d, MIPS_V2DI_FTYPE_V2DI_SI),
16258   MSA_BUILTIN (splati_b, MIPS_V16QI_FTYPE_V16QI_UQI),
16259   MSA_BUILTIN (splati_h, MIPS_V8HI_FTYPE_V8HI_UQI),
16260   MSA_BUILTIN (splati_w, MIPS_V4SI_FTYPE_V4SI_UQI),
16261   MSA_BUILTIN (splati_d, MIPS_V2DI_FTYPE_V2DI_UQI),
16262   MSA_BUILTIN (pckev_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16263   MSA_BUILTIN (pckev_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16264   MSA_BUILTIN (pckev_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16265   MSA_BUILTIN (pckev_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16266   MSA_BUILTIN (pckod_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16267   MSA_BUILTIN (pckod_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16268   MSA_BUILTIN (pckod_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16269   MSA_BUILTIN (pckod_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16270   MSA_BUILTIN (ilvl_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16271   MSA_BUILTIN (ilvl_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16272   MSA_BUILTIN (ilvl_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16273   MSA_BUILTIN (ilvl_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16274   MSA_BUILTIN (ilvr_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16275   MSA_BUILTIN (ilvr_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16276   MSA_BUILTIN (ilvr_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16277   MSA_BUILTIN (ilvr_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16278   MSA_BUILTIN (ilvev_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16279   MSA_BUILTIN (ilvev_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16280   MSA_BUILTIN (ilvev_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16281   MSA_BUILTIN (ilvev_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16282   MSA_BUILTIN (ilvod_b, MIPS_V16QI_FTYPE_V16QI_V16QI),
16283   MSA_BUILTIN (ilvod_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16284   MSA_BUILTIN (ilvod_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16285   MSA_BUILTIN (ilvod_d, MIPS_V2DI_FTYPE_V2DI_V2DI),
16286   MSA_BUILTIN (vshf_b, MIPS_V16QI_FTYPE_V16QI_V16QI_V16QI),
16287   MSA_BUILTIN (vshf_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16288   MSA_BUILTIN (vshf_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16289   MSA_BUILTIN (vshf_d, MIPS_V2DI_FTYPE_V2DI_V2DI_V2DI),
16290   MSA_BUILTIN (and_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16291   MSA_BUILTIN (andi_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16292   MSA_BUILTIN (or_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16293   MSA_BUILTIN (ori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16294   MSA_BUILTIN (nor_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16295   MSA_BUILTIN (nori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16296   MSA_BUILTIN (xor_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI),
16297   MSA_BUILTIN (xori_b, MIPS_UV16QI_FTYPE_UV16QI_UQI),
16298   MSA_BUILTIN (bmnz_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
16299   MSA_BUILTIN (bmnzi_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
16300   MSA_BUILTIN (bmz_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
16301   MSA_BUILTIN (bmzi_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
16302   MSA_BUILTIN (bsel_v, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
16303   MSA_BUILTIN (bseli_b, MIPS_UV16QI_FTYPE_UV16QI_UV16QI_UQI),
16304   MSA_BUILTIN (shf_b, MIPS_V16QI_FTYPE_V16QI_UQI),
16305   MSA_BUILTIN (shf_h, MIPS_V8HI_FTYPE_V8HI_UQI),
16306   MSA_BUILTIN (shf_w, MIPS_V4SI_FTYPE_V4SI_UQI),
16307   MSA_BUILTIN_TEST_BRANCH (bnz_v, MIPS_SI_FTYPE_UV16QI),
16308   MSA_BUILTIN_TEST_BRANCH (bz_v, MIPS_SI_FTYPE_UV16QI),
16309   MSA_BUILTIN (fill_b, MIPS_V16QI_FTYPE_SI),
16310   MSA_BUILTIN (fill_h, MIPS_V8HI_FTYPE_SI),
16311   MSA_BUILTIN (fill_w, MIPS_V4SI_FTYPE_SI),
16312   MSA_BUILTIN (fill_d, MIPS_V2DI_FTYPE_DI),
16313   MSA_BUILTIN (pcnt_b, MIPS_V16QI_FTYPE_V16QI),
16314   MSA_BUILTIN (pcnt_h, MIPS_V8HI_FTYPE_V8HI),
16315   MSA_BUILTIN (pcnt_w, MIPS_V4SI_FTYPE_V4SI),
16316   MSA_BUILTIN (pcnt_d, MIPS_V2DI_FTYPE_V2DI),
16317   MSA_BUILTIN (nloc_b, MIPS_V16QI_FTYPE_V16QI),
16318   MSA_BUILTIN (nloc_h, MIPS_V8HI_FTYPE_V8HI),
16319   MSA_BUILTIN (nloc_w, MIPS_V4SI_FTYPE_V4SI),
16320   MSA_BUILTIN (nloc_d, MIPS_V2DI_FTYPE_V2DI),
16321   MSA_BUILTIN (nlzc_b, MIPS_V16QI_FTYPE_V16QI),
16322   MSA_BUILTIN (nlzc_h, MIPS_V8HI_FTYPE_V8HI),
16323   MSA_BUILTIN (nlzc_w, MIPS_V4SI_FTYPE_V4SI),
16324   MSA_BUILTIN (nlzc_d, MIPS_V2DI_FTYPE_V2DI),
16325   MSA_BUILTIN (copy_s_b, MIPS_SI_FTYPE_V16QI_UQI),
16326   MSA_BUILTIN (copy_s_h, MIPS_SI_FTYPE_V8HI_UQI),
16327   MSA_BUILTIN (copy_s_w, MIPS_SI_FTYPE_V4SI_UQI),
16328   MSA_BUILTIN (copy_s_d, MIPS_DI_FTYPE_V2DI_UQI),
16329   MSA_BUILTIN (copy_u_b, MIPS_USI_FTYPE_V16QI_UQI),
16330   MSA_BUILTIN (copy_u_h, MIPS_USI_FTYPE_V8HI_UQI),
16331   MSA_BUILTIN_REMAP (copy_u_w, copy_s_w, MIPS_USI_FTYPE_V4SI_UQI),
16332   MSA_BUILTIN_REMAP (copy_u_d, copy_s_d, MIPS_UDI_FTYPE_V2DI_UQI),
16333   MSA_BUILTIN (insert_b, MIPS_V16QI_FTYPE_V16QI_UQI_SI),
16334   MSA_BUILTIN (insert_h, MIPS_V8HI_FTYPE_V8HI_UQI_SI),
16335   MSA_BUILTIN (insert_w, MIPS_V4SI_FTYPE_V4SI_UQI_SI),
16336   MSA_BUILTIN (insert_d, MIPS_V2DI_FTYPE_V2DI_UQI_DI),
16337   MSA_BUILTIN (insve_b, MIPS_V16QI_FTYPE_V16QI_UQI_V16QI),
16338   MSA_BUILTIN (insve_h, MIPS_V8HI_FTYPE_V8HI_UQI_V8HI),
16339   MSA_BUILTIN (insve_w, MIPS_V4SI_FTYPE_V4SI_UQI_V4SI),
16340   MSA_BUILTIN (insve_d, MIPS_V2DI_FTYPE_V2DI_UQI_V2DI),
16341   MSA_BUILTIN_TEST_BRANCH (bnz_b, MIPS_SI_FTYPE_UV16QI),
16342   MSA_BUILTIN_TEST_BRANCH (bnz_h, MIPS_SI_FTYPE_UV8HI),
16343   MSA_BUILTIN_TEST_BRANCH (bnz_w, MIPS_SI_FTYPE_UV4SI),
16344   MSA_BUILTIN_TEST_BRANCH (bnz_d, MIPS_SI_FTYPE_UV2DI),
16345   MSA_BUILTIN_TEST_BRANCH (bz_b, MIPS_SI_FTYPE_UV16QI),
16346   MSA_BUILTIN_TEST_BRANCH (bz_h, MIPS_SI_FTYPE_UV8HI),
16347   MSA_BUILTIN_TEST_BRANCH (bz_w, MIPS_SI_FTYPE_UV4SI),
16348   MSA_BUILTIN_TEST_BRANCH (bz_d, MIPS_SI_FTYPE_UV2DI),
16349   MSA_BUILTIN (ldi_b, MIPS_V16QI_FTYPE_HI),
16350   MSA_BUILTIN (ldi_h, MIPS_V8HI_FTYPE_HI),
16351   MSA_BUILTIN (ldi_w, MIPS_V4SI_FTYPE_HI),
16352   MSA_BUILTIN (ldi_d, MIPS_V2DI_FTYPE_HI),
16353   MSA_BUILTIN (fcaf_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16354   MSA_BUILTIN (fcaf_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16355   MSA_BUILTIN (fcor_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16356   MSA_BUILTIN (fcor_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16357   MSA_BUILTIN (fcun_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16358   MSA_BUILTIN (fcun_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16359   MSA_BUILTIN (fcune_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16360   MSA_BUILTIN (fcune_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16361   MSA_BUILTIN (fcueq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16362   MSA_BUILTIN (fcueq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16363   MSA_BUILTIN (fceq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16364   MSA_BUILTIN (fceq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16365   MSA_BUILTIN (fcne_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16366   MSA_BUILTIN (fcne_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16367   MSA_BUILTIN (fclt_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16368   MSA_BUILTIN (fclt_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16369   MSA_BUILTIN (fcult_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16370   MSA_BUILTIN (fcult_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16371   MSA_BUILTIN (fcle_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16372   MSA_BUILTIN (fcle_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16373   MSA_BUILTIN (fcule_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16374   MSA_BUILTIN (fcule_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16375   MSA_BUILTIN (fsaf_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16376   MSA_BUILTIN (fsaf_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16377   MSA_BUILTIN (fsor_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16378   MSA_BUILTIN (fsor_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16379   MSA_BUILTIN (fsun_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16380   MSA_BUILTIN (fsun_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16381   MSA_BUILTIN (fsune_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16382   MSA_BUILTIN (fsune_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16383   MSA_BUILTIN (fsueq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16384   MSA_BUILTIN (fsueq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16385   MSA_BUILTIN (fseq_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16386   MSA_BUILTIN (fseq_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16387   MSA_BUILTIN (fsne_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16388   MSA_BUILTIN (fsne_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16389   MSA_BUILTIN (fslt_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16390   MSA_BUILTIN (fslt_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16391   MSA_BUILTIN (fsult_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16392   MSA_BUILTIN (fsult_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16393   MSA_BUILTIN (fsle_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16394   MSA_BUILTIN (fsle_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16395   MSA_BUILTIN (fsule_w, MIPS_V4SI_FTYPE_V4SF_V4SF),
16396   MSA_BUILTIN (fsule_d, MIPS_V2DI_FTYPE_V2DF_V2DF),
16397   MSA_BUILTIN (fadd_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16398   MSA_BUILTIN (fadd_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16399   MSA_BUILTIN (fsub_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16400   MSA_BUILTIN (fsub_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16401   MSA_BUILTIN (fmul_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16402   MSA_BUILTIN (fmul_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16403   MSA_BUILTIN (fdiv_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16404   MSA_BUILTIN (fdiv_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16405   MSA_BUILTIN (fmadd_w, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF),
16406   MSA_BUILTIN (fmadd_d, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF),
16407   MSA_BUILTIN (fmsub_w, MIPS_V4SF_FTYPE_V4SF_V4SF_V4SF),
16408   MSA_BUILTIN (fmsub_d, MIPS_V2DF_FTYPE_V2DF_V2DF_V2DF),
16409   MSA_BUILTIN (fexp2_w, MIPS_V4SF_FTYPE_V4SF_V4SI),
16410   MSA_BUILTIN (fexp2_d, MIPS_V2DF_FTYPE_V2DF_V2DI),
16411   MSA_BUILTIN (fexdo_h, MIPS_V8HI_FTYPE_V4SF_V4SF),
16412   MSA_BUILTIN (fexdo_w, MIPS_V4SF_FTYPE_V2DF_V2DF),
16413   MSA_BUILTIN (ftq_h, MIPS_V8HI_FTYPE_V4SF_V4SF),
16414   MSA_BUILTIN (ftq_w, MIPS_V4SI_FTYPE_V2DF_V2DF),
16415   MSA_BUILTIN (fmin_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16416   MSA_BUILTIN (fmin_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16417   MSA_BUILTIN (fmin_a_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16418   MSA_BUILTIN (fmin_a_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16419   MSA_BUILTIN (fmax_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16420   MSA_BUILTIN (fmax_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16421   MSA_BUILTIN (fmax_a_w, MIPS_V4SF_FTYPE_V4SF_V4SF),
16422   MSA_BUILTIN (fmax_a_d, MIPS_V2DF_FTYPE_V2DF_V2DF),
16423   MSA_BUILTIN (mul_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16424   MSA_BUILTIN (mul_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16425   MSA_BUILTIN (mulr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI),
16426   MSA_BUILTIN (mulr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI),
16427   MSA_BUILTIN (madd_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16428   MSA_BUILTIN (madd_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16429   MSA_BUILTIN (maddr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16430   MSA_BUILTIN (maddr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16431   MSA_BUILTIN (msub_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16432   MSA_BUILTIN (msub_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16433   MSA_BUILTIN (msubr_q_h, MIPS_V8HI_FTYPE_V8HI_V8HI_V8HI),
16434   MSA_BUILTIN (msubr_q_w, MIPS_V4SI_FTYPE_V4SI_V4SI_V4SI),
16435   MSA_BUILTIN (fclass_w, MIPS_V4SI_FTYPE_V4SF),
16436   MSA_BUILTIN (fclass_d, MIPS_V2DI_FTYPE_V2DF),
16437   MSA_BUILTIN (fsqrt_w, MIPS_V4SF_FTYPE_V4SF),
16438   MSA_BUILTIN (fsqrt_d, MIPS_V2DF_FTYPE_V2DF),
16439   MSA_BUILTIN (frcp_w, MIPS_V4SF_FTYPE_V4SF),
16440   MSA_BUILTIN (frcp_d, MIPS_V2DF_FTYPE_V2DF),
16441   MSA_BUILTIN (frint_w, MIPS_V4SF_FTYPE_V4SF),
16442   MSA_BUILTIN (frint_d, MIPS_V2DF_FTYPE_V2DF),
16443   MSA_BUILTIN (frsqrt_w, MIPS_V4SF_FTYPE_V4SF),
16444   MSA_BUILTIN (frsqrt_d, MIPS_V2DF_FTYPE_V2DF),
16445   MSA_BUILTIN (flog2_w, MIPS_V4SF_FTYPE_V4SF),
16446   MSA_BUILTIN (flog2_d, MIPS_V2DF_FTYPE_V2DF),
16447   MSA_BUILTIN (fexupl_w, MIPS_V4SF_FTYPE_V8HI),
16448   MSA_BUILTIN (fexupl_d, MIPS_V2DF_FTYPE_V4SF),
16449   MSA_BUILTIN (fexupr_w, MIPS_V4SF_FTYPE_V8HI),
16450   MSA_BUILTIN (fexupr_d, MIPS_V2DF_FTYPE_V4SF),
16451   MSA_BUILTIN (ffql_w, MIPS_V4SF_FTYPE_V8HI),
16452   MSA_BUILTIN (ffql_d, MIPS_V2DF_FTYPE_V4SI),
16453   MSA_BUILTIN (ffqr_w, MIPS_V4SF_FTYPE_V8HI),
16454   MSA_BUILTIN (ffqr_d, MIPS_V2DF_FTYPE_V4SI),
16455   MSA_BUILTIN (ftint_s_w, MIPS_V4SI_FTYPE_V4SF),
16456   MSA_BUILTIN (ftint_s_d, MIPS_V2DI_FTYPE_V2DF),
16457   MSA_BUILTIN (ftint_u_w, MIPS_UV4SI_FTYPE_V4SF),
16458   MSA_BUILTIN (ftint_u_d, MIPS_UV2DI_FTYPE_V2DF),
16459   MSA_BUILTIN (ftrunc_s_w, MIPS_V4SI_FTYPE_V4SF),
16460   MSA_BUILTIN (ftrunc_s_d, MIPS_V2DI_FTYPE_V2DF),
16461   MSA_BUILTIN (ftrunc_u_w, MIPS_UV4SI_FTYPE_V4SF),
16462   MSA_BUILTIN (ftrunc_u_d, MIPS_UV2DI_FTYPE_V2DF),
16463   MSA_BUILTIN (ffint_s_w, MIPS_V4SF_FTYPE_V4SI),
16464   MSA_BUILTIN (ffint_s_d, MIPS_V2DF_FTYPE_V2DI),
16465   MSA_BUILTIN (ffint_u_w, MIPS_V4SF_FTYPE_UV4SI),
16466   MSA_BUILTIN (ffint_u_d, MIPS_V2DF_FTYPE_UV2DI),
16467   MSA_NO_TARGET_BUILTIN (ctcmsa, MIPS_VOID_FTYPE_UQI_SI),
16468   MSA_BUILTIN (cfcmsa, MIPS_SI_FTYPE_UQI),
16469   MSA_BUILTIN (move_v, MIPS_V16QI_FTYPE_V16QI),
16470 };
16471 
/* Index I is the function declaration for mips_builtins[I], or null if the
   function isn't defined on this target.  */
static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)];
/* mips_get_builtin_decl_index[C] is the index into mips_builtin_decls of
   the builtin whose instruction code is C; used by MIPS_GET_BUILTIN to map
   an insn code back to its builtin declaration.  */
static GTY(()) int mips_get_builtin_decl_index[NUM_INSN_CODES];
16478 
16479 /* MODE is a vector mode whose elements have type TYPE.  Return the type
16480    of the vector itself.  */
16481 
16482 static tree
mips_builtin_vector_type(tree type,machine_mode mode)16483 mips_builtin_vector_type (tree type, machine_mode mode)
16484 {
16485   static tree types[2 * (int) MAX_MACHINE_MODE];
16486   int mode_index;
16487 
16488   mode_index = (int) mode;
16489 
16490   if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
16491     mode_index += MAX_MACHINE_MODE;
16492 
16493   if (types[mode_index] == NULL_TREE)
16494     types[mode_index] = build_vector_type_for_mode (type, mode);
16495   return types[mode_index];
16496 }
16497 
16498 /* Return a type for 'const volatile void *'.  */
16499 
16500 static tree
mips_build_cvpointer_type(void)16501 mips_build_cvpointer_type (void)
16502 {
16503   static tree cache;
16504 
16505   if (cache == NULL_TREE)
16506     cache = build_pointer_type (build_qualified_type
16507 				(void_type_node,
16508 				 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
16509   return cache;
16510 }
16511 
/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_QI intQI_type_node
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_HI intHI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)

/* 128-bit vector argument types (signed element types).  */
#define MIPS_ATYPE_V2DI						\
  mips_builtin_vector_type (long_long_integer_type_node, V2DImode)
#define MIPS_ATYPE_V4SI mips_builtin_vector_type (intSI_type_node, V4SImode)
#define MIPS_ATYPE_V8HI mips_builtin_vector_type (intHI_type_node, V8HImode)
#define MIPS_ATYPE_V16QI mips_builtin_vector_type (intQI_type_node, V16QImode)
#define MIPS_ATYPE_V2DF mips_builtin_vector_type (double_type_node, V2DFmode)
#define MIPS_ATYPE_V4SF mips_builtin_vector_type (float_type_node, V4SFmode)

/* 128-bit vector argument types (unsigned element types).  */
#define MIPS_ATYPE_UV2DI					\
  mips_builtin_vector_type (long_long_unsigned_type_node, V2DImode)
#define MIPS_ATYPE_UV4SI					\
  mips_builtin_vector_type (unsigned_intSI_type_node, V4SImode)
#define MIPS_ATYPE_UV8HI					\
  mips_builtin_vector_type (unsigned_intHI_type_node, V8HImode)
#define MIPS_ATYPE_UV16QI					\
  mips_builtin_vector_type (unsigned_intQI_type_node, V16QImode)

/* 64-bit vector argument types (unsigned element types).  */
#define MIPS_ATYPE_UV2SI					\
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI					\
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI					\
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
16575 
/* Return the function type associated with function prototype TYPE.
   Types are built lazily and cached, one per MIPS_FTYPE_* value.  */

static tree
mips_build_function_type (enum mips_function_type type)
{
  static tree types[(int) MIPS_MAX_FTYPE_MAX];

  if (types[(int) type] == NULL_TREE)
    switch (type)
      {
/* Each DEF_MIPS_FTYPE in mips-ftypes.def expands to one case label that
   builds the matching function type from its MIPS_ATYPE list.  */
#define DEF_MIPS_FTYPE(NUM, ARGS)					\
  case MIPS_FTYPE_NAME##NUM ARGS:					\
    types[(int) type]							\
      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS,		\
				  NULL_TREE);				\
    break;
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
      default:
	gcc_unreachable ();
      }

  return types[(int) type];
}
16600 
16601 /* Implement TARGET_INIT_BUILTINS.  */
16602 
16603 static void
mips_init_builtins(void)16604 mips_init_builtins (void)
16605 {
16606   const struct mips_builtin_description *d;
16607   unsigned int i;
16608 
16609   /* Iterate through all of the bdesc arrays, initializing all of the
16610      builtin functions.  */
16611   for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
16612     {
16613       d = &mips_builtins[i];
16614       if (d->avail ())
16615 	{
16616 	  mips_builtin_decls[i]
16617 	    = add_builtin_function (d->name,
16618 				    mips_build_function_type (d->function_type),
16619 				    i, BUILT_IN_MD, NULL, NULL);
16620 	  mips_get_builtin_decl_index[d->icode] = i;
16621 	}
16622     }
16623 }
16624 
16625 /* Implement TARGET_BUILTIN_DECL.  */
16626 
16627 static tree
mips_builtin_decl(unsigned int code,bool initialize_p ATTRIBUTE_UNUSED)16628 mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
16629 {
16630   if (code >= ARRAY_SIZE (mips_builtins))
16631     return error_mark_node;
16632   return mips_builtin_decls[code];
16633 }
16634 
/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION.  */

static tree
mips_builtin_vectorized_function (unsigned int fn, tree type_out, tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  /* Only MSA provides vector versions of these built-ins, and both the
     input and the output must be vector types.  */
  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE
      || !ISA_HAS_MSA)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  /* INSN is the name of the associated instruction pattern, without
     the leading CODE_FOR_.  */
#define MIPS_GET_BUILTIN(INSN) \
  mips_builtin_decls[mips_get_builtin_decl_index[CODE_FOR_##INSN]]

  switch (fn)
    {
    case BUILT_IN_SQRT:
      /* V2DF sqrt maps to the msa_fsqrt_d builtin.  */
      if (out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return MIPS_GET_BUILTIN (msa_fsqrt_d);
      break;
    case BUILT_IN_SQRTF:
      /* V4SF sqrtf maps to the msa_fsqrt_w builtin.  */
      if (out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return MIPS_GET_BUILTIN (msa_fsqrt_w);
      break;
    default:
      break;
    }

  return NULL_TREE;
}
16676 
16677 /* Take argument ARGNO from EXP's argument list and convert it into
16678    an expand operand.  Store the operand in *OP.  */
16679 
16680 static void
mips_prepare_builtin_arg(struct expand_operand * op,tree exp,unsigned int argno)16681 mips_prepare_builtin_arg (struct expand_operand *op, tree exp,
16682 			  unsigned int argno)
16683 {
16684   tree arg;
16685   rtx value;
16686 
16687   arg = CALL_EXPR_ARG (exp, argno);
16688   value = expand_normal (arg);
16689   create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg)));
16690 }
16691 
/* Expand instruction ICODE as part of a built-in function sequence.
   Use the first NOPS elements of OPS as the instruction's operands.
   HAS_TARGET_P is true if operand 0 is a target; it is false if the
   instruction has no target.

   Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx.
   Diagnose (and recover from) immediate operands that are out of range
   or expansions that fail to match their pattern.  */

static rtx
mips_expand_builtin_insn (enum insn_code icode, unsigned int nops,
			  struct expand_operand *ops, bool has_target_p)
{
  machine_mode imode;
  int rangelo = 0, rangehi = 0, error_opno = 0;
  rtx sireg;

  switch (icode)
    {
    /* The third operand of these instructions is in SImode, so we need to
       bring the corresponding builtin argument from QImode into SImode.  */
    case CODE_FOR_loongson_pshufh:
    case CODE_FOR_loongson_psllh:
    case CODE_FOR_loongson_psllw:
    case CODE_FOR_loongson_psrah:
    case CODE_FOR_loongson_psraw:
    case CODE_FOR_loongson_psrlh:
    case CODE_FOR_loongson_psrlw:
      gcc_assert (has_target_p && nops == 3 && ops[2].mode == QImode);
      sireg = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendqisi2 (sireg,
				       force_reg (QImode, ops[2].value)));
      ops[2].value = sireg;
      ops[2].mode = SImode;
      break;

    /* These MSA patterns take an unsigned immediate in [0, 31] that is
       broadcast into a vector of identical constants.  */
    case CODE_FOR_msa_addvi_b:
    case CODE_FOR_msa_addvi_h:
    case CODE_FOR_msa_addvi_w:
    case CODE_FOR_msa_addvi_d:
    case CODE_FOR_msa_clti_u_b:
    case CODE_FOR_msa_clti_u_h:
    case CODE_FOR_msa_clti_u_w:
    case CODE_FOR_msa_clti_u_d:
    case CODE_FOR_msa_clei_u_b:
    case CODE_FOR_msa_clei_u_h:
    case CODE_FOR_msa_clei_u_w:
    case CODE_FOR_msa_clei_u_d:
    case CODE_FOR_msa_maxi_u_b:
    case CODE_FOR_msa_maxi_u_h:
    case CODE_FOR_msa_maxi_u_w:
    case CODE_FOR_msa_maxi_u_d:
    case CODE_FOR_msa_mini_u_b:
    case CODE_FOR_msa_mini_u_h:
    case CODE_FOR_msa_mini_u_w:
    case CODE_FOR_msa_mini_u_d:
    case CODE_FOR_msa_subvi_b:
    case CODE_FOR_msa_subvi_h:
    case CODE_FOR_msa_subvi_w:
    case CODE_FOR_msa_subvi_d:
      gcc_assert (has_target_p && nops == 3);
      /* We only generate a vector of constants iff the second argument
	 is an immediate.  We also validate the range of the immediate.  */
      if (CONST_INT_P (ops[2].value))
	{
	  rangelo = 0;
	  rangehi = 31;
	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
	    {
	      ops[2].mode = ops[0].mode;
	      ops[2].value = mips_gen_const_int_vector (ops[2].mode,
							INTVAL (ops[2].value));
	    }
	  else
	    error_opno = 2;
	}
      break;

    /* Likewise, but with a signed immediate in [-16, 15].  */
    case CODE_FOR_msa_ceqi_b:
    case CODE_FOR_msa_ceqi_h:
    case CODE_FOR_msa_ceqi_w:
    case CODE_FOR_msa_ceqi_d:
    case CODE_FOR_msa_clti_s_b:
    case CODE_FOR_msa_clti_s_h:
    case CODE_FOR_msa_clti_s_w:
    case CODE_FOR_msa_clti_s_d:
    case CODE_FOR_msa_clei_s_b:
    case CODE_FOR_msa_clei_s_h:
    case CODE_FOR_msa_clei_s_w:
    case CODE_FOR_msa_clei_s_d:
    case CODE_FOR_msa_maxi_s_b:
    case CODE_FOR_msa_maxi_s_h:
    case CODE_FOR_msa_maxi_s_w:
    case CODE_FOR_msa_maxi_s_d:
    case CODE_FOR_msa_mini_s_b:
    case CODE_FOR_msa_mini_s_h:
    case CODE_FOR_msa_mini_s_w:
    case CODE_FOR_msa_mini_s_d:
      gcc_assert (has_target_p && nops == 3);
      /* We only generate a vector of constants iff the second argument
	 is an immediate.  We also validate the range of the immediate.  */
      if (CONST_INT_P (ops[2].value))
	{
	  rangelo = -16;
	  rangehi = 15;
	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
	    {
	      ops[2].mode = ops[0].mode;
	      ops[2].value = mips_gen_const_int_vector (ops[2].mode,
							INTVAL (ops[2].value));
	    }
	  else
	    error_opno = 2;
	}
      break;

    /* Bitwise immediates: broadcast a constant second argument with no
       range check here (the pattern's predicate handles validity).  */
    case CODE_FOR_msa_andi_b:
    case CODE_FOR_msa_ori_b:
    case CODE_FOR_msa_nori_b:
    case CODE_FOR_msa_xori_b:
      gcc_assert (has_target_p && nops == 3);
      if (!CONST_INT_P (ops[2].value))
	break;
      ops[2].mode = ops[0].mode;
      ops[2].value = mips_gen_const_int_vector (ops[2].mode,
						INTVAL (ops[2].value));
      break;

    /* Same broadcasting, but the immediate is the third argument.  */
    case CODE_FOR_msa_bmzi_b:
    case CODE_FOR_msa_bmnzi_b:
    case CODE_FOR_msa_bseli_b:
      gcc_assert (has_target_p && nops == 4);
      if (!CONST_INT_P (ops[3].value))
	break;
      ops[3].mode = ops[0].mode;
      ops[3].value = mips_gen_const_int_vector (ops[3].mode,
						INTVAL (ops[3].value));
      break;

    case CODE_FOR_msa_fill_b:
    case CODE_FOR_msa_fill_h:
    case CODE_FOR_msa_fill_w:
    case CODE_FOR_msa_fill_d:
      /* Map the built-ins to vector fill operations.  We need fix up the mode
	 for the element being inserted.  */
      gcc_assert (has_target_p && nops == 2);
      imode = GET_MODE_INNER (ops[0].mode);
      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
      ops[1].mode = imode;
      break;

    case CODE_FOR_msa_ilvl_b:
    case CODE_FOR_msa_ilvl_h:
    case CODE_FOR_msa_ilvl_w:
    case CODE_FOR_msa_ilvl_d:
    case CODE_FOR_msa_ilvr_b:
    case CODE_FOR_msa_ilvr_h:
    case CODE_FOR_msa_ilvr_w:
    case CODE_FOR_msa_ilvr_d:
    case CODE_FOR_msa_ilvev_b:
    case CODE_FOR_msa_ilvev_h:
    case CODE_FOR_msa_ilvev_w:
    case CODE_FOR_msa_ilvod_b:
    case CODE_FOR_msa_ilvod_h:
    case CODE_FOR_msa_ilvod_w:
    case CODE_FOR_msa_pckev_b:
    case CODE_FOR_msa_pckev_h:
    case CODE_FOR_msa_pckev_w:
    case CODE_FOR_msa_pckod_b:
    case CODE_FOR_msa_pckod_h:
    case CODE_FOR_msa_pckod_w:
      /* Swap the operands 1 and 2 for interleave operations.  Built-ins follow
	 convention of ISA, which have op1 as higher component and op2 as lower
	 component.  However, the VEC_PERM op in tree and vec_concat in RTL
	 expects first operand to be lower component, because of which this
	 swap is needed for builtins.  */
      gcc_assert (has_target_p && nops == 3);
      std::swap (ops[1], ops[2]);
      break;

    case CODE_FOR_msa_maddv_b:
    case CODE_FOR_msa_maddv_h:
    case CODE_FOR_msa_maddv_w:
    case CODE_FOR_msa_maddv_d:
    case CODE_FOR_msa_fmadd_w:
    case CODE_FOR_msa_fmadd_d:
    case CODE_FOR_msa_fmsub_w:
    case CODE_FOR_msa_fmsub_d:
      /* fma(a, b, c) results into (a * b + c), however builtin_msa_fmadd expects
	 it to be (a + b * c).  Swap the 1st and 3rd operands.  */
      std::swap (ops[1], ops[3]);
      break;

    /* Shift immediates: valid range depends on the element width.  */
    case CODE_FOR_msa_slli_b:
    case CODE_FOR_msa_slli_h:
    case CODE_FOR_msa_slli_w:
    case CODE_FOR_msa_slli_d:
    case CODE_FOR_msa_srai_b:
    case CODE_FOR_msa_srai_h:
    case CODE_FOR_msa_srai_w:
    case CODE_FOR_msa_srai_d:
    case CODE_FOR_msa_srli_b:
    case CODE_FOR_msa_srli_h:
    case CODE_FOR_msa_srli_w:
    case CODE_FOR_msa_srli_d:
      gcc_assert (has_target_p && nops == 3);
      if (CONST_INT_P (ops[2].value))
	{
	  rangelo = 0;
	  rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1;
	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
	    {
	      ops[2].mode = ops[0].mode;
	      ops[2].value = mips_gen_const_int_vector (ops[2].mode,
							INTVAL (ops[2].value));
	    }
	  else
	    error_opno = 2;
	}
      break;

    case CODE_FOR_msa_insert_b:
    case CODE_FOR_msa_insert_h:
    case CODE_FOR_msa_insert_w:
    case CODE_FOR_msa_insert_d:
      /* Map the built-ins to insert operations.  We need to swap operands,
	 fix up the mode for the element being inserted, and generate
	 a bit mask for vec_merge.  */
      gcc_assert (has_target_p && nops == 4);
      /* After the two swaps, ops[1] is the inserted value, ops[2] the
	 vector and ops[3] the element index (the builtin's argument 2,
	 hence error_opno == 2 below).  */
      std::swap (ops[1], ops[2]);
      std::swap (ops[1], ops[3]);
      imode = GET_MODE_INNER (ops[0].mode);
      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
      ops[1].mode = imode;
      rangelo = 0;
      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
      if (CONST_INT_P (ops[3].value)
	  && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
	ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
      else
	error_opno = 2;
      break;

    case CODE_FOR_msa_insve_b:
    case CODE_FOR_msa_insve_h:
    case CODE_FOR_msa_insve_w:
    case CODE_FOR_msa_insve_d:
      /* Map the built-ins to element insert operations.  We need to swap
	 operands and generate a bit mask.  */
      gcc_assert (has_target_p && nops == 4);
      std::swap (ops[1], ops[2]);
      std::swap (ops[1], ops[3]);
      rangelo = 0;
      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
      if (CONST_INT_P (ops[3].value)
	  && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
	ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
      else
	error_opno = 2;
      break;

    /* Turn the 8-bit shuffle-control immediate into a shuffle vector.  */
    case CODE_FOR_msa_shf_b:
    case CODE_FOR_msa_shf_h:
    case CODE_FOR_msa_shf_w:
    case CODE_FOR_msa_shf_w_f:
      gcc_assert (has_target_p && nops == 3);
      ops[2].value = mips_gen_const_int_vector_shuffle (ops[0].mode,
							INTVAL (ops[2].value));
      break;

    case CODE_FOR_msa_vshf_b:
    case CODE_FOR_msa_vshf_h:
    case CODE_FOR_msa_vshf_w:
    case CODE_FOR_msa_vshf_d:
      gcc_assert (has_target_p && nops == 4);
      std::swap (ops[1], ops[3]);
      break;

    default:
      break;
  }

  /* Report out-of-range immediates; return a dummy value so expansion
     can continue after the error.  */
  if (error_opno != 0)
    {
      error ("argument %d to the built-in must be a constant"
	     " in range %d to %d", error_opno, rangelo, rangehi);
      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
    }
  else if (!maybe_expand_insn (icode, nops, ops))
    {
      error ("invalid argument to built-in function");
      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
    }
  return has_target_p ? ops[0].value : const0_rtx;
}
16985 
16986 /* Expand a floating-point comparison for built-in function call EXP.
16987    The first NARGS arguments are the values to be compared.  ICODE is
16988    the .md pattern that does the comparison and COND is the condition
16989    that is being tested.  Return an rtx for the result.  */
16990 
16991 static rtx
mips_expand_builtin_compare_1(enum insn_code icode,enum mips_fp_condition cond,tree exp,int nargs)16992 mips_expand_builtin_compare_1 (enum insn_code icode,
16993 			       enum mips_fp_condition cond,
16994 			       tree exp, int nargs)
16995 {
16996   struct expand_operand ops[MAX_RECOG_OPERANDS];
16997   rtx output;
16998   int opno, argno;
16999 
17000   /* The instruction should have a target operand, an operand for each
17001      argument, and an operand for COND.  */
17002   gcc_assert (nargs + 2 == insn_data[(int) icode].n_generator_args);
17003 
17004   output = mips_allocate_fcc (insn_data[(int) icode].operand[0].mode);
17005   opno = 0;
17006   create_fixed_operand (&ops[opno++], output);
17007   for (argno = 0; argno < nargs; argno++)
17008     mips_prepare_builtin_arg (&ops[opno++], exp, argno);
17009   create_integer_operand (&ops[opno++], (int) cond);
17010   return mips_expand_builtin_insn (icode, opno, ops, true);
17011 }
17012 
17013 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
17014    HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
17015    and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
17016    suggests a good place to put the result.  */
17017 
17018 static rtx
mips_expand_builtin_direct(enum insn_code icode,rtx target,tree exp,bool has_target_p)17019 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
17020 			    bool has_target_p)
17021 {
17022   struct expand_operand ops[MAX_RECOG_OPERANDS];
17023   int opno, argno;
17024 
17025   /* Map any target to operand 0.  */
17026   opno = 0;
17027   if (has_target_p)
17028     create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
17029 
17030   /* Map the arguments to the other operands.  */
17031   gcc_assert (opno + call_expr_nargs (exp)
17032 	      == insn_data[icode].n_generator_args);
17033   for (argno = 0; argno < call_expr_nargs (exp); argno++)
17034     mips_prepare_builtin_arg (&ops[opno++], exp, argno);
17035 
17036   return mips_expand_builtin_insn (icode, opno, ops, has_target_p);
17037 }
17038 
17039 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
17040    function; TYPE says which.  EXP is the CALL_EXPR that calls the
17041    function, ICODE is the instruction that should be used to compare
17042    the first two arguments, and COND is the condition it should test.
17043    TARGET, if nonnull, suggests a good place to put the result.  */
17044 
17045 static rtx
mips_expand_builtin_movtf(enum mips_builtin_type type,enum insn_code icode,enum mips_fp_condition cond,rtx target,tree exp)17046 mips_expand_builtin_movtf (enum mips_builtin_type type,
17047 			   enum insn_code icode, enum mips_fp_condition cond,
17048 			   rtx target, tree exp)
17049 {
17050   struct expand_operand ops[4];
17051   rtx cmp_result;
17052 
17053   cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp, 2);
17054   create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
17055   if (type == MIPS_BUILTIN_MOVT)
17056     {
17057       mips_prepare_builtin_arg (&ops[2], exp, 2);
17058       mips_prepare_builtin_arg (&ops[1], exp, 3);
17059     }
17060   else
17061     {
17062       mips_prepare_builtin_arg (&ops[1], exp, 2);
17063       mips_prepare_builtin_arg (&ops[2], exp, 3);
17064     }
17065   create_fixed_operand (&ops[3], cmp_result);
17066   return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps,
17067 				   4, ops, true);
17068 }
17069 
/* Expand an MSA built-in for a compare and branch instruction specified by
   ICODE, set a general-purpose register to 1 if the branch was taken,
   0 otherwise.  */

static rtx
mips_expand_builtin_msa_test_branch (enum insn_code icode, tree exp)
{
  struct expand_operand ops[3];
  rtx_insn *cbranch;
  rtx_code_label *true_label, *done_label;
  rtx cmp_result;

  true_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  /* Operand 0 is the branch target, operand 1 the vector to test and
     operand 2 the value it is compared against.  */
  create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp)));
  mips_prepare_builtin_arg (&ops[1], exp, 0);
  create_fixed_operand (&ops[2], const0_rtx);

  /* Make sure that the operand 1 is a REG.  */
  if (GET_CODE (ops[1].value) != REG)
    ops[1].value = force_reg (ops[1].mode, ops[1].value);

  if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX)
    error ("failed to expand built-in function");

  cmp_result = gen_reg_rtx (SImode);

  /* First assume that CMP_RESULT is false.  */
  mips_emit_move (cmp_result, const0_rtx);

  /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise.  */
  emit_jump_insn (cbranch);
  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();

  /* Set CMP_RESULT to true if the branch was taken.  */
  emit_label (true_label);
  mips_emit_move (cmp_result, const1_rtx);

  emit_label (done_label);
  return cmp_result;
}
17113 
17114 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
17115    into TARGET otherwise.  Return TARGET.  */
17116 
17117 static rtx
mips_builtin_branch_and_move(rtx condition,rtx target,rtx value_if_true,rtx value_if_false)17118 mips_builtin_branch_and_move (rtx condition, rtx target,
17119 			      rtx value_if_true, rtx value_if_false)
17120 {
17121   rtx_code_label *true_label, *done_label;
17122 
17123   true_label = gen_label_rtx ();
17124   done_label = gen_label_rtx ();
17125 
17126   /* First assume that CONDITION is false.  */
17127   mips_emit_move (target, value_if_false);
17128 
17129   /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise.  */
17130   emit_jump_insn (gen_condjump (condition, true_label));
17131   emit_jump_insn (gen_jump (done_label));
17132   emit_barrier ();
17133 
17134   /* Fix TARGET if CONDITION is true.  */
17135   emit_label (true_label);
17136   mips_emit_move (target, value_if_true);
17137 
17138   emit_label (done_label);
17139   return target;
17140 }
17141 
17142 /* Expand a comparison built-in function of type BUILTIN_TYPE.  EXP is
17143    the CALL_EXPR that calls the function, ICODE is the code of the
17144    comparison instruction, and COND is the condition it should test.
17145    TARGET, if nonnull, suggests a good place to put the boolean result.  */
17146 
17147 static rtx
mips_expand_builtin_compare(enum mips_builtin_type builtin_type,enum insn_code icode,enum mips_fp_condition cond,rtx target,tree exp)17148 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
17149 			     enum insn_code icode, enum mips_fp_condition cond,
17150 			     rtx target, tree exp)
17151 {
17152   rtx offset, condition, cmp_result;
17153 
17154   if (target == 0 || GET_MODE (target) != SImode)
17155     target = gen_reg_rtx (SImode);
17156   cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp,
17157 					      call_expr_nargs (exp));
17158 
17159   /* If the comparison sets more than one register, we define the result
17160      to be 0 if all registers are false and -1 if all registers are true.
17161      The value of the complete result is indeterminate otherwise.  */
17162   switch (builtin_type)
17163     {
17164     case MIPS_BUILTIN_CMP_ALL:
17165       condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
17166       return mips_builtin_branch_and_move (condition, target,
17167 					   const0_rtx, const1_rtx);
17168 
17169     case MIPS_BUILTIN_CMP_UPPER:
17170     case MIPS_BUILTIN_CMP_LOWER:
17171       offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
17172       condition = gen_single_cc (cmp_result, offset);
17173       return mips_builtin_branch_and_move (condition, target,
17174 					   const1_rtx, const0_rtx);
17175 
17176     default:
17177       condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
17178       return mips_builtin_branch_and_move (condition, target,
17179 					   const1_rtx, const0_rtx);
17180     }
17181 }
17182 
17183 /* Expand a bposge built-in function of type BUILTIN_TYPE.  TARGET,
17184    if nonnull, suggests a good place to put the boolean result.  */
17185 
17186 static rtx
mips_expand_builtin_bposge(enum mips_builtin_type builtin_type,rtx target)17187 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
17188 {
17189   rtx condition, cmp_result;
17190   int cmp_value;
17191 
17192   if (target == 0 || GET_MODE (target) != SImode)
17193     target = gen_reg_rtx (SImode);
17194 
17195   cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
17196 
17197   if (builtin_type == MIPS_BUILTIN_BPOSGE32)
17198     cmp_value = 32;
17199   else
17200     gcc_assert (0);
17201 
17202   condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
17203   return mips_builtin_branch_and_move (condition, target,
17204 				       const1_rtx, const0_rtx);
17205 }
17206 
/* Implement TARGET_EXPAND_BUILTIN.  */

static rtx
mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     machine_mode mode, int ignore)
{
  tree fndecl;
  unsigned int fcode, avail;
  const struct mips_builtin_description *d;

  /* FCODE indexes the description table; mips_init_builtins registered
     the builtin under this code, so it must be in range and available.  */
  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  fcode = DECL_FUNCTION_CODE (fndecl);
  gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
  d = &mips_builtins[fcode];
  avail = d->avail ();
  gcc_assert (avail != 0);
  /* A builtin can be registered yet still be unusable from MIPS16 code.  */
  if (TARGET_MIPS16 && !(avail & BUILTIN_AVAIL_MIPS16))
    {
      error ("built-in function %qE not supported for MIPS16",
	     DECL_NAME (fndecl));
      return ignore ? const0_rtx : CONST0_RTX (mode);
    }
  /* Dispatch on the expansion strategy recorded in the description.  */
  switch (d->builtin_type)
    {
    case MIPS_BUILTIN_DIRECT:
      return mips_expand_builtin_direct (d->icode, target, exp, true);

    case MIPS_BUILTIN_DIRECT_NO_TARGET:
      return mips_expand_builtin_direct (d->icode, target, exp, false);

    case MIPS_BUILTIN_MOVT:
    case MIPS_BUILTIN_MOVF:
      return mips_expand_builtin_movtf (d->builtin_type, d->icode,
					d->cond, target, exp);

    case MIPS_BUILTIN_CMP_ANY:
    case MIPS_BUILTIN_CMP_ALL:
    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
    case MIPS_BUILTIN_CMP_SINGLE:
      return mips_expand_builtin_compare (d->builtin_type, d->icode,
					  d->cond, target, exp);

    case MIPS_BUILTIN_MSA_TEST_BRANCH:
      return mips_expand_builtin_msa_test_branch (d->icode, exp);

    case MIPS_BUILTIN_BPOSGE32:
      return mips_expand_builtin_bposge (d->builtin_type, target);
    }
  gcc_unreachable ();
}
17258 
/* An entry in the MIPS16 constant pool.  VALUE is the pool constant,
   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
struct mips16_constant {
  /* The next constant in the pool; the list is kept sorted in
     increasing order of mode size by mips16_add_constant.  */
  struct mips16_constant *next;
  /* The constant rtx itself.  */
  rtx value;
  /* The label emitted immediately before the constant, used by
     instructions to address the pool entry.  */
  rtx_code_label *label;
  /* The mode of VALUE (used for CONST_INTs, etc.).  */
  machine_mode mode;
};
17267 
/* Information about an incomplete MIPS16 constant pool.  FIRST is the
   first constant, HIGHEST_ADDRESS is the highest address that the first
   byte of the pool can have, and INSN_ADDRESS is the current instruction
   address.  */
struct mips16_constant_pool {
  /* Head of the sorted list of pending constants; null if the pool
     is currently empty.  */
  struct mips16_constant *first;
  /* The highest address at which the pool may start and still be
     reachable from every instruction that uses it.  */
  int highest_address;
  /* Running byte address of the instruction being scanned.  */
  int insn_address;
};
17277 
/* Add constant VALUE to POOL and return its label.  MODE is the
   value's mode (used for CONST_INTs, etc.).  */

static rtx_code_label *
mips16_add_constant (struct mips16_constant_pool *pool,
		     rtx value, machine_mode mode)
{
  struct mips16_constant **p, *c;
  bool first_of_size_p;

  /* See whether the constant is already in the pool.  If so, return the
     existing label, otherwise leave P pointing to the place where the
     constant should be added.

     Keep the pool sorted in increasing order of mode size so that we can
     reduce the number of alignments needed.  */
  first_of_size_p = true;
  for (p = &pool->first; *p != 0; p = &(*p)->next)
    {
      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
	return (*p)->label;
      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
	break;
      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
	first_of_size_p = false;
    }

  /* In the worst case, the constant needed by the earliest instruction
     will end up at the end of the pool.  The entire pool must then be
     accessible from that instruction.

     When adding the first constant, set the pool's highest address to
     the address of the first out-of-range byte.  Adjust this address
     downwards each time a new constant is added.  */
  if (pool->first == 0)
    /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
       of the instruction with the lowest two bits clear.  The base PC
       value for LDPC has the lowest three bits clear.  Assume the worst
       case here; namely that the PC-relative instruction occupies the
       last 2 bytes in an aligned word.  */
    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
  /* Each new constant pushes the pool start earlier by its own size...  */
  pool->highest_address -= GET_MODE_SIZE (mode);
  if (first_of_size_p)
    /* ...and, for the first constant of a given size, take into account
       the worst possible padding due to alignment.  */
    pool->highest_address -= GET_MODE_SIZE (mode) - 1;

  /* Create a new entry and splice it in at *P, preserving the
     size-sorted order established above.  */
  c = XNEW (struct mips16_constant);
  c->value = value;
  c->mode = mode;
  c->label = gen_label_rtx ();
  c->next = *p;
  *p = c;

  return c->label;
}
17334 
17335 /* Output constant VALUE after instruction INSN and return the last
17336    instruction emitted.  MODE is the mode of the constant.  */
17337 
17338 static rtx_insn *
mips16_emit_constants_1(machine_mode mode,rtx value,rtx_insn * insn)17339 mips16_emit_constants_1 (machine_mode mode, rtx value, rtx_insn *insn)
17340 {
17341   if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
17342     {
17343       rtx size = GEN_INT (GET_MODE_SIZE (mode));
17344       return emit_insn_after (gen_consttable_int (value, size), insn);
17345     }
17346 
17347   if (SCALAR_FLOAT_MODE_P (mode))
17348     return emit_insn_after (gen_consttable_float (value), insn);
17349 
17350   if (VECTOR_MODE_P (mode))
17351     {
17352       int i;
17353 
17354       for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
17355 	insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
17356 					CONST_VECTOR_ELT (value, i), insn);
17357       return insn;
17358     }
17359 
17360   gcc_unreachable ();
17361 }
17362 
17363 /* Dump out the constants in CONSTANTS after INSN.  Record the initial
17364    label number in the `consttable' and `consttable_end' insns emitted
17365    at the beginning and the end of the constant pool respectively, so
17366    that individual pools can be uniquely marked as data for the purpose
17367    of disassembly.  */
17368 
17369 static void
mips16_emit_constants(struct mips16_constant * constants,rtx_insn * insn)17370 mips16_emit_constants (struct mips16_constant *constants, rtx_insn *insn)
17371 {
17372   int label_num = constants ? CODE_LABEL_NUMBER (constants->label) : 0;
17373   struct mips16_constant *c, *next;
17374   int align;
17375 
17376   align = 0;
17377   if (constants)
17378     insn = emit_insn_after (gen_consttable (GEN_INT (label_num)), insn);
17379   for (c = constants; c != NULL; c = next)
17380     {
17381       /* If necessary, increase the alignment of PC.  */
17382       if (align < GET_MODE_SIZE (c->mode))
17383 	{
17384 	  int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
17385 	  insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
17386 	}
17387       align = GET_MODE_SIZE (c->mode);
17388 
17389       insn = emit_label_after (c->label, insn);
17390       insn = mips16_emit_constants_1 (c->mode, c->value, insn);
17391 
17392       next = c->next;
17393       free (c);
17394     }
17395   if (constants)
17396     insn = emit_insn_after (gen_consttable_end (GEN_INT (label_num)), insn);
17397 
17398   emit_barrier_after (insn);
17399 }
17400 
17401 /* Return the length of instruction INSN.  */
17402 
17403 static int
mips16_insn_length(rtx_insn * insn)17404 mips16_insn_length (rtx_insn *insn)
17405 {
17406   if (JUMP_TABLE_DATA_P (insn))
17407     {
17408       rtx body = PATTERN (insn);
17409       if (GET_CODE (body) == ADDR_VEC)
17410 	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
17411       else if (GET_CODE (body) == ADDR_DIFF_VEC)
17412 	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
17413       else
17414 	gcc_unreachable ();
17415     }
17416   return get_attr_length (insn);
17417 }
17418 
17419 /* If *X is a symbolic constant that refers to the constant pool, add
17420    the constant to POOL and rewrite *X to use the constant's label.  */
17421 
17422 static void
mips16_rewrite_pool_constant(struct mips16_constant_pool * pool,rtx * x)17423 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
17424 {
17425   rtx base, offset;
17426   rtx_code_label *label;
17427 
17428   split_const (*x, &base, &offset);
17429   if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
17430     {
17431       label = mips16_add_constant (pool, copy_rtx (get_pool_constant (base)),
17432 				   get_pool_mode (base));
17433       base = gen_rtx_LABEL_REF (Pmode, label);
17434       *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
17435     }
17436 }
17437 
/* Rewrite INSN so that constant pool references refer to the constant's
   label instead.  */

static void
mips16_rewrite_pool_refs (rtx_insn *insn, struct mips16_constant_pool *pool)
{
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &PATTERN (insn), ALL)
    {
      rtx *loc = *iter;

      /* First force constants that must live in memory into the pool,
	 so that the MEM case below sees them and rewrites the address.  */
      if (force_to_mem_operand (*loc, Pmode))
	{
	  rtx mem = force_const_mem (GET_MODE (*loc), *loc);
	  validate_change (insn, loc, mem, false);
	}

      if (MEM_P (*loc))
	{
	  /* Rewrite the MEM's address and do not descend into it.  */
	  mips16_rewrite_pool_constant (pool, &XEXP (*loc, 0));
	  iter.skip_subrtxes ();
	}
      else
	{
	  /* With text loads, bare symbolic constants can also be
	     PC-relative pool references.  */
	  if (TARGET_MIPS16_TEXT_LOADS)
	    mips16_rewrite_pool_constant (pool, loc);
	  if (GET_CODE (*loc) == CONST
	      /* Don't rewrite the __mips16_rdwr symbol.  */
	      || (GET_CODE (*loc) == UNSPEC
		  && XINT (*loc, 1) == UNSPEC_TLS_GET_TP))
	    iter.skip_subrtxes ();
	}
    }
}
17472 
17473 /* Return whether CFG is used in mips_reorg.  */
17474 
17475 static bool
mips_cfg_in_reorg(void)17476 mips_cfg_in_reorg (void)
17477 {
17478   return (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
17479 	  || TARGET_RELAX_PIC_CALLS);
17480 }
17481 
/* Build MIPS16 constant pools.  Split the instructions if SPLIT_P,
   otherwise assume that they are already split.  */

static void
mips16_lay_out_constants (bool split_p)
{
  struct mips16_constant_pool pool;
  rtx_insn *insn, *barrier;

  /* Nothing to do unless constants are loaded PC-relatively.  */
  if (!TARGET_MIPS16_PCREL_LOADS)
    return;

  if (split_p)
    {
      /* Only use the CFG-aware splitter if the CFG is still valid.  */
      if (mips_cfg_in_reorg ())
	split_all_insns ();
      else
	split_all_insns_noflow ();
    }
  /* Start with an empty pool and no barrier to emit it after.  */
  barrier = 0;
  memset (&pool, 0, sizeof (pool));
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Rewrite constant pool references in INSN.  */
      if (USEFUL_INSN_P (insn))
	mips16_rewrite_pool_refs (insn, &pool);

      /* Track the byte address of the current instruction.  */
      pool.insn_address += mips16_insn_length (insn);

      if (pool.first != NULL)
	{
	  /* If there are no natural barriers between the first user of
	     the pool and the highest acceptable address, we'll need to
	     create a new instruction to jump around the constant pool.
	     In the worst case, this instruction will be 4 bytes long.

	     If it's too late to do this transformation after INSN,
	     do it immediately before INSN.  */
	  if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
	    {
	      rtx_code_label *label;
	      rtx_insn *jump;

	      label = gen_label_rtx ();

	      jump = emit_jump_insn_before (gen_jump (label), insn);
	      JUMP_LABEL (jump) = label;
	      LABEL_NUSES (label) = 1;
	      barrier = emit_barrier_after (jump);

	      emit_label_after (label, barrier);
	      pool.insn_address += 4;
	    }

	  /* See whether the constant pool is now out of range of the first
	     user.  If so, output the constants after the previous barrier.
	     Note that any instructions between BARRIER and INSN (inclusive)
	     will use negative offsets to refer to the pool.  */
	  if (pool.insn_address > pool.highest_address)
	    {
	      mips16_emit_constants (pool.first, barrier);
	      pool.first = NULL;
	      barrier = 0;
	    }
	  else if (BARRIER_P (insn))
	    barrier = insn;
	}
    }
  /* Flush any remaining constants at the end of the function.  */
  mips16_emit_constants (pool.first, get_last_insn ());
}
17552 
17553 /* Return true if it is worth r10k_simplify_address's while replacing
17554    an address with X.  We are looking for constants, and for addresses
17555    at a known offset from the incoming stack pointer.  */
17556 
17557 static bool
r10k_simplified_address_p(rtx x)17558 r10k_simplified_address_p (rtx x)
17559 {
17560   if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
17561     x = XEXP (x, 0);
17562   return x == virtual_incoming_args_rtx || CONSTANT_P (x);
17563 }
17564 
/* X is an expression that appears in INSN.  Try to use the UD chains
   to simplify it, returning the simplified form on success and the
   original form otherwise.  Replace the incoming value of $sp with
   virtual_incoming_args_rtx (which should never occur in X otherwise).  */

static rtx
r10k_simplify_address (rtx x, rtx_insn *insn)
{
  rtx newx, op0, op1, set, note;
  rtx_insn *def_insn;
  df_ref use, def;
  struct df_link *defs;

  /* NEWX is the candidate replacement; it stays null unless some
     sub-expression could be simplified.  */
  newx = NULL_RTX;
  if (UNARY_P (x))
    {
      /* Simplify the operand and rebuild X if it changed.  */
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
      if (op0 != XEXP (x, 0))
	newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
				   op0, GET_MODE (XEXP (x, 0)));
    }
  else if (BINARY_P (x))
    {
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
      op1 = r10k_simplify_address (XEXP (x, 1), insn);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
    }
  else if (GET_CODE (x) == LO_SUM)
    {
      /* LO_SUMs can be offset from HIGHs, if we know they won't
	 overflow.  See mips_classify_address for the rationale behind
	 the lax check.  */
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
      if (GET_CODE (op0) == HIGH)
	newx = XEXP (x, 1);
    }
  else if (REG_P (x))
    {
      /* Uses are recorded by regno_reg_rtx, not X itself.  */
      use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
      gcc_assert (use);
      defs = DF_REF_CHAIN (use);

      /* Require a single definition.  */
      if (defs && defs->next == NULL)
	{
	  def = defs->ref;
	  if (DF_REF_IS_ARTIFICIAL (def))
	    {
	      /* Replace the incoming value of $sp with
		 virtual_incoming_args_rtx.  */
	      if (x == stack_pointer_rtx
		  && DF_REF_BB (def) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		newx = virtual_incoming_args_rtx;
	    }
	  else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
				   DF_REF_BB (def)))
	    {
	      /* Make sure that DEF_INSN is a single set of REG.  */
	      def_insn = DF_REF_INSN (def);
	      if (NONJUMP_INSN_P (def_insn))
		{
		  set = single_set (def_insn);
		  if (set && rtx_equal_p (SET_DEST (set), x))
		    {
		      /* Prefer to use notes, since the def-use chains
			 are often shorter.  */
		      note = find_reg_equal_equiv_note (def_insn);
		      if (note)
			newx = XEXP (note, 0);
		      else
			newx = SET_SRC (set);
		      /* Recurse on the defining value, from the context
			 of its own instruction.  */
		      newx = r10k_simplify_address (newx, def_insn);
		    }
		}
	    }
	}
    }
  /* Only adopt the simplified form if it is one we know how to
     analyze (a constant or incoming-frame reference).  */
  if (newx && r10k_simplified_address_p (newx))
    return newx;
  return x;
}
17648 
17649 /* Return true if ADDRESS is known to be an uncached address
17650    on R10K systems.  */
17651 
17652 static bool
r10k_uncached_address_p(unsigned HOST_WIDE_INT address)17653 r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
17654 {
17655   unsigned HOST_WIDE_INT upper;
17656 
17657   /* Check for KSEG1.  */
17658   if (address + 0x60000000 < 0x20000000)
17659     return true;
17660 
17661   /* Check for uncached XKPHYS addresses.  */
17662   if (Pmode == DImode)
17663     {
17664       upper = (address >> 40) & 0xf9ffff;
17665       if (upper == 0x900000 || upper == 0xb80000)
17666 	return true;
17667     }
17668   return false;
17669 }
17670 
17671 /* Return true if we can prove that an access to address X in instruction
17672    INSN would be safe from R10K speculation.  This X is a general
17673    expression; it might not be a legitimate address.  */
17674 
17675 static bool
r10k_safe_address_p(rtx x,rtx_insn * insn)17676 r10k_safe_address_p (rtx x, rtx_insn *insn)
17677 {
17678   rtx base, offset;
17679   HOST_WIDE_INT offset_val;
17680 
17681   x = r10k_simplify_address (x, insn);
17682 
17683   /* Check for references to the stack frame.  It doesn't really matter
17684      how much of the frame has been allocated at INSN; -mr10k-cache-barrier
17685      allows us to assume that accesses to any part of the eventual frame
17686      is safe from speculation at any point in the function.  */
17687   mips_split_plus (x, &base, &offset_val);
17688   if (base == virtual_incoming_args_rtx
17689       && offset_val >= -cfun->machine->frame.total_size
17690       && offset_val < cfun->machine->frame.args_size)
17691     return true;
17692 
17693   /* Check for uncached addresses.  */
17694   if (CONST_INT_P (x))
17695     return r10k_uncached_address_p (INTVAL (x));
17696 
17697   /* Check for accesses to a static object.  */
17698   split_const (x, &base, &offset);
17699   return offset_within_block_p (base, INTVAL (offset));
17700 }
17701 
/* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
   an in-range access to an automatic variable, or to an object with
   a link-time-constant address.  */

static bool
r10k_safe_mem_expr_p (tree expr, unsigned HOST_WIDE_INT offset)
{
  poly_int64 bitoffset, bitsize;
  tree inner, var_offset;
  machine_mode mode;
  int unsigned_p, reverse_p, volatile_p;

  /* Peel component/array references off EXPR to find the underlying
     declaration and the bit offset of the access within it.  */
  inner = get_inner_reference (expr, &bitsize, &bitoffset, &var_offset, &mode,
			       &unsigned_p, &reverse_p, &volatile_p);
  /* Give up if the base is not a declaration with a known size, or if
     part of the offset is variable.  */
  if (!DECL_P (inner) || !DECL_SIZE_UNIT (inner) || var_offset)
    return false;

  /* Check that the total byte offset lies within the object.  */
  offset += bitoffset / BITS_PER_UNIT;
  return offset < tree_to_uhwi (DECL_SIZE_UNIT (inner));
}
17722 
17723 /* Return true if X contains a MEM that is not safe from R10K speculation.
17724    INSN is the instruction that contains X.  */
17725 
17726 static bool
r10k_needs_protection_p_1(rtx x,rtx_insn * insn)17727 r10k_needs_protection_p_1 (rtx x, rtx_insn *insn)
17728 {
17729   subrtx_var_iterator::array_type array;
17730   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
17731     {
17732       rtx mem = *iter;
17733       if (MEM_P (mem))
17734 	{
17735 	  if ((MEM_EXPR (mem)
17736 	       && MEM_OFFSET_KNOWN_P (mem)
17737 	       && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
17738 	      || r10k_safe_address_p (XEXP (mem, 0), insn))
17739 	    iter.skip_subrtxes ();
17740 	  else
17741 	    return true;
17742 	}
17743     }
17744   return false;
17745 }
17746 
17747 /* A note_stores callback for which DATA points to an instruction pointer.
17748    If *DATA is nonnull, make it null if it X contains a MEM that is not
17749    safe from R10K speculation.  */
17750 
17751 static void
r10k_needs_protection_p_store(rtx x,const_rtx pat ATTRIBUTE_UNUSED,void * data)17752 r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
17753 			       void *data)
17754 {
17755   rtx_insn **insn_ptr;
17756 
17757   insn_ptr = (rtx_insn **) data;
17758   if (*insn_ptr && r10k_needs_protection_p_1 (x, *insn_ptr))
17759     *insn_ptr = NULL;
17760 }
17761 
17762 /* X is the pattern of a call instruction.  Return true if the call is
17763    not to a declared function.  */
17764 
17765 static bool
r10k_needs_protection_p_call(const_rtx x)17766 r10k_needs_protection_p_call (const_rtx x)
17767 {
17768   subrtx_iterator::array_type array;
17769   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
17770     {
17771       const_rtx mem = *iter;
17772       if (MEM_P (mem))
17773 	{
17774 	  const_rtx addr = XEXP (mem, 0);
17775 	  if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DECL (addr))
17776 	    iter.skip_subrtxes ();
17777 	  else
17778 	    return true;
17779 	}
17780     }
17781   return false;
17782 }
17783 
17784 /* Return true if instruction INSN needs to be protected by an R10K
17785    cache barrier.  */
17786 
17787 static bool
r10k_needs_protection_p(rtx_insn * insn)17788 r10k_needs_protection_p (rtx_insn *insn)
17789 {
17790   if (CALL_P (insn))
17791     return r10k_needs_protection_p_call (PATTERN (insn));
17792 
17793   if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
17794     {
17795       note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
17796       return insn == NULL_RTX;
17797     }
17798 
17799   return r10k_needs_protection_p_1 (PATTERN (insn), insn);
17800 }
17801 
17802 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
17803    edge is unconditional.  */
17804 
17805 static bool
r10k_protected_bb_p(basic_block bb,sbitmap protected_bbs)17806 r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
17807 {
17808   edge_iterator ei;
17809   edge e;
17810 
17811   FOR_EACH_EDGE (e, ei, bb->preds)
17812     if (!single_succ_p (e->src)
17813 	|| !bitmap_bit_p (protected_bbs, e->src->index)
17814 	|| (e->flags & EDGE_COMPLEX) != 0)
17815       return false;
17816   return true;
17817 }
17818 
/* Implement -mr10k-cache-barrier= for the current function.  */

static void
r10k_insert_cache_barriers (void)
{
  int *rev_post_order;
  unsigned int i, n;
  basic_block bb;
  sbitmap protected_bbs;
  rtx_insn *insn, *end;
  rtx unprotected_region;

  /* Cache barriers are not implemented for MIPS16 code.  */
  if (TARGET_MIPS16)
    {
      sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
      return;
    }

  /* Calculate dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Bit X of PROTECTED_BBS is set if the last operation in basic block
     X is protected by a cache barrier.  */
  protected_bbs = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (protected_bbs);

  /* Iterate over the basic blocks in reverse post-order, so that each
     block's predecessors are normally processed first.  */
  rev_post_order = XNEWVEC (int, last_basic_block_for_fn (cfun));
  n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
  for (i = 0; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, rev_post_order[i]);

      /* If this block is only reached by unconditional edges, and if the
	 source of every edge is protected, the beginning of the block is
	 also protected.  */
      if (r10k_protected_bb_p (bb, protected_bbs))
	unprotected_region = NULL_RTX;
      else
	unprotected_region = pc_rtx;
      end = NEXT_INSN (BB_END (bb));

      /* UNPROTECTED_REGION is:

	 - null if we are processing a protected region,
	 - pc_rtx if we are processing an unprotected region but have
	   not yet found the first instruction in it
	 - the first instruction in an unprotected region otherwise.  */
      for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
	{
	  if (unprotected_region && USEFUL_INSN_P (insn))
	    {
	      if (recog_memoized (insn) == CODE_FOR_mips_cache)
		/* This CACHE instruction protects the following code.  */
		unprotected_region = NULL_RTX;
	      else
		{
		  /* See if INSN is the first instruction in this
		     unprotected region.  */
		  if (unprotected_region == pc_rtx)
		    unprotected_region = insn;

		  /* See if INSN needs to be protected.  If so,
		     we must insert a cache barrier somewhere between
		     PREV_INSN (UNPROTECTED_REGION) and INSN.  It isn't
		     clear which position is better performance-wise,
		     but as a tie-breaker, we assume that it is better
		     to allow delay slots to be back-filled where
		     possible, and that it is better not to insert
		     barriers in the middle of already-scheduled code.
		     We therefore insert the barrier at the beginning
		     of the region.  */
		  if (r10k_needs_protection_p (insn))
		    {
		      emit_insn_before (gen_r10k_cache_barrier (),
					as_a <rtx_insn *> (unprotected_region));
		      unprotected_region = NULL_RTX;
		    }
		}
	    }

	  if (CALL_P (insn))
	    /* The called function is not required to protect the exit path.
	       The code that follows a call is therefore unprotected.  */
	    unprotected_region = pc_rtx;
	}

      /* Record whether the end of this block is protected.  */
      if (unprotected_region == NULL_RTX)
	bitmap_set_bit (protected_bbs, bb->index);
    }
  XDELETEVEC (rev_post_order);

  sbitmap_free (protected_bbs);

  free_dominance_info (CDI_DOMINATORS);
}
17916 
17917 /* If INSN is a call, return the underlying CALL expr.  Return NULL_RTX
17918    otherwise.  If INSN has two call rtx, then store the second one in
17919    SECOND_CALL.  */
17920 
17921 static rtx
mips_call_expr_from_insn(rtx_insn * insn,rtx * second_call)17922 mips_call_expr_from_insn (rtx_insn *insn, rtx *second_call)
17923 {
17924   rtx x;
17925   rtx x2;
17926 
17927   if (!CALL_P (insn))
17928     return NULL_RTX;
17929 
17930   x = PATTERN (insn);
17931   if (GET_CODE (x) == PARALLEL)
17932     {
17933       /* Calls returning complex values have two CALL rtx.  Look for the second
17934 	 one here, and return it via the SECOND_CALL arg.  */
17935       x2 = XVECEXP (x, 0, 1);
17936       if (GET_CODE (x2) == SET)
17937 	x2 = XEXP (x2, 1);
17938       if (GET_CODE (x2) == CALL)
17939 	*second_call = x2;
17940 
17941       x = XVECEXP (x, 0, 0);
17942     }
17943   if (GET_CODE (x) == SET)
17944     x = XEXP (x, 1);
17945   gcc_assert (GET_CODE (x) == CALL);
17946 
17947   return x;
17948 }
17949 
17950 /* REG is set in DEF.  See if the definition is one of the ways we load a
17951    register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
17952    If it is, return the symbol reference of the function, otherwise return
17953    NULL_RTX.
17954 
17955    If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
17956    the values of source registers, otherwise treat such registers as
17957    having an unknown value.  */
17958 
17959 static rtx
mips_pic_call_symbol_from_set(df_ref def,rtx reg,bool recurse_p)17960 mips_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p)
17961 {
17962   rtx_insn *def_insn;
17963   rtx set;
17964 
17965   if (DF_REF_IS_ARTIFICIAL (def))
17966     return NULL_RTX;
17967 
17968   def_insn = DF_REF_INSN (def);
17969   set = single_set (def_insn);
17970   if (set && rtx_equal_p (SET_DEST (set), reg))
17971     {
17972       rtx note, src, symbol;
17973 
17974       /* First see whether the source is a plain symbol.  This is used
17975 	 when calling symbols that are not lazily bound.  */
17976       src = SET_SRC (set);
17977       if (GET_CODE (src) == SYMBOL_REF)
17978 	return src;
17979 
17980       /* Handle %call16 references.  */
17981       symbol = mips_strip_unspec_call (src);
17982       if (symbol)
17983 	{
17984 	  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
17985 	  return symbol;
17986 	}
17987 
17988       /* If we have something more complicated, look for a
17989 	 REG_EQUAL or REG_EQUIV note.  */
17990       note = find_reg_equal_equiv_note (def_insn);
17991       if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
17992 	return XEXP (note, 0);
17993 
17994       /* Follow at most one simple register copy.  Such copies are
17995 	 interesting in cases like:
17996 
17997 	     for (...)
17998 	       {
17999 	         locally_binding_fn (...);
18000 	       }
18001 
18002 	 and:
18003 
18004 	     locally_binding_fn (...);
18005 	     ...
18006 	     locally_binding_fn (...);
18007 
18008 	 where the load of locally_binding_fn can legitimately be
18009 	 hoisted or shared.  However, we do not expect to see complex
18010 	 chains of copies, so a full worklist solution to the problem
18011 	 would probably be overkill.  */
18012       if (recurse_p && REG_P (src))
18013 	return mips_find_pic_call_symbol (def_insn, src, false);
18014     }
18015 
18016   return NULL_RTX;
18017 }
18018 
18019 /* Find the definition of the use of REG in INSN.  See if the definition
18020    is one of the ways we load a register with a symbol address for a
18021    mips_use_pic_fn_addr_reg_p call.  If it is return the symbol reference
18022    of the function, otherwise return NULL_RTX.  RECURSE_P is as for
18023    mips_pic_call_symbol_from_set.  */
18024 
18025 static rtx
mips_find_pic_call_symbol(rtx_insn * insn,rtx reg,bool recurse_p)18026 mips_find_pic_call_symbol (rtx_insn *insn, rtx reg, bool recurse_p)
18027 {
18028   df_ref use;
18029   struct df_link *defs;
18030   rtx symbol;
18031 
18032   use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
18033   if (!use)
18034     return NULL_RTX;
18035   defs = DF_REF_CHAIN (use);
18036   if (!defs)
18037     return NULL_RTX;
18038   symbol = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
18039   if (!symbol)
18040     return NULL_RTX;
18041 
18042   /* If we have more than one definition, they need to be identical.  */
18043   for (defs = defs->next; defs; defs = defs->next)
18044     {
18045       rtx other;
18046 
18047       other = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
18048       if (!rtx_equal_p (symbol, other))
18049 	return NULL_RTX;
18050     }
18051 
18052   return symbol;
18053 }
18054 
18055 /* Replace the args_size operand of the call expression CALL with the
18056    call-attribute UNSPEC and fill in SYMBOL as the function symbol.  */
18057 
18058 static void
mips_annotate_pic_call_expr(rtx call,rtx symbol)18059 mips_annotate_pic_call_expr (rtx call, rtx symbol)
18060 {
18061   rtx args_size;
18062 
18063   args_size = XEXP (call, 1);
18064   XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
18065 				   gen_rtvec (2, args_size, symbol),
18066 				   UNSPEC_CALL_ATTR);
18067 }
18068 
18069 /* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression.  See
18070    if instead of the arg_size argument it contains the call attributes.  If
18071    yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
18072    symbol from the call attributes.  Also return false if ARGS_SIZE_OPNO is
18073    -1.  */
18074 
18075 bool
mips_get_pic_call_symbol(rtx * operands,int args_size_opno)18076 mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
18077 {
18078   rtx args_size, symbol;
18079 
18080   if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
18081     return false;
18082 
18083   args_size = operands[args_size_opno];
18084   if (GET_CODE (args_size) != UNSPEC)
18085     return false;
18086   gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);
18087 
18088   symbol = XVECEXP (args_size, 0, 1);
18089   gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18090 
18091   operands[args_size_opno] = symbol;
18092   return true;
18093 }
18094 
18095 /* Use DF to annotate PIC indirect calls with the function symbol they
18096    dispatch to.  */
18097 
static void
mips_annotate_pic_calls (void)
{
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
    {
      rtx call, reg, symbol, second_call;

      second_call = 0;
      call = mips_call_expr_from_insn (insn, &second_call);
      if (!call)
	continue;
      gcc_assert (MEM_P (XEXP (call, 0)));
      /* Only indirect calls (address held in a register) can be
	 annotated; skip anything else.  */
      reg = XEXP (XEXP (call, 0), 0);
      if (!REG_P (reg))
	continue;

      /* Try to prove that REG holds a single known function symbol
	 at this call site, following definitions recursively.  */
      symbol = mips_find_pic_call_symbol (insn, reg, true);
      if (symbol)
	{
	  mips_annotate_pic_call_expr (call, symbol);
	  /* A single insn can contain two call expressions; annotate
	     the second one with the same symbol.  */
	  if (second_call)
	    mips_annotate_pic_call_expr (second_call, symbol);
	}
    }
}
18127 
/* A temporary variable used by note_uses/note_stores callbacks, etc.
   It holds the instruction currently being processed by the mips_sim_*
   routines (set in mips_sim_wait_regs and mips_sim_issue_insn).  */
static rtx_insn *mips_sim_insn;
18130 
/* A structure representing the state of the processor pipeline.
   Used by the mips_sim_* family of functions.  */
struct mips_sim {
  /* The maximum number of instructions that can be issued in a cycle.
     (Caches mips_issue_rate.)  */
  unsigned int issue_rate;

  /* The current simulation time, in cycles.  */
  unsigned int time;

  /* How many more instructions can be issued in the current cycle.  */
  unsigned int insns_left;

  /* LAST_SET[X].INSN is the last instruction to set register X.
     LAST_SET[X].TIME is the time at which that instruction was issued.
     INSN is null if no instruction has yet set register X.  */
  struct {
    rtx_insn *insn;
    unsigned int time;
  } last_set[FIRST_PSEUDO_REGISTER];

  /* The pipeline's current DFA state.  It is installed as the global
     curr_state before the DFA is queried (see mips_sim_reset).  */
  state_t dfa_state;
};
18155 
18156 /* Reset STATE to the initial simulation state.  */
18157 
static void
mips_sim_reset (struct mips_sim *state)
{
  /* The DFA query routines operate on the global curr_state, so
     install our state there first.  */
  curr_state = state->dfa_state;

  state->time = 0;
  state->insns_left = state->issue_rate;
  memset (&state->last_set, 0, sizeof (state->last_set));
  state_reset (curr_state);

  targetm.sched.init (0, false, 0);
  advance_state (curr_state);
}
18171 
18172 /* Initialize STATE before its first use.  DFA_STATE points to an
18173    allocated but uninitialized DFA state.  */
18174 
static void
mips_sim_init (struct mips_sim *state, state_t dfa_state)
{
  /* These scheduler hooks are optional; invoke them only when the
     target provides them.  */
  if (targetm.sched.init_dfa_pre_cycle_insn)
    targetm.sched.init_dfa_pre_cycle_insn ();

  if (targetm.sched.init_dfa_post_cycle_insn)
    targetm.sched.init_dfa_post_cycle_insn ();

  state->issue_rate = mips_issue_rate ();
  state->dfa_state = dfa_state;
  mips_sim_reset (state);
}
18188 
18189 /* Advance STATE by one clock cycle.  */
18190 
static void
mips_sim_next_cycle (struct mips_sim *state)
{
  curr_state = state->dfa_state;

  state->time++;
  /* A fresh cycle restores the full issue width.  */
  state->insns_left = state->issue_rate;
  advance_state (curr_state);
}
18200 
18201 /* Advance simulation state STATE until instruction INSN can read
18202    register REG.  */
18203 
18204 static void
mips_sim_wait_reg(struct mips_sim * state,rtx_insn * insn,rtx reg)18205 mips_sim_wait_reg (struct mips_sim *state, rtx_insn *insn, rtx reg)
18206 {
18207   unsigned int regno, end_regno;
18208 
18209   end_regno = END_REGNO (reg);
18210   for (regno = REGNO (reg); regno < end_regno; regno++)
18211     if (state->last_set[regno].insn != 0)
18212       {
18213 	unsigned int t;
18214 
18215 	t = (state->last_set[regno].time
18216 	     + insn_latency (state->last_set[regno].insn, insn));
18217 	while (state->time < t)
18218 	  mips_sim_next_cycle (state);
18219     }
18220 }
18221 
18222 /* A note_uses callback.  For each register in *X, advance simulation
18223    state DATA until mips_sim_insn can read the register's value.  */
18224 
static void
mips_sim_wait_regs_1 (rtx *x, void *data)
{
  subrtx_var_iterator::array_type array;
  /* Walk every subexpression of *X and wait for each register it
     reads.  DATA is the struct mips_sim state.  */
  FOR_EACH_SUBRTX_VAR (iter, array, *x, NONCONST)
    if (REG_P (*iter))
      mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *iter);
}
18233 
18234 /* Advance simulation state STATE until all of INSN's register
18235    dependencies are satisfied.  */
18236 
static void
mips_sim_wait_regs (struct mips_sim *state, rtx_insn *insn)
{
  /* note_uses does not pass the insn to its callback, so communicate
     INSN through the file-static mips_sim_insn.  */
  mips_sim_insn = insn;
  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
}
18243 
18244 /* Advance simulation state STATE until the units required by
18245    instruction INSN are available.  */
18246 
18247 static void
mips_sim_wait_units(struct mips_sim * state,rtx_insn * insn)18248 mips_sim_wait_units (struct mips_sim *state, rtx_insn *insn)
18249 {
18250   state_t tmp_state;
18251 
18252   tmp_state = alloca (state_size ());
18253   while (state->insns_left == 0
18254 	 || (memcpy (tmp_state, state->dfa_state, state_size ()),
18255 	     state_transition (tmp_state, insn) >= 0))
18256     mips_sim_next_cycle (state);
18257 }
18258 
18259 /* Advance simulation state STATE until INSN is ready to issue.  */
18260 
static void
mips_sim_wait_insn (struct mips_sim *state, rtx_insn *insn)
{
  /* First satisfy register dependencies, then wait for the required
     functional units to become free.  */
  mips_sim_wait_regs (state, insn);
  mips_sim_wait_units (state, insn);
}
18267 
18268 /* mips_sim_insn has just set X.  Update the LAST_SET array
18269    in simulation state DATA.  */
18270 
18271 static void
mips_sim_record_set(rtx x,const_rtx pat ATTRIBUTE_UNUSED,void * data)18272 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
18273 {
18274   struct mips_sim *state;
18275 
18276   state = (struct mips_sim *) data;
18277   if (REG_P (x))
18278     {
18279       unsigned int regno, end_regno;
18280 
18281       end_regno = END_REGNO (x);
18282       for (regno = REGNO (x); regno < end_regno; regno++)
18283 	{
18284 	  state->last_set[regno].insn = mips_sim_insn;
18285 	  state->last_set[regno].time = state->time;
18286 	}
18287     }
18288 }
18289 
18290 /* Issue instruction INSN in scheduler state STATE.  Assume that INSN
18291    can issue immediately (i.e., that mips_sim_wait_insn has already
18292    been called).  */
18293 
static void
mips_sim_issue_insn (struct mips_sim *state, rtx_insn *insn)
{
  curr_state = state->dfa_state;

  state_transition (curr_state, insn);
  /* Let the target hook decide how many issue slots INSN consumes.  */
  state->insns_left = targetm.sched.variable_issue (0, false, insn,
						    state->insns_left);

  /* mips_sim_record_set reads the insn from mips_sim_insn.  */
  mips_sim_insn = insn;
  note_stores (PATTERN (insn), mips_sim_record_set, state);
}
18306 
18307 /* Simulate issuing a NOP in state STATE.  */
18308 
static void
mips_sim_issue_nop (struct mips_sim *state)
{
  /* A nop consumes one issue slot but has no register or unit
     requirements, so only a cycle advance may be needed.  */
  if (state->insns_left == 0)
    mips_sim_next_cycle (state);
  state->insns_left--;
}
18316 
18317 /* Update simulation state STATE so that it's ready to accept the instruction
18318    after INSN.  INSN should be part of the main rtl chain, not a member of a
18319    SEQUENCE.  */
18320 
static void
mips_sim_finish_insn (struct mips_sim *state, rtx_insn *insn)
{
  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
  if (JUMP_P (insn))
    mips_sim_issue_nop (state);

  /* For a SEQUENCE, classify by the branch/call at its head.  */
  switch (GET_CODE (SEQ_BEGIN (insn)))
    {
    case CODE_LABEL:
    case CALL_INSN:
      /* We can't predict the processor state after a call or label.  */
      mips_sim_reset (state);
      break;

    case JUMP_INSN:
      /* The delay slots of branch likely instructions are only executed
	 when the branch is taken.  Therefore, if the caller has simulated
	 the delay slot instruction, STATE does not really reflect the state
	 of the pipeline for the instruction after the delay slot.  Also,
	 branch likely instructions tend to incur a penalty when not taken,
	 so there will probably be an extra delay between the branch and
	 the instruction after the delay slot.  */
      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
	mips_sim_reset (state);
      break;

    default:
      /* Other instructions leave the simulated state unchanged.  */
      break;
    }
}
18352 
18353 /* Use simulator state STATE to calculate the execution time of
18354    instruction sequence SEQ.  */
18355 
18356 static unsigned int
mips_seq_time(struct mips_sim * state,rtx_insn * seq)18357 mips_seq_time (struct mips_sim *state, rtx_insn *seq)
18358 {
18359   mips_sim_reset (state);
18360   for (rtx_insn *insn = seq; insn; insn = NEXT_INSN (insn))
18361     {
18362       mips_sim_wait_insn (state, insn);
18363       mips_sim_issue_insn (state, insn);
18364     }
18365   return state->time;
18366 }
18367 
18368 /* Return the execution-time cost of mips_tuning_info.fast_mult_zero_zero_p
18369    setting SETTING, using STATE to simulate instruction sequences.  */
18370 
static unsigned int
mips_mult_zero_zero_cost (struct mips_sim *state, bool setting)
{
  /* Emit the HI/LO-zeroing sequence selected by SETTING (see the
     comment above mips_set_fast_mult_zero_zero_p) and time it.  */
  mips_tuning_info.fast_mult_zero_zero_p = setting;
  start_sequence ();

  machine_mode dword_mode = TARGET_64BIT ? TImode : DImode;
  rtx hilo = gen_rtx_REG (dword_mode, MD_REG_FIRST);
  mips_emit_move_or_split (hilo, const0_rtx, SPLIT_FOR_SPEED);

  /* If the target provides mulsidi3_32bit then that's the most likely
     consumer of the result.  Test for bypasses.  */
  if (dword_mode == DImode && HAVE_maddsidi4)
    {
      rtx gpr = gen_rtx_REG (SImode, GP_REG_FIRST + 4);
      emit_insn (gen_maddsidi4 (hilo, gpr, gpr, hilo));
    }

  unsigned int time = mips_seq_time (state, get_insns ());
  end_sequence ();
  return time;
}
18393 
18394 /* Check the relative speeds of "MULT $0,$0" and "MTLO $0; MTHI $0"
18395    and set up mips_tuning_info.fast_mult_zero_zero_p accordingly.
18396    Prefer MULT -- which is shorter -- in the event of a tie.  */
18397 
static void
mips_set_fast_mult_zero_zero_p (struct mips_sim *state)
{
  if (TARGET_MIPS16 || !ISA_HAS_HILO)
    /* No MTLO or MTHI available for MIPS16. Also, when there are no HI or LO
       registers then there is no reason to zero them, arbitrarily choose to
       say that "MULT $0,$0" would be faster.  */
    mips_tuning_info.fast_mult_zero_zero_p = true;
  else
    {
      /* Simulate both alternatives; <= makes MULT win a tie, since it
	 is the shorter encoding.  */
      unsigned int true_time = mips_mult_zero_zero_cost (state, true);
      unsigned int false_time = mips_mult_zero_zero_cost (state, false);
      mips_tuning_info.fast_mult_zero_zero_p = (true_time <= false_time);
    }
}
18413 
18414 /* Set up costs based on the current architecture and tuning settings.  */
18415 
static void
mips_set_tuning_info (void)
{
  /* Nothing to do if the cached information is still valid for the
     current arch/tune/MIPS16 combination.  */
  if (mips_tuning_info.initialized_p
      && mips_tuning_info.arch == mips_arch
      && mips_tuning_info.tune == mips_tune
      && mips_tuning_info.mips16_p == TARGET_MIPS16)
    return;

  mips_tuning_info.arch = mips_arch;
  mips_tuning_info.tune = mips_tune;
  mips_tuning_info.mips16_p = TARGET_MIPS16;
  mips_tuning_info.initialized_p = true;

  /* The pipeline simulator needs the DFA infrastructure to be live.  */
  dfa_start ();

  struct mips_sim state;
  mips_sim_init (&state, alloca (state_size ()));

  mips_set_fast_mult_zero_zero_p (&state);

  dfa_finish ();
}
18439 
18440 /* Implement TARGET_EXPAND_TO_RTL_HOOK.  */
18441 
static void
mips_expand_to_rtl_hook (void)
{
  /* We need to call this at a point where we can safely create sequences
     of instructions, so TARGET_OVERRIDE_OPTIONS is too early.  We also
     need to call it at a point where the DFA infrastructure is not
     already in use, so we can't just call it lazily on demand.

     At present, mips_tuning_info is only needed during post-expand
     RTL passes such as split_insns, so this hook should be early enough.
     We may need to move the call elsewhere if mips_tuning_info starts
     to be used for other things (such as rtx_costs, or expanders that
     could be called during gimple optimization).

     The call is cheap when nothing has changed, since mips_set_tuning_info
     caches its results.  */
  mips_set_tuning_info ();
}
18457 
18458 /* The VR4130 pipeline issues aligned pairs of instructions together,
18459    but it stalls the second instruction if it depends on the first.
18460    In order to cut down the amount of logic required, this dependence
18461    check is not based on a full instruction decode.  Instead, any non-SPECIAL
18462    instruction is assumed to modify the register specified by bits 20-16
18463    (which is usually the "rt" field).
18464 
18465    In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
18466    input, so we can end up with a false dependence between the branch
18467    and its delay slot.  If this situation occurs in instruction INSN,
18468    try to avoid it by swapping rs and rt.  */
18469 
18470 static void
vr4130_avoid_branch_rt_conflict(rtx_insn * insn)18471 vr4130_avoid_branch_rt_conflict (rtx_insn *insn)
18472 {
18473   rtx_insn *first, *second;
18474 
18475   first = SEQ_BEGIN (insn);
18476   second = SEQ_END (insn);
18477   if (JUMP_P (first)
18478       && NONJUMP_INSN_P (second)
18479       && GET_CODE (PATTERN (first)) == SET
18480       && GET_CODE (SET_DEST (PATTERN (first))) == PC
18481       && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
18482     {
18483       /* Check for the right kind of condition.  */
18484       rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
18485       if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
18486 	  && REG_P (XEXP (cond, 0))
18487 	  && REG_P (XEXP (cond, 1))
18488 	  && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
18489 	  && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
18490 	{
18491 	  /* SECOND mentions the rt register but not the rs register.  */
18492 	  rtx tmp = XEXP (cond, 0);
18493 	  XEXP (cond, 0) = XEXP (cond, 1);
18494 	  XEXP (cond, 1) = tmp;
18495 	}
18496     }
18497 }
18498 
18499 /* Implement -mvr4130-align.  Go through each basic block and simulate the
18500    processor pipeline.  If we find that a pair of instructions could execute
18501    in parallel, and the first of those instructions is not 8-byte aligned,
18502    insert a nop to make it aligned.  */
18503 
static void
vr4130_align_insns (void)
{
  struct mips_sim state;
  rtx_insn *insn, *subinsn, *last, *last2, *next;
  bool aligned_p;

  dfa_start ();

  /* LAST is the last instruction before INSN to have a nonzero length.
     LAST2 is the last such instruction before LAST.  */
  last = 0;
  last2 = 0;

  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
  aligned_p = true;

  mips_sim_init (&state, alloca (state_size ()));
  for (insn = get_insns (); insn != 0; insn = next)
    {
      unsigned int length;

      next = NEXT_INSN (insn);

      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
	 This isn't really related to the alignment pass, but we do it on
	 the fly to avoid a separate instruction walk.  */
      vr4130_avoid_branch_rt_conflict (insn);

      length = get_attr_length (insn);
      if (length > 0 && USEFUL_INSN_P (insn))
	FOR_EACH_SUBINSN (subinsn, insn)
	  {
	    mips_sim_wait_insn (&state, subinsn);

	    /* If we want this instruction to issue in parallel with the
	       previous one, make sure that the previous instruction is
	       aligned.  There are several reasons why this isn't worthwhile
	       when the second instruction is a call:

	          - Calls are less likely to be performance critical,
		  - There's a good chance that the delay slot can execute
		    in parallel with the call.
	          - The return address would then be unaligned.

	       In general, if we're going to insert a nop between instructions
	       X and Y, it's better to insert it immediately after X.  That
	       way, if the nop makes Y aligned, it will also align any labels
	       between X and Y.  */
	    if (state.insns_left != state.issue_rate
		&& !CALL_P (subinsn))
	      {
		if (subinsn == SEQ_BEGIN (insn) && aligned_p)
		  {
		    /* SUBINSN is the first instruction in INSN and INSN is
		       aligned.  We want to align the previous instruction
		       instead, so insert a nop between LAST2 and LAST.

		       Note that LAST could be either a single instruction
		       or a branch with a delay slot.  In the latter case,
		       LAST, like INSN, is already aligned, but the delay
		       slot must have some extra delay that stops it from
		       issuing at the same time as the branch.  We therefore
		       insert a nop before the branch in order to align its
		       delay slot.  */
		    gcc_assert (last2);
		    emit_insn_after (gen_nop (), last2);
		    aligned_p = false;
		  }
		else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
		  {
		    /* SUBINSN is the delay slot of INSN, but INSN is
		       currently unaligned.  Insert a nop between
		       LAST and INSN to align it.  */
		    gcc_assert (last);
		    emit_insn_after (gen_nop (), last);
		    aligned_p = true;
		  }
	      }
	    mips_sim_issue_insn (&state, subinsn);
	  }
      mips_sim_finish_insn (&state, insn);

      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
      length = get_attr_length (insn);
      if (length > 0)
	{
	  /* If the instruction is an asm statement or multi-instruction
	     mips.md pattern, the length is only an estimate.  Insert an
	     8 byte alignment after it so that the following instructions
	     can be handled correctly.  */
	  if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
	      && (recog_memoized (insn) < 0 || length >= 8))
	    {
	      next = emit_insn_after (gen_align (GEN_INT (3)), insn);
	      next = NEXT_INSN (next);
	      mips_sim_next_cycle (&state);
	      aligned_p = true;
	    }
	  else if (length & 4)
	    /* An odd multiple of 4 bytes toggles the 8-byte alignment.  */
	    aligned_p = !aligned_p;
	  last2 = last;
	  last = insn;
	}

      /* See whether INSN is a label aligned to 2^3 = 8 bytes or more.  */
      if (LABEL_P (insn) && label_to_alignment (insn).levels[0].log >= 3)
	aligned_p = true;
    }
  dfa_finish ();
}
18615 
18616 /* This structure records that the current function has a LO_SUM
18617    involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
18618    the largest offset applied to BASE by all such LO_SUMs.  */
struct mips_lo_sum_offset {
  /* The SYMBOL_REF or LABEL_REF involved in the LO_SUMs.  */
  rtx base;
  /* The largest offset applied to BASE by any recorded LO_SUM.  */
  HOST_WIDE_INT offset;
};
18623 
18624 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE.  */
18625 
18626 static hashval_t
mips_hash_base(rtx base)18627 mips_hash_base (rtx base)
18628 {
18629   int do_not_record_p;
18630 
18631   return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
18632 }
18633 
18634 /* Hashtable helpers.  */
18635 
struct mips_lo_sum_offset_hasher : free_ptr_hash <mips_lo_sum_offset>
{
  /* Entries are looked up directly by SYMBOL_REF/LABEL_REF rtx.  */
  typedef rtx_def *compare_type;
  static inline hashval_t hash (const mips_lo_sum_offset *);
  static inline bool equal (const mips_lo_sum_offset *, const rtx_def *);
};
18642 
18643 /* Hash-table callbacks for mips_lo_sum_offsets.  */
18644 
inline hashval_t
mips_lo_sum_offset_hasher::hash (const mips_lo_sum_offset *entry)
{
  /* Hash on the base symbol only; offsets are merged per base.  */
  return mips_hash_base (entry->base);
}
18650 
inline bool
mips_lo_sum_offset_hasher::equal (const mips_lo_sum_offset *entry,
				  const rtx_def *value)
{
  /* Compare by rtx structure, not pointer identity.  */
  return rtx_equal_p (entry->base, value);
}

/* A hash table mapping LO_SUM bases to their maximum offsets.  */
typedef hash_table<mips_lo_sum_offset_hasher> mips_offset_table;
18659 
18660 /* Look up symbolic constant X in HTAB, which is a hash table of
18661    mips_lo_sum_offsets.  If OPTION is NO_INSERT, return true if X can be
18662    paired with a recorded LO_SUM, otherwise record X in the table.  */
18663 
18664 static bool
mips_lo_sum_offset_lookup(mips_offset_table * htab,rtx x,enum insert_option option)18665 mips_lo_sum_offset_lookup (mips_offset_table *htab, rtx x,
18666 			   enum insert_option option)
18667 {
18668   rtx base, offset;
18669   mips_lo_sum_offset **slot;
18670   struct mips_lo_sum_offset *entry;
18671 
18672   /* Split X into a base and offset.  */
18673   split_const (x, &base, &offset);
18674   if (UNSPEC_ADDRESS_P (base))
18675     base = UNSPEC_ADDRESS (base);
18676 
18677   /* Look up the base in the hash table.  */
18678   slot = htab->find_slot_with_hash (base, mips_hash_base (base), option);
18679   if (slot == NULL)
18680     return false;
18681 
18682   entry = (struct mips_lo_sum_offset *) *slot;
18683   if (option == INSERT)
18684     {
18685       if (entry == NULL)
18686 	{
18687 	  entry = XNEW (struct mips_lo_sum_offset);
18688 	  entry->base = base;
18689 	  entry->offset = INTVAL (offset);
18690 	  *slot = entry;
18691 	}
18692       else
18693 	{
18694 	  if (INTVAL (offset) > entry->offset)
18695 	    entry->offset = INTVAL (offset);
18696 	}
18697     }
18698   return INTVAL (offset) <= entry->offset;
18699 }
18700 
18701 /* Search X for LO_SUMs and record them in HTAB.  */
18702 
static void
mips_record_lo_sums (const_rtx x, mips_offset_table *htab)
{
  subrtx_iterator::array_type array;
  /* Record the symbolic operand of every LO_SUM found in X.  */
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    if (GET_CODE (*iter) == LO_SUM)
      mips_lo_sum_offset_lookup (htab, XEXP (*iter, 1), INSERT);
}
18711 
18712 /* Return true if INSN is a SET of an orphaned high-part relocation.
18713    HTAB is a hash table of mips_lo_sum_offsets that describes all the
18714    LO_SUMs in the current function.  */
18715 
static bool
mips_orphaned_high_part_p (mips_offset_table *htab, rtx_insn *insn)
{
  enum mips_symbol_type type;
  rtx x, set;

  set = single_set (insn);
  if (set)
    {
      /* Check for %his.  */
      x = SET_SRC (set);
      if (GET_CODE (x) == HIGH
	  && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
	/* The HIGH is orphaned if no recorded LO_SUM can pair with it.  */
	return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);

      /* Check for local %gots (and %got_pages, which is redundant but OK).  */
      if (GET_CODE (x) == UNSPEC
	  && XINT (x, 1) == UNSPEC_LOAD_GOT
	  && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
				       SYMBOL_CONTEXT_LEA, &type)
	  && type == SYMBOL_GOTOFF_PAGE)
	return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
    }
  return false;
}
18741 
18742 /* Subroutine of mips_avoid_hazard.  We classify unconditional branches
18743    of interest for the P6600 for performance reasons.  We're interested
18744    in differentiating BALC from JIC, JIALC and BC.  */
18745 
static enum mips_ucbranch_type
mips_classify_branch_p6600 (rtx_insn *insn)
{
  /* We ignore sequences here as they represent a filled delay slot.  */
  if (!insn
      || !USEFUL_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == SEQUENCE)
    return UC_UNDEFINED;

  if (get_attr_jal (insn) == JAL_INDIRECT /* JIC and JIALC.  */
      || get_attr_type (insn) == TYPE_JUMP) /* BC.  */
    return UC_OTHER;

  /* A direct call is a BALC.  */
  if (CALL_P (insn) && get_attr_jal (insn) == JAL_DIRECT)
    return UC_BALC;

  return UC_UNDEFINED;
}
18764 
18765 /* Subroutine of mips_reorg_process_insns.  If there is a hazard between
18766    INSN and a previous instruction, avoid it by inserting nops after
18767    instruction AFTER.
18768 
18769    *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
18770    this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
18771    before using the value of that register.  *HILO_DELAY counts the
18772    number of instructions since the last hilo hazard (that is,
18773    the number of instructions since the last MFLO or MFHI).
18774 
18775    After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
18776    for the next instruction.
18777 
18778    LO_REG is an rtx for the LO register, used in dependence checking.  */
18779 
static void
mips_avoid_hazard (rtx_insn *after, rtx_insn *insn, int *hilo_delay,
		   rtx *delayed_reg, rtx lo_reg, bool *fs_delay)
{
  rtx pattern, set;
  int nops, ninsns;

  pattern = PATTERN (insn);

  /* Do not put the whole function in .set noreorder if it contains
     an asm statement.  We don't know whether there will be hazards
     between the asm statement and the gcc-generated code.  */
  if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
    cfun->machine->all_noreorder_p = false;

  /* Ignore zero-length instructions (barriers and the like).  */
  ninsns = get_attr_length (insn) / 4;
  if (ninsns == 0)
    return;

  /* Work out how many nops are needed.  Note that we only care about
     registers that are explicitly mentioned in the instruction's pattern.
     It doesn't matter that calls use the argument registers or that they
     clobber hi and lo.  */
  if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
    /* Writing HI/LO within two instructions of the last MFHI/MFLO needs
       separating nops.  */
    nops = 2 - *hilo_delay;
  else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
    nops = 1;
  /* If processing a forbidden slot hazard then a NOP is required if the
     branch instruction was not in a sequence (as the sequence would
     imply it is not actually a compact branch anyway) and the current
     insn is not an inline asm, and can't go in a delay slot.  */
  else if (*fs_delay && get_attr_can_delay (insn) == CAN_DELAY_NO
	   && GET_CODE (PATTERN (after)) != SEQUENCE
	   && GET_CODE (pattern) != ASM_INPUT
	   && asm_noperands (pattern) < 0)
    nops = 1;
  /* The P6600's branch predictor can handle static sequences of back-to-back
     branches in the following cases:

     (1) BALC followed by any conditional compact branch
     (2) BALC followed by BALC

     Any other combinations of compact branches will incur performance
     penalty.  Inserting a no-op only costs space as the dispatch unit will
     disregard the nop.  */
  else if (TUNE_P6600 && TARGET_CB_MAYBE && !optimize_size
	   && ((mips_classify_branch_p6600 (after) == UC_BALC
		&& mips_classify_branch_p6600 (insn) == UC_OTHER)
	       || (mips_classify_branch_p6600 (insn) == UC_BALC
		   && mips_classify_branch_p6600 (after) == UC_OTHER)))
    nops = 1;
  else
    nops = 0;

  /* Insert the nops between this instruction and the previous one.
     Each new nop takes us further from the last hilo hazard.  */
  *hilo_delay += nops;

  /* Move to the next real instruction if we are inserting a NOP and this
     instruction is a call with debug information.  The reason being that
     we can't separate the call from the debug info.   */
  rtx_insn *real_after = after;
  if (real_after && nops && CALL_P (real_after))
    while (real_after
	   && (NOTE_P (NEXT_INSN (real_after))
	       || BARRIER_P (NEXT_INSN (real_after))))
      real_after = NEXT_INSN (real_after);

  while (nops-- > 0)
    emit_insn_after (gen_hazard_nop (), real_after);

  /* Set up the state for the next instruction.  */
  *hilo_delay += ninsns;
  *delayed_reg = 0;
  *fs_delay = false;
  if (INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
	/* For the P6600, flag some unconditional branches as having a
	   pseudo-forbidden slot.  This will cause additional nop insertion
	   or SEQUENCE breaking as required.  This is for performance
	   reasons not correctness.  */
	if (TUNE_P6600
	    && !optimize_size
	    && TARGET_CB_MAYBE
	    && mips_classify_branch_p6600 (insn) == UC_OTHER)
	  *fs_delay = true;
	break;

      case HAZARD_FORBIDDEN_SLOT:
	if (TARGET_CB_MAYBE)
	  *fs_delay = true;
	break;

      case HAZARD_HILO:
	/* INSN is an MFLO/MFHI; restart the hilo distance count.  */
	*hilo_delay = 0;
	break;

      case HAZARD_DELAY:
	/* The register set by INSN must not be read by the next
	   instruction.  */
	set = single_set (insn);
	gcc_assert (set);
	*delayed_reg = SET_DEST (set);
	break;
      }
}
18887 
18888 /* A SEQUENCE is breakable iff the branch inside it has a compact form
18889    and the target has compact branches.  */
18890 
18891 static bool
mips_breakable_sequence_p(rtx_insn * insn)18892 mips_breakable_sequence_p (rtx_insn *insn)
18893 {
18894   return (insn && GET_CODE (PATTERN (insn)) == SEQUENCE
18895 	  && TARGET_CB_MAYBE
18896 	  && get_attr_compact_form (SEQ_BEGIN (insn)) != COMPACT_FORM_NEVER);
18897 }
18898 
18899 /* Remove a SEQUENCE and replace it with the delay slot instruction
18900    followed by the branch and return the instruction in the delay slot.
18901    Return the first of the two new instructions.
18902    Subroutine of mips_reorg_process_insns.  */
18903 
static rtx_insn *
mips_break_sequence (rtx_insn *insn)
{
  rtx_insn *before = PREV_INSN (insn);
  rtx_insn *branch = SEQ_BEGIN (insn);
  rtx_insn *ds = SEQ_END (insn);
  /* Drop the SEQUENCE wrapper and re-chain the delay-slot instruction
     DS ahead of the branch: BEFORE -> DS -> BRANCH.  */
  remove_insn (insn);
  add_insn_after (ds, before, NULL);
  add_insn_after (branch, ds, NULL);
  return ds;
}
18915 
18916 /* Go through the instruction stream and insert nops where necessary.
18917    Also delete any high-part relocations whose partnering low parts
18918    are now all dead.  See if the whole function can then be put into
18919    .set noreorder and .set nomacro.  */
18920 
static void
mips_reorg_process_insns (void)
{
  rtx_insn *insn, *last_insn, *subinsn, *next_insn;
  rtx lo_reg, delayed_reg;
  int hilo_delay;
  bool fs_delay;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Recalculate instruction lengths without taking nops into account.  */
  cfun->machine->ignore_hazard_length_p = true;
  shorten_branches (get_insns ());

  cfun->machine->all_noreorder_p = true;

  /* We don't track MIPS16 PC-relative offsets closely enough to make
     a good job of "set .noreorder" code in MIPS16 mode.  */
  if (TARGET_MIPS16)
    cfun->machine->all_noreorder_p = false;

  /* Code that doesn't use explicit relocs can't be ".set nomacro".  */
  if (!TARGET_EXPLICIT_RELOCS)
    cfun->machine->all_noreorder_p = false;

  /* Profiled functions can't be all noreorder because the profiler
     support uses assembler macros.  */
  if (crtl->profile)
    cfun->machine->all_noreorder_p = false;

  /* Code compiled with -mfix-vr4120, -mfix-r5900, -mfix-rm7000 or
     -mfix-24k can't be all noreorder because we rely on the assembler
     to work around some errata.  The R5900 target has several bugs.  */
  if (TARGET_FIX_VR4120
      || TARGET_FIX_RM7000
      || TARGET_FIX_24K
      || TARGET_FIX_R5900)
    cfun->machine->all_noreorder_p = false;

  /* The same is true for -mfix-vr4130 if we might generate MFLO or
     MFHI instructions.  Note that we avoid using MFLO and MFHI if
     the VR4130 MACC and DMACC instructions are available instead;
     see the *mfhilo_{si,di}_macc patterns.  */
  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
    cfun->machine->all_noreorder_p = false;

  /* Hash table mapping high-part relocations to the LO_SUMs that use
     them; 37 is just the initial bucket count.  */
  mips_offset_table htab (37);

  /* Make a first pass over the instructions, recording all the LO_SUMs.  */
  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
    FOR_EACH_SUBINSN (subinsn, insn)
      if (USEFUL_INSN_P (subinsn))
	{
	  /* NOTE(review): this reads PATTERN (insn), not
	     PATTERN (subinsn).  For a SEQUENCE, asm_noperands on the
	     whole pattern returns a negative value, so each subinsn
	     falls through to the else branch below — confirm this is
	     the intended handling of asms inside delay slots.  */
	  rtx body = PATTERN (insn);
	  int noperands = asm_noperands (body);
	  if (noperands >= 0)
	    {
	      /* For inline asm, only scan operands that the template
		 actually references; unreferenced operands must not
		 keep a high-part relocation alive.  */
	      rtx *ops = XALLOCAVEC (rtx, noperands);
	      bool *used = XALLOCAVEC (bool, noperands);
	      const char *string = decode_asm_operands (body, ops, NULL, NULL,
							NULL, NULL);
	      get_referenced_operands (string, used, noperands);
	      for (int i = 0; i < noperands; ++i)
		if (used[i])
		  mips_record_lo_sums (ops[i], &htab);
	    }
	  else
	    mips_record_lo_sums (PATTERN (subinsn), &htab);
	}

  /* State for the hazard scan: no pending HI/LO or load delay yet.  */
  last_insn = 0;
  hilo_delay = 2;
  delayed_reg = 0;
  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
  fs_delay = false;

  /* Make a second pass over the instructions.  Delete orphaned
     high-part relocations or turn them into NOPs.  Avoid hazards
     by inserting NOPs.  */
  for (insn = get_insns (); insn != 0; insn = next_insn)
    {
      next_insn = NEXT_INSN (insn);
      if (USEFUL_INSN_P (insn))
	{
	  if (GET_CODE (PATTERN (insn)) == SEQUENCE)
	    {
	      rtx_insn *next_active = next_active_insn (insn);
	      /* Undo delay slots to avoid bubbles if the next instruction can
		 be placed in a forbidden slot or the cost of adding an
		 explicit NOP in a forbidden slot is OK and if the SEQUENCE is
		 safely breakable.  */
	      if (TARGET_CB_MAYBE
		  && mips_breakable_sequence_p (insn)
		  && INSN_P (SEQ_BEGIN (insn))
		  && INSN_P (SEQ_END (insn))
		  && ((next_active
		       && INSN_P (next_active)
		       && GET_CODE (PATTERN (next_active)) != SEQUENCE
		       && get_attr_can_delay (next_active) == CAN_DELAY_YES)
		      || !optimize_size))
		{
		  /* To hide a potential pipeline bubble, if we scan backwards
		     from the current SEQUENCE and find that there is a load
		     of a value that is used in the CTI and there are no
		     dependencies between the CTI and instruction in the delay
		     slot, break the sequence so the load delay is hidden.  */
		  HARD_REG_SET uses;
		  CLEAR_HARD_REG_SET (uses);
		  note_uses (&PATTERN (SEQ_BEGIN (insn)), record_hard_reg_uses,
			     &uses);
		  HARD_REG_SET delay_sets;
		  CLEAR_HARD_REG_SET (delay_sets);
		  note_stores (PATTERN (SEQ_END (insn)), record_hard_reg_sets,
			       &delay_sets);

		  rtx_insn *prev = prev_active_insn (insn);
		  if (prev
		      && GET_CODE (PATTERN (prev)) == SET
		      && MEM_P (SET_SRC (PATTERN (prev))))
		    {
		      HARD_REG_SET sets;
		      CLEAR_HARD_REG_SET (sets);
		      note_stores (PATTERN (prev), record_hard_reg_sets,
				   &sets);

		      /* Re-order if safe.  The delay slot must not write a
			 register the branch uses, and the preceding load
			 must feed the branch for breaking to be a win.  */
		      if (!hard_reg_set_intersect_p (delay_sets, uses)
			  && hard_reg_set_intersect_p (uses, sets))
			{
			  next_insn = mips_break_sequence (insn);
			  /* Need to process the hazards of the newly
			     introduced instructions.  */
			  continue;
			}
		    }

		  /* If we find an orphaned high-part relocation in a delay
		     slot then we can convert to a compact branch and get
		     the orphaned high part deleted.  */
		  if (mips_orphaned_high_part_p (&htab, SEQ_END (insn)))
		    {
		      next_insn = mips_break_sequence (insn);
		      /* Need to process the hazards of the newly
			 introduced instructions.  */
		      continue;
		    }
		}

	      /* If we find an orphaned high-part relocation in a delay
		 slot, it's easier to turn that instruction into a NOP than
		 to delete it.  The delay slot will be a NOP either way.  */
	      FOR_EACH_SUBINSN (subinsn, insn)
		if (INSN_P (subinsn))
		  {
		    if (mips_orphaned_high_part_p (&htab, subinsn))
		      {
			PATTERN (subinsn) = gen_nop ();
			INSN_CODE (subinsn) = CODE_FOR_nop;
		      }
		    mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
				       &delayed_reg, lo_reg, &fs_delay);
		  }
	      last_insn = insn;
	    }
	  else
	    {
	      /* INSN is a single instruction.  Delete it if it's an
		 orphaned high-part relocation.  */
	      if (mips_orphaned_high_part_p (&htab, insn))
		delete_insn (insn);
	      /* Also delete cache barriers if the last instruction
		 was an annulled branch.  INSN will not be speculatively
		 executed.  */
	      else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
		       && last_insn
		       && JUMP_P (SEQ_BEGIN (last_insn))
		       && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
		delete_insn (insn);
	      else
		{
		  mips_avoid_hazard (last_insn, insn, &hilo_delay,
				     &delayed_reg, lo_reg, &fs_delay);
		  /* When a compact branch introduces a forbidden slot hazard
		     and the next useful instruction is a SEQUENCE of a jump
		     and a non-nop instruction in the delay slot, remove the
		     sequence and replace it with the delay slot instruction
		     then the jump to clear the forbidden slot hazard.

		     For the P6600, this optimisation solves the performance
		     penalty associated with BALC followed by a delay slot
		     branch.  We do not set fs_delay as we do not want
		     the full logic of a forbidden slot; the penalty exists
		     only against branches not the full class of forbidden
		     slot instructions.  */

		  if (fs_delay || (TUNE_P6600
				   && TARGET_CB_MAYBE
				   && mips_classify_branch_p6600 (insn)
				      == UC_BALC))
		    {
		      /* Search onwards from the current position looking for
			 a SEQUENCE.  We are looking for pipeline hazards here
			 and do not need to worry about labels or barriers as
			 the optimization only undoes delay slot filling which
			 only affects the order of the branch and its delay
			 slot.  */
		      rtx_insn *next = next_active_insn (insn);
		      if (next
			  && USEFUL_INSN_P (next)
			  && GET_CODE (PATTERN (next)) == SEQUENCE
			  && mips_breakable_sequence_p (next))
			{
			  last_insn = insn;
			  next_insn = mips_break_sequence (next);
			  /* Need to process the hazards of the newly
			     introduced instructions.  */
			  continue;
			}
		    }
		  last_insn = insn;
		}
	    }
	}
    }
}
19147 
19148 /* Return true if the function has a long branch instruction.  */
19149 
19150 static bool
mips_has_long_branch_p(void)19151 mips_has_long_branch_p (void)
19152 {
19153   rtx_insn *insn, *subinsn;
19154   int normal_length;
19155 
19156   /* We need up-to-date instruction lengths.  */
19157   shorten_branches (get_insns ());
19158 
19159   /* Look for a branch that is longer than normal.  The normal length for
19160      non-MIPS16 branches is 8, because the length includes the delay slot.
19161      It is 4 for MIPS16, because MIPS16 branches are extended instructions,
19162      but they have no delay slot.  */
19163   normal_length = (TARGET_MIPS16 ? 4 : 8);
19164   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
19165     FOR_EACH_SUBINSN (subinsn, insn)
19166       if (JUMP_P (subinsn)
19167 	  && get_attr_length (subinsn) > normal_length
19168 	  && (any_condjump_p (subinsn) || any_uncondjump_p (subinsn)))
19169 	return true;
19170 
19171   return false;
19172 }
19173 
19174 /* If we are using a GOT, but have not decided to use a global pointer yet,
19175    see whether we need one to implement long branches.  Convert the ghost
19176    global-pointer instructions into real ones if so.  */
19177 
19178 static bool
mips_expand_ghost_gp_insns(void)19179 mips_expand_ghost_gp_insns (void)
19180 {
19181   /* Quick exit if we already know that we will or won't need a
19182      global pointer.  */
19183   if (!TARGET_USE_GOT
19184       || cfun->machine->global_pointer == INVALID_REGNUM
19185       || mips_must_initialize_gp_p ())
19186     return false;
19187 
19188   /* Run a full check for long branches.  */
19189   if (!mips_has_long_branch_p ())
19190     return false;
19191 
19192   /* We've now established that we need $gp.  */
19193   cfun->machine->must_initialize_gp_p = true;
19194   split_all_insns_noflow ();
19195 
19196   return true;
19197 }
19198 
19199 /* Subroutine of mips_reorg to manage passes that require DF.  */
19200 
static void
mips_df_reorg (void)
{
  /* Create def-use chains.  EQ_NOTES lets DF take REG_EQUAL/REG_EQUIV
     notes into account when building the chains.  */
  df_set_flags (DF_EQ_NOTES);
  df_chain_add_problem (DF_UD_CHAIN);
  df_analyze ();

  /* Annotate PIC call sequences so later passes can relax them.  */
  if (TARGET_RELAX_PIC_CALLS)
    mips_annotate_pic_calls ();

  /* Insert R10K speculative-store cache barriers when requested.  */
  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
    r10k_insert_cache_barriers ();

  /* Tear down the DF problems; we don't need the solutions verified.  */
  df_finish_pass (false);
}
19217 
19218 /* Emit code to load LABEL_REF SRC into MIPS16 register DEST.  This is
19219    called very late in mips_reorg, but the caller is required to run
19220    mips16_lay_out_constants on the result.  */
19221 
19222 static void
mips16_load_branch_target(rtx dest,rtx src)19223 mips16_load_branch_target (rtx dest, rtx src)
19224 {
19225   if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
19226     {
19227       rtx page, low;
19228 
19229       if (mips_cfun_has_cprestore_slot_p ())
19230 	mips_emit_move (dest, mips_cprestore_slot (dest, true));
19231       else
19232 	mips_emit_move (dest, pic_offset_table_rtx);
19233       page = mips_unspec_address (src, SYMBOL_GOTOFF_PAGE);
19234       low = mips_unspec_address (src, SYMBOL_GOT_PAGE_OFST);
19235       emit_insn (gen_rtx_SET (dest,
19236 			      PMODE_INSN (gen_unspec_got, (dest, page))));
19237       emit_insn (gen_rtx_SET (dest, gen_rtx_LO_SUM (Pmode, dest, low)));
19238     }
19239   else
19240     {
19241       src = mips_unspec_address (src, SYMBOL_ABSOLUTE);
19242       mips_emit_move (dest, src);
19243     }
19244 }
19245 
19246 /* If we're compiling a MIPS16 function, look for and split any long branches.
19247    This must be called after all other instruction modifications in
19248    mips_reorg.  */
19249 
static void
mips16_split_long_branches (void)
{
  bool something_changed;

  if (!TARGET_MIPS16)
    return;

  /* Loop until the alignments for all targets are sufficient.  Each
     split can lengthen the function and push other branches out of
     range, so iterate to a fixed point.  */
  do
    {
      rtx_insn *insn;
      rtx_jump_insn *jump_insn;

      shorten_branches (get_insns ());
      something_changed = false;
      /* A MIPS16 conditional or unconditional branch longer than 4
	 bytes is out of range and must be rewritten as an indirect
	 jump through a register.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	if ((jump_insn = dyn_cast <rtx_jump_insn *> (insn))
	    && get_attr_length (jump_insn) > 4
	    && (any_condjump_p (jump_insn) || any_uncondjump_p (jump_insn)))
	  {
	    rtx old_label, temp, saved_temp;
	    rtx_code_label *new_label;
	    rtx target;
	    rtx_insn *jump, *jump_sequence;

	    start_sequence ();

	    /* Free up a MIPS16 register by saving it in $1.  */
	    saved_temp = gen_rtx_REG (Pmode, AT_REGNUM);
	    temp = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
	    emit_move_insn (saved_temp, temp);

	    /* Load the branch target into TEMP.  */
	    old_label = JUMP_LABEL (jump_insn);
	    target = gen_rtx_LABEL_REF (Pmode, old_label);
	    mips16_load_branch_target (temp, target);

	    /* Jump to the target and restore the register's
	       original value.  */
	    jump = emit_jump_insn (PMODE_INSN (gen_indirect_jump_and_restore,
					       (temp, temp, saved_temp)));
	    JUMP_LABEL (jump) = old_label;
	    LABEL_NUSES (old_label)++;

	    /* Rewrite any symbolic references that are supposed to use
	       a PC-relative constant pool.  */
	    mips16_lay_out_constants (false);

	    if (simplejump_p (jump_insn))
	      /* We're going to replace INSN with a longer form.  */
	      new_label = NULL;
	    else
	      {
		/* Create a branch-around label for the original
		   instruction.  */
		new_label = gen_label_rtx ();
		emit_label (new_label);
	      }

	    jump_sequence = get_insns ();
	    end_sequence ();

	    /* Splice the long-jump sequence in after the original
	       branch; a conditional branch is then inverted to skip
	       it, an unconditional one is simply deleted.  */
	    emit_insn_after (jump_sequence, jump_insn);
	    if (new_label)
	      invert_jump (jump_insn, new_label, false);
	    else
	      delete_insn (jump_insn);
	    something_changed = true;
	  }
    }
  while (something_changed);
}
19323 
19324 /* Insert a `.insn' assembly pseudo-op after any labels followed by
19325    a MIPS16 constant pool or no insn at all.  This is needed so that
19326    targets that have been optimized away are still marked as code
19327    and therefore branches that remained and point to them are known
19328    to retain the ISA mode and as such can be successfully assembled.  */
19329 
19330 static void
mips_insert_insn_pseudos(void)19331 mips_insert_insn_pseudos (void)
19332 {
19333   bool insn_pseudo_needed = TRUE;
19334   rtx_insn *insn;
19335 
19336   for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
19337     switch (GET_CODE (insn))
19338       {
19339       case INSN:
19340 	if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
19341 	    && XINT (PATTERN (insn), 1) == UNSPEC_CONSTTABLE)
19342 	  {
19343 	    insn_pseudo_needed = TRUE;
19344 	    break;
19345 	  }
19346 	/* Fall through.  */
19347       case JUMP_INSN:
19348       case CALL_INSN:
19349       case JUMP_TABLE_DATA:
19350 	insn_pseudo_needed = FALSE;
19351 	break;
19352       case CODE_LABEL:
19353 	if (insn_pseudo_needed)
19354 	  {
19355 	    emit_insn_after (gen_insn_pseudo (), insn);
19356 	    insn_pseudo_needed = FALSE;
19357 	  }
19358 	break;
19359       default:
19360 	break;
19361       }
19362 }
19363 
19364 /* Implement TARGET_MACHINE_DEPENDENT_REORG.  */
19365 
static void
mips_reorg (void)
{
  /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.  Also during
     insn splitting in mips16_lay_out_constants, DF insn info is only kept up
     to date if the CFG is available.  */
  if (mips_cfg_in_reorg ())
    compute_bb_for_insn ();
  mips16_lay_out_constants (true);
  /* mips_cfg_in_reorg is queried again rather than cached: the DF-based
     passes and the CFG teardown must bracket exactly the same state.  */
  if (mips_cfg_in_reorg ())
    {
      mips_df_reorg ();
      free_bb_for_insn ();
    }
}
19381 
19382 /* We use a machine specific pass to do a second machine dependent reorg
19383    pass after delay branch scheduling.  */
19384 
19385 static unsigned int
mips_machine_reorg2(void)19386 mips_machine_reorg2 (void)
19387 {
19388   mips_reorg_process_insns ();
19389   if (!TARGET_MIPS16
19390       && TARGET_EXPLICIT_RELOCS
19391       && TUNE_MIPS4130
19392       && TARGET_VR4130_ALIGN)
19393     vr4130_align_insns ();
19394   if (mips_expand_ghost_gp_insns ())
19395     /* The expansion could invalidate some of the VR4130 alignment
19396        optimizations, but this should be an extremely rare case anyhow.  */
19397     mips_reorg_process_insns ();
19398   mips16_split_long_branches ();
19399   mips_insert_insn_pseudos ();
19400   return 0;
19401 }
19402 
namespace {

/* Metadata for the second machine-dependent reorg pass; it requires
   and provides no properties and schedules no TODO actions.  */
const pass_data pass_data_mips_machine_reorg2 =
{
  RTL_PASS, /* type */
  "mach2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MACH_DEP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass wrapper that runs mips_machine_reorg2 after delayed-branch
   scheduling.  */
class pass_mips_machine_reorg2 : public rtl_opt_pass
{
public:
  pass_mips_machine_reorg2(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_mips_machine_reorg2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return mips_machine_reorg2 (); }

}; // class pass_mips_machine_reorg2

} // anon namespace
19431 
/* Factory used by the pass manager to instantiate the mach2 pass.  */

rtl_opt_pass *
make_pass_mips_machine_reorg2 (gcc::context *ctxt)
{
  return new pass_mips_machine_reorg2 (ctxt);
}
19437 
19438 
19439 /* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
19440    in order to avoid duplicating too much logic from elsewhere.  */
19441 
static void
mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_rtx, temp1, temp2, fnaddr;
  rtx_insn *insn;
  bool use_sibcall_p;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Determine if we can use a sibcall to call FUNCTION directly.  */
  fnaddr = XEXP (DECL_RTL (function), 0);
  use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
		   && const_call_insn_operand (fnaddr, Pmode));

  /* Determine if we need to load FNADDR from the GOT.  */
  if (!use_sibcall_p
      && (mips_got_symbol_type_p
	  (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
    {
      /* Pick a global pointer.  Use a call-clobbered register if
	 TARGET_CALL_SAVED_GP.  NOTE(review): hard register 15 is used
	 as the call-clobbered choice here — confirm against the MIPS
	 register numbering for the current ABI.  */
      cfun->machine->global_pointer
	= TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
      cfun->machine->must_initialize_gp_p = true;
      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

      /* Set up the global pointer for n32 or n64 abicalls.  */
      mips_emit_loadgp ();
    }

  /* We need two temporary registers in some cases.  */
  temp1 = gen_rtx_REG (Pmode, 2);
  temp2 = gen_rtx_REG (Pmode, 3);

  /* Find out which register contains the "this" pointer.  The hidden
     aggregate-return pointer, if any, occupies the first argument slot
     and shifts "this" to the second.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Add DELTA to THIS_RTX.  Large deltas must be loaded into a
     temporary first because they don't fit an immediate field.  */
  if (delta != 0)
    {
      rtx offset = GEN_INT (delta);
      if (!SMALL_OPERAND (delta))
	{
	  mips_emit_move (temp1, offset);
	  offset = temp1;
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
    }

  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
  if (vcall_offset != 0)
    {
      rtx addr;

      /* Set TEMP1 to *THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));

      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
      addr = mips_add_offset (temp2, temp1, vcall_offset);

      /* Load the offset and add it to THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  if (use_sibcall_p)
    {
      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* This is messy.  GAS treats "la $25,foo" as part of a call
	 sequence and may allow a global "foo" to be lazily bound.
	 The general move patterns therefore reject this combination.

	 In this context, lazy binding would actually be OK
	 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
	 TARGET_CALL_SAVED_GP; see mips_load_call_address.
	 We must therefore load the address via a temporary
	 register if mips_dangerous_for_la25_p.

	 If we jump to the temporary register rather than $25,
	 the assembler can use the move insn to fill the jump's
	 delay slot.

	 We can use the same technique for MIPS16 code, where $25
	 is not a valid JR register.  */
      if (TARGET_USE_PIC_FN_ADDR_REG
	  && !TARGET_MIPS16
	  && !mips_dangerous_for_la25_p (fnaddr))
	temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);

      if (TARGET_USE_PIC_FN_ADDR_REG
	  && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
	mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
      emit_jump_insn (gen_indirect_jump (temp1));
    }

  /* Run just enough of rest_of_compilation.  This sequence was
     "borrowed" from alpha.c.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  mips16_lay_out_constants (true);
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  Note that final_end_function resets
     the global pointer for us.  */
  reload_completed = 0;
}
19567 
19568 
19569 /* The last argument passed to mips_set_compression_mode,
19570    or negative if the function hasn't been called yet.  */
19571 static unsigned int old_compression_mode = -1;
19572 
19573 /* Set up the target-dependent global state for ISA mode COMPRESSION_MODE,
19574    which is either MASK_MIPS16 or MASK_MICROMIPS.  */
19575 
static void
mips_set_compression_mode (unsigned int compression_mode)
{

  /* Nothing to do if the mode hasn't changed since the last call.  */
  if (compression_mode == old_compression_mode)
    return;

  /* Restore base settings of various flags.  */
  target_flags = mips_base_target_flags;
  flag_schedule_insns = mips_base_schedule_insns;
  flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
  flag_move_loop_invariants = mips_base_move_loop_invariants;
  str_align_loops = mips_base_align_loops;
  str_align_jumps = mips_base_align_jumps;
  str_align_functions = mips_base_align_functions;
  target_flags &= ~(MASK_MIPS16 | MASK_MICROMIPS);
  target_flags |= compression_mode;

  if (compression_mode & MASK_MIPS16)
    {
      /* Switch to MIPS16 mode.  */
      target_flags |= MASK_MIPS16;

      /* Turn off SYNCI if it was on, MIPS16 doesn't support it.  */
      target_flags &= ~MASK_SYNCI;

      /* Don't run the scheduler before reload, since it tends to
         increase register pressure.  */
      flag_schedule_insns = 0;

      /* Don't do hot/cold partitioning.  mips16_lay_out_constants expects
	 the whole function to be in a single section.  */
      flag_reorder_blocks_and_partition = 0;

      /* Don't move loop invariants, because it tends to increase
	 register pressure.  It also introduces an extra move in cases
	 where the constant is the first operand in a two-operand binary
	 instruction, or when it forms a register argument to a function
	 call.  */
      flag_move_loop_invariants = 0;

      target_flags |= MASK_EXPLICIT_RELOCS;

      /* Experiments suggest we get the best overall section-anchor
	 results from using the range of an unextended LW or SW.  Code
	 that makes heavy use of byte or short accesses can do better
	 with ranges of 0...31 and 0...63 respectively, but most code is
	 sensitive to the range of LW and SW instead.  */
      targetm.min_anchor_offset = 0;
      targetm.max_anchor_offset = 127;

      targetm.const_anchor = 0;

      /* MIPS16 has no BAL instruction.  */
      target_flags &= ~MASK_RELAX_PIC_CALLS;

      /* The R4000 errata don't apply to any known MIPS16 cores.
	 It's simpler to make the R4000 fixes and MIPS16 mode
	 mutually exclusive.  */
      target_flags &= ~MASK_FIX_R4000;

      if (flag_pic && !TARGET_OLDABI)
	sorry ("MIPS16 PIC for ABIs other than o32 and o64");

      if (TARGET_XGOT)
	sorry ("MIPS16 %<-mxgot%> code");

      if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
	sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");

      if (TARGET_MSA)
	sorry ("MSA MIPS16 code");
    }
  else
    {
      /* Switch to microMIPS or the standard encoding.  */

      if (TARGET_MICROMIPS)
	/* Avoid branch likely.  */
	target_flags &= ~MASK_BRANCHLIKELY;

      /* Provide default values for align_* for 64-bit targets.  */
      if (TARGET_64BIT)
	{
	  if (flag_align_loops && !str_align_loops)
	    str_align_loops = "8";
	  if (flag_align_jumps && !str_align_jumps)
	    str_align_jumps = "8";
	  if (flag_align_functions && !str_align_functions)
	    str_align_functions = "8";
	}

      targetm.min_anchor_offset = -32768;
      targetm.max_anchor_offset = 32767;

      targetm.const_anchor = 0x8000;
    }

  /* (Re)initialize MIPS target internals for new ISA.  */
  mips_init_relocs ();

  /* Swap in the target_globals snapshot matching the new mode, creating
     it lazily on first use; the plain-encoding case restores the
     defaults.  */
  if (compression_mode & MASK_MIPS16)
    {
      if (!mips16_globals)
	mips16_globals = save_target_globals_default_opts ();
      else
	restore_target_globals (mips16_globals);
    }
  else if (compression_mode & MASK_MICROMIPS)
    {
      if (!micromips_globals)
	micromips_globals = save_target_globals_default_opts ();
      else
	restore_target_globals (micromips_globals);
    }
  else
    restore_target_globals (&default_target_globals);

  /* Remember the mode so the next call can short-circuit.  */
  old_compression_mode = compression_mode;
}
19696 
19697 /* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current
19698    function should use the MIPS16 or microMIPS ISA and switch modes
19699    accordingly.  */
19700 
static void
mips_set_current_function (tree fndecl)
{
  /* Switch to whichever compression mode (MIPS16, microMIPS or none)
     FNDECL's attributes and the command line select.  */
  mips_set_compression_mode (mips_get_compress_mode (fndecl));
}
19706 
19707 /* Allocate a chunk of memory for per-function machine-dependent data.  */
19708 
static struct machine_function *
mips_init_machine_status (void)
{
  /* GC-allocated and zero-initialized, so all fields start as 0/false.  */
  return ggc_cleared_alloc<machine_function> ();
}
19714 
19715 /* Return the processor associated with the given ISA level, or null
19716    if the ISA isn't valid.  */
19717 
19718 static const struct mips_cpu_info *
mips_cpu_info_from_isa(int isa)19719 mips_cpu_info_from_isa (int isa)
19720 {
19721   unsigned int i;
19722 
19723   for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
19724     if (mips_cpu_info_table[i].isa == isa)
19725       return mips_cpu_info_table + i;
19726 
19727   return NULL;
19728 }
19729 
19730 /* Return a mips_cpu_info entry determined by an option valued
19731    OPT.  */
19732 
19733 static const struct mips_cpu_info *
mips_cpu_info_from_opt(int opt)19734 mips_cpu_info_from_opt (int opt)
19735 {
19736   switch (opt)
19737     {
19738     case MIPS_ARCH_OPTION_FROM_ABI:
19739       /* 'from-abi' selects the most compatible architecture for the
19740 	 given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
19741 	 ABIs.  For the EABIs, we have to decide whether we're using
19742 	 the 32-bit or 64-bit version.  */
19743       return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
19744 				     : ABI_NEEDS_64BIT_REGS ? 3
19745 				     : (TARGET_64BIT ? 3 : 1));
19746 
19747     case MIPS_ARCH_OPTION_NATIVE:
19748       gcc_unreachable ();
19749 
19750     default:
19751       return &mips_cpu_info_table[opt];
19752     }
19753 }
19754 
19755 /* Return a default mips_cpu_info entry, given that no -march= option
19756    was explicitly specified.  */
19757 
static const struct mips_cpu_info *
mips_default_arch (void)
{
#if defined (MIPS_CPU_STRING_DEFAULT)
  /* A default CPU name was configured in; it must exist in the table.  */
  unsigned int i;
  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (strcmp (mips_cpu_info_table[i].name, MIPS_CPU_STRING_DEFAULT) == 0)
      return mips_cpu_info_table + i;
  gcc_unreachable ();
#elif defined (MIPS_ISA_DEFAULT)
  /* No default CPU, but a default ISA level was configured in.  */
  return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT);
#else
  /* 'from-abi' makes a good default: you get whatever the ABI
     requires.  */
  return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI);
#endif
}
19775 
19776 /* Set up globals to generate code for the ISA or processor
19777    described by INFO.  */
19778 
19779 static void
mips_set_architecture(const struct mips_cpu_info * info)19780 mips_set_architecture (const struct mips_cpu_info *info)
19781 {
19782   if (info != 0)
19783     {
19784       mips_arch_info = info;
19785       mips_arch = info->cpu;
19786       mips_isa = info->isa;
19787       if (mips_isa < 32)
19788 	mips_isa_rev = 0;
19789       else
19790 	mips_isa_rev = (mips_isa & 31) + 1;
19791     }
19792 }
19793 
19794 /* Likewise for tuning.  */
19795 
19796 static void
mips_set_tune(const struct mips_cpu_info * info)19797 mips_set_tune (const struct mips_cpu_info *info)
19798 {
19799   if (info != 0)
19800     {
19801       mips_tune_info = info;
19802       mips_tune = info->cpu;
19803     }
19804 }
19805 
/* Implement TARGET_OPTION_OVERRIDE.  Validate the combination of
   command-line options, infer any settings that were left at their
   defaults, and initialize the derived global tables.  The checks are
   order-dependent: later decisions (register sizes, FP register
   sizes, -mlong*) rely on flags established by earlier ones.  */

static void
mips_option_override (void)
{
  /* Scratch variables for the register-table loops near the end.  */
  int i, start, regno, mode;

  /* Resolve an explicit -mipsN option to its table entry first.  */
  if (global_options_set.x_mips_isa_option)
    mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* MIPS16 and microMIPS cannot coexist.  */
  if (TARGET_MICROMIPS && TARGET_MIPS16)
    error ("unsupported combination: %s", "-mips16 -mmicromips");

  /* Prohibit Paired-Single and MSA combination.  This is software restriction
     rather than architectural.  */
  if (ISA_HAS_MSA && TARGET_PAIRED_SINGLE_FLOAT)
    error ("unsupported combination: %s", "-mmsa -mpaired-single");

  /* Save the base compression state and process flags as though we
     were generating uncompressed code.  */
  mips_base_compression_flags = TARGET_COMPRESSION;
  target_flags &= ~TARGET_COMPRESSION;

  /* -mno-float overrides -mhard-float and -msoft-float.  */
  if (TARGET_NO_FLOAT)
    {
      target_flags |= MASK_SOFT_FLOAT_ABI;
      target_flags_explicit |= MASK_SOFT_FLOAT_ABI;
    }

  if (TARGET_FLIP_MIPS16)
    TARGET_INTERLINK_COMPRESSED = 1;

  /* Set the small data limit.  */
  mips_small_data_threshold = (global_options_set.x_g_switch_value
			       ? g_switch_value
			       : MIPS_DEFAULT_GVALUE);

  /* The following code determines the architecture and register size.
     Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
     The GAS and GCC code should be kept in sync as much as possible.  */

  if (global_options_set.x_mips_arch_option)
    mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option));

  /* An explicit -mipsN either supplies the architecture or must agree
     with the one chosen by -march.  */
  if (mips_isa_option_info != 0)
    {
      if (mips_arch_info == 0)
	mips_set_architecture (mips_isa_option_info);
      else if (mips_arch_info->isa != mips_isa_option_info->isa)
	error ("%<-%s%> conflicts with the other architecture options, "
	       "which specify a %s processor",
	       mips_isa_option_info->name,
	       mips_cpu_info_from_isa (mips_arch_info->isa)->name);
    }

  if (mips_arch_info == 0)
    mips_set_architecture (mips_default_arch ());

  if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
    error ("%<-march=%s%> is not compatible with the selected ABI",
	   mips_arch_info->name);

  /* Optimize for mips_arch, unless -mtune selects a different processor.  */
  if (global_options_set.x_mips_tune_option)
    mips_set_tune (mips_cpu_info_from_opt (mips_tune_option));

  if (mips_tune_info == 0)
    mips_set_tune (mips_arch_info);

  if ((target_flags_explicit & MASK_64BIT) != 0)
    {
      /* The user specified the size of the integer registers.  Make sure
	 it agrees with the ABI and ISA.  */
      if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
	error ("%<-mgp64%> used with a 32-bit processor");
      else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
	error ("%<-mgp32%> used with a 64-bit ABI");
      else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
	error ("%<-mgp64%> used with a 32-bit ABI");
    }
  else
    {
      /* Infer the integer register size from the ABI and processor.
	 Restrict ourselves to 32-bit registers if that's all the
	 processor has, or if the ABI cannot handle 64-bit registers.  */
      if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
	target_flags &= ~MASK_64BIT;
      else
	target_flags |= MASK_64BIT;
    }

  if ((target_flags_explicit & MASK_FLOAT64) != 0)
    {
      /* The user specified the FP register size; diagnose combinations
	 the hardware or ABI cannot support.  */
      if (mips_isa_rev >= 6 && !TARGET_FLOAT64)
	error ("the %qs architecture does not support %<-mfp32%>",
	       mips_arch_info->name);
      else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
	error ("unsupported combination: %s", "-mfp64 -msingle-float");
      else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
	error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
      else if (!TARGET_64BIT && TARGET_FLOAT64)
	{
	  if (!ISA_HAS_MXHC1)
	    error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
		   " the target supports the mfhc1 and mthc1 instructions");
	  else if (mips_abi != ABI_32)
	    error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
		   " the o32 ABI");
	}
    }
  else
    {
      /* -msingle-float selects 32-bit float registers.  On r6 and later,
	 -mdouble-float selects 64-bit float registers, since the old paired
	 register model is not supported.  In other cases the float registers
	 should be the same size as the integer ones.  */
      if (mips_isa_rev >= 6 && TARGET_DOUBLE_FLOAT && !TARGET_FLOATXX)
	target_flags |= MASK_FLOAT64;
      else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
	target_flags |= MASK_FLOAT64;
      else if (mips_abi == ABI_32 && ISA_HAS_MSA && !TARGET_FLOATXX)
	target_flags |= MASK_FLOAT64;
      else
	target_flags &= ~MASK_FLOAT64;
    }

  if (mips_abi != ABI_32 && TARGET_FLOATXX)
    error ("%<-mfpxx%> can only be used with the o32 ABI");
  else if (TARGET_FLOAT64 && TARGET_FLOATXX)
    error ("unsupported combination: %s", "-mfp64 -mfpxx");
  else if (ISA_MIPS1 && !TARGET_FLOAT32)
    error ("%<-march=%s%> requires %<-mfp32%>", mips_arch_info->name);
  else if (TARGET_FLOATXX && !mips_lra_flag)
    error ("%<-mfpxx%> requires %<-mlra%>");

  /* End of code shared with GAS.  */

  /* The R5900 FPU only supports single precision.  */
  if (TARGET_MIPS5900 && TARGET_HARD_FLOAT_ABI && TARGET_DOUBLE_FLOAT)
    error ("unsupported combination: %s",
	   "-march=r5900 -mhard-float -mdouble-float");

  /* If a -mlong* option was given, check that it matches the ABI,
     otherwise infer the -mlong* setting from the other options.  */
  if ((target_flags_explicit & MASK_LONG64) != 0)
    {
      if (TARGET_LONG64)
	{
	  if (mips_abi == ABI_N32)
	    error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
	  else if (mips_abi == ABI_32)
	    error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
	  else if (mips_abi == ABI_O64 && TARGET_ABICALLS)
	    /* We have traditionally allowed non-abicalls code to use
	       an LP64 form of o64.  However, it would take a bit more
	       effort to support the combination of 32-bit GOT entries
	       and 64-bit pointers, so we treat the abicalls case as
	       an error.  */
	    error ("the combination of %qs and %qs is incompatible with %qs",
		   "-mabi=o64", "-mabicalls", "-mlong64");
	}
      else
	{
	  if (mips_abi == ABI_64)
	    error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
	}
    }
  else
    {
      /* By default, long is 64 bits exactly for the LP64 ABIs.  */
      if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
	target_flags |= MASK_LONG64;
      else
	target_flags &= ~MASK_LONG64;
    }

  if (!TARGET_OLDABI)
    flag_pcc_struct_return = 0;

  /* Decide which rtx_costs structure to use.  */
  if (optimize_size)
    mips_cost = &mips_rtx_cost_optimize_size;
  else
    mips_cost = &mips_rtx_cost_data[mips_tune];

  /* If the user hasn't specified a branch cost, use the processor's
     default.  */
  if (mips_branch_cost == 0)
    mips_branch_cost = mips_cost->branch_cost;

  /* If neither -mbranch-likely nor -mno-branch-likely was given
     on the command line, set MASK_BRANCHLIKELY based on the target
     architecture and tuning flags.  Annulled delay slots are a
     size win, so we only consider the processor-specific tuning
     for !optimize_size.  */
  if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
    {
      if (ISA_HAS_BRANCHLIKELY
	  && ((optimize_size
	       && (mips_tune_info->tune_flags
		   & PTF_AVOID_BRANCHLIKELY_SIZE) == 0)
	      || (!optimize_size
		  && optimize > 0
		  && (mips_tune_info->tune_flags
		      & PTF_AVOID_BRANCHLIKELY_SPEED) == 0)
	      || (mips_tune_info->tune_flags
		  & PTF_AVOID_BRANCHLIKELY_ALWAYS) == 0))
	target_flags |= MASK_BRANCHLIKELY;
      else
	target_flags &= ~MASK_BRANCHLIKELY;
    }
  else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
    warning (0, "the %qs architecture does not support branch-likely"
	     " instructions", mips_arch_info->name);

  /* If the user hasn't specified -mimadd or -mno-imadd set
     MASK_IMADD based on the target architecture and tuning
     flags.  */
  if ((target_flags_explicit & MASK_IMADD) == 0)
    {
      if (ISA_HAS_MADD_MSUB &&
          (mips_tune_info->tune_flags & PTF_AVOID_IMADD) == 0)
	target_flags |= MASK_IMADD;
      else
	target_flags &= ~MASK_IMADD;
    }
  else if (TARGET_IMADD && !ISA_HAS_MADD_MSUB)
    warning (0, "the %qs architecture does not support madd or msub"
	     " instructions", mips_arch_info->name);

  /* If neither -modd-spreg nor -mno-odd-spreg was given on the command
     line, set MASK_ODD_SPREG based on the ISA and ABI.  */
  if ((target_flags_explicit & MASK_ODD_SPREG) == 0)
    {
      /* Disable TARGET_ODD_SPREG when using the o32 FPXX ABI.  */
      if (!ISA_HAS_ODD_SPREG || TARGET_FLOATXX)
	target_flags &= ~MASK_ODD_SPREG;
      else
	target_flags |= MASK_ODD_SPREG;
    }
  else if (TARGET_ODD_SPREG && !ISA_HAS_ODD_SPREG)
    warning (0, "the %qs architecture does not support odd single-precision"
	     " registers", mips_arch_info->name);

  if (!TARGET_ODD_SPREG && TARGET_64BIT)
    {
      error ("unsupported combination: %s", "-mgp64 -mno-odd-spreg");
      /* Allow compilation to continue further even though invalid output
         will be produced.  */
      target_flags |= MASK_ODD_SPREG;
    }

  if (!ISA_HAS_COMPACT_BRANCHES && mips_cb == MIPS_CB_ALWAYS)
    {
      error ("unsupported combination: %qs%s %s",
	      mips_arch_info->name, TARGET_MICROMIPS ? " -mmicromips" : "",
	      "-mcompact-branches=always");
    }
  else if (!ISA_HAS_DELAY_SLOTS && mips_cb == MIPS_CB_NEVER)
    {
      error ("unsupported combination: %qs%s %s",
	      mips_arch_info->name, TARGET_MICROMIPS ? " -mmicromips" : "",
	      "-mcompact-branches=never");
    }

  /* Require explicit relocs for MIPS R6 onwards.  This enables simplification
     of the compact branch and jump support through the backend.  */
  if (!TARGET_EXPLICIT_RELOCS && mips_isa_rev >= 6)
    {
      error ("unsupported combination: %qs %s",
	     mips_arch_info->name, "-mno-explicit-relocs");
    }

  /* The effect of -mabicalls isn't defined for the EABI.  */
  if (mips_abi == ABI_EABI && TARGET_ABICALLS)
    {
      error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
      target_flags &= ~MASK_ABICALLS;
    }

  /* PIC requires -mabicalls.  */
  if (flag_pic)
    {
      if (mips_abi == ABI_EABI)
	error ("cannot generate position-independent code for %qs",
	       "-mabi=eabi");
      else if (!TARGET_ABICALLS)
	error ("position-independent code requires %qs", "-mabicalls");
    }

  if (TARGET_ABICALLS_PIC2)
    /* We need to set flag_pic for executables as well as DSOs
       because we may reference symbols that are not defined in
       the final executable.  (MIPS does not use things like
       copy relocs, for example.)

       There is a body of code that uses __PIC__ to distinguish
       between -mabicalls and -mno-abicalls code.  The non-__PIC__
       variant is usually appropriate for TARGET_ABICALLS_PIC0, as
       long as any indirect jumps use $25.  */
    flag_pic = 1;

  /* -mvr4130-align is a "speed over size" optimization: it usually produces
     faster code, but at the expense of more nops.  Enable it at -O3 and
     above.  */
  if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
    target_flags |= MASK_VR4130_ALIGN;

  /* Prefer a call to memcpy over inline code when optimizing for size,
     though see MOVE_RATIO in mips.h.  */
  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
    target_flags |= MASK_MEMCPY;

  /* If we have a nonzero small-data limit, check that the -mgpopt
     setting is consistent with the other target flags.  */
  if (mips_small_data_threshold > 0)
    {
      if (!TARGET_GPOPT)
	{
	  if (!TARGET_EXPLICIT_RELOCS)
	    error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");

	  TARGET_LOCAL_SDATA = false;
	  TARGET_EXTERN_SDATA = false;
	}
      else
	{
	  if (TARGET_VXWORKS_RTP)
	    warning (0, "cannot use small-data accesses for %qs", "-mrtp");

	  if (TARGET_ABICALLS)
	    warning (0, "cannot use small-data accesses for %qs",
		     "-mabicalls");
	}
    }

  /* Set NaN and ABS defaults.  */
  if (mips_nan == MIPS_IEEE_754_DEFAULT && !ISA_HAS_IEEE_754_LEGACY)
    mips_nan = MIPS_IEEE_754_2008;
  if (mips_abs == MIPS_IEEE_754_DEFAULT && !ISA_HAS_IEEE_754_LEGACY)
    mips_abs = MIPS_IEEE_754_2008;

  /* Check for IEEE 754 legacy/2008 support.  */
  if ((mips_nan == MIPS_IEEE_754_LEGACY
       || mips_abs == MIPS_IEEE_754_LEGACY)
      && !ISA_HAS_IEEE_754_LEGACY)
    warning (0, "the %qs architecture does not support %<-m%s=legacy%>",
	     mips_arch_info->name,
	     mips_nan == MIPS_IEEE_754_LEGACY ? "nan" : "abs");

  if ((mips_nan == MIPS_IEEE_754_2008
       || mips_abs == MIPS_IEEE_754_2008)
      && !ISA_HAS_IEEE_754_2008)
    warning (0, "the %qs architecture does not support %<-m%s=2008%>",
	     mips_arch_info->name,
	     mips_nan == MIPS_IEEE_754_2008 ? "nan" : "abs");

  /* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
     for all its floating point.  */
  if (mips_nan != MIPS_IEEE_754_2008)
    {
      REAL_MODE_FORMAT (SFmode) = &mips_single_format;
      REAL_MODE_FORMAT (DFmode) = &mips_double_format;
      REAL_MODE_FORMAT (TFmode) = &mips_quad_format;
    }

  /* Make sure that the user didn't turn off paired single support when
     MIPS-3D support is requested.  */
  if (TARGET_MIPS3D
      && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
      && !TARGET_PAIRED_SINGLE_FLOAT)
    error ("%<-mips3d%> requires %<-mpaired-single%>");

  /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT.  */
  if (TARGET_MIPS3D)
    target_flags |= MASK_PAIRED_SINGLE_FLOAT;

  /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
     and TARGET_HARD_FLOAT_ABI are both true.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
    {
      error ("%qs must be used with %qs",
	     TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
	     TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
      target_flags &= ~MASK_PAIRED_SINGLE_FLOAT;
      TARGET_MIPS3D = 0;
    }

  /* Make sure that when ISA_HAS_MSA is true, TARGET_FLOAT64 and
     TARGET_HARD_FLOAT_ABI are both true.  */
  if (ISA_HAS_MSA && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
    error ("%<-mmsa%> must be used with %<-mfp64%> and %<-mhard-float%>");

  /* Make sure that -mpaired-single is only used on ISAs that support it.
     We must disable it otherwise since it relies on other ISA properties
     like ISA_HAS_8CC having their normal values.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
    {
      error ("the %qs architecture does not support paired-single"
	     " instructions", mips_arch_info->name);
      target_flags &= ~MASK_PAIRED_SINGLE_FLOAT;
      TARGET_MIPS3D = 0;
    }

  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
      && !TARGET_CACHE_BUILTIN)
    {
      error ("%qs requires a target that provides the %qs instruction",
	     "-mr10k-cache-barrier", "cache");
      mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
    }

  /* If TARGET_DSPR2, enable TARGET_DSP.  */
  if (TARGET_DSPR2)
    TARGET_DSP = true;

  if (TARGET_DSP && mips_isa_rev >= 6)
    {
      error ("the %qs architecture does not support DSP instructions",
	     mips_arch_info->name);
      TARGET_DSP = false;
      TARGET_DSPR2 = false;
    }

  /* Make sure that when TARGET_LOONGSON_MMI is true, TARGET_HARD_FLOAT_ABI
     is true.  In o32 pairs of floating-point registers provide 64-bit
     values.  */
  if (TARGET_LOONGSON_MMI &&  !TARGET_HARD_FLOAT_ABI)
    error ("%<-mloongson-mmi%> must be used with %<-mhard-float%>");

  /* If TARGET_LOONGSON_EXT2, enable TARGET_LOONGSON_EXT.  */
  if (TARGET_LOONGSON_EXT2)
    {
      /* Make sure that when TARGET_LOONGSON_EXT2 is true, TARGET_LOONGSON_EXT
	 is true.  If a user explicitly says -mloongson-ext2 -mno-loongson-ext
	 then that is an error.  */
      if (!TARGET_LOONGSON_EXT
	  && (target_flags_explicit & MASK_LOONGSON_EXT) != 0)
	error ("%<-mloongson-ext2%> must be used with %<-mloongson-ext%>");
      target_flags |= MASK_LOONGSON_EXT;
    }

  /* .eh_frame addresses should be the same width as a C pointer.
     Most MIPS ABIs support only one pointer size, so the assembler
     will usually know exactly how big an .eh_frame address is.

     Unfortunately, this is not true of the 64-bit EABI.  The ABI was
     originally defined to use 64-bit pointers (i.e. it is LP64), and
     this is still the default mode.  However, we also support an n32-like
     ILP32 mode, which is selected by -mlong32.  The problem is that the
     assembler has traditionally not had an -mlong option, so it has
     traditionally not known whether we're using the ILP32 or LP64 form.

     As it happens, gas versions up to and including 2.19 use _32-bit_
     addresses for EABI64 .cfi_* directives.  This is wrong for the
     default LP64 mode, so we can't use the directives by default.
     Moreover, since gas's current behavior is at odds with gcc's
     default behavior, it seems unwise to rely on future versions
     of gas behaving the same way.  We therefore avoid using .cfi
     directives for -mlong32 as well.  */
  if (mips_abi == ABI_EABI && TARGET_64BIT)
    flag_dwarf2_cfi_asm = 0;

  /* .cfi_* directives generate a read-only section, so fall back on
     manual .eh_frame creation if we need the section to be writable.  */
  if (TARGET_WRITABLE_EH_FRAME)
    flag_dwarf2_cfi_asm = 0;

  mips_init_print_operand_punct ();

  /* Set up array to map GCC register number to debug register number.
     Ignore the special purpose register numbers.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      mips_dbx_regno[i] = IGNORED_DWARF_REGNUM;
      if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
	mips_dwarf_regno[i] = i;
      else
	mips_dwarf_regno[i] = INVALID_REGNUM;
    }

  /* GP and FP registers map to contiguous DBX ranges at fixed offsets.  */
  start = GP_DBX_FIRST - GP_REG_FIRST;
  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  start = FP_DBX_FIRST - FP_REG_FIRST;
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  /* Accumulator debug registers use big-endian ordering.  */
  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
  mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
  mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
  for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
    {
      /* TARGET_LITTLE_ENDIAN and TARGET_BIG_ENDIAN serve as 0/1 index
	 offsets here, selecting which half of the pair comes first.  */
      mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
      mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
    }

  /* Set up mips_hard_regno_mode_ok.  */
  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
      mips_hard_regno_mode_ok_p[mode][regno]
	= mips_hard_regno_mode_ok_uncached (regno, (machine_mode) mode);

  /* Function to allocate machine-dependent function status.  */
  init_machine_status = &mips_init_machine_status;

  /* Default to working around R4000 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4000) == 0
      && strcmp (mips_arch_info->name, "r4000") == 0)
    target_flags |= MASK_FIX_R4000;

  /* Default to working around R4400 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4400) == 0
      && strcmp (mips_arch_info->name, "r4400") == 0)
    target_flags |= MASK_FIX_R4400;

  /* Default to working around R5900 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R5900) == 0
      && strcmp (mips_arch_info->name, "r5900") == 0)
    target_flags |= MASK_FIX_R5900;

  /* Default to working around R10000 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R10000) == 0
      && strcmp (mips_arch_info->name, "r10000") == 0)
    target_flags |= MASK_FIX_R10000;

  /* Make sure that branch-likely instructions are available when using
     -mfix-r10000.  The instructions are not available if either:

	1. -mno-branch-likely was passed.
	2. The selected ISA does not support branch-likely and
	   the command line does not include -mbranch-likely.  */
  if (TARGET_FIX_R10000
      && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
          ? !ISA_HAS_BRANCHLIKELY
          : !TARGET_BRANCHLIKELY))
    sorry ("%qs requires branch-likely instructions", "-mfix-r10000");

  if (TARGET_SYNCI && !ISA_HAS_SYNCI)
    {
      warning (0, "the %qs architecture does not support the synci "
	       "instruction", mips_arch_info->name);
      target_flags &= ~MASK_SYNCI;
    }

  /* Only optimize PIC indirect calls if they are actually required.  */
  if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
    target_flags &= ~MASK_RELAX_PIC_CALLS;

  /* Save base state of options.  */
  mips_base_target_flags = target_flags;
  mips_base_schedule_insns = flag_schedule_insns;
  mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
  mips_base_move_loop_invariants = flag_move_loop_invariants;
  mips_base_align_loops = str_align_loops;
  mips_base_align_jumps = str_align_jumps;
  mips_base_align_functions = str_align_functions;

  /* Now select the ISA mode.

     Do all CPP-sensitive stuff in uncompressed mode; we'll switch modes
     later if required.  */
  mips_set_compression_mode (0);

  /* We register a second machine specific reorg pass after delay slot
     filling.  Registering the pass must be done at start up.  It's
     convenient to do it here.  */
  opt_pass *new_pass = make_pass_mips_machine_reorg2 (g);
  struct register_pass_info insert_pass_mips_machine_reorg2 =
    {
      new_pass,		/* pass */
      "dbr",			/* reference_pass_name */
      1,			/* ref_pass_instance_number */
      PASS_POS_INSERT_AFTER	/* pos_op */
    };
  register_pass (&insert_pass_mips_machine_reorg2);

  /* The R5900 FPU is single precision only (see the error above), and
     uses a non-IEEE single format.  */
  if (TARGET_HARD_FLOAT_ABI && TARGET_MIPS5900)
    REAL_MODE_FORMAT (SFmode) = &spu_single_format;

  mips_register_frame_header_opt ();
}
20401 
20402 /* Swap the register information for registers I and I + 1, which
20403    currently have the wrong endianness.  Note that the registers'
20404    fixedness and call-clobberedness might have been set on the
20405    command line.  */
20406 
20407 static void
mips_swap_registers(unsigned int i)20408 mips_swap_registers (unsigned int i)
20409 {
20410   int tmpi;
20411   const char *tmps;
20412 
20413 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
20414 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
20415 
20416   SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
20417   SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
20418   SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
20419   SWAP_STRING (reg_names[i], reg_names[i + 1]);
20420 
20421 #undef SWAP_STRING
20422 #undef SWAP_INT
20423 }
20424 
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  Adjust the global
   register tables for the selected ISA, ABI and float options.  */

static void
mips_conditional_register_usage (void)
{

  if (ISA_HAS_DSP)
    {
      /* These DSP control register fields are global.  */
      global_regs[CCDSP_PO_REGNUM] = 1;
      global_regs[CCDSP_SC_REGNUM] = 1;
    }
  else
    /* Without the DSP ASE, the DSP accumulators do not exist; remove
       them from the accessible set.  */
    AND_COMPL_HARD_REG_SET (accessible_reg_set,
			    reg_class_contents[(int) DSP_ACC_REGS]);

  /* Likewise HI/LO when the ISA has no HI/LO registers.  */
  if (!ISA_HAS_HILO)
    AND_COMPL_HARD_REG_SET (accessible_reg_set,
			    reg_class_contents[(int) MD_REGS]);

  if (!TARGET_HARD_FLOAT)
    {
      /* Soft float: neither the FP registers nor the FP condition
	 codes are usable.  */
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
			      reg_class_contents[(int) FP_REGS]);
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
			      reg_class_contents[(int) ST_REGS]);
    }
  else if (!ISA_HAS_8CC)
    {
      /* We only have a single condition-code register.  We implement
	 this by fixing all the condition-code registers and generating
	 RTL that refers directly to ST_REG_FIRST.  */
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
			      reg_class_contents[(int) ST_REGS]);
      /* FPSW remains accessible as the single condition code unless
	 the ISA provides CCF registers instead.  */
      if (!ISA_HAS_CCF)
	SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
      fixed_regs[FPSW_REGNUM] = call_used_regs[FPSW_REGNUM] = 1;
    }
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, we prohibit the unused $s registers, since they
	 are call-saved, and saving them via a MIPS16 register would
	 probably waste more time than just reloading the value.

	 We permit the $t temporary registers when optimizing for speed
	 but not when optimizing for space because using them results in
	 code that is larger (but faster) then not using them.  We do
	 allow $24 (t8) because it is used in CMP and CMPI instructions
	 and $25 (t9) because it is used as the function call address in
	 SVR4 PIC code.  */

      fixed_regs[18] = call_used_regs[18] = 1;
      fixed_regs[19] = call_used_regs[19] = 1;
      fixed_regs[20] = call_used_regs[20] = 1;
      fixed_regs[21] = call_used_regs[21] = 1;
      fixed_regs[22] = call_used_regs[22] = 1;
      fixed_regs[23] = call_used_regs[23] = 1;
      fixed_regs[26] = call_used_regs[26] = 1;
      fixed_regs[27] = call_used_regs[27] = 1;
      fixed_regs[30] = call_used_regs[30] = 1;
      if (optimize_size)
	{
	  fixed_regs[8] = call_used_regs[8] = 1;
	  fixed_regs[9] = call_used_regs[9] = 1;
	  fixed_regs[10] = call_used_regs[10] = 1;
	  fixed_regs[11] = call_used_regs[11] = 1;
	  fixed_regs[12] = call_used_regs[12] = 1;
	  fixed_regs[13] = call_used_regs[13] = 1;
	  fixed_regs[14] = call_used_regs[14] = 1;
	  fixed_regs[15] = call_used_regs[15] = 1;
	}

      /* Do not allow HI and LO to be treated as register operands.
	 There are no MTHI or MTLO instructions (or any real need
	 for them) and one-way registers cannot easily be reloaded.  */
      AND_COMPL_HARD_REG_SET (operand_reg_set,
			      reg_class_contents[(int) MD_REGS]);
    }
  /* $f20-$f23 are call-clobbered for n64.  */
  if (mips_abi == ABI_64)
    {
      int regno;
      for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
	call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
     for n32 and o32 FP64.  */
  if (mips_abi == ABI_N32
      || (mips_abi == ABI_32
          && TARGET_FLOAT64))
    {
      int regno;
      for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
	call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Make sure that double-register accumulator values are correctly
     ordered for the current endianness.  */
  if (TARGET_LITTLE_ENDIAN)
    {
      unsigned int regno;

      mips_swap_registers (MD_REG_FIRST);
      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
	mips_swap_registers (regno);
    }
}
20531 
20532 /* Implement EH_USES.  */
20533 
20534 bool
mips_eh_uses(unsigned int regno)20535 mips_eh_uses (unsigned int regno)
20536 {
20537   if (reload_completed && !TARGET_ABSOLUTE_JUMPS)
20538     {
20539       /* We need to force certain registers to be live in order to handle
20540 	 PIC long branches correctly.  See mips_must_initialize_gp_p for
20541 	 details.  */
20542       if (mips_cfun_has_cprestore_slot_p ())
20543 	{
20544 	  if (regno == CPRESTORE_SLOT_REGNUM)
20545 	    return true;
20546 	}
20547       else
20548 	{
20549 	  if (cfun->machine->global_pointer == regno)
20550 	    return true;
20551 	}
20552     }
20553 
20554   return false;
20555 }
20556 
20557 /* Implement EPILOGUE_USES.  */
20558 
20559 bool
mips_epilogue_uses(unsigned int regno)20560 mips_epilogue_uses (unsigned int regno)
20561 {
20562   /* Say that the epilogue uses the return address register.  Note that
20563      in the case of sibcalls, the values "used by the epilogue" are
20564      considered live at the start of the called function.  */
20565   if (regno == RETURN_ADDR_REGNUM)
20566     return true;
20567 
20568   /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
20569      See the comment above load_call<mode> for details.  */
20570   if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
20571     return true;
20572 
20573   /* An interrupt handler must preserve some registers that are
20574      ordinarily call-clobbered.  */
20575   if (cfun->machine->interrupt_handler_p
20576       && mips_interrupt_extra_call_saved_reg_p (regno))
20577     return true;
20578 
20579   return false;
20580 }
20581 
20582 /* Return true if INSN needs to be wrapped in ".set noat".
20583    INSN has NOPERANDS operands, stored in OPVEC.  */
20584 
20585 static bool
mips_need_noat_wrapper_p(rtx_insn * insn,rtx * opvec,int noperands)20586 mips_need_noat_wrapper_p (rtx_insn *insn, rtx *opvec, int noperands)
20587 {
20588   if (recog_memoized (insn) >= 0)
20589     {
20590       subrtx_iterator::array_type array;
20591       for (int i = 0; i < noperands; i++)
20592 	FOR_EACH_SUBRTX (iter, array, opvec[i], NONCONST)
20593 	  if (REG_P (*iter) && REGNO (*iter) == AT_REGNUM)
20594 	    return true;
20595     }
20596   return false;
20597 }
20598 
/* Implement FINAL_PRESCAN_INSN.  Mark MIPS16 inline constant pools
   as data for the purpose of disassembly.  For simplicity embed the
   pool's initial label number in the local symbol produced so that
   multiple pools within a single function end up marked with unique
   symbols.  The label number is carried by the `consttable' insn
   emitted at the beginning of each pool.  */

void
mips_final_prescan_insn (rtx_insn *insn, rtx *opvec, int noperands)
{
  /* A `consttable' marker starts an inline constant pool: switch the
     text-contents marking to data, tagged "__pool_<label>".  */
  if (INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPEC_CONSTTABLE)
    mips_set_text_contents_type (asm_out_file, "__pool_",
				 INTVAL (XVECEXP (PATTERN (insn), 0, 0)),
				 FALSE);

  /* Open a ".set noat" region around insns that explicitly use $1;
     the region is closed again in mips_final_postscan_insn.  */
  if (mips_need_noat_wrapper_p (insn, opvec, noperands))
    mips_push_asm_switch (&mips_noat);
}
20619 
/* Implement TARGET_ASM_FINAL_POSTSCAN_INSN.  Reset text marking to
   code after a MIPS16 inline constant pool.  Like with the beginning
   of a pool table use the pool's initial label number to keep symbols
   unique.  The label number is carried by the `consttable_end' insn
   emitted at the end of each pool.  */

static void
mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx_insn *insn,
			  rtx *opvec, int noperands)
{
  /* Close any ".set noat" region opened by mips_final_prescan_insn.  */
  if (mips_need_noat_wrapper_p (insn, opvec, noperands))
    mips_pop_asm_switch (&mips_noat);

  /* A `consttable_end' marker ends an inline constant pool: switch the
     text-contents marking back to code, tagged "__pend_<label>".  */
  if (INSN_P (insn)
      && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
      && XINT (PATTERN (insn), 1) == UNSPEC_CONSTTABLE_END)
    mips_set_text_contents_type (asm_out_file, "__pend_",
				 INTVAL (XVECEXP (PATTERN (insn), 0, 0)),
				 TRUE);
}
20640 
/* Return the function that is used to expand the <u>mulsidi3 pattern.
   EXT_CODE is the code of the extension used (SIGN_EXTEND for mulsidi3,
   otherwise umulsidi3).  Return NULL if widening multiplication
   shouldn't be used.  */

mulsidi3_gen_fn
mips_mulsidi3_gen_fn (enum rtx_code ext_code)
{
  bool signed_p;

  signed_p = ext_code == SIGN_EXTEND;
  if (TARGET_64BIT)
    {
      /* Don't use widening multiplication with MULT when we have DMUL.  Even
	 with the extension of its input operands DMUL is faster.  Note that
	 the extension is not needed for signed multiplication.  In order to
	 ensure that we always remove the redundant sign-extension in this
	 case we still expand mulsidi3 for DMUL.  */
      if (ISA_HAS_R6DMUL)
	return signed_p ? gen_mulsidi3_64bit_r6dmul : NULL;
      if (ISA_HAS_DMUL3)
	return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
      if (TARGET_MIPS16)
	return (signed_p
		? gen_mulsidi3_64bit_mips16
		: gen_umulsidi3_64bit_mips16);
      /* With -mfix-r4000 fall back on the generic expansion.  */
      if (TARGET_FIX_R4000)
	return NULL;
      return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
    }
  else
    {
      if (ISA_HAS_R6MUL)
	return (signed_p ? gen_mulsidi3_32bit_r6 : gen_umulsidi3_32bit_r6);
      if (TARGET_MIPS16)
	return (signed_p
		? gen_mulsidi3_32bit_mips16
		: gen_umulsidi3_32bit_mips16);
      /* Use the r4000-specific expansion unless the DSP variant applies.  */
      if (TARGET_FIX_R4000 && !ISA_HAS_DSP)
	return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
      return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
    }
}
20683 
/* Return true if PATTERN matches the kind of instruction generated by
   umips_build_save_restore.  SAVE_P is true for store.  */

bool
umips_save_restore_pattern_p (bool save_p, rtx pattern)
{
  int n;
  unsigned int i;
  HOST_WIDE_INT first_offset = 0;
  rtx first_base = 0;
  unsigned int regmask = 0;	/* Bitmask of registers saved/restored.  */

  /* Check each element of the PARALLEL in turn.  */
  for (n = 0; n < XVECLEN (pattern, 0); n++)
    {
      rtx set, reg, mem, this_base;
      HOST_WIDE_INT this_offset;

      /* Check that we have a SET.  */
      set = XVECEXP (pattern, 0, n);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that the SET is a load (if restoring) or a store
	 (if saving).  */
      mem = save_p ? SET_DEST (set) : SET_SRC (set);
      if (!MEM_P (mem) || MEM_VOLATILE_P (mem))
	return false;

      /* Check that the address is the sum of base and a possibly-zero
	 constant offset.  Determine if the offset is in range.  */
      mips_split_plus (XEXP (mem, 0), &this_base, &this_offset);
      if (!REG_P (this_base))
	return false;

      if (n == 0)
	{
	  /* The first slot fixes the base register and starting offset;
	     LWM/SWM offsets are limited to a 12-bit signed field.  */
	  if (!UMIPS_12BIT_OFFSET_P (this_offset))
	    return false;
	  first_base = this_base;
	  first_offset = this_offset;
	}
      else
	{
	  /* Check that the save slots are consecutive.  */
	  if (REGNO (this_base) != REGNO (first_base)
	      || this_offset != first_offset + UNITS_PER_WORD * n)
	    return false;
	}

      /* Check that SET's other operand is a register.  */
      reg = save_p ? SET_SRC (set) : SET_DEST (set);
      if (!REG_P (reg))
	return false;

      regmask |= 1 << REGNO (reg);
    }

  /* Accept only the register combinations LWM/SWM can encode.  */
  for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
    if (regmask == umips_swm_mask[i])
      return true;

  return false;
}
20747 
/* Return the assembly instruction for microMIPS LWM or SWM.
   SAVE_P and PATTERN are as for umips_save_restore_pattern_p.
   The returned string lives in a static buffer and is overwritten
   by the next call.  */

const char *
umips_output_save_restore (bool save_p, rtx pattern)
{
  static char buffer[300];
  char *s;
  int n;
  HOST_WIDE_INT offset;
  rtx base, mem, set, last_set, last_reg;

  /* Parse the pattern.  */
  gcc_assert (umips_save_restore_pattern_p (save_p, pattern));

  s = strcpy (buffer, save_p ? "swm\t" : "lwm\t");
  s += strlen (s);
  n = XVECLEN (pattern, 0);

  /* The first SET gives the base register and starting offset.  */
  set = XVECEXP (pattern, 0, 0);
  mem = save_p ? SET_DEST (set) : SET_SRC (set);
  mips_split_plus (XEXP (mem, 0), &base, &offset);

  last_set = XVECEXP (pattern, 0, n - 1);
  last_reg = save_p ? SET_SRC (last_set) : SET_DEST (last_set);

  /* $31 is printed separately after the register range, so exclude it
     from the range count.  */
  if (REGNO (last_reg) == 31)
    n--;

  gcc_assert (n <= 9);
  if (n == 0)
    ;
  else if (n == 1)
    s += sprintf (s, "%s,", reg_names[16]);
  else if (n < 9)
    /* A contiguous range starting at register 16.  */
    s += sprintf (s, "%s-%s,", reg_names[16], reg_names[15 + n]);
  else if (n == 9)
    /* Registers 16-23 plus register 30.  */
    s += sprintf (s, "%s-%s,%s,", reg_names[16], reg_names[23],
		  reg_names[30]);

  if (REGNO (last_reg) == 31)
    s += sprintf (s, "%s,", reg_names[31]);

  s += sprintf (s, "%d(%s)", (int)offset, reg_names[REGNO (base)]);
  return buffer;
}
20794 
/* Return true if MEM1 and MEM2 use the same base register, and the
   offset of MEM2 equals the offset of MEM1 plus 4.  FIRST_REG is the
   register into (from) which the contents of MEM1 will be loaded
   (stored), depending on the value of LOAD_P.
   SWAP_P is true when the 1st and 2nd instructions are swapped.  */

static bool
umips_load_store_pair_p_1 (bool load_p, bool swap_p,
			   rtx first_reg, rtx mem1, rtx mem2)
{
  rtx base1, base2;
  HOST_WIDE_INT offset1, offset2;

  /* Both addresses must be memory references.  */
  if (!MEM_P (mem1) || !MEM_P (mem2))
    return false;

  mips_split_plus (XEXP (mem1, 0), &base1, &offset1);
  mips_split_plus (XEXP (mem2, 0), &base2, &offset2);

  /* Both slots must use the same base register.  */
  if (!REG_P (base1) || !rtx_equal_p (base1, base2))
    return false;

  /* Avoid invalid load pair instructions.  */
  if (load_p && REGNO (first_reg) == REGNO (base1))
    return false;

  /* We must avoid this case for anti-dependence.
     Ex:  lw $3, 4($3)
          lw $2, 0($3)
     first_reg is $2, but the base is $3.  */
  if (load_p
      && swap_p
      && REGNO (first_reg) + 1 == REGNO (base1))
    return false;

  /* The second slot must directly follow the first.  */
  if (offset2 != offset1 + 4)
    return false;

  /* The offset must fit LWP/SWP's 12-bit signed field.  */
  if (!UMIPS_12BIT_OFFSET_P (offset1))
    return false;

  return true;
}
20838 
/* Return true if the two loads or stores described by OPERANDS may be
   bonded into a paired access.  OPERANDS holds the operands of two SETs
   in the order dest1, src1, dest2, src2; MODE is the access mode and
   LOAD_P is true for loads.  */

bool
mips_load_store_bonding_p (rtx *operands, machine_mode mode, bool load_p)
{
  rtx reg1, reg2, mem1, mem2, base1, base2;
  enum reg_class rc1, rc2;
  HOST_WIDE_INT offset1, offset2;

  /* For loads, the registers are the destinations; for stores, the
     sources.  */
  if (load_p)
    {
      reg1 = operands[0];
      reg2 = operands[2];
      mem1 = operands[1];
      mem2 = operands[3];
    }
  else
    {
      reg1 = operands[1];
      reg2 = operands[3];
      mem1 = operands[0];
      mem2 = operands[2];
    }

  /* Both addresses must be legitimate for MODE.  */
  if (mips_address_insns (XEXP (mem1, 0), mode, false) == 0
      || mips_address_insns (XEXP (mem2, 0), mode, false) == 0)
    return false;

  mips_split_plus (XEXP (mem1, 0), &base1, &offset1);
  mips_split_plus (XEXP (mem2, 0), &base2, &offset2);

  /* Base regs do not match.  */
  if (!REG_P (base1) || !rtx_equal_p (base1, base2))
    return false;

  /* Either of the loads is clobbering base register.  It is legitimate to bond
     loads if second load clobbers base register.  However, hardware does not
     support such bonding.  */
  if (load_p
      && (REGNO (reg1) == REGNO (base1)
	  || (REGNO (reg2) == REGNO (base1))))
    return false;

  /* Loading in same registers.  */
  if (load_p
      && REGNO (reg1) == REGNO (reg2))
    return false;

  /* The loads/stores are not of same type.  */
  rc1 = REGNO_REG_CLASS (REGNO (reg1));
  rc2 = REGNO_REG_CLASS (REGNO (reg2));
  if (rc1 != rc2
      && !reg_class_subset_p (rc1, rc2)
      && !reg_class_subset_p (rc2, rc1))
    return false;

  /* The two slots must be adjacent in either order.  */
  if (abs (offset1 - offset2) != GET_MODE_SIZE (mode))
    return false;

  return true;
}
20898 
20899 /* OPERANDS describes the operands to a pair of SETs, in the order
20900    dest1, src1, dest2, src2.  Return true if the operands can be used
20901    in an LWP or SWP instruction; LOAD_P says which.  */
20902 
20903 bool
umips_load_store_pair_p(bool load_p,rtx * operands)20904 umips_load_store_pair_p (bool load_p, rtx *operands)
20905 {
20906   rtx reg1, reg2, mem1, mem2;
20907 
20908   if (load_p)
20909     {
20910       reg1 = operands[0];
20911       reg2 = operands[2];
20912       mem1 = operands[1];
20913       mem2 = operands[3];
20914     }
20915   else
20916     {
20917       reg1 = operands[1];
20918       reg2 = operands[3];
20919       mem1 = operands[0];
20920       mem2 = operands[2];
20921     }
20922 
20923   if (REGNO (reg2) == REGNO (reg1) + 1)
20924     return umips_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2);
20925 
20926   if (REGNO (reg1) == REGNO (reg2) + 1)
20927     return umips_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1);
20928 
20929   return false;
20930 }
20931 
20932 /* Return the assembly instruction for a microMIPS LWP or SWP in which
20933    the first register is REG and the first memory slot is MEM.
20934    LOAD_P is true for LWP.  */
20935 
20936 static void
umips_output_load_store_pair_1(bool load_p,rtx reg,rtx mem)20937 umips_output_load_store_pair_1 (bool load_p, rtx reg, rtx mem)
20938 {
20939   rtx ops[] = {reg, mem};
20940 
20941   if (load_p)
20942     output_asm_insn ("lwp\t%0,%1", ops);
20943   else
20944     output_asm_insn ("swp\t%0,%1", ops);
20945 }
20946 
20947 /* Output the assembly instruction for a microMIPS LWP or SWP instruction.
20948    LOAD_P and OPERANDS are as for umips_load_store_pair_p.  */
20949 
20950 void
umips_output_load_store_pair(bool load_p,rtx * operands)20951 umips_output_load_store_pair (bool load_p, rtx *operands)
20952 {
20953   rtx reg1, reg2, mem1, mem2;
20954   if (load_p)
20955     {
20956       reg1 = operands[0];
20957       reg2 = operands[2];
20958       mem1 = operands[1];
20959       mem2 = operands[3];
20960     }
20961   else
20962     {
20963       reg1 = operands[1];
20964       reg2 = operands[3];
20965       mem1 = operands[0];
20966       mem2 = operands[2];
20967     }
20968 
20969   if (REGNO (reg2) == REGNO (reg1) + 1)
20970     {
20971       umips_output_load_store_pair_1 (load_p, reg1, mem1);
20972       return;
20973     }
20974 
20975   gcc_assert (REGNO (reg1) == REGNO (reg2) + 1);
20976   umips_output_load_store_pair_1 (load_p, reg2, mem2);
20977 }
20978 
20979 /* Return true if REG1 and REG2 match the criteria for a movep insn.  */
20980 
20981 bool
umips_movep_target_p(rtx reg1,rtx reg2)20982 umips_movep_target_p (rtx reg1, rtx reg2)
20983 {
20984   int regno1, regno2, pair;
20985   unsigned int i;
20986   static const int match[8] = {
20987     0x00000060, /* 5, 6 */
20988     0x000000a0, /* 5, 7 */
20989     0x000000c0, /* 6, 7 */
20990     0x00200010, /* 4, 21 */
20991     0x00400010, /* 4, 22 */
20992     0x00000030, /* 4, 5 */
20993     0x00000050, /* 4, 6 */
20994     0x00000090  /* 4, 7 */
20995   };
20996 
20997   if (!REG_P (reg1) || !REG_P (reg2))
20998     return false;
20999 
21000   regno1 = REGNO (reg1);
21001   regno2 = REGNO (reg2);
21002 
21003   if (!GP_REG_P (regno1) || !GP_REG_P (regno2))
21004     return false;
21005 
21006   pair = (1 << regno1) | (1 << regno2);
21007 
21008   for (i = 0; i < ARRAY_SIZE (match); i++)
21009     if (pair == match[i])
21010       return true;
21011 
21012   return false;
21013 }
21014 
21015 /* Return the size in bytes of the trampoline code, padded to
21016    TRAMPOLINE_ALIGNMENT bits.  The static chain pointer and target
21017    function address immediately follow.  */
21018 
21019 int
mips_trampoline_code_size(void)21020 mips_trampoline_code_size (void)
21021 {
21022   if (TARGET_USE_PIC_FN_ADDR_REG)
21023     return 4 * 4;
21024   else if (ptr_mode == DImode)
21025     return 8 * 4;
21026   else if (ISA_HAS_LOAD_DELAY)
21027     return 6 * 4;
21028   else
21029     return 4 * 4;
21030 }
21031 
/* Implement TARGET_TRAMPOLINE_INIT.  M_TRAMP is the memory block that
   holds the trampoline, FNDECL is the nested function to call and
   CHAIN_VALUE is the static chain value to pass it.  */

static void
mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx addr, end_addr, high, low, opcode, mem;
  rtx trampoline[8];		/* SImode words of trampoline code.  */
  unsigned int i, j;
  HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;

  /* Work out the offsets of the pointers from the start of the
     trampoline code.  */
  end_addr_offset = mips_trampoline_code_size ();
  static_chain_offset = end_addr_offset;
  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);

  /* Get pointers to the beginning and end of the code block.  */
  addr = force_reg (Pmode, XEXP (m_tramp, 0));
  end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));

  /* Encode one instruction word as an SImode constant.  */
#define OP(X) gen_int_mode (X, SImode)

  /* Build up the code in TRAMPOLINE.  */
  i = 0;
  if (TARGET_USE_PIC_FN_ADDR_REG)
    {
      /* $25 contains the address of the trampoline.  Emit code of the form:

	     l[wd]    $1, target_function_offset($25)
	     l[wd]    $static_chain, static_chain_offset($25)
	     jr       $1
	     move     $25,$1.  */
      trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
					   target_function_offset,
					   PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
					   static_chain_offset,
					   PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
      trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
    }
  else if (ptr_mode == DImode)
    {
      /* It's too cumbersome to create the full 64-bit address, so let's
	 instead use:

	     move    $1, $31
	     bal     1f
	     nop
	 1:  l[wd]   $25, target_function_offset - 12($31)
	     l[wd]   $static_chain, static_chain_offset - 12($31)
	     jr      $25
	     move    $31, $1

	where 12 is the offset of "1:" from the start of the code block.  */
      trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_BAL (1));
      trampoline[i++] = OP (MIPS_NOP);
      trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
					   target_function_offset - 12,
					   RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
					   static_chain_offset - 12,
					   RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
    }
  else
    {
      /* If the target has load delays, emit:

	     lui     $1, %hi(end_addr)
	     lw      $25, %lo(end_addr + ...)($1)
	     lw      $static_chain, %lo(end_addr + ...)($1)
	     jr      $25
	     nop

	 Otherwise emit:

	     lui     $1, %hi(end_addr)
	     lw      $25, %lo(end_addr + ...)($1)
	     jr      $25
	     lw      $static_chain, %lo(end_addr + ...)($1).  */

      /* Split END_ADDR into %hi and %lo values.  Trampolines are aligned
	 to 64 bits, so the %lo value will have the bottom 3 bits clear.  */
      high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
				  NULL, false, OPTAB_WIDEN);
      high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
				  NULL, false, OPTAB_WIDEN);
      low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);

      /* Emit the LUI.  */
      opcode = OP (MIPS_LUI (AT_REGNUM, 0));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
					     NULL, false, OPTAB_WIDEN);

      /* Emit the load of the target function.  */
      opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
				  target_function_offset - end_addr_offset,
				  AT_REGNUM));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
					     NULL, false, OPTAB_WIDEN);

      /* Emit the JR here, if we can.  */
      if (!ISA_HAS_LOAD_DELAY)
	trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));

      /* Emit the load of the static chain register.  */
      opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
				  static_chain_offset - end_addr_offset,
				  AT_REGNUM));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
					     NULL, false, OPTAB_WIDEN);

      /* Emit the JR, if we couldn't above.  */
      if (ISA_HAS_LOAD_DELAY)
	{
	  trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
	  trampoline[i++] = OP (MIPS_NOP);
	}
    }

#undef OP

  /* If we are using compact branches we don't have delay slots so
     place the instruction that was in the delay slot before the JRC
     instruction.  */

  if (TARGET_CB_ALWAYS)
    {
      rtx temp;
      temp = trampoline[i-2];
      trampoline[i-2] = trampoline[i-1];
      trampoline[i-1] = temp;
    }

  /* Copy the trampoline code.  Leave any padding uninitialized.  */
  for (j = 0; j < i; j++)
    {
      mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
      mips_emit_move (mem, trampoline[j]);
    }

  /* Set up the static chain pointer field.  */
  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
  mips_emit_move (mem, chain_value);

  /* Set up the target function field.  */
  mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
  mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));

  /* Flush the code part of the trampoline.  */
  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
  emit_insn (gen_clear_cache (addr, end_addr));
}
21188 
/* Implement FUNCTION_PROFILER.  Write the _mcount call sequence to FILE
   for the current function.  */

void mips_function_profiler (FILE *file)
{
  if (TARGET_MIPS16)
    sorry ("mips16 function profiling");
  if (TARGET_LONG_CALLS)
    {
      /* For TARGET_LONG_CALLS use $3 for the address of _mcount.  */
      if (Pmode == DImode)
	fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
      else
	fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
    }
  /* The sequence uses $1 ($at) explicitly, so suppress the assembler's
     at-use warnings around it.  */
  mips_push_asm_switch (&mips_noat);
  fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
	   reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
  /* _mcount treats $2 as the static chain register.  */
  if (cfun->static_chain_decl != NULL)
    fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
	     reg_names[STATIC_CHAIN_REGNUM]);
  if (TARGET_MCOUNT_RA_ADDRESS)
    {
      /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
	 ra save location.  */
      if (cfun->machine->frame.ra_fp_offset == 0)
	/* ra not saved, pass zero.  */
	fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
      else
	fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
		 Pmode == DImode ? "dla" : "la", reg_names[12],
		 cfun->machine->frame.ra_fp_offset,
		 reg_names[STACK_POINTER_REGNUM]);
    }
  /* Pre-adjust $sp on the old ABIs; as the emitted comment says,
     _mcount pops two words from the stack.  */
  if (!TARGET_NEWABI)
    fprintf (file,
	     "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from  stack\n",
	     TARGET_64BIT ? "dsubu" : "subu",
	     reg_names[STACK_POINTER_REGNUM],
	     reg_names[STACK_POINTER_REGNUM],
	     Pmode == DImode ? 16 : 8);

  if (TARGET_LONG_CALLS)
    fprintf (file, "\tjalr\t%s\n", reg_names[3]);
  else
    fprintf (file, "\tjal\t_mcount\n");
  mips_pop_asm_switch (&mips_noat);
  /* Restore the static chain register from $2 after the call.  */
  if (cfun->static_chain_decl != NULL)
    fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
	     reg_names[2]);
}
21241 
21242 /* Implement TARGET_SHIFT_TRUNCATION_MASK.  We want to keep the default
21243    behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
21244    when TARGET_LOONGSON_MMI is true.  */
21245 
21246 static unsigned HOST_WIDE_INT
mips_shift_truncation_mask(machine_mode mode)21247 mips_shift_truncation_mask (machine_mode mode)
21248 {
21249   if (TARGET_LOONGSON_MMI && VECTOR_MODE_P (mode))
21250     return 0;
21251 
21252   return GET_MODE_BITSIZE (mode) - 1;
21253 }
21254 
/* Implement TARGET_PREPARE_PCH_SAVE.  */

static void
mips_prepare_pch_save (void)
{
  /* We are called in a context where the current compression vs.
     non-compression setting should be irrelevant.  The question then is:
     which setting makes most sense at load time?

     The PCH is loaded before the first token is read.  We should never have
     switched into a compression mode by that point, and thus should not have
     populated mips16_globals or micromips_globals.  Nor can we load the
     entire contents of mips16_globals or micromips_globals from the PCH file,
     because they contain a combination of GGC and non-GGC data.

     There is therefore no point in trying save the GGC part of
     mips16_globals/micromips_globals to the PCH file, or to preserve a
     compression setting across the PCH save and load.  The loading compiler
     would not have access to the non-GGC parts of mips16_globals or
     micromips_globals (either from the PCH file, or from a copy that the
     loading compiler generated itself) and would have to call target_reinit
     anyway.

     It therefore seems best to switch back to non-MIPS16 mode and
     non-microMIPS mode to save time, and to ensure that mips16_globals and
     micromips_globals remain null after a PCH load.  */
  mips_set_compression_mode (0);
  /* Drop any per-mode globals so a PCH load starts from a clean state.  */
  mips16_globals = 0;
  micromips_globals = 0;
}
21285 
/* Generate or test for an insn that supports a constant permutation.  */

/* Maximum number of vector elements handled by the permutation code.  */
#define MAX_VECT_LEN 16

/* Describes one constant vector permutation to be expanded:
   TARGET <- permutation of elements of OP0 (and OP1).  */
struct expand_vec_perm_d
{
  rtx target, op0, op1;		/* Destination and source operands.  */
  unsigned char perm[MAX_VECT_LEN];  /* Selected source element indices.  */
  machine_mode vmode;		/* Vector mode of the operands.  */
  unsigned char nelt;		/* Number of elements per vector.  */
  bool one_vector_p;		/* True if only a single input vector
				   is used.  */
  bool testing_p;		/* True to test whether the permutation is
				   supported without emitting any insns.  */
};
21299 
/* Construct (set target (vec_select op0 (parallel perm))) and
   return true if that's a valid instruction in the active ISA.
   PERM holds the NELT selector values.  On failure, no insn is
   left in the stream.  */

static bool
mips_expand_vselect (rtx target, rtx op0,
		     const unsigned char *perm, unsigned nelt)
{
  rtx rperm[MAX_VECT_LEN], x;
  rtx_insn *insn;
  unsigned i;

  /* Wrap the selector values as a PARALLEL of constants.  */
  for (i = 0; i < nelt; ++i)
    rperm[i] = GEN_INT (perm[i]);

  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
  x = gen_rtx_SET (target, x);

  /* Emit tentatively; if no insn pattern recognizes it, back it out.  */
  insn = emit_insn (x);
  if (recog_memoized (insn) < 0)
    {
      remove_insn (insn);
      return false;
    }
  return true;
}
21326 
21327 /* Similar, but generate a vec_concat from op0 and op1 as well.  */
21328 
21329 static bool
mips_expand_vselect_vconcat(rtx target,rtx op0,rtx op1,const unsigned char * perm,unsigned nelt)21330 mips_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
21331 			     const unsigned char *perm, unsigned nelt)
21332 {
21333   machine_mode v2mode;
21334   rtx x;
21335 
21336   if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
21337     return false;
21338   x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
21339   return mips_expand_vselect (target, x, perm, nelt);
21340 }
21341 
/* Recognize patterns for even-odd extraction.  Return true (and emit
   the insns unless D->testing_p) if D selects all even or all odd
   elements of the concatenated input vectors.  */

static bool
mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d *d)
{
  unsigned i, odd, nelt = d->nelt;
  rtx t0, t1, t2, t3;

  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
    return false;
  /* Even-odd for V2SI/V2SFmode is matched by interleave directly.  */
  if (nelt < 4)
    return false;

  /* The permutation must select every second element, starting at
     either element 0 (even) or element 1 (odd).  */
  odd = d->perm[0];
  if (odd > 1)
    return false;
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != i * 2 + odd)
      return false;

  if (d->testing_p)
    return true;

  /* We need 2*log2(N)-1 operations to achieve odd/even with interleave. */
  t0 = gen_reg_rtx (d->vmode);
  t1 = gen_reg_rtx (d->vmode);
  switch (d->vmode)
    {
    case E_V4HImode:
      /* Two rounds of high/low interleaves.  */
      emit_insn (gen_loongson_punpckhhw (t0, d->op0, d->op1));
      emit_insn (gen_loongson_punpcklhw (t1, d->op0, d->op1));
      if (odd)
	emit_insn (gen_loongson_punpckhhw (d->target, t1, t0));
      else
	emit_insn (gen_loongson_punpcklhw (d->target, t1, t0));
      break;

    case E_V8QImode:
      /* Three rounds of high/low interleaves.  */
      t2 = gen_reg_rtx (d->vmode);
      t3 = gen_reg_rtx (d->vmode);
      emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op1));
      emit_insn (gen_loongson_punpcklbh (t1, d->op0, d->op1));
      emit_insn (gen_loongson_punpckhbh (t2, t1, t0));
      emit_insn (gen_loongson_punpcklbh (t3, t1, t0));
      if (odd)
	emit_insn (gen_loongson_punpckhbh (d->target, t3, t2));
      else
	emit_insn (gen_loongson_punpcklbh (d->target, t3, t2));
      break;

    default:
      gcc_unreachable ();
    }
  return true;
}
21398 
/* Recognize patterns for the Loongson PSHUFH instruction.  Return true
   (and emit the insns unless D->testing_p) if D can be implemented
   with PSHUFH, possibly plus a mask-based merge of two shuffles.  */

static bool
mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d *d)
{
  unsigned i, mask;
  rtx rmask;

  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
    return false;
  /* PSHUFH only handles 4 x 16-bit elements.  */
  if (d->vmode != V4HImode)
    return false;
  if (d->testing_p)
    return true;

  /* Convert the selector into the packed 8-bit form for pshufh.  */
  /* Recall that loongson is little-endian only.  No big-endian
     adjustment required.  */
  for (i = mask = 0; i < 4; i++)
    mask |= (d->perm[i] & 3) << (i * 2);
  rmask = force_reg (SImode, GEN_INT (mask));

  if (d->one_vector_p)
    emit_insn (gen_loongson_pshufh (d->target, d->op0, rmask));
  else
    {
      rtx t0, t1, x, merge, rmerge[4];

      /* Shuffle both inputs with the same selector, then merge them:
	 for each lane, bit 2 of the original selector says which of
	 the two shuffles supplies the element.  */
      t0 = gen_reg_rtx (V4HImode);
      t1 = gen_reg_rtx (V4HImode);
      emit_insn (gen_loongson_pshufh (t1, d->op1, rmask));
      emit_insn (gen_loongson_pshufh (t0, d->op0, rmask));

      /* Build an all-ones/all-zeros lane mask from selector bit 2.  */
      for (i = 0; i < 4; ++i)
	rmerge[i] = (d->perm[i] & 4 ? constm1_rtx : const0_rtx);
      merge = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmerge));
      merge = force_reg (V4HImode, merge);

      /* target = (merge & t1) | (~merge & t0).  */
      x = gen_rtx_AND (V4HImode, merge, t1);
      emit_insn (gen_rtx_SET (t1, x));

      x = gen_rtx_NOT (V4HImode, merge);
      x = gen_rtx_AND (V4HImode, x, t0);
      emit_insn (gen_rtx_SET (t0, x));

      x = gen_rtx_IOR (V4HImode, t0, t1);
      emit_insn (gen_rtx_SET (d->target, x));
    }

  return true;
}
21450 
21451 /* Recognize broadcast patterns for the Loongson.  */
21452 
21453 static bool
mips_expand_vpc_loongson_bcast(struct expand_vec_perm_d * d)21454 mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d *d)
21455 {
21456   unsigned i, elt;
21457   rtx t0, t1;
21458 
21459   if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
21460     return false;
21461   /* Note that we've already matched V2SI via punpck and V4HI via pshufh.  */
21462   if (d->vmode != V8QImode)
21463     return false;
21464   if (!d->one_vector_p)
21465     return false;
21466 
21467   elt = d->perm[0];
21468   for (i = 1; i < 8; ++i)
21469     if (d->perm[i] != elt)
21470       return false;
21471 
21472   if (d->testing_p)
21473     return true;
21474 
21475   /* With one interleave we put two of the desired element adjacent.  */
21476   t0 = gen_reg_rtx (V8QImode);
21477   if (elt < 4)
21478     emit_insn (gen_loongson_punpcklbh (t0, d->op0, d->op0));
21479   else
21480     emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op0));
21481 
21482   /* Shuffle that one HImode element into all locations.  */
21483   elt &= 3;
21484   elt *= 0x55;
21485   t1 = gen_reg_rtx (V4HImode);
21486   emit_insn (gen_loongson_pshufh (t1, gen_lowpart (V4HImode, t0),
21487 				  force_reg (SImode, GEN_INT (elt))));
21488 
21489   emit_move_insn (d->target, gen_lowpart (V8QImode, t1));
21490   return true;
21491 }
21492 
21493 /* Construct (set target (vec_select op0 (parallel selector))) and
21494    return true if that's a valid instruction in the active ISA.  */
21495 
21496 static bool
mips_expand_msa_shuffle(struct expand_vec_perm_d * d)21497 mips_expand_msa_shuffle (struct expand_vec_perm_d *d)
21498 {
21499   rtx x, elts[MAX_VECT_LEN];
21500   rtvec v;
21501   rtx_insn *insn;
21502   unsigned i;
21503 
21504   if (!ISA_HAS_MSA)
21505     return false;
21506 
21507   for (i = 0; i < d->nelt; i++)
21508     elts[i] = GEN_INT (d->perm[i]);
21509 
21510   v = gen_rtvec_v (d->nelt, elts);
21511   x = gen_rtx_PARALLEL (VOIDmode, v);
21512 
21513   if (!mips_const_vector_shuffle_set_p (x, d->vmode))
21514     return false;
21515 
21516   x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x);
21517   x = gen_rtx_SET (d->target, x);
21518 
21519   insn = emit_insn (x);
21520   if (recog_memoized (insn) < 0)
21521     {
21522       remove_insn (insn);
21523       return false;
21524     }
21525   return true;
21526 }
21527 
21528 static bool
mips_expand_vec_perm_const_1(struct expand_vec_perm_d * d)21529 mips_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
21530 {
21531   unsigned int i, nelt = d->nelt;
21532   unsigned char perm2[MAX_VECT_LEN];
21533 
21534   if (d->one_vector_p)
21535     {
21536       /* Try interleave with alternating operands.  */
21537       memcpy (perm2, d->perm, sizeof(perm2));
21538       for (i = 1; i < nelt; i += 2)
21539 	perm2[i] += nelt;
21540       if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt))
21541 	return true;
21542     }
21543   else
21544     {
21545       if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1,
21546 				       d->perm, nelt))
21547 	return true;
21548 
21549       /* Try again with swapped operands.  */
21550       for (i = 0; i < nelt; ++i)
21551 	perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
21552       if (mips_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
21553 	return true;
21554     }
21555 
21556   if (mips_expand_vpc_loongson_even_odd (d))
21557     return true;
21558   if (mips_expand_vpc_loongson_pshufh (d))
21559     return true;
21560   if (mips_expand_vpc_loongson_bcast (d))
21561     return true;
21562   if (mips_expand_msa_shuffle (d))
21563     return true;
21564   return false;
21565 }
21566 
/* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  Expand (or, if TARGET is
   null, merely test the expandability of) the constant permutation SEL
   of OP0/OP1 into TARGET.  */

static bool
mips_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
			       rtx op1, const vec_perm_indices &sel)
{
  struct expand_vec_perm_d d;
  int i, nelt, which;
  unsigned char orig_perm[MAX_VECT_LEN];
  bool ok;

  d.target = target;
  d.op0 = op0;
  d.op1 = op1;

  d.vmode = vmode;
  gcc_assert (VECTOR_MODE_P (vmode));
  d.nelt = nelt = GET_MODE_NUNITS (vmode);
  /* A null TARGET means the caller only asks whether the permutation
     is supported; no code should survive.  */
  d.testing_p = !target;

  /* This is overly conservative, but ensures we don't get an
     uninitialized warning on ORIG_PERM.  */
  memset (orig_perm, 0, MAX_VECT_LEN);
  /* Canonicalize the selector and record operand usage in WHICH:
     bit 0 set if any index selects from op0, bit 1 for op1.  */
  for (i = which = 0; i < nelt; ++i)
    {
      int ei = sel[i] & (2 * nelt - 1);
      which |= (ei < nelt ? 1 : 2);
      orig_perm[i] = ei;
    }
  memcpy (d.perm, orig_perm, MAX_VECT_LEN);

  switch (which)
    {
    default:
      gcc_unreachable();

    case 3:
      /* Both operands are used.  If they are in fact the same register,
	 fold into the one-vector form handled by case 2.  */
      d.one_vector_p = false;
      if (d.testing_p || !rtx_equal_p (d.op0, d.op1))
	break;
      /* FALLTHRU */

    case 2:
      /* Only op1 is used: rewrite the indices to select from op0.  */
      for (i = 0; i < nelt; ++i)
        d.perm[i] &= nelt - 1;
      d.op0 = d.op1;
      d.one_vector_p = true;
      break;

    case 1:
      /* Only op0 is used.  */
      d.op1 = d.op0;
      d.one_vector_p = true;
      break;
    }

  if (d.testing_p)
    {
      /* Expand into a throwaway sequence on raw pseudos so the real
	 instruction stream is untouched.  */
      d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
      d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
      if (!d.one_vector_p)
	d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

      start_sequence ();
      ok = mips_expand_vec_perm_const_1 (&d);
      end_sequence ();
      return ok;
    }

  ok = mips_expand_vec_perm_const_1 (&d);

  /* If we were given a two-vector permutation which just happened to
     have both input vectors equal, we folded this into a one-vector
     permutation.  There are several loongson patterns that are matched
     via direct vec_select+vec_concat expansion, but we do not have
     support in mips_expand_vec_perm_const_1 to guess the adjustment
     that should be made for a single operand.  Just try again with
     the original permutation.  */
  if (!ok && which == 3)
    {
      d.op0 = op0;
      d.op1 = op1;
      d.one_vector_p = false;
      memcpy (d.perm, orig_perm, MAX_VECT_LEN);
      ok = mips_expand_vec_perm_const_1 (&d);
    }

  return ok;
}
21655 
21656 /* Implement TARGET_SCHED_REASSOCIATION_WIDTH.  */
21657 
21658 static int
mips_sched_reassociation_width(unsigned int opc ATTRIBUTE_UNUSED,machine_mode mode)21659 mips_sched_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
21660 				machine_mode mode)
21661 {
21662   if (MSA_SUPPORTED_MODE_P (mode))
21663     return 2;
21664   return 1;
21665 }
21666 
/* Expand an integral vector unpack operation: widen the HIGH_P (high)
   or low half of the elements of OPERANDS[1] into OPERANDS[0], using
   zero extension if UNSIGNED_P and sign extension otherwise.  */

void
mips_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
{
  machine_mode imode = GET_MODE (operands[1]);
  rtx (*unpack) (rtx, rtx, rtx);
  rtx (*cmpFunc) (rtx, rtx, rtx);
  rtx tmp, dest, zero;

  if (ISA_HAS_MSA)
    {
      switch (imode)
	{
	case E_V4SImode:
	  /* Choose interleave-left vs interleave-right; which one holds
	     the HIGH_P half depends on byte order.  */
	  if (BYTES_BIG_ENDIAN != high_p)
	    unpack = gen_msa_ilvl_w;
	  else
	    unpack = gen_msa_ilvr_w;

	  cmpFunc = gen_msa_clt_s_w;
	  break;

	case E_V8HImode:
	  if (BYTES_BIG_ENDIAN != high_p)
	    unpack = gen_msa_ilvl_h;
	  else
	    unpack = gen_msa_ilvr_h;

	  cmpFunc = gen_msa_clt_s_h;
	  break;

	case E_V16QImode:
	  if (BYTES_BIG_ENDIAN != high_p)
	    unpack = gen_msa_ilvl_b;
	  else
	    unpack = gen_msa_ilvr_b;

	  cmpFunc = gen_msa_clt_s_b;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      if (!unsigned_p)
	{
	  /* Extract sign extension for each element comparing each element
	     with immediate zero.  */
	  tmp = gen_reg_rtx (imode);
	  emit_insn (cmpFunc (tmp, operands[1], CONST0_RTX (imode)));
	}
      else
	/* Zero extension: interleave with a zero vector instead.  */
	tmp = force_reg (imode, CONST0_RTX (imode));

      dest = gen_reg_rtx (imode);

      /* Interleave source elements with the extension bits, then view
	 the result in the destination's wider mode.  */
      emit_insn (unpack (dest, operands[1], tmp));
      emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
      return;
    }

  /* Loongson MMI path.  */
  switch (imode)
    {
    case E_V8QImode:
      if (high_p)
	unpack = gen_loongson_punpckhbh;
      else
	unpack = gen_loongson_punpcklbh;
      cmpFunc = gen_loongson_pcmpgtb;
      break;
    case E_V4HImode:
      if (high_p)
	unpack = gen_loongson_punpckhhw;
      else
	unpack = gen_loongson_punpcklhw;
      cmpFunc = gen_loongson_pcmpgth;
      break;
    default:
      gcc_unreachable ();
    }

  zero = force_reg (imode, CONST0_RTX (imode));
  if (unsigned_p)
    tmp = zero;
  else
    {
      /* TMP = (0 > operands[1]) gives all-ones in lanes with negative
	 elements, i.e. the sign-extension bits.  */
      tmp = gen_reg_rtx (imode);
      emit_insn (cmpFunc (tmp, zero, operands[1]));
    }

  dest = gen_reg_rtx (imode);
  emit_insn (unpack (dest, operands[1], tmp));

  emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
}
21764 
21765 /* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE)
21766    or LOW (high_p == FALSE) half of a vector for mode MODE.  */
21767 
21768 rtx
mips_msa_vec_parallel_const_half(machine_mode mode,bool high_p)21769 mips_msa_vec_parallel_const_half (machine_mode mode, bool high_p)
21770 {
21771   int nunits = GET_MODE_NUNITS (mode);
21772   rtvec v = rtvec_alloc (nunits / 2);
21773   int base;
21774   int i;
21775 
21776   if (BYTES_BIG_ENDIAN)
21777     base = high_p ? 0 : nunits / 2;
21778   else
21779     base = high_p ? nunits / 2 : 0;
21780 
21781   for (i = 0; i < nunits / 2; i++)
21782     RTVEC_ELT (v, i) = GEN_INT (base + i);
21783 
21784   return gen_rtx_PARALLEL (VOIDmode, v);
21785 }
21786 
21787 /* A subroutine of mips_expand_vec_init, match constant vector elements.  */
21788 
21789 static inline bool
mips_constant_elt_p(rtx x)21790 mips_constant_elt_p (rtx x)
21791 {
21792   return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE;
21793 }
21794 
21795 /* A subroutine of mips_expand_vec_init, expand via broadcast.  */
21796 
21797 static void
mips_expand_vi_broadcast(machine_mode vmode,rtx target,rtx elt)21798 mips_expand_vi_broadcast (machine_mode vmode, rtx target, rtx elt)
21799 {
21800   struct expand_vec_perm_d d;
21801   rtx t1;
21802   bool ok;
21803 
21804   if (elt != const0_rtx)
21805     elt = force_reg (GET_MODE_INNER (vmode), elt);
21806   if (REG_P (elt))
21807     elt = gen_lowpart (DImode, elt);
21808 
21809   t1 = gen_reg_rtx (vmode);
21810   switch (vmode)
21811     {
21812     case E_V8QImode:
21813       emit_insn (gen_loongson_vec_init1_v8qi (t1, elt));
21814       break;
21815     case E_V4HImode:
21816       emit_insn (gen_loongson_vec_init1_v4hi (t1, elt));
21817       break;
21818     default:
21819       gcc_unreachable ();
21820     }
21821 
21822   memset (&d, 0, sizeof (d));
21823   d.target = target;
21824   d.op0 = t1;
21825   d.op1 = t1;
21826   d.vmode = vmode;
21827   d.nelt = GET_MODE_NUNITS (vmode);
21828   d.one_vector_p = true;
21829 
21830   ok = mips_expand_vec_perm_const_1 (&d);
21831   gcc_assert (ok);
21832 }
21833 
21834 /* Return a const_int vector of VAL with mode MODE.  */
21835 
21836 rtx
mips_gen_const_int_vector(machine_mode mode,HOST_WIDE_INT val)21837 mips_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val)
21838 {
21839   rtx c = gen_int_mode (val, GET_MODE_INNER (mode));
21840   return gen_const_vec_duplicate (mode, c);
21841 }
21842 
21843 /* Return a vector of repeated 4-element sets generated from
21844    immediate VAL in mode MODE.  */
21845 
21846 static rtx
mips_gen_const_int_vector_shuffle(machine_mode mode,int val)21847 mips_gen_const_int_vector_shuffle (machine_mode mode, int val)
21848 {
21849   int nunits = GET_MODE_NUNITS (mode);
21850   int nsets = nunits / 4;
21851   rtx elts[MAX_VECT_LEN];
21852   int set = 0;
21853   int i, j;
21854 
21855   /* Generate a const_int vector replicating the same 4-element set
21856      from an immediate.  */
21857   for (j = 0; j < nsets; j++, set = 4 * j)
21858     for (i = 0; i < 4; i++)
21859       elts[set + i] = GEN_INT (set + ((val >> (2 * i)) & 0x3));
21860 
21861   return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts));
21862 }
21863 
21864 /* A subroutine of mips_expand_vec_init, replacing all of the non-constant
21865    elements of VALS with zeros, copy the constant vector to TARGET.  */
21866 
21867 static void
mips_expand_vi_constant(machine_mode vmode,unsigned nelt,rtx target,rtx vals)21868 mips_expand_vi_constant (machine_mode vmode, unsigned nelt,
21869 			 rtx target, rtx vals)
21870 {
21871   rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
21872   unsigned i;
21873 
21874   for (i = 0; i < nelt; ++i)
21875     {
21876       rtx elem = RTVEC_ELT (vec, i);
21877       if (!mips_constant_elt_p (elem))
21878 	RTVEC_ELT (vec, i) = CONST0_RTX (GET_MODE (elem));
21879     }
21880 
21881   emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
21882 }
21883 
21884 
21885 /* A subroutine of mips_expand_vec_init, expand via pinsrh.  */
21886 
21887 static void
mips_expand_vi_loongson_one_pinsrh(rtx target,rtx vals,unsigned one_var)21888 mips_expand_vi_loongson_one_pinsrh (rtx target, rtx vals, unsigned one_var)
21889 {
21890   mips_expand_vi_constant (V4HImode, 4, target, vals);
21891 
21892   emit_insn (gen_vec_setv4hi (target, target, XVECEXP (vals, 0, one_var),
21893 			      GEN_INT (one_var)));
21894 }
21895 
21896 /* A subroutine of mips_expand_vec_init, expand anything via memory.  */
21897 
21898 static void
mips_expand_vi_general(machine_mode vmode,machine_mode imode,unsigned nelt,unsigned nvar,rtx target,rtx vals)21899 mips_expand_vi_general (machine_mode vmode, machine_mode imode,
21900 			unsigned nelt, unsigned nvar, rtx target, rtx vals)
21901 {
21902   rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode));
21903   unsigned int i, isize = GET_MODE_SIZE (imode);
21904 
21905   if (nvar < nelt)
21906     mips_expand_vi_constant (vmode, nelt, mem, vals);
21907 
21908   for (i = 0; i < nelt; ++i)
21909     {
21910       rtx x = XVECEXP (vals, 0, i);
21911       if (!mips_constant_elt_p (x))
21912 	emit_move_insn (adjust_address (mem, imode, i * isize), x);
21913     }
21914 
21915   emit_move_insn (target, mem);
21916 }
21917 
/* Expand a vector initialization: set TARGET (a vector register) from
   the element expressions in VALS (a PARALLEL).  */

void
mips_expand_vector_init (rtx target, rtx vals)
{
  machine_mode vmode = GET_MODE (target);
  machine_mode imode = GET_MODE_INNER (vmode);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  unsigned nvar = 0, one_var = -1u;
  bool all_same = true;
  rtx x;

  /* Count the non-constant elements, remember the index of the last
     one, and check whether every element equals the first.  */
  for (i = 0; i < nelt; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!mips_constant_elt_p (x))
	nvar++, one_var = i;
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (ISA_HAS_MSA)
    {
      if (all_same)
	{
	  rtx same = XVECEXP (vals, 0, 0);
	  rtx temp, temp2;

	  /* A constant splat within the signed 10-bit range can be
	     loaded as a constant vector directly.  */
	  if (CONST_INT_P (same) && nvar == 0
	      && mips_signed_immediate_p (INTVAL (same), 10, 0))
	    {
	      switch (vmode)
		{
		case E_V16QImode:
		case E_V8HImode:
		case E_V4SImode:
		case E_V2DImode:
		  temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0));
		  emit_move_insn (target, temp);
		  return;

		default:
		  gcc_unreachable ();
		}
	    }
	  /* Otherwise copy the single value into a scalar register of
	     the element mode and splat it.  */
	  temp = gen_reg_rtx (imode);
	  if (imode == GET_MODE (same))
	    temp2 = same;
	  else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD)
	    temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0);
	  else
	    temp2 = lowpart_subreg (imode, same, GET_MODE (same));
	  emit_move_insn (temp, temp2);

	  switch (vmode)
	    {
	    case E_V16QImode:
	    case E_V8HImode:
	    case E_V4SImode:
	    case E_V2DImode:
	      mips_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp));
	      break;

	    case E_V4SFmode:
	      emit_insn (gen_msa_splati_w_f_scalar (target, temp));
	      break;

	    case E_V2DFmode:
	      emit_insn (gen_msa_splati_d_f_scalar (target, temp));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
      else
	{
	  /* Mixed elements: start from an all-zero vector and insert
	     each element in turn.  */
	  emit_move_insn (target, CONST0_RTX (vmode));

	  for (i = 0; i < nelt; ++i)
	    {
	      rtx temp = gen_reg_rtx (imode);
	      emit_move_insn (temp, XVECEXP (vals, 0, i));
	      switch (vmode)
		{
		case E_V16QImode:
		  emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i)));
		  break;

		case E_V8HImode:
		  emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i)));
		  break;

		case E_V4SImode:
		  emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i)));
		  break;

		case E_V2DImode:
		  emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i)));
		  break;

		case E_V4SFmode:
		  emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i)));
		  break;

		case E_V2DFmode:
		  emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i)));
		  break;

		default:
		  gcc_unreachable ();
		}
	    }
	}
      return;
    }

  /* Load constants from the pool, or whatever's handy.  */
  if (nvar == 0)
    {
      emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)));
      return;
    }

  /* For two-part initialization, always use CONCAT.  */
  if (nelt == 2)
    {
      rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0));
      rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1));
      x = gen_rtx_VEC_CONCAT (vmode, op0, op1);
      emit_insn (gen_rtx_SET (target, x));
      return;
    }

  /* Loongson is the only cpu with vectors with more elements.  */
  gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI);

  /* If all values are identical, broadcast the value.  */
  if (all_same)
    {
      mips_expand_vi_broadcast (vmode, target, XVECEXP (vals, 0, 0));
      return;
    }

  /* If we've only got one non-variable V4HImode, use PINSRH.  */
  if (nvar == 1 && vmode == V4HImode)
    {
      mips_expand_vi_loongson_one_pinsrh (target, vals, one_var);
      return;
    }

  mips_expand_vi_general (vmode, imode, nelt, nvar, target, vals);
}
22071 
/* Expand a vector reduction: repeatedly combine the elements of IN with
   the binary operation GEN, leaving the reduced value in TARGET (which
   has the same vector mode as IN).  */

void
mips_expand_vec_reduc (rtx target, rtx in, rtx (*gen)(rtx, rtx, rtx))
{
  machine_mode vmode = GET_MODE (in);
  unsigned char perm2[2];
  rtx last, next, fold, x;
  bool ok;

  last = in;
  fold = gen_reg_rtx (vmode);
  switch (vmode)
    {
    case E_V2SFmode:
      /* Use PUL/PLU to produce { L, H } op { H, L }.
	 By reversing the pair order, rather than a pure interleave high,
	 we avoid erroneous exceptional conditions that we might otherwise
	 produce from the computation of H op H.  */
      perm2[0] = 1;
      perm2[1] = 2;
      ok = mips_expand_vselect_vconcat (fold, last, last, perm2, 2);
      gcc_assert (ok);
      break;

    case E_V2SImode:
      /* Use interleave to produce { H, L } op { H, H }.  */
      emit_insn (gen_loongson_punpckhwd (fold, last, last));
      break;

    case E_V4HImode:
      /* Perform the first reduction with interleave,
	 and subsequent reductions with shifts.  */
      emit_insn (gen_loongson_punpckhwd_hi (fold, last, last));

      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (16));
      emit_insn (gen_vec_shr_v4hi (fold, last, x));
      break;

    case E_V8QImode:
      /* Same strategy as V4HI, with one more round: interleave, then
	 shift by 16 bits, then by 8 bits, reducing after each step.  */
      emit_insn (gen_loongson_punpckhwd_qi (fold, last, last));

      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (16));
      emit_insn (gen_vec_shr_v8qi (fold, last, x));

      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (8));
      emit_insn (gen_vec_shr_v8qi (fold, last, x));
      break;

    default:
      gcc_unreachable ();
    }

  /* Final combining step writes the result into TARGET.  */
  emit_insn (gen (target, last, fold));
}
22142 
22143 /* Expand a vector minimum/maximum.  */
22144 
22145 void
mips_expand_vec_minmax(rtx target,rtx op0,rtx op1,rtx (* cmp)(rtx,rtx,rtx),bool min_p)22146 mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
22147 			rtx (*cmp) (rtx, rtx, rtx), bool min_p)
22148 {
22149   machine_mode vmode = GET_MODE (target);
22150   rtx tc, t0, t1, x;
22151 
22152   tc = gen_reg_rtx (vmode);
22153   t0 = gen_reg_rtx (vmode);
22154   t1 = gen_reg_rtx (vmode);
22155 
22156   /* op0 > op1 */
22157   emit_insn (cmp (tc, op0, op1));
22158 
22159   x = gen_rtx_AND (vmode, tc, (min_p ? op1 : op0));
22160   emit_insn (gen_rtx_SET (t0, x));
22161 
22162   x = gen_rtx_NOT (vmode, tc);
22163   x = gen_rtx_AND (vmode, x, (min_p ? op0 : op1));
22164   emit_insn (gen_rtx_SET (t1, x));
22165 
22166   x = gen_rtx_IOR (vmode, t0, t1);
22167   emit_insn (gen_rtx_SET (target, x));
22168 }
22169 
22170 /* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
22171 
22172 machine_mode
mips_hard_regno_caller_save_mode(unsigned int regno,unsigned int nregs,machine_mode mode)22173 mips_hard_regno_caller_save_mode (unsigned int regno,
22174 				  unsigned int nregs,
22175 				  machine_mode mode)
22176 {
22177   /* For performance, avoid saving/restoring upper parts of a register
22178      by returning MODE as save mode when the mode is known.  */
22179   if (mode == VOIDmode)
22180     return choose_hard_reg_mode (regno, nregs, false);
22181   else
22182     return mode;
22183 }
22184 
/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and
   store the result -1 or 0 in DEST.  */

static void
mips_expand_msa_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
{
  machine_mode cmp_mode = GET_MODE (op0);
  int unspec = -1;
  bool negate = false;

  switch (cmp_mode)
    {
    case E_V16QImode:
    case E_V8HImode:
    case E_V4SImode:
    case E_V2DImode:
      /* Integer comparisons: canonicalize onto the directly supported
	 conditions (EQ, L* and their unsigned forms).  */
      switch (cond)
	{
	case NE:
	  /* NE is computed as NOT (EQ).  */
	  cond = reverse_condition (cond);
	  negate = true;
	  break;
	case EQ:
	case LT:
	case LE:
	case LTU:
	case LEU:
	  break;
	case GE:
	case GT:
	case GEU:
	case GTU:
	  /* G* conditions become the mirrored L* after swapping
	     the operands.  */
	  std::swap (op0, op1);
	  cond = swap_condition (cond);
	  break;
	default:
	  gcc_unreachable ();
	}
      mips_emit_binary (cond, dest, op0, op1);
      if (negate)
	emit_move_insn (dest, gen_rtx_NOT (GET_MODE (dest), dest));
      break;

    case E_V4SFmode:
    case E_V2DFmode:
      /* Floating-point comparisons: most conditions map directly or
	 via an operand swap; LE/LT/GE/GT are emitted through the
	 UNSPEC_MSA_FSLE/FSLT patterns.  */
      switch (cond)
	{
	case UNORDERED:
	case ORDERED:
	case EQ:
	case NE:
	case UNEQ:
	case UNLE:
	case UNLT:
	  break;
	case LTGT: cond = NE; break;
	case UNGE: cond = UNLE; std::swap (op0, op1); break;
	case UNGT: cond = UNLT; std::swap (op0, op1); break;
	case LE: unspec = UNSPEC_MSA_FSLE; break;
	case LT: unspec = UNSPEC_MSA_FSLT; break;
	case GE: unspec = UNSPEC_MSA_FSLE; std::swap (op0, op1); break;
	case GT: unspec = UNSPEC_MSA_FSLT; std::swap (op0, op1); break;
	default:
	  gcc_unreachable ();
	}
      if (unspec < 0)
	mips_emit_binary (cond, dest, op0, op1);
      else
	{
	  rtx x = gen_rtx_UNSPEC (GET_MODE (dest),
				  gen_rtvec (2, op0, op1), unspec);
	  emit_insn (gen_rtx_SET (dest, x));
	}
      break;

    default:
      gcc_unreachable ();
      break;
    }
}
22265 
/* Expand VEC_COND_EXPR, where:
   MODE is mode of the result
   VIMODE equivalent integer mode
   OPERANDS operands of VEC_COND_EXPR.  */

void
mips_expand_vec_cond_expr (machine_mode mode, machine_mode vimode,
			   rtx *operands)
{
  rtx cond = operands[3];
  rtx cmp_op0 = operands[4];
  rtx cmp_op1 = operands[5];
  /* Elementwise comparison result: -1 where the condition holds, 0
     elsewhere.  */
  rtx cmp_res = gen_reg_rtx (vimode);

  mips_expand_msa_cmp (cmp_res, GET_CODE (cond), cmp_op0, cmp_op1);

  /* We handle the following cases:
     1) r = a CMP b ? -1 : 0
     2) r = a CMP b ? -1 : v
     3) r = a CMP b ?  v : 0
     4) r = a CMP b ? v1 : v2  */

  /* Case (1) above.  We only move the results.  */
  if (operands[1] == CONSTM1_RTX (vimode)
      && operands[2] == CONST0_RTX (vimode))
    emit_move_insn (operands[0], cmp_res);
  else
    {
      rtx src1 = gen_reg_rtx (vimode);
      rtx src2 = gen_reg_rtx (vimode);
      rtx mask = gen_reg_rtx (vimode);
      rtx bsel;

      /* Move the vector result to use it as a mask.  */
      emit_move_insn (mask, cmp_res);

      if (register_operand (operands[1], mode))
	{
	  rtx xop1 = operands[1];
	  if (mode != vimode)
	    {
	      /* Bit-cast the "true" value into the integer mode.  */
	      xop1 = gen_reg_rtx (vimode);
	      emit_move_insn (xop1, gen_lowpart (vimode, operands[1]));
	    }
	  emit_move_insn (src1, xop1);
	}
      else
	{
	  gcc_assert (operands[1] == CONSTM1_RTX (vimode));
	  /* Case (2) if the below doesn't move the mask to src2.  */
	  emit_move_insn (src1, mask);
	}

      if (register_operand (operands[2], mode))
	{
	  rtx xop2 = operands[2];
	  if (mode != vimode)
	    {
	      /* Bit-cast the "false" value into the integer mode.  */
	      xop2 = gen_reg_rtx (vimode);
	      emit_move_insn (xop2, gen_lowpart (vimode, operands[2]));
	    }
	  emit_move_insn (src2, xop2);
	}
      else
	{
	  gcc_assert (operands[2] == CONST0_RTX (mode));
	  /* Case (3) if the above didn't move the mask to src1.  */
	  emit_move_insn (src2, mask);
	}

      /* We deal with case (4) if the mask wasn't moved to either src1 or src2.
	 In any case, we eventually do vector mask-based copy.  */
      bsel = gen_rtx_IOR (vimode,
			  gen_rtx_AND (vimode,
				       gen_rtx_NOT (vimode, mask), src2),
			  gen_rtx_AND (vimode, mask, src1));
      /* The result is placed back to a register with the mask.  */
      emit_insn (gen_rtx_SET (mask, bsel));
      emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0));
    }
}
22347 
22348 /* Implement TARGET_CASE_VALUES_THRESHOLD.  */
22349 
22350 unsigned int
mips_case_values_threshold(void)22351 mips_case_values_threshold (void)
22352 {
22353   /* In MIPS16 mode using a larger case threshold generates smaller code.  */
22354   if (TARGET_MIPS16 && optimize_size)
22355     return 10;
22356   else
22357     return default_case_values_threshold ();
22358 }
22359 
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.

   Build the statement trees used around an atomic FP compound
   assignment:
     *HOLD:   save the FCSR, then install a copy with bits 2..11
	      cleared (NOTE(review): these appear to be the FCSR
	      exception flag/enable fields -- confirm against the MIPS
	      FCSR layout);
     *CLEAR:  re-install the masked FCSR value;
     *UPDATE: read the FCSR, restore the original value, and re-raise
	      any recorded exceptions via __atomic_feraiseexcept.  */

static void
mips_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  /* Nothing to do for soft-float ABIs: there is no FCSR to manage.  */
  if (!TARGET_HARD_FLOAT_ABI)
    return;
  tree exceptions_var = create_tmp_var_raw (MIPS_ATYPE_USI);
  tree fcsr_orig_var = create_tmp_var_raw (MIPS_ATYPE_USI);
  tree fcsr_mod_var = create_tmp_var_raw (MIPS_ATYPE_USI);
  tree get_fcsr = mips_builtin_decls[MIPS_GET_FCSR];
  tree set_fcsr = mips_builtin_decls[MIPS_SET_FCSR];
  /* fcsr_orig_var = __builtin_mips_get_fcsr ().  */
  tree get_fcsr_hold_call = build_call_expr (get_fcsr, 0);
  tree hold_assign_orig = build4 (TARGET_EXPR, MIPS_ATYPE_USI,
				  fcsr_orig_var, get_fcsr_hold_call, NULL, NULL);
  /* fcsr_mod_var = fcsr_orig_var & 0xfffff003 (clears bits 2..11).  */
  tree hold_mod_val = build2 (BIT_AND_EXPR, MIPS_ATYPE_USI, fcsr_orig_var,
			      build_int_cst (MIPS_ATYPE_USI, 0xfffff003));
  tree hold_assign_mod = build4 (TARGET_EXPR, MIPS_ATYPE_USI,
				 fcsr_mod_var, hold_mod_val, NULL, NULL);
  tree set_fcsr_hold_call = build_call_expr (set_fcsr, 1, fcsr_mod_var);
  tree hold_all = build2 (COMPOUND_EXPR, MIPS_ATYPE_USI,
			  hold_assign_orig, hold_assign_mod);
  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_all,
		  set_fcsr_hold_call);

  /* Re-install the masked value to clear any newly-raised state.  */
  *clear = build_call_expr (set_fcsr, 1, fcsr_mod_var);

  /* exceptions_var = __builtin_mips_get_fcsr ();
     __builtin_mips_set_fcsr (fcsr_orig_var);
     __atomic_feraiseexcept ((int) exceptions_var);  */
  tree get_fcsr_update_call = build_call_expr (get_fcsr, 0);
  *update = build4 (TARGET_EXPR, MIPS_ATYPE_USI,
		    exceptions_var, get_fcsr_update_call, NULL, NULL);
  tree set_fcsr_update_call = build_call_expr (set_fcsr, 1, fcsr_orig_var);
  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
		    set_fcsr_update_call);
  tree atomic_feraiseexcept
    = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  tree int_exceptions_var = fold_convert (integer_type_node,
					  exceptions_var);
  tree atomic_feraiseexcept_call = build_call_expr (atomic_feraiseexcept,
						    1, int_exceptions_var);
  *update = build2 (COMPOUND_EXPR, void_type_node, *update,
		    atomic_feraiseexcept_call);
}
22402 
22403 /* Implement TARGET_SPILL_CLASS.  */
22404 
22405 static reg_class_t
mips_spill_class(reg_class_t rclass ATTRIBUTE_UNUSED,machine_mode mode ATTRIBUTE_UNUSED)22406 mips_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
22407 		  machine_mode mode ATTRIBUTE_UNUSED)
22408 {
22409   if (TARGET_MIPS16)
22410     return SPILL_REGS;
22411   return NO_REGS;
22412 }
22413 
22414 /* Implement TARGET_LRA_P.  */
22415 
22416 static bool
mips_lra_p(void)22417 mips_lra_p (void)
22418 {
22419   return mips_lra_flag;
22420 }
22421 
22422 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.  */
22423 
22424 static reg_class_t
mips_ira_change_pseudo_allocno_class(int regno,reg_class_t allocno_class,reg_class_t best_class ATTRIBUTE_UNUSED)22425 mips_ira_change_pseudo_allocno_class (int regno, reg_class_t allocno_class,
22426 				      reg_class_t best_class ATTRIBUTE_UNUSED)
22427 {
22428   /* LRA will allocate an FPR for an integer mode pseudo instead of spilling
22429      to memory if an FPR is present in the allocno class.  It is rare that
22430      we actually need to place an integer mode value in an FPR so where
22431      possible limit the allocation to GR_REGS.  This will slightly pessimize
22432      code that involves integer to/from float conversions as these will have
22433      to reload into FPRs in LRA.  Such reloads are sometimes eliminated and
22434      sometimes only partially eliminated.  We choose to take this penalty
22435      in order to eliminate usage of FPRs in code that does not use floating
22436      point data.
22437 
22438      This change has a similar effect to increasing the cost of FPR->GPR
22439      register moves for integer modes so that they are higher than the cost
22440      of memory but changing the allocno class is more reliable.
22441 
22442      This is also similar to forbidding integer mode values in FPRs entirely
22443      but this would lead to an inconsistency in the integer to/from float
22444      instructions that say integer mode values must be placed in FPRs.  */
22445   if (INTEGRAL_MODE_P (PSEUDO_REGNO_MODE (regno)) && allocno_class == ALL_REGS)
22446     return GR_REGS;
22447   return allocno_class;
22448 }
22449 
22450 /* Implement TARGET_PROMOTE_FUNCTION_MODE */
22451 
22452 /* This function is equivalent to default_promote_function_mode_always_promote
22453    except that it returns a promoted mode even if type is NULL_TREE.  This is
22454    needed by libcalls which have no type (only a mode) such as fixed conversion
22455    routines that take a signed or unsigned char/short argument and convert it
22456    to a fixed type.  */
22457 
22458 static machine_mode
mips_promote_function_mode(const_tree type ATTRIBUTE_UNUSED,machine_mode mode,int * punsignedp ATTRIBUTE_UNUSED,const_tree fntype ATTRIBUTE_UNUSED,int for_return ATTRIBUTE_UNUSED)22459 mips_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
22460                             machine_mode mode,
22461                             int *punsignedp ATTRIBUTE_UNUSED,
22462                             const_tree fntype ATTRIBUTE_UNUSED,
22463                             int for_return ATTRIBUTE_UNUSED)
22464 {
22465   int unsignedp;
22466 
22467   if (type != NULL_TREE)
22468     return promote_mode (type, mode, punsignedp);
22469 
22470   unsignedp = *punsignedp;
22471   PROMOTE_MODE (mode, unsignedp, type);
22472   *punsignedp = unsignedp;
22473   return mode;
22474 }
22475 
22476 /* Implement TARGET_TRULY_NOOP_TRUNCATION.  */
22477 
22478 static bool
mips_truly_noop_truncation(poly_uint64 outprec,poly_uint64 inprec)22479 mips_truly_noop_truncation (poly_uint64 outprec, poly_uint64 inprec)
22480 {
22481   return !TARGET_64BIT || inprec <= 32 || outprec > 32;
22482 }
22483 
22484 /* Implement TARGET_CONSTANT_ALIGNMENT.  */
22485 
22486 static HOST_WIDE_INT
mips_constant_alignment(const_tree exp,HOST_WIDE_INT align)22487 mips_constant_alignment (const_tree exp, HOST_WIDE_INT align)
22488 {
22489   if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
22490     return MAX (align, BITS_PER_WORD);
22491   return align;
22492 }
22493 
22494 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */
22495 
22496 static unsigned HOST_WIDE_INT
mips_asan_shadow_offset(void)22497 mips_asan_shadow_offset (void)
22498 {
22499   return 0x0aaa0000;
22500 }
22501 
22502 /* Implement TARGET_STARTING_FRAME_OFFSET.  See mips_compute_frame_info
22503    for details about the frame layout.  */
22504 
22505 static HOST_WIDE_INT
mips_starting_frame_offset(void)22506 mips_starting_frame_offset (void)
22507 {
22508   if (FRAME_GROWS_DOWNWARD)
22509     return 0;
22510   return crtl->outgoing_args_size + MIPS_GP_SAVE_AREA_SIZE;
22511 }
22512 
/* Initialize the GCC target structure.  */

/* Assembler directives for aligned 16/32/64-bit data.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mips_option_override

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

/* Instruction scheduler hooks.  */
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT mips_sched_init
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 mips_sched_reorder2
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead
#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
  mips_small_register_classes_for_mode_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P mips_can_inline_p
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function

/* Cost-model hooks used by the RTL optimizers and register allocator.  */
#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mips_register_move_cost
#undef TARGET_REGISTER_PRIORITY
#define TARGET_REGISTER_PRIORITY mips_register_priority
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P
#define TARGET_NO_SPECULATION_IN_DELAY_SLOTS_P mips_no_speculation_in_delay_slots_p

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK mips_expand_to_rtl_hook
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END mips_code_end

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

/* Varargs and va_list handling.  */
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

/* Calling-convention hooks: promotion and return values.  */
#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE mips_promote_function_mode
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mips_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mips_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND mips_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p

/* Argument-passing hooks.  */
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mips_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING mips_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
#undef TARGET_GET_RAW_RESULT_MODE
#define TARGET_GET_RAW_RESULT_MODE mips_get_reg_raw_mode
#undef TARGET_GET_RAW_ARG_MODE
#define TARGET_GET_RAW_ARG_MODE mips_get_reg_raw_mode

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

/* Vectorizer hooks.  */
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  mips_builtin_vectorized_function
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode
#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
  mips_autovectorize_vector_sizes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL mips_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
/* All our function attributes are related to how out-of-line copies should
   be compiled or called.  They don't in themselves prevent inlining.  */
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

/* Debug-info (DWARF) hooks.  */
#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
#undef TARGET_DWARF_FRAME_REG_MODE
#define TARGET_DWARF_FRAME_REG_MODE mips_dwarf_frame_reg_mode

#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P	mips_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE mips_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mips_trampoline_init

#undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
#define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask

#undef TARGET_PREPARE_PCH_SAVE
#define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST mips_vectorize_vec_perm_const

#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH mips_sched_reassociation_width

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mips_case_values_threshold

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV mips_atomic_assign_expand_fenv

#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  mips_use_by_pieces_infrastructure_p

/* Register-allocation hooks.  */
#undef TARGET_SPILL_CLASS
#define TARGET_SPILL_CLASS mips_spill_class
#undef TARGET_LRA_P
#define TARGET_LRA_P mips_lra_p
#undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
#define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS mips_ira_change_pseudo_allocno_class

#undef TARGET_HARD_REGNO_SCRATCH_OK
#define TARGET_HARD_REGNO_SCRATCH_OK mips_hard_regno_scratch_ok

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS mips_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK mips_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P mips_modes_tieable_p

#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
  mips_hard_regno_call_part_clobbered

/* The architecture reserves bit 0 for MIPS16 so use bit 1 for descriptors.  */
#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2

#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED mips_secondary_memory_needed

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS mips_can_change_mode_class

#undef TARGET_TRULY_NOOP_TRUNCATION
#define TARGET_TRULY_NOOP_TRUNCATION mips_truly_noop_truncation

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT mips_constant_alignment

#undef TARGET_ASAN_SHADOW_OFFSET
#define TARGET_ASAN_SHADOW_OFFSET mips_asan_shadow_offset

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET mips_starting_frame_offset

/* The target vector itself, built from the hook macros above.  */
struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collector roots generated by gengtype for this file.  */
#include "gt-mips.h"
22822