1 /* Output routines for Motorola MCore processor
2    Copyright (C) 1993-2016 Free Software Foundation, Inc.
3 
4    This file is part of GCC.
5 
6    GCC is free software; you can redistribute it and/or modify it
7    under the terms of the GNU General Public License as published
8    by the Free Software Foundation; either version 3, or (at your
9    option) any later version.
10 
11    GCC is distributed in the hope that it will be useful, but WITHOUT
12    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
14    License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with GCC; see the file COPYING3.  If not see
18    <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "df.h"
28 #include "tm_p.h"
29 #include "stringpool.h"
30 #include "emit-rtl.h"
31 #include "diagnostic-core.h"
32 #include "stor-layout.h"
33 #include "varasm.h"
34 #include "calls.h"
35 #include "mcore.h"
36 #include "output.h"
37 #include "explow.h"
38 #include "expr.h"
39 #include "cfgrtl.h"
40 #include "builtins.h"
41 #include "regs.h"
42 
43 /* This file should be included last.  */
44 #include "target-def.h"
45 
46 /* For dumping information about frame sizes.  */
47 char * mcore_current_function_name = 0;
48 long   mcore_current_compilation_timestamp = 0;
49 
50 /* Global variables for machine-dependent things.  */
51 
52 /* Provides the class number of the smallest class containing
53    reg number.  */
54 const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
55 {
56   GENERAL_REGS,	ONLYR1_REGS,  LRW_REGS,	    LRW_REGS,
57   LRW_REGS,	LRW_REGS,     LRW_REGS,	    LRW_REGS,
58   LRW_REGS,	LRW_REGS,     LRW_REGS,	    LRW_REGS,
59   LRW_REGS,	LRW_REGS,     LRW_REGS,	    GENERAL_REGS,
60   GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
61 };
62 
63 struct mcore_frame
64 {
65   int arg_size;			/* Stdarg spills (bytes).  */
66   int reg_size;			/* Non-volatile reg saves (bytes).  */
67   int reg_mask;			/* Non-volatile reg saves.  */
68   int local_size;		/* Locals.  */
69   int outbound_size;		/* Arg overflow on calls out.  */
70   int pad_outbound;
71   int pad_local;
72   int pad_reg;
73   /* Describe the steps we'll use to grow it.  */
74 #define	MAX_STACK_GROWS	4	/* Gives us some spare space.  */
75   int growth[MAX_STACK_GROWS];
76   int arg_offset;
77   int reg_offset;
78   int reg_growth;
79   int local_growth;
80 };
81 
82 typedef enum
83 {
84   COND_NO,
85   COND_MOV_INSN,
86   COND_CLR_INSN,
87   COND_INC_INSN,
88   COND_DEC_INSN,
89   COND_BRANCH_INSN
90 }
91 cond_type;
92 
93 static void       output_stack_adjust           (int, int);
94 static int        calc_live_regs                (int *);
95 static int        try_constant_tricks           (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
96 static const char *     output_inline_const     (machine_mode, rtx *);
97 static void       layout_mcore_frame            (struct mcore_frame *);
98 static void       mcore_setup_incoming_varargs	(cumulative_args_t, machine_mode, tree, int *, int);
99 static cond_type  is_cond_candidate             (rtx);
100 static rtx_insn  *emit_new_cond_insn            (rtx, int);
101 static rtx_insn  *conditionalize_block          (rtx_insn *);
102 static void       conditionalize_optimization   (void);
103 static void       mcore_reorg                   (void);
104 static rtx        handle_structs_in_regs        (machine_mode, const_tree, int);
105 static void       mcore_mark_dllexport          (tree);
106 static void       mcore_mark_dllimport          (tree);
107 static int        mcore_dllexport_p             (tree);
108 static int        mcore_dllimport_p             (tree);
109 static tree       mcore_handle_naked_attribute  (tree *, tree, tree, int, bool *);
110 #ifdef OBJECT_FORMAT_ELF
111 static void	  mcore_asm_named_section       (const char *,
112 						 unsigned int, tree);
113 #endif
114 static void       mcore_print_operand           (FILE *, rtx, int);
115 static void       mcore_print_operand_address   (FILE *, machine_mode, rtx);
116 static bool       mcore_print_operand_punct_valid_p (unsigned char code);
117 static void       mcore_unique_section	        (tree, int);
118 static void mcore_encode_section_info		(tree, rtx, int);
119 static const char *mcore_strip_name_encoding	(const char *);
120 static int        mcore_const_costs             (rtx, RTX_CODE);
121 static int        mcore_and_cost                (rtx);
122 static int        mcore_ior_cost                (rtx);
123 static bool       mcore_rtx_costs		(rtx, machine_mode, int, int,
124 						 int *, bool);
125 static void       mcore_external_libcall	(rtx);
126 static bool       mcore_return_in_memory	(const_tree, const_tree);
127 static int        mcore_arg_partial_bytes       (cumulative_args_t,
128 						 machine_mode,
129 						 tree, bool);
130 static rtx        mcore_function_arg            (cumulative_args_t,
131 						 machine_mode,
132 						 const_tree, bool);
133 static void       mcore_function_arg_advance    (cumulative_args_t,
134 						 machine_mode,
135 						 const_tree, bool);
136 static unsigned int mcore_function_arg_boundary (machine_mode,
137 						 const_tree);
138 static void       mcore_asm_trampoline_template (FILE *);
139 static void       mcore_trampoline_init		(rtx, tree, rtx);
140 static bool       mcore_warn_func_return        (tree);
141 static void       mcore_option_override		(void);
142 static bool       mcore_legitimate_constant_p   (machine_mode, rtx);
143 static bool	  mcore_legitimate_address_p	(machine_mode, rtx, bool,
144 						 addr_space_t);
145 
146 /* MCore specific attributes.  */
147 
148 static const struct attribute_spec mcore_attribute_table[] =
149 {
150   /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
151        affects_type_identity } */
152   { "dllexport", 0, 0, true,  false, false, NULL, false },
153   { "dllimport", 0, 0, true,  false, false, NULL, false },
154   { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
155     false },
156   { NULL,        0, 0, false, false, false, NULL, false }
157 };
158 
159 /* Initialize the GCC target structure.  */
160 #undef  TARGET_ASM_EXTERNAL_LIBCALL
161 #define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall
162 
163 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
164 #undef  TARGET_MERGE_DECL_ATTRIBUTES
165 #define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
166 #endif
167 
168 #ifdef OBJECT_FORMAT_ELF
169 #undef  TARGET_ASM_UNALIGNED_HI_OP
170 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
171 #undef  TARGET_ASM_UNALIGNED_SI_OP
172 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
173 #endif
174 
175 #undef  TARGET_PRINT_OPERAND
176 #define TARGET_PRINT_OPERAND		mcore_print_operand
177 #undef  TARGET_PRINT_OPERAND_ADDRESS
178 #define TARGET_PRINT_OPERAND_ADDRESS	mcore_print_operand_address
179 #undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
180 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p
181 
182 #undef  TARGET_ATTRIBUTE_TABLE
183 #define TARGET_ATTRIBUTE_TABLE 		mcore_attribute_table
184 #undef  TARGET_ASM_UNIQUE_SECTION
185 #define TARGET_ASM_UNIQUE_SECTION 	mcore_unique_section
186 #undef  TARGET_ASM_FUNCTION_RODATA_SECTION
187 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
188 #undef  TARGET_ENCODE_SECTION_INFO
189 #define TARGET_ENCODE_SECTION_INFO 	mcore_encode_section_info
190 #undef  TARGET_STRIP_NAME_ENCODING
191 #define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
192 #undef  TARGET_RTX_COSTS
193 #define TARGET_RTX_COSTS 		mcore_rtx_costs
194 #undef  TARGET_ADDRESS_COST
195 #define TARGET_ADDRESS_COST 		hook_int_rtx_mode_as_bool_0
196 #undef  TARGET_MACHINE_DEPENDENT_REORG
197 #define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg
198 
199 #undef  TARGET_PROMOTE_FUNCTION_MODE
200 #define TARGET_PROMOTE_FUNCTION_MODE	default_promote_function_mode_always_promote
201 #undef  TARGET_PROMOTE_PROTOTYPES
202 #define TARGET_PROMOTE_PROTOTYPES	hook_bool_const_tree_true
203 
204 #undef  TARGET_RETURN_IN_MEMORY
205 #define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
206 #undef  TARGET_MUST_PASS_IN_STACK
207 #define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
208 #undef  TARGET_PASS_BY_REFERENCE
209 #define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
210 #undef  TARGET_ARG_PARTIAL_BYTES
211 #define TARGET_ARG_PARTIAL_BYTES	mcore_arg_partial_bytes
212 #undef  TARGET_FUNCTION_ARG
213 #define TARGET_FUNCTION_ARG		mcore_function_arg
214 #undef  TARGET_FUNCTION_ARG_ADVANCE
215 #define TARGET_FUNCTION_ARG_ADVANCE	mcore_function_arg_advance
216 #undef  TARGET_FUNCTION_ARG_BOUNDARY
217 #define TARGET_FUNCTION_ARG_BOUNDARY	mcore_function_arg_boundary
218 
219 #undef  TARGET_SETUP_INCOMING_VARARGS
220 #define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs
221 
222 #undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
223 #define TARGET_ASM_TRAMPOLINE_TEMPLATE	mcore_asm_trampoline_template
224 #undef  TARGET_TRAMPOLINE_INIT
225 #define TARGET_TRAMPOLINE_INIT		mcore_trampoline_init
226 
227 #undef TARGET_OPTION_OVERRIDE
228 #define TARGET_OPTION_OVERRIDE mcore_option_override
229 
230 #undef TARGET_LEGITIMATE_CONSTANT_P
231 #define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p
232 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
233 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P mcore_legitimate_address_p
234 
235 #undef TARGET_WARN_FUNC_RETURN
236 #define TARGET_WARN_FUNC_RETURN mcore_warn_func_return
237 
238 struct gcc_target targetm = TARGET_INITIALIZER;
239 
240 /* Adjust the stack pointer by SIZE bytes; DIRECTION is positive to release stack space and negative to allocate it.  */
241 static void
242 output_stack_adjust (int direction, int size)
243 {
244   /* If extending stack a lot, we do it incrementally.  */
245   if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
246     {
247       rtx tmp = gen_rtx_REG (SImode, 1);
248       rtx memref;
249 
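      /* Allocate the stack in mcore_stack_increment sized chunks; after each
	 decrement of the stack pointer, store through the new top of stack so
	 the memory is touched (a probe) before the next decrement.  */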
250       emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
251       do
252 	{
253 	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
254 	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
255 	  MEM_VOLATILE_P (memref) = 1;
256 	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
257 	  size -= mcore_stack_increment;
258 	}
259       while (size > mcore_stack_increment);
260 
261       /* SIZE is now the residual for the last adjustment,
262 	 which doesn't require a probe.  */
263     }
264 
265   if (size)
266     {
267       rtx insn;
268       rtx val = GEN_INT (size);
269 
270       if (size > 32)
271 	{
272 	  rtx nval = gen_rtx_REG (SImode, 1);
273 	  emit_insn (gen_movsi (nval, val));
274 	  val = nval;
275 	}
276 
277       if (direction > 0)
278 	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
279       else
280 	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
281 
282       emit_insn (insn);
283     }
284 }
285 
286 /* Work out the registers which need to be saved,
287    both as a mask and a count.  */
288 
289 static int
290 calc_live_regs (int * count)
291 {
292   int reg;
293   int live_regs_mask = 0;
294 
295   * count = 0;
296 
297   for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
298     {
299       if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
300 	{
301 	  (*count)++;
302 	  live_regs_mask |= (1 << reg);
303 	}
304     }
305 
306   return live_regs_mask;
307 }
308 
309 /* Print the operand address in x to the stream.  */
310 
311 static void
312 mcore_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx x)
313 {
314   switch (GET_CODE (x))
315     {
316     case REG:
317       fprintf (stream, "(%s)", reg_names[REGNO (x)]);
318       break;
319 
320     case PLUS:
321       {
322 	rtx base = XEXP (x, 0);
323 	rtx index = XEXP (x, 1);
324 
325 	if (GET_CODE (base) != REG)
326 	  {
327 	    /* Ensure that BASE is a register (one of them must be).  */
328 	    rtx temp = base;
329 	    base = index;
330 	    index = temp;
331 	  }
332 
333 	switch (GET_CODE (index))
334 	  {
335 	  case CONST_INT:
336 	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
337 		     reg_names[REGNO(base)], INTVAL (index));
338 	    break;
339 
340 	  default:
341 	    gcc_unreachable ();
342 	  }
343       }
344 
345       break;
346 
347     default:
348       output_addr_const (stream, x);
349       break;
350     }
351 }
352 
353 static bool
354 mcore_print_operand_punct_valid_p (unsigned char code)
355 {
356   return (code == '.' || code == '#' || code == '*' || code == '^'
357 	  || code == '!');
358 }
359 
360 /* Print operand x (an rtx) in assembler syntax to file stream
361    according to modifier code.
362 
363    'R'  print the next register or memory location along, i.e. the lsw in
364         a double word value
365    'O'  print a constant without the #
366    'M'  print a constant as its negative
367    'P'  print log2 of a power of two
368    'Q'  print log2 of an inverse of a power of two
369    'U'  print register for ldm/stm instruction
370    'X'  print byte number for xtrbN instruction
   'N'  print log2 of (the operand plus one), or 32 if the operand is -1
   'x'  print the operand as a hexadecimal constant.  */
371 
372 static void
373 mcore_print_operand (FILE * stream, rtx x, int code)
374 {
375   switch (code)
376     {
377     case 'N':
378       if (INTVAL(x) == -1)
379 	fprintf (asm_out_file, "32");
380       else
381 	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
382       break;
383     case 'P':
384       fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
385       break;
386     case 'Q':
387       fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
388       break;
389     case 'O':
390       fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
391       break;
392     case 'M':
393       fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
394       break;
395     case 'R':
396       /* Next location along in memory or register.  */
397       switch (GET_CODE (x))
398 	{
399 	case REG:
400 	  fputs (reg_names[REGNO (x) + 1], (stream));
401 	  break;
402 	case MEM:
403 	  mcore_print_operand_address
404 	    (stream, GET_MODE (x), XEXP (adjust_address (x, SImode, 4), 0));
405 	  break;
406 	default:
407 	  gcc_unreachable ();
408 	}
409       break;
410     case 'U':
411       fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
412 	       reg_names[REGNO (x) + 3]);
413       break;
414     case 'x':
415       fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
416       break;
417     case 'X':
418       fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
419       break;
420 
421     default:
422       switch (GET_CODE (x))
423 	{
424 	case REG:
425 	  fputs (reg_names[REGNO (x)], (stream));
426 	  break;
427 	case MEM:
428 	  output_address (GET_MODE (x), XEXP (x, 0));
429 	  break;
430 	default:
431 	  output_addr_const (stream, x);
432 	  break;
433 	}
434       break;
435     }
436 }
437 
438 /* What does a constant cost ?  */
439 
440 static int
441 mcore_const_costs (rtx exp, enum rtx_code code)
442 {
443   HOST_WIDE_INT val = INTVAL (exp);
444 
445   /* Easy constants.  */
446   if (   CONST_OK_FOR_I (val)
447       || CONST_OK_FOR_M (val)
448       || CONST_OK_FOR_N (val)
449       || (code == PLUS && CONST_OK_FOR_L (val)))
450     return 1;
451   else if (code == AND
452 	   && (   CONST_OK_FOR_M (~val)
453 	       || CONST_OK_FOR_N (~val)))
454     return 2;
455   else if (code == PLUS
456 	   && (   CONST_OK_FOR_I (-val)
457 	       || CONST_OK_FOR_M (-val)
458 	       || CONST_OK_FOR_N (-val)))
459     return 2;
460 
461   return 5;
462 }
463 
464 /* What does an AND instruction cost?  We compute this because immediates
465    may have been relaxed; we want CSE to eliminate redundant loads of relaxed
466    immediates, otherwise we get bad code (multiple reloads of the same constant).  */
467 
468 static int
469 mcore_and_cost (rtx x)
470 {
471   HOST_WIDE_INT val;
472 
473   if (GET_CODE (XEXP (x, 1)) != CONST_INT)
474     return 2;
475 
476   val = INTVAL (XEXP (x, 1));
477 
478   /* Do it directly.  */
479   if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
480     return 2;
481   /* Takes one instruction to load.  */
482   else if (const_ok_for_mcore (val))
483     return 3;
484   /* Takes two instructions to load.  */
485   else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
486     return 4;
487 
488   /* Takes a lrw to load.  */
489   return 5;
490 }
491 
492 /* What does an or cost - see and_cost().  */
493 
494 static int
495 mcore_ior_cost (rtx x)
496 {
497   HOST_WIDE_INT val;
498 
499   if (GET_CODE (XEXP (x, 1)) != CONST_INT)
500     return 2;
501 
502   val = INTVAL (XEXP (x, 1));
503 
504   /* Do it directly with bclri.  */
505   if (CONST_OK_FOR_M (val))
506     return 2;
507   /* Takes one instruction to load.  */
508   else if (const_ok_for_mcore (val))
509     return 3;
510   /* Takes two instructions to load.  */
511   else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
512     return 4;
513 
514   /* Takes a lrw to load.  */
515   return 5;
516 }
517 
518 static bool
519 mcore_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
520 		 int opno ATTRIBUTE_UNUSED,
521 		 int * total, bool speed ATTRIBUTE_UNUSED)
522 {
523   int code = GET_CODE (x);
524 
525   switch (code)
526     {
527     case CONST_INT:
528       *total = mcore_const_costs (x, (enum rtx_code) outer_code);
529       return true;
530     case CONST:
531     case LABEL_REF:
532     case SYMBOL_REF:
533       *total = 5;
534       return true;
535     case CONST_DOUBLE:
536       *total = 10;
537       return true;
538 
539     case AND:
540       *total = COSTS_N_INSNS (mcore_and_cost (x));
541       return true;
542 
543     case IOR:
544       *total = COSTS_N_INSNS (mcore_ior_cost (x));
545       return true;
546 
547     case DIV:
548     case UDIV:
549     case MOD:
550     case UMOD:
551     case FLOAT:
552     case FIX:
553       *total = COSTS_N_INSNS (100);
554       return true;
555 
556     default:
557       return false;
558     }
559 }
560 
561 /* Prepare the operands for a comparison.  Return true if the consuming
562    branch or setcc should use the inverted sense of the condition.  */
563 
564 bool
565 mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
566 {
567   rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
568   bool invert;
569 
570   if (GET_CODE (op1) == CONST_INT)
571     {
572       HOST_WIDE_INT val = INTVAL (op1);
573 
574       switch (code)
575 	{
576 	case GTU:
577 	  /* Unsigned > 0 is the same as != 0; everything else is converted
578 	     below to LEU (reversed cmphs).  */
579 	  if (val == 0)
580 	    code = NE;
581 	  break;
582 
583         /* Check whether (LE A imm) can become (LT A imm + 1),
584 	   or (GT A imm) can become (GE A imm + 1).  */
585 	case GT:
586 	case LE:
587 	  if (CONST_OK_FOR_J (val + 1))
588 	    {
589 	      op1 = GEN_INT (val + 1);
590 	      code = code == LE ? LT : GE;
591 	    }
592 	  break;
593 
594 	default:
595 	  break;
596 	}
597     }
598 
599   if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
600     op1 = force_reg (SImode, op1);
601 
602   /* cmpnei: 0-31 (K immediate)
603      cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
604   invert = false;
605   switch (code)
606     {
607     case EQ:	/* Use inverted condition, cmpne.  */
608       code = NE;
609       invert = true;
610       /* Drop through.  */
611 
612     case NE:	/* Use normal condition, cmpne.  */
613       if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
614 	op1 = force_reg (SImode, op1);
615       break;
616 
617     case LE:	/* Use inverted condition, reversed cmplt.  */
618       code = GT;
619       invert = true;
620       /* Drop through.  */
621 
622     case GT:	/* Use normal condition, reversed cmplt.  */
623       if (GET_CODE (op1) == CONST_INT)
624 	op1 = force_reg (SImode, op1);
625       break;
626 
627     case GE:	/* Use inverted condition, cmplt.  */
628       code = LT;
629       invert = true;
630       /* Drop through.  */
631 
632     case LT:	/* Use normal condition, cmplt.  */
633       if (GET_CODE (op1) == CONST_INT &&
634 	  /* covered by btsti x,31.  */
635 	  INTVAL (op1) != 0 &&
636 	  ! CONST_OK_FOR_J (INTVAL (op1)))
637 	op1 = force_reg (SImode, op1);
638       break;
639 
640     case GTU:	/* Use inverted condition, cmple.  */
641       /* We coped with unsigned > 0 above.  */
642       gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
643       code = LEU;
644       invert = true;
645       /* Drop through.  */
646 
647     case LEU:	/* Use normal condition, reversed cmphs.  */
648       if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
649 	op1 = force_reg (SImode, op1);
650       break;
651 
652     case LTU:	/* Use inverted condition, cmphs.  */
653       code = GEU;
654       invert = true;
655       /* Drop through.  */
656 
657     case GEU:	/* Use normal condition, cmphs.  */
658       if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
659 	op1 = force_reg (SImode, op1);
660       break;
661 
662     default:
663       break;
664     }
665 
666   emit_insn (gen_rtx_SET (cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
667   return invert;
668 }
669 
670 int
671 mcore_symbolic_address_p (rtx x)
672 {
673   switch (GET_CODE (x))
674     {
675     case SYMBOL_REF:
676     case LABEL_REF:
677       return 1;
678     case CONST:
679       x = XEXP (x, 0);
680       return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
681 	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
682 	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
683     default:
684       return 0;
685     }
686 }
687 
688 /* Functions to output assembly code for a function call.  */
689 
690 char *
691 mcore_output_call (rtx operands[], int index)
692 {
693   static char buffer[20];
694   rtx addr = operands [index];
695 
696   if (REG_P (addr))
697     {
698       if (TARGET_CG_DATA)
699 	{
700 	  gcc_assert (mcore_current_function_name);
701 
702 	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
703 			      "unknown", 1);
704 	}
705 
706       sprintf (buffer, "jsr\t%%%d", index);
707     }
708   else
709     {
710       if (TARGET_CG_DATA)
711 	{
712 	  gcc_assert (mcore_current_function_name);
713 	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);
714 
715 	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
716 			      XSTR (addr, 0), 0);
717 	}
718 
719       sprintf (buffer, "jbsr\t%%%d", index);
720     }
721 
722   return buffer;
723 }
724 
725 /* Can we load a constant with a single instruction ?  */
726 
727 int
728 const_ok_for_mcore (HOST_WIDE_INT value)
729 {
730   if (value >= 0 && value <= 127)
731     return 1;
732 
733   /* Try exact power of two.  */
734   if (CONST_OK_FOR_M (value))
735     return 1;
736 
737   /* Try exact power of two - 1.  */
738   if (CONST_OK_FOR_N (value) && value != -1)
739     return 1;
740 
741   return 0;
742 }
743 
744 /* Can we load a constant inline with up to 2 instructions ?  */
745 
746 int
747 mcore_const_ok_for_inline (HOST_WIDE_INT value)
748 {
749   HOST_WIDE_INT x, y;
750 
751   return try_constant_tricks (value, & x, & y) > 0;
752 }
753 
754 /* Are we loading the constant using a not ?  */
755 
756 int
757 mcore_const_trick_uses_not (HOST_WIDE_INT value)
758 {
759   HOST_WIDE_INT x, y;
760 
761   return try_constant_tricks (value, & x, & y) == 2;
762 }
763 
764 /* Try tricks to load a constant inline and return the trick number if
765    success (0 is non-inlinable).
766 
767    0: not inlinable
768    1: single instruction (do the usual thing)
769    2: single insn followed by a 'not'
770    3: single insn followed by a subi
771    4: single insn followed by an addi
772    5: single insn followed by rsubi
773    6: single insn followed by bseti
774    7: single insn followed by bclri
775    8: single insn followed by rotli
776    9: single insn followed by lsli
777    10: single insn followed by ixh
778    11: single insn followed by ixw.  */
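
/* For example, with TARGET_HARDLIT the constant 200 (0xc8) is not directly
   loadable, but clearing bit 7 leaves 72, which movi can load, so trick 6
   applies and the generated sequence is "movi rN,72 ; bseti rN,7".  */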
779 
780 static int
781 try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
782 {
783   HOST_WIDE_INT i;
784   unsigned HOST_WIDE_INT bit, shf, rot;
785 
786   if (const_ok_for_mcore (value))
787     return 1;	/* Do the usual thing.  */
788 
789   if (! TARGET_HARDLIT)
790     return 0;
791 
792   if (const_ok_for_mcore (~value))
793     {
794       *x = ~value;
795       return 2;
796     }
797 
798   for (i = 1; i <= 32; i++)
799     {
800       if (const_ok_for_mcore (value - i))
801 	{
802 	  *x = value - i;
803 	  *y = i;
804 
805 	  return 3;
806 	}
807 
808       if (const_ok_for_mcore (value + i))
809 	{
810 	  *x = value + i;
811 	  *y = i;
812 
813 	  return 4;
814 	}
815     }
816 
817   bit = 0x80000000ULL;
818 
819   for (i = 0; i <= 31; i++)
820     {
821       if (const_ok_for_mcore (i - value))
822 	{
823 	  *x = i - value;
824 	  *y = i;
825 
826 	  return 5;
827 	}
828 
829       if (const_ok_for_mcore (value & ~bit))
830 	{
831 	  *y = bit;
832 	  *x = value & ~bit;
833 	  return 6;
834 	}
835 
836       if (const_ok_for_mcore (value | bit))
837 	{
838 	  *y = ~bit;
839 	  *x = value | bit;
840 
841 	  return 7;
842 	}
843 
844       bit >>= 1;
845     }
846 
847   shf = value;
848   rot = value;
849 
850   for (i = 1; i < 31; i++)
851     {
852       int c;
853 
854       /* MCore has rotate left.  */
855       c = rot << 31;
856       rot >>= 1;
857       rot &= 0x7FFFFFFF;
858       rot |= c;   /* Simulate rotate.  */
859 
860       if (const_ok_for_mcore (rot))
861 	{
862 	  *y = i;
863 	  *x = rot;
864 
865 	  return 8;
866 	}
867 
868       if (shf & 1)
869 	shf = 0;	/* Can't use logical shift, low order bit is one.  */
870 
871       shf >>= 1;
872 
873       if (shf != 0 && const_ok_for_mcore (shf))
874 	{
875 	  *y = i;
876 	  *x = shf;
877 
878 	  return 9;
879 	}
880     }
881 
882   if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
883     {
884       *x = value / 3;
885 
886       return 10;
887     }
888 
889   if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
890     {
891       *x = value / 5;
892 
893       return 11;
894     }
895 
896   return 0;
897 }
898 
899 /* Check whether REG is dead at insn FIRST.  This is done by searching ahead
900    for either the next use (i.e., reg is live), a death note, or a set of
901    reg.  Don't just use dead_or_set_p() since reload does not always mark
902    deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
903    can ignore subregs by extracting the actual register.  BRC  */
904 
905 int
906 mcore_is_dead (rtx_insn *first, rtx reg)
907 {
908   rtx_insn *insn;
909 
910   /* For mcore, subregs can't live independently of their parent regs.  */
911   if (GET_CODE (reg) == SUBREG)
912     reg = SUBREG_REG (reg);
913 
914   /* Dies immediately.  */
915   if (dead_or_set_p (first, reg))
916     return 1;
917 
918   /* Look for conclusive evidence of live/death, otherwise we have
919      to assume that it is live.  */
920   for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
921     {
922       if (JUMP_P (insn))
923 	return 0;	/* We lose track, assume it is alive.  */
924 
925       else if (CALL_P (insn))
926 	{
927 	  /* Calls might use it for the target or register parms.  */
928 	  if (reg_referenced_p (reg, PATTERN (insn))
929 	      || find_reg_fusage (insn, USE, reg))
930 	    return 0;
931 	  else if (dead_or_set_p (insn, reg))
932             return 1;
933 	}
934       else if (NONJUMP_INSN_P (insn))
935 	{
936 	  if (reg_referenced_p (reg, PATTERN (insn)))
937             return 0;
938 	  else if (dead_or_set_p (insn, reg))
939             return 1;
940 	}
941     }
942 
943   /* No conclusive evidence either way, we cannot take the chance
944      that control flow hid the use from us -- "I'm not dead yet".  */
945   return 0;
946 }
947 
948 /* Count the number of ones in mask.  */
949 
950 int
951 mcore_num_ones (HOST_WIDE_INT mask)
952 {
953   /* A trick to count set bits recently posted on comp.compilers.  */
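  /* Each line folds adjacent bit groups together: first every 2-bit field
     holds the count of its two bits, then every 4-bit field, then every
     byte; the final two additions accumulate the byte counts and the
     total (0..32) is taken from the low byte.  */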
954   mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
955   mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
956   mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
957   mask = ((mask >> 8) + mask);
958 
959   return (mask + (mask >> 16)) & 0xff;
960 }
961 
962 /* Count the number of zeros in mask.  */
963 
964 int
965 mcore_num_zeros (HOST_WIDE_INT mask)
966 {
967   return 32 - mcore_num_ones (mask);
968 }
969 
970 /* Determine byte being masked.  */
971 
972 int
973 mcore_byte_offset (unsigned int mask)
974 {
975   if (mask == 0x00ffffffL)
976     return 0;
977   else if (mask == 0xff00ffffL)
978     return 1;
979   else if (mask == 0xffff00ffL)
980     return 2;
981   else if (mask == 0xffffff00L)
982     return 3;
983 
984   return -1;
985 }
986 
987 /* Determine halfword being masked.  */
988 
989 int
990 mcore_halfword_offset (unsigned int mask)
991 {
992   if (mask == 0x0000ffffL)
993     return 0;
994   else if (mask == 0xffff0000L)
995     return 1;
996 
997   return -1;
998 }
999 
1000 /* Output a series of bseti's corresponding to mask.  */
1001 
1002 const char *
1003 mcore_output_bseti (rtx dst, int mask)
1004 {
1005   rtx out_operands[2];
1006   int bit;
1007 
1008   out_operands[0] = dst;
1009 
1010   for (bit = 0; bit < 32; bit++)
1011     {
1012       if ((mask & 0x1) == 0x1)
1013 	{
1014 	  out_operands[1] = GEN_INT (bit);
1015 
1016 	  output_asm_insn ("bseti\t%0,%1", out_operands);
1017 	}
1018       mask >>= 1;
1019     }
1020 
1021   return "";
1022 }
1023 
1024 /* Output a series of bclri's corresponding to mask.  */
1025 
1026 const char *
1027 mcore_output_bclri (rtx dst, int mask)
1028 {
1029   rtx out_operands[2];
1030   int bit;
1031 
1032   out_operands[0] = dst;
1033 
1034   for (bit = 0; bit < 32; bit++)
1035     {
1036       if ((mask & 0x1) == 0x0)
1037 	{
1038 	  out_operands[1] = GEN_INT (bit);
1039 
1040 	  output_asm_insn ("bclri\t%0,%1", out_operands);
1041 	}
1042 
1043       mask >>= 1;
1044     }
1045 
1046   return "";
1047 }
1048 
1049 /* Output a conditional move of two constants that are +/- 1 within each
1050    other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
1051    really worth the effort.  */
1052 
1053 const char *
1054 mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
1055 {
1056   HOST_WIDE_INT load_value;
1057   HOST_WIDE_INT adjust_value;
1058   rtx out_operands[4];
1059 
1060   out_operands[0] = operands[0];
1061 
1062   /* Check to see which constant is loadable.  */
1063   if (const_ok_for_mcore (INTVAL (operands[1])))
1064     {
1065       out_operands[1] = operands[1];
1066       out_operands[2] = operands[2];
1067     }
1068   else if (const_ok_for_mcore (INTVAL (operands[2])))
1069     {
1070       out_operands[1] = operands[2];
1071       out_operands[2] = operands[1];
1072 
1073       /* Complement test since constants are swapped.  */
1074       cmp_t = (cmp_t == 0);
1075     }
1076   load_value   = INTVAL (out_operands[1]);
1077   adjust_value = INTVAL (out_operands[2]);
1078 
1079   /* First output the test if folded into the pattern.  */
1080 
1081   if (test)
1082     output_asm_insn (test, operands);
1083 
1084   /* Load the constant - for now, only support constants that can be
1085      generated with a single instruction.  maybe add general inlinable
1086      constants later (this will increase the # of patterns since the
1087      instruction sequence has a different length attribute).  */
1088   if (load_value >= 0 && load_value <= 127)
1089     output_asm_insn ("movi\t%0,%1", out_operands);
1090   else if (CONST_OK_FOR_M (load_value))
1091     output_asm_insn ("bgeni\t%0,%P1", out_operands);
1092   else if (CONST_OK_FOR_N (load_value))
1093     output_asm_insn ("bmaski\t%0,%N1", out_operands);
1094 
1095   /* Output the constant adjustment.  */
1096   if (load_value > adjust_value)
1097     {
1098       if (cmp_t)
1099 	output_asm_insn ("decf\t%0", out_operands);
1100       else
1101 	output_asm_insn ("dect\t%0", out_operands);
1102     }
1103   else
1104     {
1105       if (cmp_t)
1106 	output_asm_insn ("incf\t%0", out_operands);
1107       else
1108 	output_asm_insn ("inct\t%0", out_operands);
1109     }
1110 
1111   return "";
1112 }
1113 
1114 /* Outputs the peephole for moving a constant that gets not'ed followed
1115    by an and (i.e. combine the not and the and into andn). BRC  */
1116 
1117 const char *
1118 mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
1119 {
1120   HOST_WIDE_INT x, y;
1121   rtx out_operands[3];
1122   const char * load_op;
1123   char buf[256];
1124   int trick_no;
1125 
1126   trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
1127   gcc_assert (trick_no == 2);
1128 
1129   out_operands[0] = operands[0];
1130   out_operands[1] = GEN_INT (x);
1131   out_operands[2] = operands[2];
1132 
1133   if (x >= 0 && x <= 127)
1134     load_op = "movi\t%0,%1";
1135 
1136   /* Try exact power of two.  */
1137   else if (CONST_OK_FOR_M (x))
1138     load_op = "bgeni\t%0,%P1";
1139 
1140   /* Try exact power of two - 1.  */
1141   else if (CONST_OK_FOR_N (x))
1142     load_op = "bmaski\t%0,%N1";
1143 
1144   else
1145     {
1146       load_op = "BADMOVI-andn\t%0, %1";
1147       gcc_unreachable ();
1148     }
1149 
1150   sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
1151   output_asm_insn (buf, out_operands);
1152 
1153   return "";
1154 }
1155 
1156 /* Output an inline constant.  */
1157 
1158 static const char *
1159 output_inline_const (machine_mode mode, rtx operands[])
1160 {
1161   HOST_WIDE_INT x = 0, y = 0;
1162   int trick_no;
1163   rtx out_operands[3];
1164   char buf[256];
1165   char load_op[256];
1166   const char *dst_fmt;
1167   HOST_WIDE_INT value;
1168 
1169   value = INTVAL (operands[1]);
1170 
1171   trick_no = try_constant_tricks (value, &x, &y);
1172   /* lrw's are handled separately: Large inlinable constants never get
1173      turned into lrw's.  Our caller uses try_constant_tricks to back
1174      off to an lrw rather than calling this routine.  */
1175   gcc_assert (trick_no != 0);
1176 
1177   if (trick_no == 1)
1178     x = value;
1179 
1180   /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
1181   out_operands[0] = operands[0];
1182   out_operands[1] = GEN_INT (x);
1183 
1184   if (trick_no > 2)
1185     out_operands[2] = GEN_INT (y);
1186 
1187   /* Select dst format based on mode.  */
1188   if (mode == DImode && (! TARGET_LITTLE_END))
1189     dst_fmt = "%R0";
1190   else
1191     dst_fmt = "%0";
1192 
1193   if (x >= 0 && x <= 127)
1194     sprintf (load_op, "movi\t%s,%%1", dst_fmt);
1195 
1196   /* Try exact power of two.  */
1197   else if (CONST_OK_FOR_M (x))
1198     sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
1199 
1200   /* Try exact power of two - 1.  */
1201   else if (CONST_OK_FOR_N (x))
1202     sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
1203 
1204   else
1205     {
1206       sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
1207       gcc_unreachable ();
1208     }
1209 
1210   switch (trick_no)
1211     {
1212     case 1:
1213       strcpy (buf, load_op);
1214       break;
1215     case 2:   /* not */
1216       sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1217       break;
1218     case 3:   /* add */
1219       sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1220       break;
1221     case 4:   /* sub */
1222       sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1223       break;
1224     case 5:   /* rsub */
1225       /* Never happens unless -mrsubi, see try_constant_tricks().  */
1226       sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1227       break;
1228     case 6:   /* bseti */
1229       sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1230       break;
1231     case 7:   /* bclr */
1232       sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1233       break;
1234     case 8:   /* rotl */
1235       sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1236       break;
1237     case 9:   /* lsl */
1238       sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
1239       break;
1240     case 10:  /* ixh */
1241       sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1242       break;
1243     case 11:  /* ixw */
1244       sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
1245       break;
1246     default:
1247       return "";
1248     }
1249 
1250   output_asm_insn (buf, out_operands);
1251 
1252   return "";
1253 }
1254 
1255 /* Output a move of a word or less value.  */
1256 
1257 const char *
1258 mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1259 		   machine_mode mode ATTRIBUTE_UNUSED)
1260 {
1261   rtx dst = operands[0];
1262   rtx src = operands[1];
1263 
1264   if (GET_CODE (dst) == REG)
1265     {
1266       if (GET_CODE (src) == REG)
1267 	{
1268 	  if (REGNO (src) == CC_REG)            /* r-c */
1269             return "mvc\t%0";
1270 	  else
1271             return "mov\t%0,%1";                /* r-r*/
1272 	}
1273       else if (GET_CODE (src) == MEM)
1274 	{
1275 	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
1276             return "lrw\t%0,[%1]";              /* a-R */
1277 	  else
1278 	    switch (GET_MODE (src))		/* r-m */
1279 	      {
1280 	      case SImode:
1281 		return "ldw\t%0,%1";
1282 	      case HImode:
1283 		return "ld.h\t%0,%1";
1284 	      case QImode:
1285 		return "ld.b\t%0,%1";
1286 	      default:
1287 		gcc_unreachable ();
1288 	      }
1289 	}
1290       else if (GET_CODE (src) == CONST_INT)
1291 	{
1292 	  HOST_WIDE_INT x, y;
1293 
1294 	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
1295             return "movi\t%0,%1";
1296 	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
1297             return "bgeni\t%0,%P1\t// %1 %x1";
1298 	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
1299             return "bmaski\t%0,%N1\t// %1 %x1";
1300 	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
1301             return output_inline_const (SImode, operands);  /* 1-2 insns */
1302 	  else
1303             return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
1304 	}
1305       else
1306 	return "lrw\t%0, %1";                /* Into the literal pool.  */
1307     }
1308   else if (GET_CODE (dst) == MEM)               /* m-r */
1309     switch (GET_MODE (dst))
1310       {
1311       case SImode:
1312 	return "stw\t%1,%0";
1313       case HImode:
1314 	return "st.h\t%1,%0";
1315       case QImode:
1316 	return "st.b\t%1,%0";
1317       default:
1318 	gcc_unreachable ();
1319       }
1320 
1321   gcc_unreachable ();
1322 }
1323 
1324 /* Return a sequence of instructions to perform DI or DF move.
1325    Since the MCORE cannot move a DI or DF in one instruction, we have
1326    to take care when we see overlapping source and dest registers.  */
1327 
1328 const char *
1329 mcore_output_movedouble (rtx operands[], machine_mode mode ATTRIBUTE_UNUSED)
1330 {
1331   rtx dst = operands[0];
1332   rtx src = operands[1];
1333 
1334   if (GET_CODE (dst) == REG)
1335     {
1336       if (GET_CODE (src) == REG)
1337 	{
1338 	  int dstreg = REGNO (dst);
1339 	  int srcreg = REGNO (src);
1340 
1341 	  /* Ensure the second source word is not overwritten before it is
	     read, e.g. copying (r2,r3) into (r3,r4) must move r3 to r4 first.  */
1342 	  if (srcreg + 1 == dstreg)
1343 	    return "mov	%R0,%R1\n\tmov	%0,%1";
1344 	  else
1345 	    return "mov	%0,%1\n\tmov	%R0,%R1";
1346 	}
1347       else if (GET_CODE (src) == MEM)
1348 	{
1349 	  rtx memexp = XEXP (src, 0);
1350 	  int dstreg = REGNO (dst);
1351 	  int basereg = -1;
1352 
1353 	  if (GET_CODE (memexp) == LABEL_REF)
1354 	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
1355 	  else if (GET_CODE (memexp) == REG)
1356 	    basereg = REGNO (memexp);
1357 	  else if (GET_CODE (memexp) == PLUS)
1358 	    {
1359 	      if (GET_CODE (XEXP (memexp, 0)) == REG)
1360 		basereg = REGNO (XEXP (memexp, 0));
1361 	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
1362 		basereg = REGNO (XEXP (memexp, 1));
1363 	      else
1364 		gcc_unreachable ();
1365 	    }
1366 	  else
1367 	    gcc_unreachable ();
1368 
1369           /* ??? length attribute is wrong here.  */
1370 	  if (dstreg == basereg)
1371 	    {
1372 	      /* Just load them in reverse order.  */
1373 	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";
1374 
1375 	      /* XXX: alternative: move basereg to basereg+1
1376 	         and then fall through.  */
1377 	    }
1378 	  else
1379 	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
1380 	}
1381       else if (GET_CODE (src) == CONST_INT)
1382 	{
1383 	  if (TARGET_LITTLE_END)
1384 	    {
1385 	      if (CONST_OK_FOR_I (INTVAL (src)))
1386 		output_asm_insn ("movi	%0,%1", operands);
1387 	      else if (CONST_OK_FOR_M (INTVAL (src)))
1388 		output_asm_insn ("bgeni	%0,%P1", operands);
1389 	      else if (CONST_OK_FOR_N (INTVAL (src)))
1390 		output_asm_insn ("bmaski	%0,%N1", operands);
1391 	      else
1392 		gcc_unreachable ();
1393 
1394 	      if (INTVAL (src) < 0)
1395 		return "bmaski	%R0,32";
1396 	      else
1397 		return "movi	%R0,0";
1398 	    }
1399 	  else
1400 	    {
1401 	      if (CONST_OK_FOR_I (INTVAL (src)))
1402 		output_asm_insn ("movi	%R0,%1", operands);
1403 	      else if (CONST_OK_FOR_M (INTVAL (src)))
1404 		output_asm_insn ("bgeni	%R0,%P1", operands);
1405 	      else if (CONST_OK_FOR_N (INTVAL (src)))
1406 		output_asm_insn ("bmaski	%R0,%N1", operands);
1407 	      else
1408 		gcc_unreachable ();
1409 
1410 	      if (INTVAL (src) < 0)
1411 		return "bmaski	%0,32";
1412 	      else
1413 		return "movi	%0,0";
1414 	    }
1415 	}
1416       else
1417 	gcc_unreachable ();
1418     }
1419   else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
1420     return "stw\t%1,%0\n\tstw\t%R1,%R0";
1421   else
1422     gcc_unreachable ();
1423 }
1424 
1425 /* Predicates used by the templates.  */
1426 
1427 int
1428 mcore_arith_S_operand (rtx op)
1429 {
1430   if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
1431     return 1;
1432 
1433   return 0;
1434 }
1435 
1436 /* Expand insert bit field.  BRC  */
1437 
1438 int
1439 mcore_expand_insv (rtx operands[])
1440 {
1441   int width = INTVAL (operands[1]);
1442   int posn = INTVAL (operands[2]);
1443   int mask;
1444   rtx mreg, sreg, ereg;
1445 
1446   /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
1447      for width==1 must be removed.  Look around line 368.  This is something
1448      we really want the md part to do.  */
1449   if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
1450     {
1451       /* Do directly with bseti or bclri.  */
1452       /* RBE: 2/97 consider only low bit of constant.  */
1453       if ((INTVAL (operands[3]) & 1) == 0)
1454 	{
1455 	  mask = ~(1 << posn);
1456 	  emit_insn (gen_rtx_SET (operands[0],
1457 				  gen_rtx_AND (SImode, operands[0],
1458 					       GEN_INT (mask))));
1459 	}
1460       else
1461 	{
1462 	  mask = 1 << posn;
1463 	  emit_insn (gen_rtx_SET (operands[0],
1464 				  gen_rtx_IOR (SImode, operands[0],
1465 					       GEN_INT (mask))));
1466 	}
1467 
1468       return 1;
1469     }
1470 
1471   /* Look at some bit-field placements that we aren't interested
1472      in handling ourselves, unless specifically directed to do so.  */
1473   if (! TARGET_W_FIELD)
1474     return 0;		/* Generally, give up about now.  */
1475 
1476   if (width == 8 && posn % 8 == 0)
1477     /* Byte sized and aligned; let caller break it up.  */
1478     return 0;
1479 
1480   if (width == 16 && posn % 16 == 0)
1481     /* Short sized and aligned; let caller break it up.  */
1482     return 0;
1483 
1484   /* The general case - we can do this a little bit better than what the
1485      machine independent part tries.  This will get rid of all the subregs
1486      that mess up constant folding in combine when working with relaxed
1487      immediates.  */
1488 
1489   /* If setting the entire field, do it directly.  */
1490   if (GET_CODE (operands[3]) == CONST_INT
1491       && INTVAL (operands[3]) == ((1 << width) - 1))
1492     {
1493       mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
1494       emit_insn (gen_rtx_SET (operands[0],
1495 			      gen_rtx_IOR (SImode, operands[0], mreg)));
1496       return 1;
1497     }
1498 
1499   /* Generate the clear mask.  */
1500   mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
1501 
1502   /* Clear the field, to overlay it later with the source.  */
1503   emit_insn (gen_rtx_SET (operands[0],
1504 			  gen_rtx_AND (SImode, operands[0], mreg)));
1505 
1506   /* If the source is constant 0, we've nothing to add back.  */
1507   if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
1508     return 1;
1509 
1510   /* XXX: Should we worry about more games with constant values?
1511      We've covered the high profile: set/clear single-bit and many-bit
1512      fields. How often do we see "arbitrary bit pattern" constants?  */
1513   sreg = copy_to_mode_reg (SImode, operands[3]);
1514 
1515   /* Extract src as same width as dst (needed for signed values).  We
1516      always have to do this since we widen everything to SImode.
1517      We don't have to mask if we're shifting this up against the
1518      MSB of the register (i.e., the shift will push out any hi-order
1519      bits).  */
1520   if (width + posn != (int) GET_MODE_BITSIZE (SImode))
1521     {
1522       ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
1523       emit_insn (gen_rtx_SET (sreg, gen_rtx_AND (SImode, sreg, ereg)));
1524     }
1525 
1526   /* Insert source value in dest.  */
1527   if (posn != 0)
1528     emit_insn (gen_rtx_SET (sreg, gen_rtx_ASHIFT (SImode, sreg,
1529 						  GEN_INT (posn))));
1530 
1531   emit_insn (gen_rtx_SET (operands[0],
1532 			  gen_rtx_IOR (SImode, operands[0], sreg)));
1533 
1534   return 1;
1535 }
1536 
1537 /* ??? Block move stuff stolen from m88k.  This code has not been
1538    verified for correctness.  */
1539 
1540 /* Emit code to perform a block move.  Choose the best method.
1541 
1542    OPERANDS[0] is the destination.
1543    OPERANDS[1] is the source.
1544    OPERANDS[2] is the size.
1545    OPERANDS[3] is the alignment safe to use.  */
1546 
1547 /* Emit code to perform a block move with an offset sequence of ldw/st
1548    instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
1549    known constants.  DEST and SRC are registers.  OFFSET is the known
1550    starting point for the output pattern.  */
1551 
1552 static const machine_mode mode_from_align[] =
1553 {
1554   VOIDmode, QImode, HImode, VOIDmode, SImode,
1555 };
1556 
1557 static void
1558 block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
1559 {
1560   rtx temp[2];
1561   machine_mode mode[2];
1562   int amount[2];
1563   bool active[2];
1564   int phase = 0;
1565   int next;
1566   int offset_ld = 0;
1567   int offset_st = 0;
1568   rtx x;
1569 
1570   x = XEXP (dst_mem, 0);
1571   if (!REG_P (x))
1572     {
1573       x = force_reg (Pmode, x);
1574       dst_mem = replace_equiv_address (dst_mem, x);
1575     }
1576 
1577   x = XEXP (src_mem, 0);
1578   if (!REG_P (x))
1579     {
1580       x = force_reg (Pmode, x);
1581       src_mem = replace_equiv_address (src_mem, x);
1582     }
1583 
1584   active[0] = active[1] = false;
1585 
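  /* Software-pipeline the copy: each iteration of the loop below loads the
     next chunk into one of two temporaries while storing the chunk loaded
     on the previous iteration, alternating between the two until both the
     loads and the trailing store have drained.  */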
1586   do
1587     {
1588       next = phase;
1589       phase ^= 1;
1590 
1591       if (size > 0)
1592 	{
1593 	  int next_amount;
1594 
1595 	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
1596 	  next_amount = MIN (next_amount, align);
1597 
1598 	  amount[next] = next_amount;
1599 	  mode[next] = mode_from_align[next_amount];
1600 	  temp[next] = gen_reg_rtx (mode[next]);
1601 
1602 	  x = adjust_address (src_mem, mode[next], offset_ld);
1603 	  emit_insn (gen_rtx_SET (temp[next], x));
1604 
1605 	  offset_ld += next_amount;
1606 	  size -= next_amount;
1607 	  active[next] = true;
1608 	}
1609 
1610       if (active[phase])
1611 	{
1612 	  active[phase] = false;
1613 
1614 	  x = adjust_address (dst_mem, mode[phase], offset_st);
1615 	  emit_insn (gen_rtx_SET (x, temp[phase]));
1616 
1617 	  offset_st += amount[phase];
1618 	}
1619     }
1620   while (active[next]);
1621 }
1622 
1623 bool
1624 mcore_expand_block_move (rtx *operands)
1625 {
1626   HOST_WIDE_INT align, bytes, max;
1627 
1628   if (GET_CODE (operands[2]) != CONST_INT)
1629     return false;
1630 
1631   bytes = INTVAL (operands[2]);
1632   align = INTVAL (operands[3]);
1633 
1634   if (bytes <= 0)
1635     return false;
1636   if (align > 4)
1637     align = 4;
1638 
1639   switch (align)
1640     {
1641     case 4:
1642       if (bytes & 1)
1643 	max = 4*4;
1644       else if (bytes & 3)
1645 	max = 8*4;
1646       else
1647 	max = 16*4;
1648       break;
1649     case 2:
1650       max = 4*2;
1651       break;
1652     case 1:
1653       max = 4*1;
1654       break;
1655     default:
1656       gcc_unreachable ();
1657     }
1658 
1659   if (bytes <= max)
1660     {
1661       block_move_sequence (operands[0], operands[1], bytes, align);
1662       return true;
1663     }
1664 
1665   return false;
1666 }
1667 
1668 
1669 /* Code to generate prologue and epilogue sequences.  */
1670 static int number_of_regs_before_varargs;
1671 
1672 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1673    for a varargs function.  */
1674 static int current_function_anonymous_args;
1675 
1676 #define	STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1677 #define	STORE_REACH (64)	/* Maximum displace of word store + 4.  */
1678 #define	ADDI_REACH (32)		/* Maximum addi operand.  */
1679 
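/* Lay out the stack frame.  From higher to lower addresses the frame holds
   the incoming/anonymous argument spill area (arg_size), the callee-saved
   register block (reg_size), local variables (local_size) and the outgoing
   argument overflow area (outbound_size), with padding added as needed to
   keep each growth step STACK_BYTES aligned.  */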
1680 static void
1681 layout_mcore_frame (struct mcore_frame * infp)
1682 {
1683   int n;
1684   unsigned int i;
1685   int nbytes;
1686   int regarg;
1687   int localregarg;
1688   int outbounds;
1689   unsigned int growths;
1690   int step;
1691 
1692   /* Might have to spill bytes to re-assemble a big argument that
1693      was passed partially in registers and partially on the stack.  */
1694   nbytes = crtl->args.pretend_args_size;
1695 
1696   /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
1697   if (current_function_anonymous_args)
1698     nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
1699 
1700   infp->arg_size = nbytes;
1701 
1702   /* How much space to save non-volatile registers we stomp.  */
1703   infp->reg_mask = calc_live_regs (& n);
1704   infp->reg_size = n * 4;
1705 
1706   /* And the rest of it... locals and space for overflowed outbounds.  */
1707   infp->local_size = get_frame_size ();
1708   infp->outbound_size = crtl->outgoing_args_size;
1709 
1710   /* Make sure we have a whole number of words for the locals.  */
1711   if (infp->local_size % STACK_BYTES)
1712     infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
1713 
1714   /* The only thing we know we have to pad is the outbound space, since
1715      we've aligned our locals assuming that the base of locals is aligned.  */
1716   infp->pad_local = 0;
1717   infp->pad_reg = 0;
1718   infp->pad_outbound = 0;
1719   if (infp->outbound_size % STACK_BYTES)
1720     infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
1721 
1722   /* Now we see how we want to stage the prologue so that it does
1723      the most appropriate stack growth and register saves to either:
1724      (1) run fast,
1725      (2) reduce instruction space, or
1726      (3) reduce stack space.  */
1727   for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
1728     infp->growth[i] = 0;
1729 
1730   regarg      = infp->reg_size + infp->arg_size;
1731   localregarg = infp->local_size + regarg;
1732   outbounds   = infp->outbound_size + infp->pad_outbound;
1733   growths     = 0;
1734 
1735   /* XXX: Consider one where we consider localregarg + outbound too! */
1736 
1737   /* If the frame is <= 32 bytes and an stm would cover <= 2 registers,
1738      use stw's with offsets and buy the frame in one shot.  */
1739   if (localregarg <= ADDI_REACH
1740       && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1741     {
1742       /* Make sure we'll be aligned.  */
1743       if (localregarg % STACK_BYTES)
1744 	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1745 
1746       step = localregarg + infp->pad_reg;
1747       infp->reg_offset = infp->local_size;
1748 
1749       if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
1750 	{
1751 	  step += outbounds;
1752 	  infp->reg_offset += outbounds;
1753 	  outbounds = 0;
1754 	}
1755 
1756       infp->arg_offset = step - 4;
1757       infp->growth[growths++] = step;
1758       infp->reg_growth = growths;
1759       infp->local_growth = growths;
1760 
1761       /* If we haven't already folded it in.  */
1762       if (outbounds)
1763 	infp->growth[growths++] = outbounds;
1764 
1765       goto finish;
1766     }
1767 
1768   /* Frame can't be done with a single subi, but can be done with 2
1769      insns.  If the 'stm' is getting <= 2 registers, we use stw's and
1770      shift some of the stack purchase into the first subi, so both are
1771      single instructions.  */
1772   if (localregarg <= STORE_REACH
1773       && (infp->local_size > ADDI_REACH)
1774       && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
1775     {
1776       int all;
1777 
1778       /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
1779       if (localregarg % STACK_BYTES)
1780 	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
1781 
1782       all = localregarg + infp->pad_reg + infp->pad_local;
1783       step = ADDI_REACH;	/* As much up front as we can.  */
1784       if (step > all)
1785 	step = all;
1786 
1787       /* XXX: Consider whether step will still be aligned; we believe so.  */
1788       infp->arg_offset = step - 4;
1789       infp->growth[growths++] = step;
1790       infp->reg_growth = growths;
1791       infp->reg_offset = step - infp->pad_reg - infp->reg_size;
1792       all -= step;
1793 
1794       /* Can we fold in any space required for outbounds?  */
1795       if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
1796 	{
1797 	  all += outbounds;
1798 	  outbounds = 0;
1799 	}
1800 
1801       /* Get the rest of the locals in place.  */
1802       step = all;
1803       infp->growth[growths++] = step;
1804       infp->local_growth = growths;
1805       all -= step;
1806 
1807       gcc_assert (all == 0);
1808 
1809       /* Finish off if we need to do so.  */
1810       if (outbounds)
1811 	infp->growth[growths++] = outbounds;
1812 
1813       goto finish;
1814     }
1815 
1816   /* Registers + args is nicely aligned, so we'll buy that in one shot.
1817      Then we buy the rest of the frame in 1 or 2 steps depending on
1818      whether we need a frame pointer.  */
1819   if ((regarg % STACK_BYTES) == 0)
1820     {
1821       infp->growth[growths++] = regarg;
1822       infp->reg_growth = growths;
1823       infp->arg_offset = regarg - 4;
1824       infp->reg_offset = 0;
1825 
1826       if (infp->local_size % STACK_BYTES)
1827 	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1828 
1829       step = infp->local_size + infp->pad_local;
1830 
1831       if (!frame_pointer_needed)
1832 	{
1833 	  step += outbounds;
1834 	  outbounds = 0;
1835 	}
1836 
1837       infp->growth[growths++] = step;
1838       infp->local_growth = growths;
1839 
1840       /* If there's any left to be done.  */
1841       if (outbounds)
1842 	infp->growth[growths++] = outbounds;
1843 
1844       goto finish;
1845     }
1846 
1847   /* XXX: optimizations that we'll want to play with....
1848      -- regarg is not aligned, but it's a small number of registers;
1849     	use some of localsize so that regarg is aligned and then
1850     	save the registers.  */
1851 
1852   /* Simple encoding; plods down the stack buying the pieces as it goes.
1853      -- does not optimize space consumption.
1854      -- does not attempt to optimize instruction counts.
1855      -- but it is safe for all alignments.  */
1856   if (regarg % STACK_BYTES != 0)
1857     infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
1858 
1859   infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1860   infp->reg_growth = growths;
1861   infp->arg_offset = infp->growth[0] - 4;
1862   infp->reg_offset = 0;
1863 
1864   if (frame_pointer_needed)
1865     {
1866       if (infp->local_size % STACK_BYTES != 0)
1867 	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1868 
1869       infp->growth[growths++] = infp->local_size + infp->pad_local;
1870       infp->local_growth = growths;
1871 
1872       infp->growth[growths++] = outbounds;
1873     }
1874   else
1875     {
1876       if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1877 	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1878 
1879       infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1880       infp->local_growth = growths;
1881     }
1882 
1883   /* Anything else that we've forgotten?  Plus a few consistency checks.  */
1884  finish:
1885   gcc_assert (infp->reg_offset >= 0);
1886   gcc_assert (growths <= MAX_STACK_GROWS);
1887 
1888   for (i = 0; i < growths; i++)
1889     gcc_assert (!(infp->growth[i] % STACK_BYTES));
1890 }
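
/* A worked example of the staging above (a sketch only; it assumes the
   stock definitions of STACK_BYTES == 8 and ADDI_REACH == 32, no frame
   pointer, and a hypothetical frame with arg_size == 0, reg_size == 8,
   local_size == 16 and no outbounds): localregarg is 24, so the first
   case applies and the whole frame is bought in one step --
   growth[0] == 24, reg_growth == local_growth == 1, reg_offset == 16 and
   arg_offset == 20.  mcore_expand_prolog below then emits a single
   24-byte stack adjustment and stores the two saved registers at sp+16
   and sp+20.  */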
1891 
1892 /* Define the offset between two registers, one to be eliminated, and
1893    the other its replacement, at the start of a routine.  */
1894 
1895 int
1896 mcore_initial_elimination_offset (int from, int to)
1897 {
1898   int above_frame;
1899   int below_frame;
1900   struct mcore_frame fi;
1901 
1902   layout_mcore_frame (& fi);
1903 
1904   /* fp to ap */
1905   above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1906   /* sp to fp */
1907   below_frame = fi.outbound_size + fi.pad_outbound;
1908 
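  /* The three eliminations handled below are mutually consistent:
     (ap - sp) == (ap - fp) + (fp - sp).  */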
1909   if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1910     return above_frame;
1911 
1912   if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1913     return above_frame + below_frame;
1914 
1915   if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1916     return below_frame;
1917 
1918   gcc_unreachable ();
1919 }
1920 
1921 /* Keep track of some information about varargs for the prolog.  */
1922 
1923 static void
1924 mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1925 			      machine_mode mode, tree type,
1926 			      int * ptr_pretend_size ATTRIBUTE_UNUSED,
1927 			      int second_time ATTRIBUTE_UNUSED)
1928 {
1929   CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1930 
1931   current_function_anonymous_args = 1;
1932 
1933   /* We need to know how many argument registers are used before
1934      the varargs start, so that we can push the remaining argument
1935      registers during the prologue.  */
1936   number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1937 
1938   /* There is a bug somewhere in the arg handling code.
1939      Until I can find it this workaround always pushes the
1940      last named argument onto the stack.  */
1941   number_of_regs_before_varargs = *args_so_far;
1942 
1943   /* The last named argument may be split between argument registers
1944      and the stack.  Allow for this here.  */
1945   if (number_of_regs_before_varargs > NPARM_REGS)
1946     number_of_regs_before_varargs = NPARM_REGS;
1947 }
1948 
1949 void
1950 mcore_expand_prolog (void)
1951 {
1952   struct mcore_frame fi;
1953   int space_allocated = 0;
1954   int growth = 0;
1955 
1956   /* Find out what we're doing.  */
1957   layout_mcore_frame (&fi);
1958 
1959   space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1960     fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1961 
1962   if (TARGET_CG_DATA)
1963     {
1964       /* Emit a symbol for this routine's frame size.  */
1965       rtx x;
1966 
1967       x = DECL_RTL (current_function_decl);
1968 
1969       gcc_assert (GET_CODE (x) == MEM);
1970 
1971       x = XEXP (x, 0);
1972 
1973       gcc_assert (GET_CODE (x) == SYMBOL_REF);
1974 
1975       free (mcore_current_function_name);
1976 
1977       mcore_current_function_name = xstrdup (XSTR (x, 0));
1978 
1979       ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1980 
1981       if (cfun->calls_alloca)
1982 	ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1983 
1984       /* 970425: RBE:
1985          We're looking at how the 8byte alignment affects stack layout
1986          and where we had to pad things. This emits information we can
1987          extract which tells us about frame sizes and the like.  */
1988       fprintf (asm_out_file,
1989 	       "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1990 	       mcore_current_function_name,
1991 	       fi.arg_size, fi.reg_size, fi.reg_mask,
1992 	       fi.local_size, fi.outbound_size,
1993 	       frame_pointer_needed);
1994     }
1995 
1996   if (mcore_naked_function_p ())
1997     return;
1998 
1999   /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
2000   output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
2001 
2002   /* If we have a parameter passed partially in regs and partially in memory,
2003      the registers will have been stored to memory already in function.c.  So
2004      we only need to do something here for varargs functions.  */
2005   if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
2006     {
2007       int offset;
2008       int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2009       int remaining = fi.arg_size;
2010 
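      /* Work down from the last parameter register: the highest-numbered
	 register is stored at ARG_OFFSET and each earlier register goes
	 four bytes below it.  */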
2011       for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2012         {
2013           emit_insn (gen_movsi
2014                      (gen_rtx_MEM (SImode,
2015 				   plus_constant (Pmode, stack_pointer_rtx,
2016 						  offset)),
2017                       gen_rtx_REG (SImode, rn)));
2018         }
2019     }
2020 
2021   /* Do we need another stack adjustment before we do the register saves?  */
2022   if (growth < fi.reg_growth)
2023     output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
2024 
2025   if (fi.reg_size != 0)
2026     {
2027       int i;
2028       int offs = fi.reg_offset;
2029 
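      /* Save strategy: if the saves start at sp+0 and both r14 and r15
	 are live, store the whole contiguous run ending at r15 with a
	 single store-multiple; otherwise fall back to one stw per
	 register.  */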
2030       for (i = 15; i >= 0; i--)
2031         {
2032           if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2033 	    {
2034 	      int first_reg = 15;
2035 
2036 	      while (fi.reg_mask & (1 << first_reg))
2037 	        first_reg--;
2038 	      first_reg++;
2039 
2040 	      emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2041 					     gen_rtx_REG (SImode, first_reg),
2042 					     GEN_INT (16 - first_reg)));
2043 
2044 	      i -= (15 - first_reg);
2045 	      offs += (16 - first_reg) * 4;
2046 	    }
2047           else if (fi.reg_mask & (1 << i))
2048 	    {
2049 	      emit_insn (gen_movsi
2050 		         (gen_rtx_MEM (SImode,
2051 				       plus_constant (Pmode, stack_pointer_rtx,
2052 						      offs)),
2053 		          gen_rtx_REG (SImode, i)));
2054 	      offs += 4;
2055 	    }
2056         }
2057     }
2058 
2059   /* Figure the locals + outbounds.  */
2060   if (frame_pointer_needed)
2061     {
2062       /* If we haven't already purchased to 'fp'.  */
2063       if (growth < fi.local_growth)
2064         output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
2065 
2066       emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2067 
2068       /* ... and then go any remaining distance for outbounds, etc.  */
2069       if (fi.growth[growth])
2070         output_stack_adjust (-1, fi.growth[growth++]);
2071     }
2072   else
2073     {
2074       if (growth < fi.local_growth)
2075         output_stack_adjust (-1, fi.growth[growth++]);		/* Grows it.  */
2076       if (fi.growth[growth])
2077         output_stack_adjust (-1, fi.growth[growth++]);
2078     }
2079 }
2080 
2081 void
2082 mcore_expand_epilog (void)
2083 {
2084   struct mcore_frame fi;
2085   int i;
2086   int offs;
2087   int growth = MAX_STACK_GROWS - 1;
2088 
2089 
2090   /* Find out what we're doing.  */
2091   layout_mcore_frame (&fi);
2092 
2093   if (mcore_naked_function_p ())
2094     return;
2095 
2096   /* If we had a frame pointer, restore the sp from that.  */
2097   if (frame_pointer_needed)
2098     {
2099       emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2100       growth = fi.local_growth - 1;
2101     }
2102   else
2103     {
2104       /* XXX: while loop should accumulate and do a single sell.  */
2105       while (growth >= fi.local_growth)
2106         {
2107           if (fi.growth[growth] != 0)
2108             output_stack_adjust (1, fi.growth[growth]);
2109 	  growth--;
2110         }
2111     }
2112 
2113   /* Make sure we've shrunk stack back to the point where the registers
2114      were laid down. This is typically 0/1 iterations.  Then pull the
2115      register save information back off the stack.  */
2116   while (growth >= fi.reg_growth)
2117     output_stack_adjust (1, fi.growth[growth--]);
2118 
2119   offs = fi.reg_offset;
2120 
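  /* Mirror of the prologue's save loop: reload the contiguous run ending
     at r15 with one load-multiple when the saves start at sp+0 and both
     r14 and r15 were saved, otherwise reload the registers one at a
     time.  */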
2121   for (i = 15; i >= 0; i--)
2122     {
2123       if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2124 	{
2125 	  int first_reg;
2126 
2127 	  /* Find the starting register.  */
2128 	  first_reg = 15;
2129 
2130 	  while (fi.reg_mask & (1 << first_reg))
2131 	    first_reg--;
2132 
2133 	  first_reg++;
2134 
2135 	  emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2136 					gen_rtx_MEM (SImode, stack_pointer_rtx),
2137 					GEN_INT (16 - first_reg)));
2138 
2139 	  i -= (15 - first_reg);
2140 	  offs += (16 - first_reg) * 4;
2141 	}
2142       else if (fi.reg_mask & (1 << i))
2143 	{
2144 	  emit_insn (gen_movsi
2145 		     (gen_rtx_REG (SImode, i),
2146 		      gen_rtx_MEM (SImode,
2147 				   plus_constant (Pmode, stack_pointer_rtx,
2148 						  offs))));
2149 	  offs += 4;
2150 	}
2151     }
2152 
2153   /* Give back anything else.  */
2154   /* XXX: Should accumulate total and then give it back.  */
2155   while (growth >= 0)
2156     output_stack_adjust (1, fi.growth[growth--]);
2157 }
2158 
2159 /* This code is borrowed from the SH port.  */
2160 
2161 /* The MCORE cannot load a large constant into a register, constants have to
2162    come from a pc relative load.  The reference of a pc relative load
2163    instruction must be less than 1k in front of the instruction.  This
2164    means that we often have to dump a constant inside a function, and
2165    generate code to branch around it.
2166 
2167    It is important to minimize this, since the branches will slow things
2168    down and make things bigger.
2169 
2170    Worst case code looks like:
2171 
2172    lrw   L1,r0
2173    br    L2
2174    align
2175    L1:   .long value
2176    L2:
2177    ..
2178 
2179    lrw   L3,r0
2180    br    L4
2181    align
2182    L3:   .long value
2183    L4:
2184    ..
2185 
2186    We fix this by performing a scan before scheduling, which notices which
2187    instructions need to have their operands fetched from the constant table
2188    and builds the table.
2189 
2190    The algorithm is:
2191 
2192    scan, find an instruction which needs a pcrel move.  Look forward, find the
2193    last barrier which is within MAX_COUNT bytes of the requirement.
2194    If there isn't one, make one.  Process all the instructions between
2195    the find and the barrier.
2196 
2197    In the above example, we can tell that L3 is within 1k of L1, so
2198    the first move can be shrunk from the 2 insn+constant sequence into
2199    just 1 insn, and the constant moved to L3 to make:
2200 
2201    lrw          L1,r0
2202    ..
2203    lrw          L3,r0
2204    bra          L4
2205    align
2206    L3:.long value
2207    L4:.long value
2208 
2209    Then the second move becomes the target for the shortening process.  */
2210 
2211 typedef struct
2212 {
2213   rtx value;			/* Value in table.  */
2214   rtx label;			/* Label of value.  */
2215 } pool_node;
2216 
2217 /* The maximum number of constants that can fit into one pool, since
2218    the pc relative range is 0...1020 bytes and constants are at least 4
2219    bytes long.  We subtract 4 from the range to allow for the case where
2220    we need to add a branch/align before the constant pool.  */
2221 
2222 #define MAX_COUNT 1016
2223 #define MAX_POOL_SIZE (MAX_COUNT/4)
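/* With MAX_COUNT of 1016 bytes this allows at most 254 pool entries.  */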
2224 static pool_node pool_vector[MAX_POOL_SIZE];
2225 static int pool_size;
2226 
2227 /* Dump out any constants accumulated in the final pass.  These
2228    will only be labels.  */
2229 
2230 const char *
2231 mcore_output_jump_label_table (void)
2232 {
2233   int i;
2234 
2235   if (pool_size)
2236     {
2237       fprintf (asm_out_file, "\t.align 2\n");
2238 
2239       for (i = 0; i < pool_size; i++)
2240 	{
2241 	  pool_node * p = pool_vector + i;
2242 
2243 	  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2244 
2245 	  output_asm_insn (".long	%0", &p->value);
2246 	}
2247 
2248       pool_size = 0;
2249     }
2250 
2251   return "";
2252 }
2253 
2254 /* Check whether insn is a candidate for a conditional.  */
2255 
2256 static cond_type
2257 is_cond_candidate (rtx insn)
2258 {
2259   /* The only things we conditionalize are those that can be directly
2260      changed into a conditional.  Only bother with SImode items.  If
2261      we wanted to be a little more aggressive, we could also do other
2262      modes such as DImode with reg-reg move or load 0.  */
2263   if (NONJUMP_INSN_P (insn))
2264     {
2265       rtx pat = PATTERN (insn);
2266       rtx src, dst;
2267 
2268       if (GET_CODE (pat) != SET)
2269 	return COND_NO;
2270 
2271       dst = XEXP (pat, 0);
2272 
2273       if ((GET_CODE (dst) != REG &&
2274            GET_CODE (dst) != SUBREG) ||
2275 	  GET_MODE (dst) != SImode)
2276 	return COND_NO;
2277 
2278       src = XEXP (pat, 1);
2279 
2280       if ((GET_CODE (src) == REG ||
2281            (GET_CODE (src) == SUBREG &&
2282 	    GET_CODE (SUBREG_REG (src)) == REG)) &&
2283 	  GET_MODE (src) == SImode)
2284 	return COND_MOV_INSN;
2285       else if (GET_CODE (src) == CONST_INT &&
2286                INTVAL (src) == 0)
2287 	return COND_CLR_INSN;
2288       else if (GET_CODE (src) == PLUS &&
2289                (GET_CODE (XEXP (src, 0)) == REG ||
2290                 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2291                  GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2292                GET_MODE (XEXP (src, 0)) == SImode &&
2293                GET_CODE (XEXP (src, 1)) == CONST_INT &&
2294                INTVAL (XEXP (src, 1)) == 1)
2295 	return COND_INC_INSN;
2296       else if (((GET_CODE (src) == MINUS &&
2297 		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2298 		 INTVAL (XEXP (src, 1)) == 1) ||
2299                 (GET_CODE (src) == PLUS &&
2300 		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2301 		 INTVAL (XEXP (src, 1)) == -1)) &&
2302                (GET_CODE (XEXP (src, 0)) == REG ||
2303 		(GET_CODE (XEXP (src, 0)) == SUBREG &&
2304 		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2305                GET_MODE (XEXP (src, 0)) == SImode)
2306 	return COND_DEC_INSN;
2307 
2308       /* Some insns that we don't bother with:
2309 	 (set (rx:DI) (ry:DI))
2310 	 (set (rx:DI) (const_int 0))
2311       */
2312 
2313     }
2314   else if (JUMP_P (insn)
2315 	   && GET_CODE (PATTERN (insn)) == SET
2316 	   && GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2317     return COND_BRANCH_INSN;
2318 
2319   return COND_NO;
2320 }
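
/* For reference, the SImode patterns accepted above and the emitters that
   emit_new_cond_insn below uses for them:
     reg-reg copy            -> COND_MOV_INSN    (gen_movt0)
     load of constant 0      -> COND_CLR_INSN    (gen_movt0)
     add of constant 1       -> COND_INC_INSN    (gen_incscc / gen_incscc_false)
     subtract of constant 1  -> COND_DEC_INSN    (gen_decscc / gen_decscc_false)
     unconditional jump      -> COND_BRANCH_INSN (gen_branch_true / gen_branch_false).  */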
2321 
2322 /* Emit a conditional version of insn and replace the old insn with the
2323    new one.  Return the new insn if emitted.  */
2324 
2325 static rtx_insn *
2326 emit_new_cond_insn (rtx insn, int cond)
2327 {
2328   rtx c_insn = 0;
2329   rtx pat, dst, src;
2330   cond_type num;
2331 
2332   if ((num = is_cond_candidate (insn)) == COND_NO)
2333     return NULL;
2334 
2335   pat = PATTERN (insn);
2336 
2337   if (NONJUMP_INSN_P (insn))
2338     {
2339       dst = SET_DEST (pat);
2340       src = SET_SRC (pat);
2341     }
2342   else
2343     {
2344       dst = JUMP_LABEL (insn);
2345       src = NULL_RTX;
2346     }
2347 
2348   switch (num)
2349     {
2350     case COND_MOV_INSN:
2351     case COND_CLR_INSN:
2352       if (cond)
2353 	c_insn = gen_movt0 (dst, src, dst);
2354       else
2355 	c_insn = gen_movt0 (dst, dst, src);
2356       break;
2357 
2358     case COND_INC_INSN:
2359       if (cond)
2360 	c_insn = gen_incscc (dst, dst);
2361       else
2362 	c_insn = gen_incscc_false (dst, dst);
2363       break;
2364 
2365     case COND_DEC_INSN:
2366       if (cond)
2367 	c_insn = gen_decscc (dst, dst);
2368       else
2369 	c_insn = gen_decscc_false (dst, dst);
2370       break;
2371 
2372     case COND_BRANCH_INSN:
2373       if (cond)
2374 	c_insn = gen_branch_true (dst);
2375       else
2376 	c_insn = gen_branch_false (dst);
2377       break;
2378 
2379     default:
2380       return NULL;
2381     }
2382 
2383   /* Only copy the notes if they exist.  */
2384   if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2385     {
2386       /* We really don't need to bother with the notes and links at this
2387 	 point, but go ahead and save the notes.  This will help is_dead()
2388 	 when applying peepholes (links don't matter since they are not
2389 	 used any more beyond this point for the mcore).  */
2390       REG_NOTES (c_insn) = REG_NOTES (insn);
2391     }
2392 
2393   if (num == COND_BRANCH_INSN)
2394     {
2395       /* For jumps, we need to be a little bit careful and emit the new jump
2396          before the old one and to update the use count for the target label.
2397          This way, the barrier following the old (uncond) jump will get
2398 	 deleted, but the label won't.  */
2399       c_insn = emit_jump_insn_before (c_insn, insn);
2400 
2401       ++ LABEL_NUSES (dst);
2402 
2403       JUMP_LABEL (c_insn) = dst;
2404     }
2405   else
2406     c_insn = emit_insn_after (c_insn, insn);
2407 
2408   delete_insn (insn);
2409 
2410   return as_a <rtx_insn *> (c_insn);
2411 }
2412 
2413 /* Attempt to change a basic block into a series of conditional insns.  This
2414    works by taking the branch at the end of the 1st block and scanning for the
2415    end of the 2nd block.  If all instructions in the 2nd block have cond.
2416    versions and the label at the start of block 3 is the same as the target
2417    from the branch at block 1, then conditionalize all insns in block 2 using
2418    the inverse condition of the branch at block 1.  (Note I'm bending the
2419    definition of basic block here.)
2420 
2421    e.g., change:
2422 
2423 		bt	L2             <-- end of block 1 (delete)
2424 		mov	r7,r8
2425 		addu	r7,1
2426 		br	L3             <-- end of block 2
2427 
2428 	L2:	...                    <-- start of block 3 (NUSES==1)
2429 	L3:	...
2430 
2431    to:
2432 
2433 		movf	r7,r8
2434 		incf	r7
2435 		bf	L3
2436 
2437 	L3:	...
2438 
2439    we can delete the L2 label if NUSES==1 and re-apply the optimization
2440    starting at the last instruction of block 2.  This may allow an entire
2441    if-then-else statement to be conditionalized.  BRC  */
2442 static rtx_insn *
2443 conditionalize_block (rtx_insn *first)
2444 {
2445   rtx_insn *insn;
2446   rtx br_pat;
2447   rtx_insn *end_blk_1_br = 0;
2448   rtx_insn *end_blk_2_insn = 0;
2449   rtx_insn *start_blk_3_lab = 0;
2450   int cond;
2451   int br_lab_num;
2452   int blk_size = 0;
2453 
2454 
2455   /* Check that the first insn is a candidate conditional jump.  This is
2456      the one that we'll eliminate.  If not, advance to the next insn to
2457      try.  */
2458   if (! JUMP_P (first)
2459       || GET_CODE (PATTERN (first)) != SET
2460       || GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2461     return NEXT_INSN (first);
2462 
2463   /* Extract some information we need.  */
2464   end_blk_1_br = first;
2465   br_pat = PATTERN (end_blk_1_br);
2466 
2467   /* Complement the condition since we use the reverse cond. for the insns.  */
2468   cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2469 
2470   /* Determine what kind of branch we have.  */
2471   if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2472     {
2473       /* A normal branch, so extract label out of first arm.  */
2474       br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2475     }
2476   else
2477     {
2478       /* An inverse branch, so extract the label out of the 2nd arm
2479 	 and complement the condition.  */
2480       cond = (cond == 0);
2481       br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2482     }
2483 
2484   /* Scan forward for the start of block 2: it must start with a
2485      label and that label must be the same as the branch target
2486      label from block 1.  We don't care about whether block 2 actually
2487      ends with a branch or a label (an uncond. branch is
2488      conditionalizable).  */
2489   for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2490     {
2491       enum rtx_code code;
2492 
2493       code = GET_CODE (insn);
2494 
2495       /* Look for the label at the start of block 3.  */
2496       if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2497 	break;
2498 
2499       /* Skip barriers, notes, and conditionalizable insns.  If the
2500          insn is not conditionalizable or makes this optimization fail,
2501          just return the next insn so we can start over from that point.  */
2502       if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2503 	return NEXT_INSN (insn);
2504 
2505       /* Remember the last real insn before the label (i.e. end of block 2).  */
2506       if (code == JUMP_INSN || code == INSN)
2507 	{
2508 	  blk_size ++;
2509 	  end_blk_2_insn = insn;
2510 	}
2511     }
2512 
2513   if (!insn)
2514     return insn;
2515 
2516   /* It is possible for this optimization to slow performance if the blocks
2517      are long.  This really depends upon whether the branch is likely taken
2518      or not.  If the branch is taken, we slow performance in many cases.  But,
2519      if the branch is not taken, we always help performance (for a single
2520      block, but for a double block (i.e. when the optimization is re-applied)
2521      this is not true since the 'right thing' depends on the overall length of
2522      the collapsed block).  As a compromise, don't apply this optimization on
2523      blocks larger than size 2 (unlikely for the mcore) when speed is important.
2524      The best threshold depends on the latencies of the instructions (i.e.,
2525      the branch penalty).  */
2526   if (optimize > 1 && blk_size > 2)
2527     return insn;
2528 
2529   /* At this point, we've found the start of block 3 and we know that
2530      it is the destination of the branch from block 1.   Also, all
2531      instructions in the block 2 are conditionalizable.  So, apply the
2532      conditionalization and delete the branch.  */
2533   start_blk_3_lab = insn;
2534 
2535   for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2536        insn = NEXT_INSN (insn))
2537     {
2538       rtx_insn *newinsn;
2539 
2540       if (insn->deleted ())
2541 	continue;
2542 
2543       /* Try to form a conditional variant of the instruction and emit it.  */
2544       if ((newinsn = emit_new_cond_insn (insn, cond)))
2545 	{
2546 	  if (end_blk_2_insn == insn)
2547             end_blk_2_insn = newinsn;
2548 
2549 	  insn = newinsn;
2550 	}
2551     }
2552 
2553   /* Note whether we will delete the label starting blk 3 when the jump
2554      gets deleted.  If so, we want to re-apply this optimization at the
2555      last real instruction right before the label.  */
2556   if (LABEL_NUSES (start_blk_3_lab) == 1)
2557     {
2558       start_blk_3_lab = 0;
2559     }
2560 
2561   /* ??? we probably should redistribute the death notes for this insn, esp.
2562      the death of cc, but it doesn't really matter this late in the game.
2563      The peepholes all use is_dead() which will find the correct death
2564      regardless of whether there is a note.  */
2565   delete_insn (end_blk_1_br);
2566 
2567   if (! start_blk_3_lab)
2568     return end_blk_2_insn;
2569 
2570   /* Return the insn right after the label at the start of block 3.  */
2571   return NEXT_INSN (start_blk_3_lab);
2572 }
2573 
2574 /* Apply the conditionalization of blocks optimization.  This is the
2575    outer loop that traverses through the insns scanning for a branch
2576    that signifies an opportunity to apply the optimization.  Note that
2577    this optimization is applied late.  If we could apply it earlier,
2578    say before cse 2, it may expose more optimization opportunities.
2579    But the payback probably isn't really worth the effort (we'd have
2580    to update all reg/flow/notes/links/etc to make it work - and stick it
2581    in before cse 2).  */
2582 
2583 static void
2584 conditionalize_optimization (void)
2585 {
2586   rtx_insn *insn;
2587 
2588   for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2589     continue;
2590 }
2591 
2592 /* This is to handle loads from the constant pool.  */
2593 
2594 static void
2595 mcore_reorg (void)
2596 {
2597   /* Reset this variable.  */
2598   current_function_anonymous_args = 0;
2599 
2600   if (optimize == 0)
2601     return;
2602 
2603   /* Conditionalize blocks where we can.  */
2604   conditionalize_optimization ();
2605 
2606   /* Literal pool generation is now pushed off until the assembler.  */
2607 }
2608 
2609 
2610 /* Return true if X is something that can be moved directly into r15.  */
2611 
2612 bool
2613 mcore_r15_operand_p (rtx x)
2614 {
2615   switch (GET_CODE (x))
2616     {
2617     case CONST_INT:
2618       return mcore_const_ok_for_inline (INTVAL (x));
2619 
2620     case REG:
2621     case SUBREG:
2622     case MEM:
2623       return 1;
2624 
2625     default:
2626       return 0;
2627     }
2628 }
2629 
2630 /* Implement SECONDARY_RELOAD_CLASS.  If RCLASS contains r15, and we can't
2631    directly move X into it, use r1-r14 as a temporary.  */
2632 
2633 enum reg_class
2634 mcore_secondary_reload_class (enum reg_class rclass,
2635 			      machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2636 {
2637   if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2638       && !mcore_r15_operand_p (x))
2639     return LRW_REGS;
2640   return NO_REGS;
2641 }
2642 
2643 /* Return the reg_class to use when reloading the rtx X into the class
2644    RCLASS.  If X is too complex to move directly into r15, prefer to
2645    use LRW_REGS instead.  */
2646 
2647 enum reg_class
2648 mcore_reload_class (rtx x, enum reg_class rclass)
2649 {
2650   if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2651     return LRW_REGS;
2652 
2653   return rclass;
2654 }
2655 
2656 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2657    register.  Note that the current version doesn't worry about whether
2658    they are the same mode or not (e.g., a QImode in r2 matches an HImode
2659    in r2, which matches an SImode in r2).  We might think in the future
2660    about whether we want to be able to say something about modes.  */
2661 
2662 int
2663 mcore_is_same_reg (rtx x, rtx y)
2664 {
2665   /* Strip any and all of the subreg wrappers.  */
2666   while (GET_CODE (x) == SUBREG)
2667     x = SUBREG_REG (x);
2668 
2669   while (GET_CODE (y) == SUBREG)
2670     y = SUBREG_REG (y);
2671 
2672   if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2673     return 1;
2674 
2675   return 0;
2676 }
2677 
2678 static void
2679 mcore_option_override (void)
2680 {
2681   /* Only the m340 supports little endian code.  */
2682   if (TARGET_LITTLE_END && ! TARGET_M340)
2683     target_flags |= MASK_M340;
2684 }
2685 
2686 
2687 /* Compute the number of word sized registers needed to
2688    hold a function argument of mode MODE and type TYPE.  */
2689 
2690 int
2691 mcore_num_arg_regs (machine_mode mode, const_tree type)
2692 {
2693   int size;
2694 
2695   if (targetm.calls.must_pass_in_stack (mode, type))
2696     return 0;
2697 
2698   if (type && mode == BLKmode)
2699     size = int_size_in_bytes (type);
2700   else
2701     size = GET_MODE_SIZE (mode);
2702 
2703   return ROUND_ADVANCE (size);
2704 }
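
/* For example, a 10-byte BLKmode struct needs ROUND_ADVANCE (10) argument
   words -- three 32-bit registers, assuming ROUND_ADVANCE rounds the byte
   count up to whole words.  */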
2705 
2706 static rtx
2707 handle_structs_in_regs (machine_mode mode, const_tree type, int reg)
2708 {
2709   int size;
2710 
2711   /* The MCore ABI defines that a structure whose size is not a whole number
2712      of words is passed packed into registers (or spilled onto the stack if
2713      not enough registers are available) with the last few bytes of the
2714      structure being packed, left-justified, into the last register/stack slot.
2715      GCC handles this correctly if the last word is in a stack slot, but we
2716      have to generate a special, PARALLEL RTX if the last word is in an
2717      argument register.  */
2718   if (type
2719       && TYPE_MODE (type) == BLKmode
2720       && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2721       && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2722       && (size % UNITS_PER_WORD != 0)
2723       && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2724     {
2725       rtx    arg_regs [NPARM_REGS];
2726       int    nregs;
2727       rtx    result;
2728       rtvec  rtvec;
2729 
2730       for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2731         {
2732           arg_regs [nregs] =
2733 	    gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2734 		  	       GEN_INT (nregs * UNITS_PER_WORD));
2735 	  nregs ++;
2736         }
2737 
2738       /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
2739       gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2740       rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2741 			  arg_regs[3], arg_regs[4], arg_regs[5]);
2742 
2743       result = gen_rtx_PARALLEL (mode, rtvec);
2744       return result;
2745     }
2746 
2747   return gen_rtx_REG (mode, reg);
2748 }
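
/* As a hypothetical example (register numbers follow the argument-register
   mapping described in mcore_arg_partial_bytes below, where argument
   register 0 is hardware register 2): a 10-byte BLKmode struct starting in
   the first argument register comes back as a three-element PARALLEL
   pairing r2, r3 and r4 with byte offsets 0, 4 and 8, the final two bytes
   travelling left-justified in r4.  */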
2749 
2750 rtx
2751 mcore_function_value (const_tree valtype, const_tree func)
2752 {
2753   machine_mode mode;
2754   int unsigned_p;
2755 
2756   mode = TYPE_MODE (valtype);
2757 
2758   /* Since we promote return types, we must promote the mode here too.  */
2759   mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2760 
2761   return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2762 }
2763 
2764 /* Define where to put the arguments to a function.
2765    Value is zero to push the argument on the stack,
2766    or a hard register in which to store the argument.
2767 
2768    MODE is the argument's machine mode.
2769    TYPE is the data type of the argument (as a tree).
2770     This is null for libcalls where that information may
2771     not be available.
2772    CUM is a variable of type CUMULATIVE_ARGS which gives info about
2773     the preceding args and about the function being called.
2774    NAMED is nonzero if this argument is a named parameter
2775     (otherwise it is an extra parameter matching an ellipsis).
2776 
2777    On MCore the first args are normally in registers
2778    and the rest are pushed.  Any arg that starts within the first
2779    NPARM_REGS words is at least partially passed in a register unless
2780    its data type forbids.  */
2781 
2782 static rtx
2783 mcore_function_arg (cumulative_args_t cum, machine_mode mode,
2784 		    const_tree type, bool named)
2785 {
2786   int arg_reg;
2787 
2788   if (! named || mode == VOIDmode)
2789     return 0;
2790 
2791   if (targetm.calls.must_pass_in_stack (mode, type))
2792     return 0;
2793 
2794   arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2795 
2796   if (arg_reg < NPARM_REGS)
2797     return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2798 
2799   return 0;
2800 }
2801 
2802 static void
2803 mcore_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
2804 			    const_tree type, bool named ATTRIBUTE_UNUSED)
2805 {
2806   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2807 
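  /* Only named arguments consume argument registers here; an unnamed
     (variadic) argument leaves the cumulative count untouched.  */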
2808   *cum = (ROUND_REG (*cum, mode)
2809 	  + (int)named * mcore_num_arg_regs (mode, type));
2810 }
2811 
2812 static unsigned int
2813 mcore_function_arg_boundary (machine_mode mode,
2814 			     const_tree type ATTRIBUTE_UNUSED)
2815 {
2816   /* Doubles must be aligned to an 8 byte boundary.  */
2817   return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2818 	  ? BIGGEST_ALIGNMENT
2819 	  : PARM_BOUNDARY);
2820 }
2821 
2822 /* Returns the number of bytes of argument registers required to hold *part*
2823    of a parameter of machine mode MODE and type TYPE (which may be NULL if
2824    the type is not known).  If the argument fits entirely in the argument
2825    registers, or entirely on the stack, then 0 is returned.  CUM is the
2826    number of argument registers already used by earlier parameters to
2827    the function.  */
2828 
2829 static int
2830 mcore_arg_partial_bytes (cumulative_args_t cum, machine_mode mode,
2831 			 tree type, bool named)
2832 {
2833   int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2834 
2835   if (named == 0)
2836     return 0;
2837 
2838   if (targetm.calls.must_pass_in_stack (mode, type))
2839     return 0;
2840 
2841   /* REG is not the *hardware* register number of the register that holds
2842      the argument, it is the *argument* register number.  So for example,
2843      the first argument to a function goes in argument register 0, which
2844      translates (for the MCore) into hardware register 2.  The second
2845      argument goes into argument register 1, which translates into hardware
2846      register 3, and so on.  NPARM_REGS is the number of argument registers
2847      supported by the target, not the maximum hardware register number of
2848      the target.  */
2849   if (reg >= NPARM_REGS)
2850     return 0;
2851 
2852   /* If the argument fits entirely in registers, return 0.  */
2853   if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2854     return 0;
2855 
2856   /* The argument overflows the number of available argument registers.
2857      Compute how many argument registers have not yet been assigned to
2858      hold an argument.  */
2859   reg = NPARM_REGS - reg;
2860 
2861   /* Return partially in registers and partially on the stack.  */
2862   return reg * UNITS_PER_WORD;
2863 }
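
/* Worked example: with four argument words already assigned (REG == 4) and
   a three-word argument, 4 + 3 exceeds NPARM_REGS (6), so
   (6 - 4) * UNITS_PER_WORD bytes -- 8 on this 32-bit target -- travel in
   registers and the remainder goes on the stack.  */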
2864 
2865 /* Return nonzero if SYMBOL is marked as being dllexport'd.  */
2866 
2867 int
2868 mcore_dllexport_name_p (const char * symbol)
2869 {
2870   return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2871 }
2872 
2873 /* Return nonzero if SYMBOL is marked as being dllimport'd.  */
2874 
2875 int
2876 mcore_dllimport_name_p (const char * symbol)
2877 {
2878   return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2879 }
2880 
2881 /* Mark a DECL as being dllexport'd.  */
2882 
2883 static void
2884 mcore_mark_dllexport (tree decl)
2885 {
2886   const char * oldname;
2887   char * newname;
2888   rtx    rtlname;
2889   tree   idp;
2890 
2891   rtlname = XEXP (DECL_RTL (decl), 0);
2892 
2893   if (GET_CODE (rtlname) == MEM)
2894     rtlname = XEXP (rtlname, 0);
2895   gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2896   oldname = XSTR (rtlname, 0);
2897 
2898   if (mcore_dllexport_name_p (oldname))
2899     return;  /* Already done.  */
2900 
2901   newname = XALLOCAVEC (char, strlen (oldname) + 4);
2902   sprintf (newname, "@e.%s", oldname);
2903 
2904   /* We pass newname through get_identifier to ensure it has a unique
2905      address.  RTL processing can sometimes peek inside the symbol ref
2906      and compare the string's addresses to see if two symbols are
2907      identical.  */
2908   /* ??? At least I think that's why we do this.  */
2909   idp = get_identifier (newname);
2910 
2911   XEXP (DECL_RTL (decl), 0) =
2912     gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2913 }
2914 
2915 /* Mark a DECL as being dllimport'd.  */
2916 
2917 static void
2918 mcore_mark_dllimport (tree decl)
2919 {
2920   const char * oldname;
2921   char * newname;
2922   tree   idp;
2923   rtx    rtlname;
2924   rtx    newrtl;
2925 
2926   rtlname = XEXP (DECL_RTL (decl), 0);
2927 
2928   if (GET_CODE (rtlname) == MEM)
2929     rtlname = XEXP (rtlname, 0);
2930   gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2931   oldname = XSTR (rtlname, 0);
2932 
2933   gcc_assert (!mcore_dllexport_name_p (oldname));
2934   if (mcore_dllimport_name_p (oldname))
2935     return; /* Already done.  */
2936 
2937   /* ??? One can well ask why we're making these checks here,
2938      and that would be a good question.  */
2939 
2940   /* Imported variables can't be initialized.  */
2941   if (TREE_CODE (decl) == VAR_DECL
2942       && !DECL_VIRTUAL_P (decl)
2943       && DECL_INITIAL (decl))
2944     {
2945       error ("initialized variable %q+D is marked dllimport", decl);
2946       return;
2947     }
2948 
2949   /* `extern' needn't be specified with dllimport.
2950      Specify `extern' now and hope for the best.  Sigh.  */
2951   if (TREE_CODE (decl) == VAR_DECL
2952       /* ??? Is this test for vtables needed?  */
2953       && !DECL_VIRTUAL_P (decl))
2954     {
2955       DECL_EXTERNAL (decl) = 1;
2956       TREE_PUBLIC (decl) = 1;
2957     }
2958 
2959   newname = XALLOCAVEC (char, strlen (oldname) + 11);
2960   sprintf (newname, "@i.__imp_%s", oldname);
2961 
2962   /* We pass newname through get_identifier to ensure it has a unique
2963      address.  RTL processing can sometimes peek inside the symbol ref
2964      and compare the string's addresses to see if two symbols are
2965      identical.  */
2966   /* ??? At least I think that's why we do this.  */
2967   idp = get_identifier (newname);
2968 
2969   newrtl = gen_rtx_MEM (Pmode,
2970 		    gen_rtx_SYMBOL_REF (Pmode,
2971 			     IDENTIFIER_POINTER (idp)));
2972   XEXP (DECL_RTL (decl), 0) = newrtl;
2973 }
2974 
2975 static int
2976 mcore_dllexport_p (tree decl)
2977 {
2978   if (   TREE_CODE (decl) != VAR_DECL
2979       && TREE_CODE (decl) != FUNCTION_DECL)
2980     return 0;
2981 
2982   return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2983 }
2984 
2985 static int
2986 mcore_dllimport_p (tree decl)
2987 {
2988   if (   TREE_CODE (decl) != VAR_DECL
2989       && TREE_CODE (decl) != FUNCTION_DECL)
2990     return 0;
2991 
2992   return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2993 }
2994 
2995 /* We must mark dll symbols specially.  Definitions of dllexport'd objects
2996    install some info in the .drectve (PE) or .exports (ELF) sections.  */
2997 
2998 static void
2999 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3000 {
3001   /* Mark the decl so we can tell from the rtl whether the object is
3002      dllexport'd or dllimport'd.  */
3003   if (mcore_dllexport_p (decl))
3004     mcore_mark_dllexport (decl);
3005   else if (mcore_dllimport_p (decl))
3006     mcore_mark_dllimport (decl);
3007 
3008   /* It might be that DECL has already been marked as dllimport, but
3009      a subsequent definition nullified that.  The attribute is gone
3010      but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
3011   else if ((TREE_CODE (decl) == FUNCTION_DECL
3012 	    || TREE_CODE (decl) == VAR_DECL)
3013 	   && DECL_RTL (decl) != NULL_RTX
3014 	   && GET_CODE (DECL_RTL (decl)) == MEM
3015 	   && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3016 	   && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3017 	   && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3018     {
3019       const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3020       tree idp = get_identifier (oldname + 9);
3021       rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3022 
3023       XEXP (DECL_RTL (decl), 0) = newrtl;
3024 
3025       /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3026 	 ??? We leave these alone for now.  */
3027     }
3028 }
3029 
3030 /* Undo the effects of the above.  */
3031 
3032 static const char *
3033 mcore_strip_name_encoding (const char * str)
3034 {
3035   return str + (str[0] == '@' ? 3 : 0);
3036 }
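
/* For example, "@e.foo" strips to "foo" and "@i.__imp_bar" strips to
   "__imp_bar"; names without the '@' marker are returned unchanged.  */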
3037 
3038 /* MCore specific attribute support.
3039    dllexport - for exporting a function/variable that will live in a dll
3040    dllimport - for importing a function/variable from a dll
3041    naked     - do not create a function prologue/epilogue.  */
3042 
3043 /* Handle a "naked" attribute; arguments as in
3044    struct attribute_spec.handler.  */
3045 
3046 static tree
3047 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3048 			      int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3049 {
3050   if (TREE_CODE (*node) != FUNCTION_DECL)
3051     {
3052       warning (OPT_Wattributes, "%qE attribute only applies to functions",
3053 	       name);
3054       *no_add_attrs = true;
3055     }
3056 
3057   return NULL_TREE;
3058 }
3059 
3060 /* ??? It looks like this is PE specific?  Oh well, this is what the
3061    old code did as well.  */
3062 
3063 static void
3064 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3065 {
3066   int len;
3067   const char * name;
3068   char * string;
3069   const char * prefix;
3070 
3071   name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3072 
3073   /* Strip off any encoding in name.  */
3074   name = (* targetm.strip_name_encoding) (name);
3075 
3076   /* The object is put in, for example, section .text$foo.
3077      The linker will then ultimately place it in .text
3078      (everything from the $ on is stripped).  */
3079   if (TREE_CODE (decl) == FUNCTION_DECL)
3080     prefix = ".text$";
3081   /* For compatibility with EPOC, we ignore the fact that the
3082      section might have relocs against it.  */
3083   else if (decl_readonly_section (decl, 0))
3084     prefix = ".rdata$";
3085   else
3086     prefix = ".data$";
3087 
3088   len = strlen (name) + strlen (prefix);
3089   string = XALLOCAVEC (char, len + 1);
3090 
3091   sprintf (string, "%s%s", prefix, name);
3092 
3093   set_decl_section_name (decl, string);
3094 }
3095 
3096 int
3097 mcore_naked_function_p (void)
3098 {
3099   return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3100 }
3101 
3102 static bool
3103 mcore_warn_func_return (tree decl)
3104 {
3105   /* Naked functions are implemented entirely in assembly, including the
3106      return sequence, so suppress warnings about this.  */
3107   return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3108 }
3109 
3110 #ifdef OBJECT_FORMAT_ELF
3111 static void
3112 mcore_asm_named_section (const char *name,
3113 			 unsigned int flags ATTRIBUTE_UNUSED,
3114 			 tree decl ATTRIBUTE_UNUSED)
3115 {
3116   fprintf (asm_out_file, "\t.section %s\n", name);
3117 }
3118 #endif /* OBJECT_FORMAT_ELF */
3119 
3120 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */
3121 
3122 static void
3123 mcore_external_libcall (rtx fun)
3124 {
3125   fprintf (asm_out_file, "\t.import\t");
3126   assemble_name (asm_out_file, XSTR (fun, 0));
3127   fprintf (asm_out_file, "\n");
3128 }
3129 
3130 /* Worker function for TARGET_RETURN_IN_MEMORY.  */
3131 
3132 static bool
3133 mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3134 {
3135   const HOST_WIDE_INT size = int_size_in_bytes (type);
3136   return (size == -1 || size > 2 * UNITS_PER_WORD);
3137 }
3138 
3139 /* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3140    Output assembler code for a block containing the constant parts
3141    of a trampoline, leaving space for the variable parts.
3142 
3143    On the MCore, the trampoline looks like:
3144    	lrw	r1,  function
3145      	lrw	r13, area
3146    	jmp	r13
3147    	or	r0, r0
3148     .literals                                                */
3149 
3150 static void
3151 mcore_asm_trampoline_template (FILE *f)
3152 {
3153   fprintf (f, "\t.short	0x7102\n");
3154   fprintf (f, "\t.short	0x7d02\n");
3155   fprintf (f, "\t.short	0x00cd\n");
3156   fprintf (f, "\t.short	0x1e00\n");
3157   fprintf (f, "\t.long	0\n");
3158   fprintf (f, "\t.long	0\n");
3159 }
3160 
3161 /* Worker function for TARGET_TRAMPOLINE_INIT.  */
3162 
3163 static void
3164 mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3165 {
3166   rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3167   rtx mem;
3168 
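  /* The template's first eight bytes are fixed code; the two literal words
     that follow (offsets 8 and 12) are patched here with the static chain
     value and the target function's address, which the template's lrw
     instructions then load.  */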
3169   emit_block_move (m_tramp, assemble_trampoline_template (),
3170 		   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3171 
3172   mem = adjust_address (m_tramp, SImode, 8);
3173   emit_move_insn (mem, chain_value);
3174   mem = adjust_address (m_tramp, SImode, 12);
3175   emit_move_insn (mem, fnaddr);
3176 }
3177 
3178 /* Implement TARGET_LEGITIMATE_CONSTANT_P
3179 
3180    On the MCore, allow anything but a double.  */
3181 
3182 static bool
3183 mcore_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3184 {
3185   return GET_CODE (x) != CONST_DOUBLE;
3186 }
3187 
3188 /* Helper function for `mcore_legitimate_address_p'.  */
3189 
3190 static bool
3191 mcore_reg_ok_for_base_p (const_rtx reg, bool strict_p)
3192 {
3193   if (strict_p)
3194     return REGNO_OK_FOR_BASE_P (REGNO (reg));
3195   else
3196     return (REGNO (reg) <= 16 || !HARD_REGISTER_P (reg));
3197 }
3198 
3199 static bool
3200 mcore_base_register_rtx_p (const_rtx x, bool strict_p)
3201 {
3202   return REG_P (x) && mcore_reg_ok_for_base_p (x, strict_p);
3203 }
3204 
3205 /*  A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
3206     and for DI is 0..56 because we use two SI loads, etc.  */
3207 
3208 static bool
3209 mcore_legitimate_index_p (machine_mode mode, const_rtx op)
3210 {
3211   if (CONST_INT_P (op))
3212     {
3213       if (GET_MODE_SIZE (mode) >= 4
3214 	  && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 4) == 0
3215 	  &&  ((unsigned HOST_WIDE_INT) INTVAL (op))
3216 	      <= (unsigned HOST_WIDE_INT) 64 - GET_MODE_SIZE (mode))
3217 	return true;
3218       if (GET_MODE_SIZE (mode) == 2
3219 	  && (((unsigned HOST_WIDE_INT) INTVAL (op)) % 2) == 0
3220 	  &&  ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 30)
3221 	return true;
3222       if (GET_MODE_SIZE (mode) == 1
3223 	  && ((unsigned HOST_WIDE_INT) INTVAL (op)) <= 15)
3224 	return true;
3225     }
3226   return false;
3227 }
3228 
3229 
3230 /* Worker function for TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P.
3231 
3232    Allow  REG
3233 	  REG + disp  */
3234 
3235 static bool
3236 mcore_legitimate_address_p (machine_mode mode, rtx x, bool strict_p,
3237 			    addr_space_t as)
3238 {
3239   gcc_assert (ADDR_SPACE_GENERIC_P (as));
3240 
3241   if (mcore_base_register_rtx_p (x, strict_p))
3242     return true;
3243   else if (GET_CODE (x) == PLUS || GET_CODE (x) == LO_SUM)
3244     {
3245       rtx xop0 = XEXP (x, 0);
3246       rtx xop1 = XEXP (x, 1);
3247       if (mcore_base_register_rtx_p (xop0, strict_p)
3248 	  && mcore_legitimate_index_p (mode, xop1))
3249 	return true;
3250       if (mcore_base_register_rtx_p (xop1, strict_p)
3251  	  && mcore_legitimate_index_p (mode, xop0))
3252 	return true;
3253     }
3254 
3255   return false;
3256 }
3257 
3258