1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005-2019 Free Software Foundation, Inc.
3    Contributed by Red Hat.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published
9    by the Free Software Foundation; either version 3, or (at your
10    option) any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 #define IN_TARGET_CODE 1
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "stringpool.h"
31 #include "attribs.h"
32 #include "df.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "output.h"
41 #include "insn-attr.h"
42 #include "flags.h"
43 #include "reload.h"
44 #include "stor-layout.h"
45 #include "varasm.h"
46 #include "calls.h"
47 #include "explow.h"
48 #include "expr.h"
49 #include "tm-constrs.h"
50 #include "builtins.h"
51 
52 /* This file should be included last.  */
53 #include "target-def.h"
54 
55 /* Prototypes */
56 
/* Used by m32c_pushm_popm.  Selects whether that routine emits a
   pushm insn, a popm insn, or merely computes the byte count the
   push/pop would occupy on the stack.  */
typedef enum
{
  PP_pushm,	/* Emit a pushm of the selected registers.  */
  PP_popm,	/* Emit the matching popm.  */
  PP_justcount	/* Emit nothing; just return the stack-byte count.  */
} Push_Pop_Type;
64 
65 static bool m32c_function_needs_enter (void);
66 static tree interrupt_handler (tree *, tree, tree, int, bool *);
67 static tree function_vector_handler (tree *, tree, tree, int, bool *);
68 static int interrupt_p (tree node);
69 static int bank_switch_p (tree node);
70 static int fast_interrupt_p (tree node);
71 static int interrupt_p (tree node);
72 static bool m32c_asm_integer (rtx, unsigned int, int);
73 static int m32c_comp_type_attributes (const_tree, const_tree);
74 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
75 static struct machine_function *m32c_init_machine_status (void);
76 static void m32c_insert_attributes (tree, tree *);
77 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
78 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
79 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
80 			      const_tree, bool);
81 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
82 				    const_tree, bool);
83 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
84 				       const_tree, bool);
85 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
86 static int m32c_pushm_popm (Push_Pop_Type);
87 static bool m32c_strict_argument_naming (cumulative_args_t);
88 static rtx m32c_struct_value_rtx (tree, int);
89 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
90 static int need_to_save (int);
91 static rtx m32c_function_value (const_tree, const_tree, bool);
92 static rtx m32c_libcall_value (machine_mode, const_rtx);
93 
94 /* Returns true if an address is specified, else false.  */
95 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
96 
97 static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
98 
/* Marks SYMBOL_REFs of functions carrying the "function_vector"
   attribute so they can be called through the special vector table.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)

/* String-equality shorthand used throughout this file.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* First word of each register class's hard-register mask, indexed by
   class number.  Only one word is kept; this port has few registers.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] receives the
   encoded string, patternp is the write cursor, and patternr[] records
   the rtx node behind each encoded character.  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
130 
131 static int
far_addr_space_p(rtx x)132 far_addr_space_p (rtx x)
133 {
134   if (GET_CODE (x) != MEM)
135     return 0;
136 #if DEBUG0
137   fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
138   fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
139 #endif
140   return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
141 }
142 
143 /* We do most RTX matching by converting the RTX into a string, and
144    using string compares.  This vastly simplifies the logic in many of
145    the functions in this file.
146 
147    On exit, pattern[] has the encoded string (use RTX_IS("...") to
148    compare it) and patternr[] has pointers to the nodes in the RTX
149    corresponding to each character in the encoded string.  The latter
150    is mostly used by print_operand().
151 
152    Unrecognized patterns have '?' in them; this shows up when the
153    assembler complains about syntax errors.
154 */
155 
/* Recursive worker for encode_pattern (): append one or more
   characters describing rtx X to pattern[], recording in patternr[]
   the rtx node behind each appended character, and recurse into X's
   operands.  Codes X cannot encode become '?'.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Leave room for the overflow marker plus the NUL that
     encode_pattern () stores after us.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  /* Remember which node produced the character about to be written;
     print_operand () walks patternr[] by character index.  */
  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* 'S' flags a mode-size-changing subreg; a PSImode subreg of a
	 REG also gets 'S' (both tests can fire, yielding "SS").  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU */
    case CONST:
      /* CONST emits no character of its own; only its operand.  */
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' plus the unspec number as a single digit (assumes the
	 unspec numbers used stay below 10).  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes show up as '?', which no RTX_IS () pattern
	 matches, so they are rejected downstream.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}
264 
265 static void
encode_pattern(rtx x)266 encode_pattern (rtx x)
267 {
268   patternp = pattern;
269   encode_pattern_1 (x);
270   *patternp = 0;
271 }
272 
273 /* Since register names indicate the mode they're used in, we need a
274    way to determine which name to refer to the register with.  Called
275    by print_operand().  */
276 
277 static const char *
reg_name_with_mode(int regno,machine_mode mode)278 reg_name_with_mode (int regno, machine_mode mode)
279 {
280   int mlen = GET_MODE_SIZE (mode);
281   if (regno == R0_REGNO && mlen == 1)
282     return "r0l";
283   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
284     return "r2r0";
285   if (regno == R0_REGNO && mlen == 6)
286     return "r2r1r0";
287   if (regno == R0_REGNO && mlen == 8)
288     return "r3r1r2r0";
289   if (regno == R1_REGNO && mlen == 1)
290     return "r1l";
291   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
292     return "r3r1";
293   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
294     return "a1a0";
295   return reg_names[regno];
296 }
297 
298 /* How many bytes a register uses on stack when it's pushed.  We need
299    to know this because the push opcode needs to explicitly indicate
300    the size of the register, even though the name of the register
301    already tells it that.  Used by m32c_output_reg_{push,pop}, which
302    is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
303 
304 static int
reg_push_size(int regno)305 reg_push_size (int regno)
306 {
307   switch (regno)
308     {
309     case R0_REGNO:
310     case R1_REGNO:
311       return 2;
312     case R2_REGNO:
313     case R3_REGNO:
314     case FLG_REGNO:
315       return 2;
316     case A0_REGNO:
317     case A1_REGNO:
318     case SB_REGNO:
319     case FB_REGNO:
320     case SP_REGNO:
321       if (TARGET_A16)
322 	return 2;
323       else
324 	return 3;
325     default:
326       gcc_unreachable ();
327     }
328 }
329 
330 /* Given two register classes, find the largest intersection between
331    them.  If there is no intersection, return RETURNED_IF_EMPTY
332    instead.  */
333 static reg_class_t
reduce_class(reg_class_t original_class,reg_class_t limiting_class,reg_class_t returned_if_empty)334 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
335 	      reg_class_t returned_if_empty)
336 {
337   HARD_REG_SET cc;
338   int i;
339   reg_class_t best = NO_REGS;
340   unsigned int best_size = 0;
341 
342   if (original_class == limiting_class)
343     return original_class;
344 
345   cc = reg_class_contents[original_class];
346   AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
347 
348   for (i = 0; i < LIM_REG_CLASSES; i++)
349     {
350       if (hard_reg_set_subset_p (reg_class_contents[i], cc))
351 	if (best_size < reg_class_size[i])
352 	  {
353 	    best = (reg_class_t) i;
354 	    best_size = reg_class_size[i];
355 	  }
356 
357     }
358   if (best == NO_REGS)
359     return returned_if_empty;
360   return best;
361 }
362 
363 /* Used by m32c_register_move_cost to determine if a move is
364    impossibly expensive.  */
/* Return true iff some hard register in RCLASS can hold MODE.
   Results are memoized across calls in a static table.  */
static bool
class_can_hold_mode (reg_class_t rclass, machine_mode mode)
{
  /* Cache the results:  0=untested  1=no  2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a suitable register is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
	    && m32c_hard_regno_mode_ok (r, mode))
	  {
	    results[rclass][mode] = 2;
	    break;
	  }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
	   class_names[(int) rclass], mode_name[mode],
	   (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
391 
392 /* Run-time Target Specification.  */
393 
394 /* Memregs are memory locations that gcc treats like general
395    registers, as there are a limited number of true registers and the
396    m32c families can use memory in most places that registers can be
397    used.
398 
399    However, since memory accesses are more expensive than registers,
400    we allow the user to limit the number of memregs available, in
401    order to try to persuade gcc to try harder to use real registers.
402 
403    Memregs are provided by lib1funcs.S.
404 */
405 
406 int ok_to_change_target_memregs = TRUE;
407 
408 /* Implements TARGET_OPTION_OVERRIDE.  */
409 
410 #undef TARGET_OPTION_OVERRIDE
411 #define TARGET_OPTION_OVERRIDE m32c_option_override
412 
413 static void
m32c_option_override(void)414 m32c_option_override (void)
415 {
416   /* We limit memregs to 0..16, and provide a default.  */
417   if (global_options_set.x_target_memregs)
418     {
419       if (target_memregs < 0 || target_memregs > 16)
420 	error ("invalid target memregs value %<%d%>", target_memregs);
421     }
422   else
423     target_memregs = 16;
424 
425   if (TARGET_A24)
426     flag_ivopts = 0;
427 
428   /* This target defaults to strict volatile bitfields.  */
429   if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
430     flag_strict_volatile_bitfields = 1;
431 
432   /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
433      This is always worse than an absolute call.  */
434   if (TARGET_A16)
435     flag_no_function_cse = 1;
436 
437   /* This wants to put insns between compares and their jumps.  */
438   /* FIXME: The right solution is to properly trace the flags register
439      values, but that is too much work for stage 4.  */
440   flag_combine_stack_adjustments = 0;
441 }
442 
443 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
444 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
445 
446 static void
m32c_override_options_after_change(void)447 m32c_override_options_after_change (void)
448 {
449   if (TARGET_A16)
450     flag_no_function_cse = 1;
451 }
452 
453 /* Defining data structures for per-function information */
454 
/* The usual; we set up our machine_function data.  Allocates a
   zero-initialized, GC-tracked machine_function for the current
   function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
461 
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function, so each new function gets a fresh machine_function
   record.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
469 
470 /* Storage Layout */
471 
472 /* Register Basics */
473 
474 /* Basic Characteristics of Registers */
475 
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each row gives, for one hard register, the number of that
   register needed to hold a QI/HI/PSI/SI/DI value; 0 means the mode
   does not fit in that register at all.  Consumed by
   m32c_hard_regno_nregs_1.  Row order must match the hard register
   numbering (r0, r2, r1, r3, ...).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
508 
509 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
510    of available memregs, and select which registers need to be preserved
511    across calls based on the chip family.  */
512 
513 #undef TARGET_CONDITIONAL_REGISTER_USAGE
514 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
515 void
m32c_conditional_register_usage(void)516 m32c_conditional_register_usage (void)
517 {
518   int i;
519 
520   if (target_memregs >= 0 && target_memregs <= 16)
521     {
522       /* The command line option is bytes, but our "registers" are
523 	 16-bit words.  */
524       for (i = (target_memregs+1)/2; i < 8; i++)
525 	{
526 	  fixed_regs[MEM0_REGNO + i] = 1;
527 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
528 	}
529     }
530 
531   /* M32CM and M32C preserve more registers across function calls.  */
532   if (TARGET_A24)
533     {
534       call_used_regs[R1_REGNO] = 0;
535       call_used_regs[R2_REGNO] = 0;
536       call_used_regs[R3_REGNO] = 0;
537       call_used_regs[A0_REGNO] = 0;
538       call_used_regs[A1_REGNO] = 0;
539     }
540 }
541 
542 /* How Values Fit in Registers */
543 
544 /* Implements TARGET_HARD_REGNO_NREGS.  This is complicated by the fact that
545    different registers are different sizes from each other, *and* may
546    be different sizes in different chip families.  */
/* Core of TARGET_HARD_REGNO_NREGS: number of registers starting at
   REGNO needed to hold MODE, or 0 if MODE does not fit there.  The
   order of the checks below is significant — QI/HI must be tested
   before the PSI special case, and the A16 SImode-in-a0 case before
   the generic size tests.  */
static unsigned int
m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
{
  /* The flags register only ever holds the condition code.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos use the generic word-count formula.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words; round the byte size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On the 16-bit parts an SImode value spans the a0/a1 pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
572 
573 static unsigned int
m32c_hard_regno_nregs(unsigned int regno,machine_mode mode)574 m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
575 {
576   unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
577   return rv ? rv : 1;
578 }
579 
580 /* Implement TARGET_HARD_REGNO_MODE_OK.  The above function does the work
581    already; just test its return value.  */
582 static bool
m32c_hard_regno_mode_ok(unsigned int regno,machine_mode mode)583 m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
584 {
585   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
586 }
587 
588 /* Implement TARGET_MODES_TIEABLE_P.  In general, modes aren't tieable since
589    registers are all different sizes.  However, since most modes are
590    bigger than our registers anyway, it's easier to implement this
591    function that way, leaving QImode as the only unique case.  */
592 static bool
m32c_modes_tieable_p(machine_mode m1,machine_mode m2)593 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
594 {
595   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
596     return 1;
597 
598 #if 0
599   if (m1 == QImode || m2 == QImode)
600     return 0;
601 #endif
602 
603   return 1;
604 }
605 
606 /* Register Classes */
607 
608 /* Implements REGNO_REG_CLASS.  */
609 enum reg_class
m32c_regno_reg_class(int regno)610 m32c_regno_reg_class (int regno)
611 {
612   switch (regno)
613     {
614     case R0_REGNO:
615       return R0_REGS;
616     case R1_REGNO:
617       return R1_REGS;
618     case R2_REGNO:
619       return R2_REGS;
620     case R3_REGNO:
621       return R3_REGS;
622     case A0_REGNO:
623       return A0_REGS;
624     case A1_REGNO:
625       return A1_REGS;
626     case SB_REGNO:
627       return SB_REGS;
628     case FB_REGNO:
629       return FB_REGS;
630     case SP_REGNO:
631       return SP_REGS;
632     case FLG_REGNO:
633       return FLG_REGS;
634     default:
635       if (IS_MEM_REGNO (regno))
636 	return MEM_REGS;
637       return ALL_REGS;
638     }
639 }
640 
641 /* Implements REGNO_OK_FOR_BASE_P.  */
642 int
m32c_regno_ok_for_base_p(int regno)643 m32c_regno_ok_for_base_p (int regno)
644 {
645   if (regno == A0_REGNO
646       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
647     return 1;
648   return 0;
649 }
650 
651 /* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
652    registers of the appropriate size.  */
653 
654 #undef TARGET_PREFERRED_RELOAD_CLASS
655 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
656 
/* Prefer general registers of the size matching X's mode; narrow
   RCLASS accordingly without ever widening it (reduce_class returns
   the original class when the intersection is empty).  */
static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no constraint at all, pick a sensible default by size.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers can only be reached via r0l/r1l for QImode;
     other modes are left alone here.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case E_QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  /* QI_REGS cannot hold anything wider than two bytes.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode values ultimately want r0l/r1l.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
704 
705 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
706 
707 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
708 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
709 
/* Output reloads use the same class preferences as input reloads;
   simply delegate.  */
static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
715 
716 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
717    address registers for reloads since they're needed for address
718    reloads.  */
719 int
m32c_limit_reload_class(machine_mode mode,int rclass)720 m32c_limit_reload_class (machine_mode mode, int rclass)
721 {
722 #if DEBUG0
723   fprintf (stderr, "limit_reload_class for %s: %s ->",
724 	   mode_name[mode], class_names[rclass]);
725 #endif
726 
727   if (mode == QImode)
728     rclass = reduce_class (rclass, HL_REGS, rclass);
729   else if (mode == HImode)
730     rclass = reduce_class (rclass, HI_REGS, rclass);
731   else if (mode == SImode)
732     rclass = reduce_class (rclass, SI_REGS, rclass);
733 
734   if (rclass != A_REGS)
735     rclass = reduce_class (rclass, DI_REGS, rclass);
736 
737 #if DEBUG0
738   fprintf (stderr, " %s\n", class_names[rclass]);
739 #endif
740   return rclass;
741 }
742 
743 /* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
744    r0 or r1, as those are the only real QImode registers.  CR regs get
745    reloaded through appropriately sized general or address
746    registers.  */
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* First mask word of RCLASS's register set.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into a class that is a subset of r2/r3 needs to go
     through a true QImode register first.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* sb/fb/sp into a CR class: bounce through HI_REGS on the 16-bit
     parts (or for HImode), otherwise through an address register.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
765 
766 /* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
767    reloads.  */
768 
769 #undef TARGET_CLASS_LIKELY_SPILLED_P
770 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
771 
772 static bool
m32c_class_likely_spilled_p(reg_class_t regclass)773 m32c_class_likely_spilled_p (reg_class_t regclass)
774 {
775   if (regclass == A_REGS)
776     return true;
777 
778   return (reg_class_size[(int) regclass] == 1);
779 }
780 
781 /* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
782    documented meaning, to avoid potential inconsistencies with actual
783    class definitions.  */
784 
785 #undef TARGET_CLASS_MAX_NREGS
786 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
787 
788 static unsigned char
m32c_class_max_nregs(reg_class_t regclass,machine_mode mode)789 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
790 {
791   int rn;
792   unsigned char max = 0;
793 
794   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
795     if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
796       {
797 	unsigned char n = m32c_hard_regno_nregs (rn, mode);
798 	if (max < n)
799 	  max = n;
800       }
801   return max;
802 }
803 
804 /* Implements TARGET_CAN_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
805    QI (r0l, r1l) because the chip doesn't support QI ops on other
806    registers (well, it does on a0/a1 but if we let gcc do that, reload
807    suffers).  Otherwise, we allow changes to larger modes.  */
/* Implements TARGET_CAN_CHANGE_MODE_CLASS; see the comment above for
   the rationale.  */
static bool
m32c_can_change_mode_class (machine_mode from,
			    machine_mode to, reg_class_t rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "can change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_mode_ok (rn, to))
	return false;

  /* Changing to QImode: the class must not include any register
     outside r0/r1.  NOTE(review): mask 0x1ffa presumably covers every
     non-QImode-capable register's bit — confirm against the register
     numbering in m32c.h.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa) == 0;

  /* 0x0005 selects r0 and r1 (bits 0 and 2); they may widen from any
     multi-byte mode.  */
  if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return true;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return true;

  return false;
}
836 
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is either the a0 register or a pseudo.  NOTE(review): the
   REGNO test assumes X is a REG; callers must guarantee that.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
849 
850 /* Implements matching for constraints (see next function too).  'S' is
851    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
852    call return values.  */
/* Return true iff VALUE satisfies CONSTRAINT.  Matching works on the
   pattern string produced by encode_pattern (): 'm' = MEM, 'r' = REG,
   's' = SYMBOL_REF, 'i' = const, '+' = PLUS, '^S'/'^Z' = sign/zero
   extend, 'S' = subreg marker (see encode_pattern_1).  patternr[N] is
   the rtx behind the Nth pattern character.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: a0/pseudo base (SImode), a sign-extended
       HImode base plus offset/symbol, or a bare symbol.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      /* Pre-decrement of fb with offset 0 is also acceptable.  */
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* sp-relative addresses are handled by the Ss constraint.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with optional constant offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant or symbolic absolute addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* sp-relative addressing, bare or with offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* fb-relative addressing, bare or with offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* sb-relative addressing, bare or with offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    /* PARALLELs used for multi-register call return values.  */
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
935 
936 /* STACK AND CALLING */
937 
938 /* Frame Layout */
939 
940 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
941    (yes, THREE bytes) onto the stack for the return address, but we
942    don't support pointers bigger than 16 bits on those chips.  This
943    will likely wreak havoc with exception unwinding.  FIXME.  */
944 rtx
m32c_return_addr_rtx(int count)945 m32c_return_addr_rtx (int count)
946 {
947   machine_mode mode;
948   int offset;
949   rtx ra_mem;
950 
951   if (count)
952     return NULL_RTX;
953   /* we want 2[$fb] */
954 
955   if (TARGET_A24)
956     {
957       /* It's four bytes */
958       mode = PSImode;
959       offset = 4;
960     }
961   else
962     {
963       /* FIXME: it's really 3 bytes */
964       mode = HImode;
965       offset = 2;
966     }
967 
968   ra_mem =
969     gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
970 				      offset));
971   return copy_to_mode_reg (mode, ra_mem);
972 }
973 
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  On entry
   the return address sits at the top of the stack.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
981 
982 /* Exception Handling Support */
983 
984 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
985    pointers.  */
986 int
m32c_eh_return_data_regno(int n)987 m32c_eh_return_data_regno (int n)
988 {
989   switch (n)
990     {
991     case 0:
992       return MEM0_REGNO;
993     case 1:
994       return MEM0_REGNO+4;
995     default:
996       return INVALID_REGNUM;
997     }
998 }
999 
1000 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1001    m32c_emit_eh_epilogue.  */
1002 rtx
m32c_eh_return_stackadj_rtx(void)1003 m32c_eh_return_stackadj_rtx (void)
1004 {
1005   if (!cfun->machine->eh_stack_adjust)
1006     {
1007       rtx sa;
1008 
1009       sa = gen_rtx_REG (Pmode, R0_REGNO);
1010       cfun->machine->eh_stack_adjust = sa;
1011     }
1012   return cfun->machine->eh_stack_adjust;
1013 }
1014 
1015 /* Registers That Address the Stack Frame */
1016 
1017 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1018    the original spec called for dwarf numbers to vary with register
1019    width as well, for example, r0l, r0, and r2r0 would each have
1020    different dwarf numbers.  GCC doesn't support this, and we don't do
1021    it, and gdb seems to like it this way anyway.  */
1022 unsigned int
m32c_dwarf_frame_regnum(int n)1023 m32c_dwarf_frame_regnum (int n)
1024 {
1025   switch (n)
1026     {
1027     case R0_REGNO:
1028       return 5;
1029     case R1_REGNO:
1030       return 6;
1031     case R2_REGNO:
1032       return 7;
1033     case R3_REGNO:
1034       return 8;
1035     case A0_REGNO:
1036       return 9;
1037     case A1_REGNO:
1038       return 10;
1039     case FB_REGNO:
1040       return 11;
1041     case SB_REGNO:
1042       return 19;
1043 
1044     case SP_REGNO:
1045       return 12;
1046     case PC_REGNO:
1047       return 13;
1048     default:
1049       return DWARF_FRAME_REGISTERS + 1;
1050     }
1051 }
1052 
1053 /* The frame looks like this:
1054 
1055    ap -> +------------------------------
1056          | Return address (3 or 4 bytes)
1057 	 | Saved FB (2 or 4 bytes)
1058    fb -> +------------------------------
1059 	 | local vars
1060          | register saves fb
1061 	 |        through r0 as needed
1062    sp -> +------------------------------
1063 */
1064 
1065 /* We use this to wrap all emitted insns in the prologue.  */
static rtx
F (rtx x)
{
  /* Marking an insn frame-related makes dwarf2out emit CFI for it.  */
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1072 
1073 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1074    how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* Hard register saved/restored by this entry.  */
  int bit;		/* Corresponding bit in the PUSHM/POPM mask byte.  */
  int a16_bytes;	/* Stack bytes consumed on R8C/M16C (A16).  */
  int a24_bytes;	/* Stack bytes consumed on M32C (A24).  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1095 
1096 /* Returns TRUE if we need to save/restore the given register.  We
1097    save everything for exception handlers, so that any register can be
1098    unwound.  For interrupt handlers, we save everything if the handler
1099    calls something else (because we don't know what *that* function
1100    might do), but try to be a bit smarter if the handler is a leaf
1101    function.  We always save $a0, though, because we use that in the
1102    epilogue to copy $fb to $sp.  */
1103 static int
need_to_save(int regno)1104 need_to_save (int regno)
1105 {
1106   if (fixed_regs[regno])
1107     return 0;
1108   if (crtl->calls_eh_return)
1109     return 1;
1110   if (regno == FP_REGNO)
1111     return 0;
1112   if (cfun->machine->is_interrupt
1113       && (!cfun->machine->is_leaf
1114 	  || (regno == A0_REGNO
1115 	      && m32c_function_needs_enter ())
1116 	  ))
1117     return 1;
1118   if (df_regs_ever_live_p (regno)
1119       && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1120     return 1;
1121   return 0;
1122 }
1123 
1124 /* This function contains all the intelligence about saving and
1125    restoring registers.  It always figures out the register save set.
1126    When called with PP_justcount, it merely returns the size of the
1127    save set (for eliminating the frame pointer, for example).  When
1128    called with PP_pushm or PP_popm, it emits the appropriate
1129    instructions for saving (pushm) or restoring (popm) the
1130    registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];
  int n_dwarfs = 0;
  int nosave_mask = 0;

  /* For multi-register return values, exclude the registers that
     carry the value from the popm mask so the epilogue doesn't
     clobber them.  NOTE(review): the 0x20/0xf0 mask choice in the
     two branches below looks inverted relative to its comments --
     confirm against the PSI/SI vs DF return conventions.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* First pass: build the PUSHM mask, accumulate the stack byte
     count, and (when actually pushing) prepare one DWARF CFI SET per
     saved register.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if this register doesn't need saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers save the hard-register set via the prologue
     pattern instead; stash the mask and restart accounting for the
     memory-bank pseudo registers below.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally save any live mem0..mem7 pseudo
     registers, two bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  /* The CFI note describes the SP adjustment plus every
	     individual store performed by the single PUSHM insn.  */
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* mem0..mem7 are pushed one HImode register at a time.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Pops must mirror the pushes in reverse order.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1257 
1258 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1259    diagrams our call frame.  */
1260 int
m32c_initial_elimination_offset(int from,int to)1261 m32c_initial_elimination_offset (int from, int to)
1262 {
1263   int ofs = 0;
1264 
1265   if (from == AP_REGNO)
1266     {
1267       if (TARGET_A16)
1268 	ofs += 5;
1269       else
1270 	ofs += 8;
1271     }
1272 
1273   if (to == SP_REGNO)
1274     {
1275       ofs += m32c_pushm_popm (PP_justcount);
1276       ofs += get_frame_size ();
1277     }
1278 
1279   /* Account for push rounding.  */
1280   if (TARGET_A24)
1281     ofs = (ofs + 1) & ~1;
1282 #if DEBUG0
1283   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1284 	   to, ofs);
1285 #endif
1286   return ofs;
1287 }
1288 
1289 /* Passing Function Arguments on the Stack */
1290 
1291 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1292    M32C has word stacks.  */
1293 poly_int64
m32c_push_rounding(poly_int64 n)1294 m32c_push_rounding (poly_int64 n)
1295 {
1296   if (TARGET_R8C || TARGET_M16C)
1297     return n;
1298   return (n + 1) & ~1;
1299 }
1300 
1301 /* Passing Arguments in Registers */
1302 
1303 /* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
1304    registers, partly on stack.  If our function returns a struct, a
1305    pointer to a buffer for it is at the top of the stack (last thing
1306    pushed).  The first few real arguments may be in registers as
1307    follows:
1308 
1309    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1310 		arg2 in r2 if it's HI (else pushed on stack)
1311 		rest on stack
1312    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1313 		rest on stack
1314 
1315    Structs are not passed in registers, even if they fit.  Only
1316    integer and pointer types are passed in registers.
1317 
1318    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1319    r2 if it fits.  */
1320 #undef TARGET_FUNCTION_ARG
1321 #define TARGET_FUNCTION_ARG m32c_function_arg
1322 static rtx
m32c_function_arg(cumulative_args_t ca_v,machine_mode mode,const_tree type,bool named)1323 m32c_function_arg (cumulative_args_t ca_v,
1324 		   machine_mode mode, const_tree type, bool named)
1325 {
1326   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1327 
1328   /* Can return a reg, parallel, or 0 for stack */
1329   rtx rv = NULL_RTX;
1330 #if DEBUG0
1331   fprintf (stderr, "func_arg %d (%s, %d)\n",
1332 	   ca->parm_num, mode_name[mode], named);
1333   debug_tree ((tree)type);
1334 #endif
1335 
1336   if (mode == VOIDmode)
1337     return GEN_INT (0);
1338 
1339   if (ca->force_mem || !named)
1340     {
1341 #if DEBUG0
1342       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1343 	       named);
1344 #endif
1345       return NULL_RTX;
1346     }
1347 
1348   if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1349     return NULL_RTX;
1350 
1351   if (type && AGGREGATE_TYPE_P (type))
1352     return NULL_RTX;
1353 
1354   switch (ca->parm_num)
1355     {
1356     case 1:
1357       if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1358 	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1359       break;
1360 
1361     case 2:
1362       if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1363 	rv = gen_rtx_REG (mode, R2_REGNO);
1364       break;
1365     }
1366 
1367 #if DEBUG0
1368   debug_rtx (rv);
1369 #endif
1370   return rv;
1371 }
1372 
1373 #undef TARGET_PASS_BY_REFERENCE
1374 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1375 static bool
m32c_pass_by_reference(cumulative_args_t ca ATTRIBUTE_UNUSED,machine_mode mode ATTRIBUTE_UNUSED,const_tree type ATTRIBUTE_UNUSED,bool named ATTRIBUTE_UNUSED)1376 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1377 			machine_mode mode ATTRIBUTE_UNUSED,
1378 			const_tree type ATTRIBUTE_UNUSED,
1379 			bool named ATTRIBUTE_UNUSED)
1380 {
1381   return 0;
1382 }
1383 
1384 /* Implements INIT_CUMULATIVE_ARGS.  */
1385 void
m32c_init_cumulative_args(CUMULATIVE_ARGS * ca,tree fntype,rtx libname ATTRIBUTE_UNUSED,tree fndecl,int n_named_args ATTRIBUTE_UNUSED)1386 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1387 			   tree fntype,
1388 			   rtx libname ATTRIBUTE_UNUSED,
1389 			   tree fndecl,
1390 			   int n_named_args ATTRIBUTE_UNUSED)
1391 {
1392   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1393     ca->force_mem = 1;
1394   else
1395     ca->force_mem = 0;
1396   ca->parm_num = 1;
1397 }
1398 
1399 /* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
1400    functions returning structures, so we always reset that.  Otherwise,
1401    we only need to know the sequence number of the argument to know what
1402    to do with it.  */
1403 #undef TARGET_FUNCTION_ARG_ADVANCE
1404 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1405 static void
m32c_function_arg_advance(cumulative_args_t ca_v,machine_mode mode ATTRIBUTE_UNUSED,const_tree type ATTRIBUTE_UNUSED,bool named ATTRIBUTE_UNUSED)1406 m32c_function_arg_advance (cumulative_args_t ca_v,
1407 			   machine_mode mode ATTRIBUTE_UNUSED,
1408 			   const_tree type ATTRIBUTE_UNUSED,
1409 			   bool named ATTRIBUTE_UNUSED)
1410 {
1411   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1412 
1413   if (ca->force_mem)
1414     ca->force_mem = 0;
1415   else
1416     ca->parm_num++;
1417 }
1418 
1419 /* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
1420 #undef TARGET_FUNCTION_ARG_BOUNDARY
1421 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1422 static unsigned int
m32c_function_arg_boundary(machine_mode mode ATTRIBUTE_UNUSED,const_tree type ATTRIBUTE_UNUSED)1423 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1424 			    const_tree type ATTRIBUTE_UNUSED)
1425 {
1426   return (TARGET_A16 ? 8 : 16);
1427 }
1428 
1429 /* Implements FUNCTION_ARG_REGNO_P.  */
1430 int
m32c_function_arg_regno_p(int r)1431 m32c_function_arg_regno_p (int r)
1432 {
1433   if (TARGET_A24)
1434     return (r == R0_REGNO);
1435   return (r == R1_REGNO || r == R2_REGNO);
1436 }
1437 
1438 /* HImode and PSImode are the two "native" modes as far as GCC is
1439    concerned, but the chips also support a 32-bit mode which is used
1440    for some opcodes in R8C/M16C and for reset vectors and such.  */
1441 #undef TARGET_VALID_POINTER_MODE
1442 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1443 static bool
m32c_valid_pointer_mode(scalar_int_mode mode)1444 m32c_valid_pointer_mode (scalar_int_mode mode)
1445 {
1446   if (mode == HImode
1447       || mode == PSImode
1448       || mode == SImode
1449       )
1450     return 1;
1451   return 0;
1452 }
1453 
1454 /* How Scalar Function Values Are Returned */
1455 
1456 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
1457    combination of registers starting there (r2r0 for longs, r3r1r2r0
1458    for long long, r3r2r1r0 for doubles), except that that ABI
1459    currently doesn't work because it ends up using all available
1460    general registers and gcc often can't compile it.  So, instead, we
1461    return anything bigger than 16 bits in "mem0" (effectively, a
1462    memory location).  */
1463 
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits comes back in the mem0 register bank
     (effectively, memory); 8- and 16-bit values come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1517 
1518 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1519    conventions.  */
1520 
1521 #undef TARGET_FUNCTION_VALUE
1522 #define TARGET_FUNCTION_VALUE m32c_function_value
1523 
1524 static rtx
m32c_function_value(const_tree valtype,const_tree fn_decl_or_type ATTRIBUTE_UNUSED,bool outgoing ATTRIBUTE_UNUSED)1525 m32c_function_value (const_tree valtype,
1526 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1527 		     bool outgoing ATTRIBUTE_UNUSED)
1528 {
1529   /* return reg or parallel */
1530   const machine_mode mode = TYPE_MODE (valtype);
1531   return m32c_libcall_value (mode, NULL_RTX);
1532 }
1533 
1534 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */
1535 
1536 #undef TARGET_FUNCTION_VALUE_REGNO_P
1537 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1538 
1539 static bool
m32c_function_value_regno_p(const unsigned int regno)1540 m32c_function_value_regno_p (const unsigned int regno)
1541 {
1542   return (regno == R0_REGNO || regno == MEM0_REGNO);
1543 }
1544 
1545 /* How Large Values Are Returned */
1546 
1547 /* We return structures by pushing the address on the stack, even if
1548    we use registers for the first few "real" arguments.  */
1549 #undef TARGET_STRUCT_VALUE_RTX
1550 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1551 static rtx
m32c_struct_value_rtx(tree fndecl ATTRIBUTE_UNUSED,int incoming ATTRIBUTE_UNUSED)1552 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1553 		       int incoming ATTRIBUTE_UNUSED)
1554 {
1555   return 0;
1556 }
1557 
1558 /* Function Entry and Exit */
1559 
1560 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1561 int
m32c_epilogue_uses(int regno ATTRIBUTE_UNUSED)1562 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1563 {
1564   if (cfun->machine->is_interrupt)
1565     return 1;
1566   return 0;
1567 }
1568 
1569 /* Implementing the Varargs Macros */
1570 
1571 #undef TARGET_STRICT_ARGUMENT_NAMING
1572 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1573 static bool
m32c_strict_argument_naming(cumulative_args_t ca ATTRIBUTE_UNUSED)1574 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1575 {
1576   return 1;
1577 }
1578 
1579 /* Trampolines for Nested Functions */
1580 
1581 /*
1582    m16c:
1583    1 0000 75C43412              mov.w   #0x1234,a0
1584    2 0004 FC000000              jmp.a   label
1585 
1586    m32c:
1587    1 0000 BC563412              mov.l:s #0x123456,a0
1588    2 0004 CC000000              jmp.a   label
1589 */
1590 
1591 /* Implements TRAMPOLINE_SIZE.  */
1592 int
m32c_trampoline_size(void)1593 m32c_trampoline_size (void)
1594 {
1595   /* Allocate extra space so we can avoid the messy shifts when we
1596      initialize the trampoline; we just write past the end of the
1597      opcode.  */
1598   return TARGET_A16 ? 8 : 10;
1599 }
1600 
1601 /* Implements TRAMPOLINE_ALIGNMENT.  */
int
m32c_trampoline_alignment (void)
{
  /* Word (2-byte) alignment suffices for trampolines.  */
  return 2;
}
1607 
1608 /* Implements TARGET_TRAMPOLINE_INIT.  */
1609 
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* A0(m,i): the trampoline memory at byte offset I, accessed in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      /* 0x75 0xc4: "mov.w #imm,a0" opcode (see the asm sketch above).  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      /* 0xfc: "jmp.a" opcode.  */
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      /* 0xbc: "mov.l:s #imm,a0" opcode (see the asm sketch above).  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      /* 0xcc: "jmp.a" opcode.  */
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1642 
1643 #undef TARGET_LRA_P
1644 #define TARGET_LRA_P hook_bool_void_false
1645 
1646 /* Addressing Modes */
1647 
1648 /* The r8c/m32c family supports a wide range of non-orthogonal
1649    addressing modes, including the ability to double-indirect on *some*
1650    of them.  Not all insns support all modes, either, but we rely on
1651    predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  /* Any constant (symbol, label, integer) is a valid absolute address.  */
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be in the chip family's native pointer mode
     (HImode near / SImode far on A16, PSImode on A24).  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is only accepted on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  /* encode_pattern fills patternr[] with the sub-rtxes of X; RTX_IS
     then matches the encoded shape string.  */
  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  /* 32-bit accesses through these bases are rejected on A16.  */
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb displacements: signed 8-bit on A16, 16-bit on A24.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  /* $sp displacements are always signed 8-bit.  */
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1782 
1783 /* Implements REG_OK_FOR_BASE_P.  */
1784 int
m32c_reg_ok_for_base_p(rtx x,int strict)1785 m32c_reg_ok_for_base_p (rtx x, int strict)
1786 {
1787   if (GET_CODE (x) != REG)
1788     return 0;
1789   switch (REGNO (x))
1790     {
1791     case A0_REGNO:
1792     case A1_REGNO:
1793     case SB_REGNO:
1794     case FB_REGNO:
1795     case SP_REGNO:
1796       return 1;
1797     default:
1798       if (IS_PSEUDO (x, strict))
1799 	return 1;
1800       return 0;
1801     }
1802 }
1803 
1804 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
1805    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1806    like this:
1807        EB 4B FF    mova    -128[$fb],$a0
1808        D8 0C FF FF mov.w:Q #0,-1[$a0]
1809 
1810    Alternately, we subtract the frame size, and hopefully use 8-bit aN
1811    displacements:
1812        7B F4       stc $fb,$a0
1813        77 54 00 01 sub #256,$a0
1814        D8 08 01    mov.w:Q #0,1[$a0]
1815 
1816    If we don't offset (i.e. offset by zero), we end up with:
1817        7B F4       stc $fb,$a0
1818        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1819 
1820    We have to subtract *something* so that we have a PLUS rtx to mark
1821    that we've done this reload.  The -128 offset will never result in
1822    an 8-bit aN offset, and the payoff for the second case is five
1823    loads *if* those loads are within 256 bytes of the other end of the
1824    frame, so the third case seems best.  Note that we subtract the
1825    zero, but detect that in the addhi3 pattern.  */
1826 
1827 #define BIG_FB_ADJ 0
1828 
1829 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1830    worry about is frame base offsets, as $fb has a limited
1831    displacement range.  We deal with this by attempting to reload $fb
1832    itself into an address register; that seems to result in the best
1833    code.  */
1834 #undef TARGET_LEGITIMIZE_ADDRESS
1835 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1836 static rtx
m32c_legitimize_address(rtx x,rtx oldx ATTRIBUTE_UNUSED,machine_mode mode)1837 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1838 			 machine_mode mode)
1839 {
1840 #if DEBUG0
1841   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1842   debug_rtx (x);
1843   fprintf (stderr, "\n");
1844 #endif
1845 
1846   if (GET_CODE (x) == PLUS
1847       && GET_CODE (XEXP (x, 0)) == REG
1848       && REGNO (XEXP (x, 0)) == FB_REGNO
1849       && GET_CODE (XEXP (x, 1)) == CONST_INT
1850       && (INTVAL (XEXP (x, 1)) < -128
1851 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1852     {
1853       /* reload FB to A_REGS */
1854       rtx temp = gen_reg_rtx (Pmode);
1855       x = copy_rtx (x);
1856       emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1857       XEXP (x, 0) = temp;
1858     }
1859 
1860   return x;
1861 }
1862 
1863 /* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + out-of-range displacement).  Reload the sum
     "$fb + adjustment" into an address register and rewrite the
     address as (new_base + (offset - adjustment)).  BIG_FB_ADJ is
     currently zero; the zero subtraction still yields the PLUS rtx
     that marks the address as handled (see the BIG_FB_ADJ comment).  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: (($fb + const) + const), produced by case 1 above.
     Reload the inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 3: any other PSImode address on A24 is reloaded whole into
     an address register.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1933 
1934 /* Return the appropriate mode for a named address pointer.  */
1935 #undef TARGET_ADDR_SPACE_POINTER_MODE
1936 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1937 static scalar_int_mode
m32c_addr_space_pointer_mode(addr_space_t addrspace)1938 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1939 {
1940   switch (addrspace)
1941     {
1942     case ADDR_SPACE_GENERIC:
1943       return TARGET_A24 ? PSImode : HImode;
1944     case ADDR_SPACE_FAR:
1945       return SImode;
1946     default:
1947       gcc_unreachable ();
1948     }
1949 }
1950 
1951 /* Return the appropriate mode for a named address address.  */
1952 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1953 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1954 static scalar_int_mode
m32c_addr_space_address_mode(addr_space_t addrspace)1955 m32c_addr_space_address_mode (addr_space_t addrspace)
1956 {
1957   switch (addrspace)
1958     {
1959     case ADDR_SPACE_GENERIC:
1960       return TARGET_A24 ? PSImode : HImode;
1961     case ADDR_SPACE_FAR:
1962       return SImode;
1963     default:
1964       gcc_unreachable ();
1965     }
1966 }
1967 
1968 /* Like m32c_legitimate_address_p, except with named addresses.  */
1969 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1970 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1971   m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  /* Validate an address X in address space AS.  Generic addresses are
     delegated to m32c_legitimate_address_p; far addresses are matched
     against the small set of encode_pattern () shapes the hardware
     supports.  */
  if (as == ADDR_SPACE_FAR)
    {
      /* As written, every far address is rejected on 24-bit-address
	 targets; far addressing is only validated here for A16
	 parts.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* A bare register: must be SImode, and must be A0 (or a pseudo
	 when not in strict mode).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant displacement (far space).  The base
	 must be HImode; for A0 the offset must fit the 20-bit
	 unsigned range.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbolic displacement (far space).  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus (symbol + integer) displacement (far space).  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol is always a valid far address.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  return m32c_legitimate_address_p (mode, x, strict);
}
2057 
2058 /* Like m32c_legitimate_address, except with named address support.  */
2059 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2060 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2061 static rtx
m32c_addr_space_legitimize_address(rtx x,rtx oldx,machine_mode mode,addr_space_t as)2062 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2063 				    addr_space_t as)
2064 {
2065   if (as != ADDR_SPACE_GENERIC)
2066     {
2067 #if DEBUG0
2068       fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2069       debug_rtx (x);
2070       fprintf (stderr, "\n");
2071 #endif
2072 
2073       if (GET_CODE (x) != REG)
2074 	{
2075 	  x = force_reg (SImode, x);
2076 	}
2077       return x;
2078     }
2079 
2080   return m32c_legitimize_address (x, oldx, mode);
2081 }
2082 
2083 /* Determine if one named address space is a subset of another.  */
2084 #undef TARGET_ADDR_SPACE_SUBSET_P
2085 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2086 static bool
m32c_addr_space_subset_p(addr_space_t subset,addr_space_t superset)2087 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2088 {
2089   gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2090   gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2091 
2092   if (subset == superset)
2093     return true;
2094 
2095   else
2096     return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2097 }
2098 
2099 #undef TARGET_ADDR_SPACE_CONVERT
2100 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2101 /* Convert from one address space to another.  */
2102 static rtx
m32c_addr_space_convert(rtx op,tree from_type,tree to_type)2103 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2104 {
2105   addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2106   addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2107   rtx result;
2108 
2109   gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2110   gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2111 
2112   if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2113     {
2114       /* This is unpredictable, as we're truncating off usable address
2115 	 bits.  */
2116 
2117       result = gen_reg_rtx (HImode);
2118       emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2119       return result;
2120     }
2121   else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2122     {
2123       /* This always works.  */
2124       result = gen_reg_rtx (SImode);
2125       emit_insn (gen_zero_extendhisi2 (result, op));
2126       return result;
2127     }
2128   else
2129     gcc_unreachable ();
2130 }
2131 
2132 /* Condition Code Status */
2133 
2134 #undef TARGET_FIXED_CONDITION_CODE_REGS
2135 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2136 static bool
m32c_fixed_condition_code_regs(unsigned int * p1,unsigned int * p2)2137 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2138 {
2139   *p1 = FLG_REGNO;
2140   *p2 = INVALID_REGNUM;
2141   return true;
2142 }
2143 
2144 /* Describing Relative Costs of Operations */
2145 
2146 /* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
2147    prohibitively expensive, like trying to put QIs in r2/r3 (there are
2148    no opcodes to do that).  We also discourage use of mem* registers
2149    since they're really memory.  */
2150 
2151 #undef TARGET_REGISTER_MOVE_COST
2152 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2153 
2154 static int
m32c_register_move_cost(machine_mode mode,reg_class_t from,reg_class_t to)2155 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2156 			 reg_class_t to)
2157 {
2158   int cost = COSTS_N_INSNS (3);
2159   HARD_REG_SET cc;
2160 
2161 /* FIXME: pick real values, but not 2 for now.  */
2162   COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
2163   IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);
2164 
2165   if (mode == QImode
2166       && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2167     {
2168       if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2169 	cost = COSTS_N_INSNS (1000);
2170       else
2171 	cost = COSTS_N_INSNS (80);
2172     }
2173 
2174   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2175     cost = COSTS_N_INSNS (1000);
2176 
2177   if (reg_classes_intersect_p (from, CR_REGS))
2178     cost += COSTS_N_INSNS (5);
2179 
2180   if (reg_classes_intersect_p (to, CR_REGS))
2181     cost += COSTS_N_INSNS (5);
2182 
2183   if (from == MEM_REGS || to == MEM_REGS)
2184     cost += COSTS_N_INSNS (50);
2185   else if (reg_classes_intersect_p (from, MEM_REGS)
2186 	   || reg_classes_intersect_p (to, MEM_REGS))
2187     cost += COSTS_N_INSNS (10);
2188 
2189 #if DEBUG0
2190   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2191 	   mode_name[mode], class_names[(int) from], class_names[(int) to],
2192 	   cost);
2193 #endif
2194   return cost;
2195 }
2196 
2197 /*  Implements TARGET_MEMORY_MOVE_COST.  */
2198 
2199 #undef TARGET_MEMORY_MOVE_COST
2200 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2201 
static int
m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* A flat estimate for all memory moves, regardless of mode, class,
     or direction.  */
  /* FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
2210 
2211 /* Here we try to describe when we use multiple opcodes for one RTX so
2212    that gcc knows when to use them.  */
2213 #undef TARGET_RTX_COSTS
2214 #define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  /* Adds the estimated cost of X to *TOTAL.  Returns true when the
     cost is final (subexpressions should not be recursed into),
     false to let the generic code handle the rest.  */
  int code = GET_CODE (x);
  switch (code)
    {
    case REG:
      /* The mem0..mem7 pseudo-registers are really memory; make them
	 look very expensive.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      /* Variable shifts need the count loaded into r1h first.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (1);
	  return true;
	}
      /* Counts outside [-8, 8] don't fit the immediate shift form and
	 must also go through r1h.  */
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shifts add no extra cost.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a condition into a register takes extra work; inside
	 a branch the comparison is free and we fall through.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* Bit operations: cost depends on the addressing mode of the
	   memory operand being modified.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  */
      if (TARGET_A16 && mode == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2294 
2295 #undef TARGET_ADDRESS_COST
2296 #define TARGET_ADDRESS_COST m32c_address_cost
2297 static int
m32c_address_cost(rtx addr,machine_mode mode ATTRIBUTE_UNUSED,addr_space_t as ATTRIBUTE_UNUSED,bool speed ATTRIBUTE_UNUSED)2298 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2299 		   addr_space_t as ATTRIBUTE_UNUSED,
2300 		   bool speed ATTRIBUTE_UNUSED)
2301 {
2302   int i;
2303   /*  fprintf(stderr, "\naddress_cost\n");
2304       debug_rtx(addr);*/
2305   switch (GET_CODE (addr))
2306     {
2307     case CONST_INT:
2308       i = INTVAL (addr);
2309       if (i == 0)
2310 	return COSTS_N_INSNS(1);
2311       if (i > 0 && i <= 255)
2312 	return COSTS_N_INSNS(2);
2313       if (i > 0 && i <= 65535)
2314 	return COSTS_N_INSNS(3);
2315       return COSTS_N_INSNS(4);
2316     case SYMBOL_REF:
2317       return COSTS_N_INSNS(4);
2318     case REG:
2319       return COSTS_N_INSNS(1);
2320     case PLUS:
2321       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2322 	{
2323 	  i = INTVAL (XEXP (addr, 1));
2324 	  if (i == 0)
2325 	    return COSTS_N_INSNS(1);
2326 	  if (i > 0 && i <= 255)
2327 	    return COSTS_N_INSNS(2);
2328 	  if (i > 0 && i <= 65535)
2329 	    return COSTS_N_INSNS(3);
2330 	}
2331       return COSTS_N_INSNS(4);
2332     default:
2333       return 0;
2334     }
2335 }
2336 
2337 /* Defining the Output Assembler Language */
2338 
2339 /* Output of Data */
2340 
2341 /* We may have 24 bit sizes, which is the native address size.
2342    Currently unused, but provided for completeness.  */
2343 #undef TARGET_ASM_INTEGER
2344 #define TARGET_ASM_INTEGER m32c_asm_integer
2345 static bool
m32c_asm_integer(rtx x,unsigned int size,int aligned_p)2346 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2347 {
2348   switch (size)
2349     {
2350     case 3:
2351       fprintf (asm_out_file, "\t.3byte\t");
2352       output_addr_const (asm_out_file, x);
2353       fputc ('\n', asm_out_file);
2354       return true;
2355     case 4:
2356       if (GET_CODE (x) == SYMBOL_REF)
2357 	{
2358 	  fprintf (asm_out_file, "\t.long\t");
2359 	  output_addr_const (asm_out_file, x);
2360 	  fputc ('\n', asm_out_file);
2361 	  return true;
2362 	}
2363       break;
2364     }
2365   return default_assemble_integer (x, size, aligned_p);
2366 }
2367 
2368 /* Output of Assembler Instructions */
2369 
2370 /* We use a lookup table because the addressing modes are non-orthogonal.  */
2371 
static struct
{
  /* Operand-letter this entry applies to (0 matches any code), as
     passed to m32c_print_operand.  */
  char code;
  /* The encode_pattern () string the operand must match.  As used by
     m32c_addr_space_legitimate_address_p, 'r' is a register, 'i' a
     CONST_INT, 's' a symbolic value, 'm' a MEM, '+' a PLUS, and '^S'
     presumably marks a far-address-space wrapper — confirm against
     encode_pattern.  */
  char const *pattern;
  /* Output template interpreted by m32c_print_operand: a digit emits
     patternr[digit]; 'z' emits a "0" displacement when the base is
     SB/FB/SP; '\\' quotes the next character; anything else is
     emitted literally.  */
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2424 
/* This is in order according to the bitfield that pushm/popm use:
   element [b] is the register named by bit b of the mask, so bit 0 is
   fb and bit 7 is r0 (see the 'p' case in m32c_print_operand).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2429 
2430 /* Implements TARGET_PRINT_OPERAND.  */
2431 
2432 #undef TARGET_PRINT_OPERAND
2433 #define TARGET_PRINT_OPERAND m32c_print_operand
2434 
/* Print operand X to FILE, honoring the operand-letter CODE.  The
   recognized codes are handled inline below ('u', 'U', '!', '&', 'C',
   'c', 'h', 'H', 'x', 'X'); everything else is driven by the
   conversions[] table after encode_pattern () classifies X.  */
static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
   need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  /* 'h'/'H' on HImode select the low/high byte.  */
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Classify X and find the matching table entry; then interpret its
     format string character by character.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Single-bit mask: print the bit position,
			   trying the mask, its 16-bit complement,
			   then its 8-bit complement.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched: emit a diagnostic marker into the
     assembly so the failure is visible.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2787 
2788 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2789 
2790    See m32c_print_operand above for descriptions of what these do.  */
2791 
2792 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2793 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2794 
2795 static bool
m32c_print_operand_punct_valid_p(unsigned char c)2796 m32c_print_operand_punct_valid_p (unsigned char c)
2797 {
2798   if (c == '&' || c == '!')
2799     return true;
2800 
2801   return false;
2802 }
2803 
2804 /* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2805 
2806 #undef TARGET_PRINT_OPERAND_ADDRESS
2807 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2808 
2809 static void
m32c_print_operand_address(FILE * stream,machine_mode,rtx address)2810 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2811 {
2812   if (GET_CODE (address) == MEM)
2813     address = XEXP (address, 0);
2814   else
2815     /* cf: gcc.dg/asm-4.c.  */
2816     gcc_assert (GET_CODE (address) == REG);
2817 
2818   m32c_print_operand (stream, address, 0);
2819 }
2820 
2821 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2822    differently than general registers.  */
2823 void
m32c_output_reg_push(FILE * s,int regno)2824 m32c_output_reg_push (FILE * s, int regno)
2825 {
2826   if (regno == FLG_REGNO)
2827     fprintf (s, "\tpushc\tflg\n");
2828   else
2829     fprintf (s, "\tpush.%c\t%s\n",
2830 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2831 }
2832 
2833 /* Likewise for ASM_OUTPUT_REG_POP.  */
2834 void
m32c_output_reg_pop(FILE * s,int regno)2835 m32c_output_reg_pop (FILE * s, int regno)
2836 {
2837   if (regno == FLG_REGNO)
2838     fprintf (s, "\tpopc\tflg\n");
2839   else
2840     fprintf (s, "\tpop.%c\t%s\n",
2841 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2842 }
2843 
2844 /* Defining target-specific uses of `__attribute__' */
2845 
2846 /* Used to simplify the logic below.  Find the attributes wherever
2847    they may be.  */
/* For a type, use its TYPE_ATTRIBUTES; for a decl, prefer its own
   DECL_ATTRIBUTES and fall back to the attributes of its type.  */
#define M32C_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                : DECL_ATTRIBUTES (decl) \
                  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2853 
2854 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2855 static int
interrupt_p(tree node ATTRIBUTE_UNUSED)2856 interrupt_p (tree node ATTRIBUTE_UNUSED)
2857 {
2858   tree list = M32C_ATTRIBUTES (node);
2859   while (list)
2860     {
2861       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2862 	return 1;
2863       list = TREE_CHAIN (list);
2864     }
2865   return fast_interrupt_p (node);
2866 }
2867 
2868 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
2869 static int
bank_switch_p(tree node ATTRIBUTE_UNUSED)2870 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2871 {
2872   tree list = M32C_ATTRIBUTES (node);
2873   while (list)
2874     {
2875       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2876 	return 1;
2877       list = TREE_CHAIN (list);
2878     }
2879   return 0;
2880 }
2881 
2882 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
2883 static int
fast_interrupt_p(tree node ATTRIBUTE_UNUSED)2884 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2885 {
2886   tree list = M32C_ATTRIBUTES (node);
2887   while (list)
2888     {
2889       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2890 	return 1;
2891       list = TREE_CHAIN (list);
2892     }
2893   return 0;
2894 }
2895 
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt": accepts the attribute unconditionally (no
   validation needed); the attributes are queried later via
   interrupt_p () and friends.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2905 
2906 /* Returns TRUE if given tree has the "function_vector" attribute. */
2907 int
m32c_special_page_vector_p(tree func)2908 m32c_special_page_vector_p (tree func)
2909 {
2910   tree list;
2911 
2912   if (TREE_CODE (func) != FUNCTION_DECL)
2913     return 0;
2914 
2915   list = M32C_ATTRIBUTES (func);
2916   while (list)
2917     {
2918       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2919         return 1;
2920       list = TREE_CHAIN (list);
2921     }
2922   return 0;
2923 }
2924 
2925 static tree
function_vector_handler(tree * node ATTRIBUTE_UNUSED,tree name ATTRIBUTE_UNUSED,tree args ATTRIBUTE_UNUSED,int flags ATTRIBUTE_UNUSED,bool * no_add_attrs ATTRIBUTE_UNUSED)2926 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2927                          tree name ATTRIBUTE_UNUSED,
2928                          tree args ATTRIBUTE_UNUSED,
2929                          int flags ATTRIBUTE_UNUSED,
2930                          bool * no_add_attrs ATTRIBUTE_UNUSED)
2931 {
2932   if (TARGET_R8C)
2933     {
2934       /* The attribute is not supported for R8C target.  */
2935       warning (OPT_Wattributes,
2936                 "%qE attribute is not supported for R8C target",
2937                 name);
2938       *no_add_attrs = true;
2939     }
2940   else if (TREE_CODE (*node) != FUNCTION_DECL)
2941     {
2942       /* The attribute must be applied to functions only.  */
2943       warning (OPT_Wattributes,
2944                 "%qE attribute applies only to functions",
2945                 name);
2946       *no_add_attrs = true;
2947     }
2948   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2949     {
2950       /* The argument must be a constant integer.  */
2951       warning (OPT_Wattributes,
2952                 "%qE attribute argument not an integer constant",
2953                 name);
2954       *no_add_attrs = true;
2955     }
2956   else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2957            || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2958     {
2959       /* The argument value must be between 18 to 255.  */
2960       warning (OPT_Wattributes,
2961                 "%qE attribute argument should be between 18 to 255",
2962                 name);
2963       *no_add_attrs = true;
2964     }
2965   return NULL_TREE;
2966 }
2967 
2968 /* If the function is assigned the attribute 'function_vector', it
2969    returns the function vector number, otherwise returns zero.  */
2970 int
current_function_special_page_vector(rtx x)2971 current_function_special_page_vector (rtx x)
2972 {
2973   int num;
2974 
2975   if ((GET_CODE(x) == SYMBOL_REF)
2976       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2977     {
2978       tree list;
2979       tree t = SYMBOL_REF_DECL (x);
2980 
2981       if (TREE_CODE (t) != FUNCTION_DECL)
2982         return 0;
2983 
2984       list = M32C_ATTRIBUTES (t);
2985       while (list)
2986         {
2987           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2988             {
2989               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2990               return num;
2991             }
2992 
2993           list = TREE_CHAIN (list);
2994         }
2995 
2996       return 0;
2997     }
2998   else
2999     return 0;
3000 }
3001 
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine-specific attributes.  The three interrupt-style attributes
   take no arguments and share interrupt_handler; "function_vector"
   takes exactly one argument (validated by function_vector_handler)
   and must appear on a declaration (decl_req).  */
static const struct attribute_spec m32c_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
  { "fast_interrupt", 0, 0, false, false, false, false,
    interrupt_handler, NULL },
  { "function_vector", 1, 1, true,  false, false, false,
    function_vector_handler, NULL },
  /* Terminating entry.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
3015 
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Hook: decide whether two types' machine attributes are compatible.
   This port treats all attribute combinations as compatible.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3025 
3026 #undef TARGET_INSERT_ATTRIBUTES
3027 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3028 static void
m32c_insert_attributes(tree node ATTRIBUTE_UNUSED,tree * attr_ptr ATTRIBUTE_UNUSED)3029 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3030 			tree * attr_ptr ATTRIBUTE_UNUSED)
3031 {
3032   unsigned addr;
3033   /* See if we need to make #pragma address variables volatile.  */
3034 
3035   if (TREE_CODE (node) == VAR_DECL)
3036     {
3037       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3038       if (m32c_get_pragma_address  (name, &addr))
3039 	{
3040 	  TREE_THIS_VOLATILE (node) = true;
3041 	}
3042     }
3043 }
3044 
/* Hash table of pragma info: maps a variable name (from #pragma
   address) to its fixed address.  GC-rooted via GTY.  */
static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
3047 
3048 void
m32c_note_pragma_address(const char * varname,unsigned address)3049 m32c_note_pragma_address (const char *varname, unsigned address)
3050 {
3051   if (!pragma_htab)
3052     pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
3053 
3054   const char *name = ggc_strdup (varname);
3055   unsigned int *slot = &pragma_htab->get_or_insert (name);
3056   *slot = address;
3057 }
3058 
3059 static bool
m32c_get_pragma_address(const char * varname,unsigned * address)3060 m32c_get_pragma_address (const char *varname, unsigned *address)
3061 {
3062   if (!pragma_htab)
3063     return false;
3064 
3065   unsigned int *slot = pragma_htab->get (varname);
3066   if (slot)
3067     {
3068       *address = *slot;
3069       return true;
3070     }
3071   return false;
3072 }
3073 
3074 void
m32c_output_aligned_common(FILE * stream,tree decl ATTRIBUTE_UNUSED,const char * name,int size,int align,int global)3075 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3076 			    const char *name,
3077 			    int size, int align, int global)
3078 {
3079   unsigned address;
3080 
3081   if (m32c_get_pragma_address (name, &address))
3082     {
3083       /* We never output these as global.  */
3084       assemble_name (stream, name);
3085       fprintf (stream, " = 0x%04x\n", address);
3086       return;
3087     }
3088   if (!global)
3089     {
3090       fprintf (stream, "\t.local\t");
3091       assemble_name (stream, name);
3092       fprintf (stream, "\n");
3093     }
3094   fprintf (stream, "\t.comm\t");
3095   assemble_name (stream, name);
3096   fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3097 }
3098 
/* Predicates */

/* This is a list of legal subregs of hard regs.  Consulted by
   m32c_illegal_subreg_p: a subreg is legal only if it matches one of
   these entries.  */
static const struct {
  unsigned char outer_mode_size;  /* size in bytes of the subreg's mode */
  unsigned char inner_mode_size;  /* size in bytes of the full register's mode */
  unsigned char byte_mask;        /* bit N set => byte offset N is allowed */
  unsigned char legal_when;       /* 1 = always; 16 = only TARGET_A16; 24 = only TARGET_A24 */
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3125 
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  machine_mode src_mode, dest_mode;

  /* Reject MEMs whose address isn't legitimate for this target.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* A same-size subreg is just a mode change, always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Registers numbered MEM0 and up are memory-backed; any subreg of
     them is acceptable.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Turn the byte offset into a one-bit mask so it can be tested
     against byte_mask in legal_subregs (bit N <=> offset N).  */
  offset = (1 << offset);

  /* Legal only if some table entry matches mode sizes, regno, and
     offset; legal_when further restricts by address-size variant.  */
  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
	&& legal_subregs[i].regno == REGNO (op)
	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
	&& legal_subregs[i].byte_mask & offset)
      {
	switch (legal_subregs[i].legal_when)
	  {
	  case 1:
	    /* Always legal.  */
	    return false;
	  case 16:
	    /* Legal only on A16 parts.  */
	    if (TARGET_A16)
	      return false;
	    break;
	  case 24:
	    /* Legal only on A24 parts.  */
	    if (TARGET_A24)
	      return false;
	    break;
	  }
      }
  return true;
}
3180 
3181 /* Returns TRUE if we support a move between the first two operands.
3182    At the moment, we just want to discourage mem to mem moves until
3183    after reload, because reload has a hard time with our limited
3184    number of address registers, and we can get into a situation where
3185    we need three of them when we only have two.  */
3186 bool
m32c_mov_ok(rtx * operands,machine_mode mode ATTRIBUTE_UNUSED)3187 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3188 {
3189   rtx op0 = operands[0];
3190   rtx op1 = operands[1];
3191 
3192   if (TARGET_A24)
3193     return true;
3194 
3195 #define DEBUG_MOV_OK 0
3196 #if DEBUG_MOV_OK
3197   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3198   debug_rtx (op0);
3199   debug_rtx (op1);
3200 #endif
3201 
3202   if (GET_CODE (op0) == SUBREG)
3203     op0 = XEXP (op0, 0);
3204   if (GET_CODE (op1) == SUBREG)
3205     op1 = XEXP (op1, 0);
3206 
3207   if (GET_CODE (op0) == MEM
3208       && GET_CODE (op1) == MEM
3209       && ! reload_completed)
3210     {
3211 #if DEBUG_MOV_OK
3212       fprintf (stderr, " - no, mem to mem\n");
3213 #endif
3214       return false;
3215     }
3216 
3217 #if DEBUG_MOV_OK
3218   fprintf (stderr, " - ok\n");
3219 #endif
3220   return true;
3221 }
3222 
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  Until it is reimplemented, always decline the
     combination.  */
  return false;
}
3234 
/* Expanders */

/* Subregs are non-orthogonal for us, because our registers are all
   different sizes.  Build an OUTER-mode rtx referencing the piece of
   X (whose mode is INNER) that starts BYTE bytes in, mapping hard
   registers to the correct sub-register by hand.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Anything that isn't a register can go through the generic
     simplifier.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  r = REGNO (x);
  /* Pseudos, the arg pointer, and memory-backed registers also take
     the generic path.  */
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      /* Map a 16-bit chunk of a wider value to the hard register that
	 holds it (e.g. bytes 2-3 of an SImode value in r0 live in r2).  */
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* Any combination not listed above is unsupported; dump and abort.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3324 
3325 /* Used to emit move instructions.  We split some moves,
3326    and avoid mem-mem moves.  */
3327 int
m32c_prepare_move(rtx * operands,machine_mode mode)3328 m32c_prepare_move (rtx * operands, machine_mode mode)
3329 {
3330   if (far_addr_space_p (operands[0])
3331       && CONSTANT_P (operands[1]))
3332     {
3333       operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3334     }
3335   if (TARGET_A16 && mode == PSImode)
3336     return m32c_split_move (operands, mode, 1);
3337   if ((GET_CODE (operands[0]) == MEM)
3338       && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3339     {
3340       rtx pmv = XEXP (operands[0], 0);
3341       rtx dest_reg = XEXP (pmv, 0);
3342       rtx dest_mod = XEXP (pmv, 1);
3343 
3344       emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3345       operands[0] = gen_rtx_MEM (mode, dest_reg);
3346     }
3347   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3348     operands[1] = copy_to_mode_reg (mode, operands[1]);
3349   return 0;
3350 }
3351 
3352 #define DEBUG_SPLIT 0
3353 
3354 /* Returns TRUE if the given PSImode move should be split.  We split
3355    for all r8c/m16c moves, since it doesn't support them, and for
3356    POP.L as we can only *push* SImode.  */
3357 int
m32c_split_psi_p(rtx * operands)3358 m32c_split_psi_p (rtx * operands)
3359 {
3360 #if DEBUG_SPLIT
3361   fprintf (stderr, "\nm32c_split_psi_p\n");
3362   debug_rtx (operands[0]);
3363   debug_rtx (operands[1]);
3364 #endif
3365   if (TARGET_A16)
3366     {
3367 #if DEBUG_SPLIT
3368       fprintf (stderr, "yes, A16\n");
3369 #endif
3370       return 1;
3371     }
3372   if (GET_CODE (operands[1]) == MEM
3373       && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3374     {
3375 #if DEBUG_SPLIT
3376       fprintf (stderr, "yes, pop.l\n");
3377 #endif
3378       return 1;
3379     }
3380 #if DEBUG_SPLIT
3381   fprintf (stderr, "no, default\n");
3382 #endif
3383   return 0;
3384 }
3385 
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (and, unless SPLIT_ALL is 3, the part-moves were
   emitted here).  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  Pairs start at index 2 so that, in the
     define_split case, operands[0] and operands[1] stay intact.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* Far-address operands on A16 must also be split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* Most-significant part is pushed first.  */
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      /* Pops come back least-significant part first.  */
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the part-moves so that no destination overwrites a
	 source that is still needed.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3522 
3523 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3524    the like.  For the R8C they expect one of the addresses to be in
3525    R1L:An so we need to arrange for that.  Otherwise, it's just a
3526    matter of picking out the operands we want and emitting the right
3527    pattern for them.  All these expanders, which correspond to
3528    patterns in blkmov.md, must return nonzero if they expand the insn,
3529    or zero if they should FAIL.  */
3530 
3531 /* This is a memset() opcode.  All operands are implied, so we need to
3532    arrange for them to be in the right registers.  The opcode wants
3533    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3534    the count (HI), and $2 the value (QI).  */
3535 int
m32c_expand_setmemhi(rtx * operands)3536 m32c_expand_setmemhi(rtx *operands)
3537 {
3538   rtx desta, count, val;
3539   rtx desto, counto;
3540 
3541   desta = XEXP (operands[0], 0);
3542   count = operands[1];
3543   val = operands[2];
3544 
3545   desto = gen_reg_rtx (Pmode);
3546   counto = gen_reg_rtx (HImode);
3547 
3548   if (GET_CODE (desta) != REG
3549       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3550     desta = copy_to_mode_reg (Pmode, desta);
3551 
3552   /* This looks like an arbitrary restriction, but this is by far the
3553      most common case.  For counts 8..14 this actually results in
3554      smaller code with no speed penalty because the half-sized
3555      constant can be loaded with a shorter opcode.  */
3556   if (GET_CODE (count) == CONST_INT
3557       && GET_CODE (val) == CONST_INT
3558       && ! (INTVAL (count) & 1)
3559       && (INTVAL (count) > 1)
3560       && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3561     {
3562       unsigned v = INTVAL (val) & 0xff;
3563       v = v | (v << 8);
3564       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3565       val = copy_to_mode_reg (HImode, GEN_INT (v));
3566       if (TARGET_A16)
3567 	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3568       else
3569 	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3570       return 1;
3571     }
3572 
3573   /* This is the generalized memset() case.  */
3574   if (GET_CODE (val) != REG
3575       || REGNO (val) < FIRST_PSEUDO_REGISTER)
3576     val = copy_to_mode_reg (QImode, val);
3577 
3578   if (GET_CODE (count) != REG
3579       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3580     count = copy_to_mode_reg (HImode, count);
3581 
3582   if (TARGET_A16)
3583     emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3584   else
3585     emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3586 
3587   return 1;
3588 }
3589 
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Output pseudos clobbered by the insn.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Both addresses must be in pseudo registers.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even constant count: halve it and use the word variant.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3641 
3642 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3643    the copy, which should point to the NUL at the end of the string,
3644    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3645    Since our opcode leaves the destination pointing *after* the NUL,
3646    we must emit an adjustment.  */
3647 int
m32c_expand_movstr(rtx * operands)3648 m32c_expand_movstr(rtx *operands)
3649 {
3650   rtx desta, srca;
3651   rtx desto, srco;
3652 
3653   desta = XEXP (operands[1], 0);
3654   srca = XEXP (operands[2], 0);
3655 
3656   desto = gen_reg_rtx (Pmode);
3657   srco = gen_reg_rtx (Pmode);
3658 
3659   if (GET_CODE (desta) != REG
3660       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3661     desta = copy_to_mode_reg (Pmode, desta);
3662 
3663   if (GET_CODE (srca) != REG
3664       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3665     srca = copy_to_mode_reg (Pmode, srca);
3666 
3667   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3668   /* desto ends up being a1, which allows this type of add through MOVA.  */
3669   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3670 
3671   return 1;
3672 }
3673 
3674 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3675    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3676    $2 is the other (MEM:BLK).  We must do the comparison, and then
3677    convert the flags to a signed integer result.  */
3678 int
m32c_expand_cmpstr(rtx * operands)3679 m32c_expand_cmpstr(rtx *operands)
3680 {
3681   rtx src1a, src2a;
3682 
3683   src1a = XEXP (operands[1], 0);
3684   src2a = XEXP (operands[2], 0);
3685 
3686   if (GET_CODE (src1a) != REG
3687       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3688     src1a = copy_to_mode_reg (Pmode, src1a);
3689 
3690   if (GET_CODE (src2a) != REG
3691       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3692     src2a = copy_to_mode_reg (Pmode, src2a);
3693 
3694   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3695   emit_insn (gen_cond_to_int (operands[0]));
3696 
3697   return 1;
3698 }
3699 
3700 
/* Signature shared by all the shift insn generators used below.  */
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);

/* Return the insn generator for shift operation CODE (ASHIFT,
   ASHIFTRT, or LSHIFTRT) in MODE.  SImode shifts differ between the
   A16 and A24 subtargets.  Aborts on an unsupported combination.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  /* No generator exists for this mode/code pair.  */
  gcc_unreachable ();
}
3722 
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   SCALE carries the direction convention (negative for right shifts;
   see the NEG below).  Returns 1 if the shift was fully emitted here,
   0 if the caller should emit it with the (possibly rewritten)
   operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: emit as many maximum-width shifts as needed,
	 then one final shift for the remainder.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Let the caller emit the shift with the prepared count.  */
  operands[2] = temp;
  return 0;
}
3832 
/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b zero-extended to SImode */
  rtx scale /* i as SI */;
  rtx temp2; /* b*i as SImode, before truncation */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  /* A CONST_INT can simply be copied into an SImode register;
     anything else is PSImode and must be zero-extended.  */
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
3859 
3860 /* Pattern Output Functions */
3861 
3862 int
m32c_expand_movcc(rtx * operands)3863 m32c_expand_movcc (rtx *operands)
3864 {
3865   rtx rel = operands[1];
3866 
3867   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3868     return 1;
3869   if (GET_CODE (operands[2]) != CONST_INT
3870       || GET_CODE (operands[3]) != CONST_INT)
3871     return 1;
3872   if (GET_CODE (rel) == NE)
3873     {
3874       rtx tmp = operands[2];
3875       operands[2] = operands[3];
3876       operands[3] = tmp;
3877       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3878     }
3879 
3880   emit_move_insn (operands[0],
3881 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3882 					rel,
3883 					operands[2],
3884 					operands[3]));
3885   return 0;
3886 }
3887 
3888 /* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit fields are supported; anything wider fails.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  /* Single bit at the requested position.  */
  mask = 1 << INTVAL (operands[2]);

  /* Strip a low-part SUBREG of a QImode/HImode register so we operate
     on the underlying register directly.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read the current value through a scratch pseudo when possible so
     the AND/OR below has a separate source; volatile MEMs must be
     accessed in place (and exactly once), and after reload we cannot
     create pseudos at all.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      /* NOTE(review): NEW_MEM is built but never assigned back to
	 OP0, so the insn emitted below still targets the full HImode
	 MEM even though MASK has been shifted into the low byte.
	 This looks like dead code or a latent bug — confirm the
	 intended byte-narrowing before relying on this path.  */
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Dispatch on: stored value (OR for nonzero, AND for zero), operand
     mode (HI vs. QI), and address size (A24 vs. A16).  */
  switch (  (INTVAL (operands[3]) ? 4 : 0)
	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
	  + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
3978 
3979 const char *
m32c_scc_pattern(rtx * operands,RTX_CODE code)3980 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3981 {
3982   static char buf[30];
3983   if (GET_CODE (operands[0]) == REG
3984       && REGNO (operands[0]) == R0_REGNO)
3985     {
3986       if (code == EQ)
3987 	return "stzx\t#1,#0,r0l";
3988       if (code == NE)
3989 	return "stzx\t#0,#1,r0l";
3990     }
3991   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3992   return buf;
3993 }
3994 
3995 /* Encode symbol attributes of a SYMBOL_REF into its
3996    SYMBOL_REF_FLAGS. */
3997 static void
m32c_encode_section_info(tree decl,rtx rtl,int first)3998 m32c_encode_section_info (tree decl, rtx rtl, int first)
3999 {
4000   int extra_flags = 0;
4001 
4002   default_encode_section_info (decl, rtl, first);
4003   if (TREE_CODE (decl) == FUNCTION_DECL
4004       && m32c_special_page_vector_p (decl))
4005 
4006     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4007 
4008   if (extra_flags)
4009     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4010 }
4011 
4012 /* Returns TRUE if the current function is a leaf, and thus we can
4013    determine which registers an interrupt function really needs to
4014    save.  The logic below is mostly about finding the insn sequence
4015    that's the function, versus any sequence that might be open for the
4016    current insn.  */
static int
m32c_leaf_function_p (void)
{
  int result;

  /* Query leaf_function_p against the function's own insn stream,
     not whatever sequence happens to be open for the current insn.  */
  push_topmost_sequence ();
  result = leaf_function_p ();
  pop_topmost_sequence ();

  return result;
}
4027 
4028 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4029    opcodes.  If the function doesn't need the frame base or stack
4030    pointer, it can use the simpler RTS opcode.  */
4031 static bool
m32c_function_needs_enter(void)4032 m32c_function_needs_enter (void)
4033 {
4034   rtx_insn *insn;
4035   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4036   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4037 
4038   for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4039     if (NONDEBUG_INSN_P (insn))
4040       {
4041 	if (reg_mentioned_p (sp, insn))
4042 	  return true;
4043 	if (reg_mentioned_p (fb, insn))
4044 	  return true;
4045       }
4046   return false;
4047 }
4048 
4049 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4050    frame-related.  Return PAR.
4051 
4052    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4053    PARALLEL rtx other than the first if they do not have the
4054    FRAME_RELATED flag set on them.  So this function is handy for
4055    marking up 'enter' instructions.  */
4056 static rtx
m32c_all_frame_related(rtx par)4057 m32c_all_frame_related (rtx par)
4058 {
4059   int len = XVECLEN (par, 0);
4060   int i;
4061 
4062   for (i = 0; i < len; i++)
4063     F (XVECEXP (par, 0, i));
4064 
4065   return par;
4066 }
4067 
4068 /* Emits the prologue.  See the frame layout comment earlier in this
4069    file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
4070    that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Count (without emitting) the bytes the register saves will take;
     the frame size below excludes them.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      /* A bank-switching interrupt emits FSET B instead of pushing
	 the caller-visible registers.  */
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* With no locals and no use of SP/FB, a plain RTS return suffices
     and we skip ENTER entirely.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  /* ENTER reserves at most 254 bytes here; any excess is allocated
     by adjusting SP explicitly below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    /* The +2/+4 presumably accounts for the saved frame base (pointer
       size differs between A16 and A24) — TODO confirm against the
       frame layout comment earlier in this file.  */
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      /* SP is HImode on A16 parts and PSImode on A24 parts.  */
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Now emit the actual register pushes.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4136 
4137 /* Likewise, for the epilogue.  The only exception is that, for
4138    interrupts, we must manually unwind the frame as the REIT opcode
4139    doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the registers the prologue pushed.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind the frame by hand: SP = FP (staged through A0),
	     then pop the saved frame pointer.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Bank-switching interrupts used FSET B in the prologue instead
	 of PUSHM, so there is nothing to POPM here.  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* Check if fast_attribute is set for M32C or M32CM.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If fast_interrupt attribute is set for an R8C or M16C
             target ignore this attribute and generated REIT
             instruction.  */
          else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
        }
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4208 
/* Emit the epilogue used when unwinding for exception handling;
   RET_ADDR is the address to return to.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4219 
4220 /* Indicate which flags must be properly set for a given conditional.  */
4221 static int
flags_needed_for_conditional(rtx cond)4222 flags_needed_for_conditional (rtx cond)
4223 {
4224   switch (GET_CODE (cond))
4225     {
4226     case LE:
4227     case GT:
4228       return FLAGS_OSZ;
4229     case LEU:
4230     case GTU:
4231       return FLAGS_ZC;
4232     case LT:
4233     case GE:
4234       return FLAGS_OS;
4235     case LTU:
4236     case GEU:
4237       return FLAGS_C;
4238     case EQ:
4239     case NE:
4240       return FLAGS_Z;
4241     default:
4242       return FLAGS_N;
4243     }
4244 }
4245 
4246 #define DEBUG_CMP 0
4247 
4248 /* Returns true if a compare insn is redundant because it would only
4249    set flags that are already set correctly.  */
4250 static bool
m32c_compare_redundant(rtx_insn * cmp,rtx * operands)4251 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4252 {
4253   int flags_needed;
4254   int pflags;
4255   rtx_insn *prev;
4256   rtx pp, next;
4257   rtx op0, op1;
4258 #if DEBUG_CMP
4259   int prev_icode, i;
4260 #endif
4261 
4262   op0 = operands[0];
4263   op1 = operands[1];
4264 
4265 #if DEBUG_CMP
4266   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4267   debug_rtx(cmp);
4268   for (i=0; i<2; i++)
4269     {
4270       fprintf(stderr, "operands[%d] = ", i);
4271       debug_rtx(operands[i]);
4272     }
4273 #endif
4274 
4275   next = next_nonnote_insn (cmp);
4276   if (!next || !INSN_P (next))
4277     {
4278 #if DEBUG_CMP
4279       fprintf(stderr, "compare not followed by insn\n");
4280       debug_rtx(next);
4281 #endif
4282       return false;
4283     }
4284   if (GET_CODE (PATTERN (next)) == SET
4285       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4286     {
4287       next = XEXP (XEXP (PATTERN (next), 1), 0);
4288     }
4289   else if (GET_CODE (PATTERN (next)) == SET)
4290     {
4291       /* If this is a conditional, flags_needed will be something
4292 	 other than FLAGS_N, which we test below.  */
4293       next = XEXP (PATTERN (next), 1);
4294     }
4295   else
4296     {
4297 #if DEBUG_CMP
4298       fprintf(stderr, "compare not followed by conditional\n");
4299       debug_rtx(next);
4300 #endif
4301       return false;
4302     }
4303 #if DEBUG_CMP
4304   fprintf(stderr, "conditional is: ");
4305   debug_rtx(next);
4306 #endif
4307 
4308   flags_needed = flags_needed_for_conditional (next);
4309   if (flags_needed == FLAGS_N)
4310     {
4311 #if DEBUG_CMP
4312       fprintf(stderr, "compare not followed by conditional\n");
4313       debug_rtx(next);
4314 #endif
4315       return false;
4316     }
4317 
4318   /* Compare doesn't set overflow and carry the same way that
4319      arithmetic instructions do, so we can't replace those.  */
4320   if (flags_needed & FLAGS_OC)
4321     return false;
4322 
4323   prev = cmp;
4324   do {
4325     prev = prev_nonnote_insn (prev);
4326     if (!prev)
4327       {
4328 #if DEBUG_CMP
4329 	fprintf(stderr, "No previous insn.\n");
4330 #endif
4331 	return false;
4332       }
4333     if (!INSN_P (prev))
4334       {
4335 #if DEBUG_CMP
4336 	fprintf(stderr, "Previous insn is a non-insn.\n");
4337 #endif
4338 	return false;
4339       }
4340     pp = PATTERN (prev);
4341     if (GET_CODE (pp) != SET)
4342       {
4343 #if DEBUG_CMP
4344 	fprintf(stderr, "Previous insn is not a SET.\n");
4345 #endif
4346 	return false;
4347       }
4348     pflags = get_attr_flags (prev);
4349 
4350     /* Looking up attributes of previous insns corrupted the recog
4351        tables.  */
4352     INSN_UID (cmp) = -1;
4353     recog (PATTERN (cmp), cmp, 0);
4354 
4355     if (pflags == FLAGS_N
4356 	&& reg_mentioned_p (op0, pp))
4357       {
4358 #if DEBUG_CMP
4359 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
4360 	debug_rtx(prev);
4361 #endif
4362 	return false;
4363       }
4364 
4365     /* Check for comparisons against memory - between volatiles and
4366        aliases, we just can't risk this one.  */
4367     if (GET_CODE (operands[0]) == MEM
4368 	|| GET_CODE (operands[0]) == MEM)
4369       {
4370 #if DEBUG_CMP
4371 	fprintf(stderr, "comparisons with memory:\n");
4372 	debug_rtx(prev);
4373 #endif
4374 	return false;
4375       }
4376 
4377     /* Check for PREV changing a register that's used to compute a
4378        value in CMP, even if it doesn't otherwise change flags.  */
4379     if (GET_CODE (operands[0]) == REG
4380 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4381       {
4382 #if DEBUG_CMP
4383 	fprintf(stderr, "sub-value affected, op0:\n");
4384 	debug_rtx(prev);
4385 #endif
4386 	return false;
4387       }
4388     if (GET_CODE (operands[1]) == REG
4389 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4390       {
4391 #if DEBUG_CMP
4392 	fprintf(stderr, "sub-value affected, op1:\n");
4393 	debug_rtx(prev);
4394 #endif
4395 	return false;
4396       }
4397 
4398   } while (pflags == FLAGS_N);
4399 #if DEBUG_CMP
4400   fprintf(stderr, "previous flag-setting insn:\n");
4401   debug_rtx(prev);
4402   debug_rtx(pp);
4403 #endif
4404 
4405   if (GET_CODE (pp) == SET
4406       && GET_CODE (XEXP (pp, 0)) == REG
4407       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4408       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4409     {
4410       /* Adjacent cbranches must have the same operands to be
4411 	 redundant.  */
4412       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4413       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4414 #if DEBUG_CMP
4415       fprintf(stderr, "adjacent cbranches\n");
4416       debug_rtx(pop0);
4417       debug_rtx(pop1);
4418 #endif
4419       if (rtx_equal_p (op0, pop0)
4420 	  && rtx_equal_p (op1, pop1))
4421 	return true;
4422 #if DEBUG_CMP
4423       fprintf(stderr, "prev cmp not same\n");
4424 #endif
4425       return false;
4426     }
4427 
4428   /* Else the previous insn must be a SET, with either the source or
4429      dest equal to operands[0], and operands[1] must be zero.  */
4430 
4431   if (!rtx_equal_p (op1, const0_rtx))
4432     {
4433 #if DEBUG_CMP
4434       fprintf(stderr, "operands[1] not const0_rtx\n");
4435 #endif
4436       return false;
4437     }
4438   if (GET_CODE (pp) != SET)
4439     {
4440 #if DEBUG_CMP
4441       fprintf (stderr, "pp not set\n");
4442 #endif
4443       return false;
4444     }
4445   if (!rtx_equal_p (op0, SET_SRC (pp))
4446       && !rtx_equal_p (op0, SET_DEST (pp)))
4447     {
4448 #if DEBUG_CMP
4449       fprintf(stderr, "operands[0] not found in set\n");
4450 #endif
4451       return false;
4452     }
4453 
4454 #if DEBUG_CMP
4455   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4456 #endif
4457   if ((pflags & flags_needed) == flags_needed)
4458     return true;
4459 
4460   return false;
4461 }
4462 
4463 /* Return the pattern for a compare.  This will be commented out if
4464    the compare is redundant, else a normal pattern is returned.  Thus,
4465    the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  /* Static template; the leading ';' lets a redundant compare be
     emitted as an assembler comment.  Index 5 is the size suffix.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                             ^ 5  */

  /* Pick the size suffix from the operand's byte size:
     1 -> 'b', 2 -> 'w', 3/4 -> 'l'.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      /* Redundant: keep the leading ';' so the compare appears in the
	 .s file only as a comment.  */
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  /* Skip the ';' to emit a live compare.  */
  return templ + 1;
}
4486 
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* Register-count, mode-legality, and mode-tying hooks.  */
#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class

/* The Global `targetm' Variable. */

struct gcc_target targetm = TARGET_INITIALIZER;
4510 
4511 #include "gt-m32c.h"
4512