1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005-2021 Free Software Foundation, Inc.
3    Contributed by Red Hat.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published
9    by the Free Software Foundation; either version 3, or (at your
10    option) any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 #define IN_TARGET_CODE 1
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "stringpool.h"
31 #include "attribs.h"
32 #include "df.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "optabs.h"
36 #include "regs.h"
37 #include "emit-rtl.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "output.h"
41 #include "insn-attr.h"
42 #include "flags.h"
43 #include "reload.h"
44 #include "stor-layout.h"
45 #include "varasm.h"
46 #include "calls.h"
47 #include "explow.h"
48 #include "expr.h"
49 #include "tm-constrs.h"
50 #include "builtins.h"
51 
52 /* This file should be included last.  */
53 #include "target-def.h"
54 
55 /* Prototypes */
56 
57 /* Used by m32c_pushm_popm.  */
58 typedef enum
59 {
60   PP_pushm,
61   PP_popm,
62   PP_justcount
63 } Push_Pop_Type;
64 
65 static bool m32c_function_needs_enter (void);
66 static tree interrupt_handler (tree *, tree, tree, int, bool *);
67 static tree function_vector_handler (tree *, tree, tree, int, bool *);
68 static int interrupt_p (tree node);
69 static int bank_switch_p (tree node);
70 static int fast_interrupt_p (tree node);
72 static bool m32c_asm_integer (rtx, unsigned int, int);
73 static int m32c_comp_type_attributes (const_tree, const_tree);
74 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
75 static struct machine_function *m32c_init_machine_status (void);
76 static void m32c_insert_attributes (tree, tree *);
77 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
78 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
79 static rtx m32c_function_arg (cumulative_args_t, const function_arg_info &);
80 static bool m32c_pass_by_reference (cumulative_args_t,
81 				    const function_arg_info &);
82 static void m32c_function_arg_advance (cumulative_args_t,
83 				       const function_arg_info &);
84 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
85 static int m32c_pushm_popm (Push_Pop_Type);
86 static bool m32c_strict_argument_naming (cumulative_args_t);
87 static rtx m32c_struct_value_rtx (tree, int);
88 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
89 static int need_to_save (int);
90 static rtx m32c_function_value (const_tree, const_tree, bool);
91 static rtx m32c_libcall_value (machine_mode, const_rtx);
92 
93 /* Returns true if an address is specified, else false.  */
94 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
95 
96 static bool m32c_hard_regno_mode_ok (unsigned int, machine_mode);
97 
98 #define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)
99 
100 #define streq(a,b) (strcmp ((a), (b)) == 0)
101 
102 /* Internal support routines */
103 
104 /* Debugging statements are tagged with DEBUG0 only so that they can
105    be easily enabled individually, by replacing the '0' with '1' as
106    needed.  */
107 #define DEBUG0 0
108 #define DEBUG1 1
109 
110 #if DEBUG0
111 #include "print-tree.h"
112 /* This is needed by some of the commented-out debug statements
113    below.  */
114 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
115 #endif
116 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
117 
118 /* These are all to support encode_pattern().  */
119 static char pattern[30], *patternp;
120 static GTY(()) rtx patternr[30];
121 #define RTX_IS(x) (streq (pattern, x))
122 
123 /* Some macros to simplify the logic throughout this file.  */
124 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
125 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
126 
127 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
128 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
129 
130 static int
131 far_addr_space_p (rtx x)
132 {
133   if (GET_CODE (x) != MEM)
134     return 0;
135 #if DEBUG0
136   fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
137   fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
138 #endif
139   return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
140 }
141 
142 /* We do most RTX matching by converting the RTX into a string, and
143    using string compares.  This vastly simplifies the logic in many of
144    the functions in this file.
145 
146    On exit, pattern[] has the encoded string (use RTX_IS("...") to
147    compare it) and patternr[] has pointers to the nodes in the RTX
148    corresponding to each character in the encoded string.  The latter
149    is mostly used by print_operand().
150 
151    Unrecognized patterns have '?' in them; this shows up when the
152    assembler complains about syntax errors.
153 */
154 
155 static void
156 encode_pattern_1 (rtx x)
157 {
158   int i;
159 
160   if (patternp == pattern + sizeof (pattern) - 2)
161     {
162       patternp[-1] = '?';
163       return;
164     }
165 
166   patternr[patternp - pattern] = x;
167 
168   switch (GET_CODE (x))
169     {
170     case REG:
171       *patternp++ = 'r';
172       break;
173     case SUBREG:
174       if (GET_MODE_SIZE (GET_MODE (x)) !=
175 	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
176 	*patternp++ = 'S';
177       if (GET_MODE (x) == PSImode
178 	  && GET_CODE (XEXP (x, 0)) == REG)
179 	*patternp++ = 'S';
180       encode_pattern_1 (XEXP (x, 0));
181       break;
182     case MEM:
183       *patternp++ = 'm';
184       /* FALLTHRU */
185     case CONST:
186       encode_pattern_1 (XEXP (x, 0));
187       break;
188     case SIGN_EXTEND:
189       *patternp++ = '^';
190       *patternp++ = 'S';
191       encode_pattern_1 (XEXP (x, 0));
192       break;
193     case ZERO_EXTEND:
194       *patternp++ = '^';
195       *patternp++ = 'Z';
196       encode_pattern_1 (XEXP (x, 0));
197       break;
198     case PLUS:
199       *patternp++ = '+';
200       encode_pattern_1 (XEXP (x, 0));
201       encode_pattern_1 (XEXP (x, 1));
202       break;
203     case PRE_DEC:
204       *patternp++ = '>';
205       encode_pattern_1 (XEXP (x, 0));
206       break;
207     case POST_INC:
208       *patternp++ = '<';
209       encode_pattern_1 (XEXP (x, 0));
210       break;
211     case LO_SUM:
212       *patternp++ = 'L';
213       encode_pattern_1 (XEXP (x, 0));
214       encode_pattern_1 (XEXP (x, 1));
215       break;
216     case HIGH:
217       *patternp++ = 'H';
218       encode_pattern_1 (XEXP (x, 0));
219       break;
220     case SYMBOL_REF:
221       *patternp++ = 's';
222       break;
223     case LABEL_REF:
224       *patternp++ = 'l';
225       break;
226     case CODE_LABEL:
227       *patternp++ = 'c';
228       break;
229     case CONST_INT:
230     case CONST_DOUBLE:
231       *patternp++ = 'i';
232       break;
233     case UNSPEC:
234       *patternp++ = 'u';
235       *patternp++ = '0' + XCINT (x, 1, UNSPEC);
236       for (i = 0; i < XVECLEN (x, 0); i++)
237 	encode_pattern_1 (XVECEXP (x, 0, i));
238       break;
239     case USE:
240       *patternp++ = 'U';
241       break;
242     case PARALLEL:
243       *patternp++ = '|';
244       for (i = 0; i < XVECLEN (x, 0); i++)
245 	encode_pattern_1 (XVECEXP (x, 0, i));
246       break;
247     case EXPR_LIST:
248       *patternp++ = 'E';
249       encode_pattern_1 (XEXP (x, 0));
250       if (XEXP (x, 1))
251 	encode_pattern_1 (XEXP (x, 1));
252       break;
253     default:
254       *patternp++ = '?';
255 #if DEBUG0
256       fprintf (stderr, "can't encode pattern %s\n",
257 	       GET_RTX_NAME (GET_CODE (x)));
258       debug_rtx (x);
259 #endif
260       break;
261     }
262 }
263 
264 static void
265 encode_pattern (rtx x)
266 {
267   patternp = pattern;
268   encode_pattern_1 (x);
269   *patternp = 0;
270 }
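/* Illustrative sketch only (kept out of the build): how the encoding
   above is typically consumed by the predicates later in this file.
   The helper name is hypothetical.  */
#if 0
static bool
example_mem_reg_plus_const_p (rtx x)
{
  encode_pattern (x);
  /* "m+ri" matches (mem (plus (reg) (const_int))); patternr[2] is then
     the base register node and patternr[3] the constant offset.  */
  return RTX_IS ("m+ri");
}
#endif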
271 
272 /* Since register names indicate the mode they're used in, we need a
273    way to determine which name to use for a register in a given mode.
274    Called by print_operand().  */
275 
276 static const char *
277 reg_name_with_mode (int regno, machine_mode mode)
278 {
279   int mlen = GET_MODE_SIZE (mode);
280   if (regno == R0_REGNO && mlen == 1)
281     return "r0l";
282   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
283     return "r2r0";
284   if (regno == R0_REGNO && mlen == 6)
285     return "r2r1r0";
286   if (regno == R0_REGNO && mlen == 8)
287     return "r3r1r2r0";
288   if (regno == R1_REGNO && mlen == 1)
289     return "r1l";
290   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
291     return "r3r1";
292   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
293     return "a1a0";
294   return reg_names[regno];
295 }
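/* For example (illustrative): r0 prints as "r0l" in QImode, "r0" in
   HImode, "r2r0" in SImode, and "r3r1r2r0" in DImode.  */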
296 
297 /* How many bytes a register occupies on the stack when it's pushed.
298    We need to know this because the push opcode must explicitly encode
299    the size of the register, even though the register name already
300    implies it.  Used by m32c_output_reg_{push,pop}, which is only used
301    through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
302 
303 static int
304 reg_push_size (int regno)
305 {
306   switch (regno)
307     {
308     case R0_REGNO:
309     case R1_REGNO:
310       return 2;
311     case R2_REGNO:
312     case R3_REGNO:
313     case FLG_REGNO:
314       return 2;
315     case A0_REGNO:
316     case A1_REGNO:
317     case SB_REGNO:
318     case FB_REGNO:
319     case SP_REGNO:
320       if (TARGET_A16)
321 	return 2;
322       else
323 	return 3;
324     default:
325       gcc_unreachable ();
326     }
327 }
328 
329 /* Given two register classes, find the largest register class that
330    fits within their intersection.  If the intersection is empty,
331    return RETURNED_IF_EMPTY instead.  */
332 static reg_class_t
333 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
334 	      reg_class_t returned_if_empty)
335 {
336   HARD_REG_SET cc;
337   int i;
338   reg_class_t best = NO_REGS;
339   unsigned int best_size = 0;
340 
341   if (original_class == limiting_class)
342     return original_class;
343 
344   cc = reg_class_contents[original_class] & reg_class_contents[limiting_class];
345 
346   for (i = 0; i < LIM_REG_CLASSES; i++)
347     {
348       if (hard_reg_set_subset_p (reg_class_contents[i], cc))
349 	if (best_size < reg_class_size[i])
350 	  {
351 	    best = (reg_class_t) i;
352 	    best_size = reg_class_size[i];
353 	  }
354 
355     }
356   if (best == NO_REGS)
357     return returned_if_empty;
358   return best;
359 }
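/* For example (illustrative): when LIMITING_CLASS is a subset of
   ORIGINAL_CLASS, the result is a class with the same contents as
   LIMITING_CLASS; when the two classes share no registers, the
   RETURNED_IF_EMPTY fallback is returned instead.  */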
360 
361 /* Used by m32c_register_move_cost to determine if a move is
362    impossibly expensive.  */
363 static bool
364 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
365 {
366   /* Cache the results:  0=untested  1=no  2=yes */
367   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
368 
369   if (results[(int) rclass][mode] == 0)
370     {
371       int r;
372       results[rclass][mode] = 1;
373       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
374 	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
375 	    && m32c_hard_regno_mode_ok (r, mode))
376 	  {
377 	    results[rclass][mode] = 2;
378 	    break;
379 	  }
380     }
381 
382 #if DEBUG0
383   fprintf (stderr, "class %s can hold %s? %s\n",
384 	   class_names[(int) rclass], mode_name[mode],
385 	   (results[rclass][mode] == 2) ? "yes" : "no");
386 #endif
387   return results[(int) rclass][mode] == 2;
388 }
389 
390 /* Run-time Target Specification.  */
391 
392 /* Memregs are memory locations that gcc treats like general
393    registers, as there are a limited number of true registers and the
394    m32c families can use memory in most places that registers can be
395    used.
396 
397    However, since memory accesses are more expensive than registers,
398    we allow the user to limit the number of memregs available, in
399    order to try to persuade gcc to try harder to use real registers.
400 
401    Memregs are provided by lib1funcs.S.
402 */
403 
404 int ok_to_change_target_memregs = TRUE;
405 
406 /* Implements TARGET_OPTION_OVERRIDE.  */
407 
408 #undef TARGET_OPTION_OVERRIDE
409 #define TARGET_OPTION_OVERRIDE m32c_option_override
410 
411 static void
412 m32c_option_override (void)
413 {
414   /* We limit memregs to 0..16, and provide a default.  */
415   if (global_options_set.x_target_memregs)
416     {
417       if (target_memregs < 0 || target_memregs > 16)
418 	error ("invalid target memregs value %<%d%>", target_memregs);
419     }
420   else
421     target_memregs = 16;
422 
423   if (TARGET_A24)
424     flag_ivopts = 0;
425 
426   /* This target defaults to strict volatile bitfields.  */
427   if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
428     flag_strict_volatile_bitfields = 1;
429 
430   /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
431      This is always worse than an absolute call.  */
432   if (TARGET_A16)
433     flag_no_function_cse = 1;
434 
435   /* This wants to put insns between compares and their jumps.  */
436   /* FIXME: The right solution is to properly trace the flags register
437      values, but that is too much work for stage 4.  */
438   flag_combine_stack_adjustments = 0;
439 }
440 
441 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
442 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
443 
444 static void
445 m32c_override_options_after_change (void)
446 {
447   if (TARGET_A16)
448     flag_no_function_cse = 1;
449 }
450 
451 /* Defining data structures for per-function information */
452 
453 /* The usual; we set up our machine_function data.  */
454 static struct machine_function *
455 m32c_init_machine_status (void)
456 {
457   return ggc_cleared_alloc<machine_function> ();
458 }
459 
460 /* Implements INIT_EXPANDERS.  We just set up to call the above
461    function.  */
462 void
463 m32c_init_expanders (void)
464 {
465   init_machine_status = m32c_init_machine_status;
466 }
467 
468 /* Storage Layout */
469 
470 /* Register Basics */
471 
472 /* Basic Characteristics of Registers */
473 
474 /* Whether a mode fits in a register is complex enough to warrant a
475    table.  */
476 static struct
477 {
478   char qi_regs;
479   char hi_regs;
480   char pi_regs;
481   char si_regs;
482   char di_regs;
483 } nregs_table[FIRST_PSEUDO_REGISTER] =
484 {
485   { 1, 1, 2, 2, 4 },		/* r0 */
486   { 0, 1, 0, 0, 0 },		/* r2 */
487   { 1, 1, 2, 2, 0 },		/* r1 */
488   { 0, 1, 0, 0, 0 },		/* r3 */
489   { 0, 1, 1, 0, 0 },		/* a0 */
490   { 0, 1, 1, 0, 0 },		/* a1 */
491   { 0, 1, 1, 0, 0 },		/* sb */
492   { 0, 1, 1, 0, 0 },		/* fb */
493   { 0, 1, 1, 0, 0 },		/* sp */
494   { 1, 1, 1, 0, 0 },		/* pc */
495   { 0, 0, 0, 0, 0 },		/* fl */
496   { 1, 1, 1, 0, 0 },		/* ap */
497   { 1, 1, 2, 2, 4 },		/* mem0 */
498   { 1, 1, 2, 2, 4 },		/* mem1 */
499   { 1, 1, 2, 2, 4 },		/* mem2 */
500   { 1, 1, 2, 2, 4 },		/* mem3 */
501   { 1, 1, 2, 2, 4 },		/* mem4 */
502   { 1, 1, 2, 2, 0 },		/* mem5 */
503   { 1, 1, 2, 2, 0 },		/* mem6 */
504   { 1, 1, 0, 0, 0 },		/* mem7 */
505 };
506 
507 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
508    of available memregs, and select which registers need to be preserved
509    across calls based on the chip family.  */
510 
511 #undef TARGET_CONDITIONAL_REGISTER_USAGE
512 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
513 void
514 m32c_conditional_register_usage (void)
515 {
516   int i;
517 
518   if (target_memregs >= 0 && target_memregs <= 16)
519     {
520       /* The command line option is bytes, but our "registers" are
521 	 16-bit words.  */
522       for (i = (target_memregs+1)/2; i < 8; i++)
523 	{
524 	  fixed_regs[MEM0_REGNO + i] = 1;
525 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
526 	}
527     }
528 
529   /* M32CM and M32C preserve more registers across function calls.  */
530   if (TARGET_A24)
531     {
532       call_used_regs[R1_REGNO] = 0;
533       call_used_regs[R2_REGNO] = 0;
534       call_used_regs[R3_REGNO] = 0;
535       call_used_regs[A0_REGNO] = 0;
536       call_used_regs[A1_REGNO] = 0;
537     }
538 }
539 
540 /* How Values Fit in Registers */
541 
542 /* Implements TARGET_HARD_REGNO_NREGS.  This is complicated by the fact that
543    different registers are different sizes from each other, *and* may
544    be different sizes in different chip families.  */
545 static unsigned int
546 m32c_hard_regno_nregs_1 (unsigned int regno, machine_mode mode)
547 {
548   if (regno == FLG_REGNO && mode == CCmode)
549     return 1;
550   if (regno >= FIRST_PSEUDO_REGISTER)
551     return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
552 
553   if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
554     return (GET_MODE_SIZE (mode) + 1) / 2;
555 
556   if (GET_MODE_SIZE (mode) <= 1)
557     return nregs_table[regno].qi_regs;
558   if (GET_MODE_SIZE (mode) <= 2)
559     return nregs_table[regno].hi_regs;
560   if (regno == A0_REGNO && mode == SImode && TARGET_A16)
561     return 2;
562   if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
563     return nregs_table[regno].pi_regs;
564   if (GET_MODE_SIZE (mode) <= 4)
565     return nregs_table[regno].si_regs;
566   if (GET_MODE_SIZE (mode) <= 8)
567     return nregs_table[regno].di_regs;
568   return 0;
569 }
570 
571 static unsigned int
572 m32c_hard_regno_nregs (unsigned int regno, machine_mode mode)
573 {
574   unsigned int rv = m32c_hard_regno_nregs_1 (regno, mode);
575   return rv ? rv : 1;
576 }
577 
578 /* Implement TARGET_HARD_REGNO_MODE_OK.  The above function does the work
579    already; just test its return value.  */
580 static bool
581 m32c_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
582 {
583   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
584 }
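/* For example (illustrative): SImode starting at r0 occupies two hard
   registers (the r2r0 pair), while SImode in a0 is only valid on the
   A16 (R8C/M16C) family, where it uses the a1a0 pair; on A24 parts
   m32c_hard_regno_nregs_1 returns 0 for it, so the mode is rejected.  */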
585 
586 /* Implement TARGET_MODES_TIEABLE_P.  In general, modes aren't tieable since
587    registers are all different sizes.  However, since most modes are
588    bigger than our registers anyway, it's easier to implement this
589    function that way, leaving QImode as the only unique case.  */
590 static bool
591 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
592 {
593   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
594     return 1;
595 
596 #if 0
597   if (m1 == QImode || m2 == QImode)
598     return 0;
599 #endif
600 
601   return 1;
602 }
603 
604 /* Register Classes */
605 
606 /* Implements REGNO_REG_CLASS.  */
607 enum reg_class
608 m32c_regno_reg_class (int regno)
609 {
610   switch (regno)
611     {
612     case R0_REGNO:
613       return R0_REGS;
614     case R1_REGNO:
615       return R1_REGS;
616     case R2_REGNO:
617       return R2_REGS;
618     case R3_REGNO:
619       return R3_REGS;
620     case A0_REGNO:
621       return A0_REGS;
622     case A1_REGNO:
623       return A1_REGS;
624     case SB_REGNO:
625       return SB_REGS;
626     case FB_REGNO:
627       return FB_REGS;
628     case SP_REGNO:
629       return SP_REGS;
630     case FLG_REGNO:
631       return FLG_REGS;
632     default:
633       if (IS_MEM_REGNO (regno))
634 	return MEM_REGS;
635       return ALL_REGS;
636     }
637 }
638 
639 /* Implements REGNO_OK_FOR_BASE_P.  */
640 int
641 m32c_regno_ok_for_base_p (int regno)
642 {
643   if (regno == A0_REGNO
644       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
645     return 1;
646   return 0;
647 }
648 
649 /* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
650    registers of the appropriate size.  */
651 
652 #undef TARGET_PREFERRED_RELOAD_CLASS
653 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
654 
655 static reg_class_t
656 m32c_preferred_reload_class (rtx x, reg_class_t rclass)
657 {
658   reg_class_t newclass = rclass;
659 
660 #if DEBUG0
661   fprintf (stderr, "\npreferred_reload_class for %s is ",
662 	   class_names[rclass]);
663 #endif
664   if (rclass == NO_REGS)
665     rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
666 
667   if (reg_classes_intersect_p (rclass, CR_REGS))
668     {
669       switch (GET_MODE (x))
670 	{
671 	case E_QImode:
672 	  newclass = HL_REGS;
673 	  break;
674 	default:
675 	  /*      newclass = HI_REGS; */
676 	  break;
677 	}
678     }
679 
680   else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
681     newclass = SI_REGS;
682   else if (GET_MODE_SIZE (GET_MODE (x)) > 4
683 	   && ! reg_class_subset_p (R03_REGS, rclass))
684     newclass = DI_REGS;
685 
686   rclass = reduce_class (rclass, newclass, rclass);
687 
688   if (GET_MODE (x) == QImode)
689     rclass = reduce_class (rclass, HL_REGS, rclass);
690 
691 #if DEBUG0
692   fprintf (stderr, "%s\n", class_names[rclass]);
693   debug_rtx (x);
694 
695   if (GET_CODE (x) == MEM
696       && GET_CODE (XEXP (x, 0)) == PLUS
697       && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
698     fprintf (stderr, "Glorm!\n");
699 #endif
700   return rclass;
701 }
702 
703 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
704 
705 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
706 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
707 
708 static reg_class_t
709 m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
710 {
711   return m32c_preferred_reload_class (x, rclass);
712 }
713 
714 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid handing
715    out address registers for data reloads, since they're needed for
716    address reloads themselves.  */
717 int
718 m32c_limit_reload_class (machine_mode mode, int rclass)
719 {
720 #if DEBUG0
721   fprintf (stderr, "limit_reload_class for %s: %s ->",
722 	   mode_name[mode], class_names[rclass]);
723 #endif
724 
725   if (mode == QImode)
726     rclass = reduce_class (rclass, HL_REGS, rclass);
727   else if (mode == HImode)
728     rclass = reduce_class (rclass, HI_REGS, rclass);
729   else if (mode == SImode)
730     rclass = reduce_class (rclass, SI_REGS, rclass);
731 
732   if (rclass != A_REGS)
733     rclass = reduce_class (rclass, DI_REGS, rclass);
734 
735 #if DEBUG0
736   fprintf (stderr, " %s\n", class_names[rclass]);
737 #endif
738   return rclass;
739 }
740 
741 /* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be
742    reloaded in r0 or r1, as those are the only real QImode registers.
743    CR regs get reloaded through appropriately sized general or
744    address registers.  */
745 int
746 m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
747 {
748   int cc = class_contents[rclass][0];
749 #if DEBUG0
750   fprintf (stderr, "\nsecondary reload class %s %s\n",
751 	   class_names[rclass], mode_name[mode]);
752   debug_rtx (x);
753 #endif
754   if (mode == QImode
755       && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
756     return QI_REGS;
757   if (reg_classes_intersect_p (rclass, CR_REGS)
758       && GET_CODE (x) == REG
759       && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
760     return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
761   return NO_REGS;
762 }
763 
764 /* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
765    reloads.  */
766 
767 #undef TARGET_CLASS_LIKELY_SPILLED_P
768 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
769 
770 static bool
771 m32c_class_likely_spilled_p (reg_class_t regclass)
772 {
773   if (regclass == A_REGS)
774     return true;
775 
776   return (reg_class_size[(int) regclass] == 1);
777 }
778 
779 /* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
780    documented meaning, to avoid potential inconsistencies with actual
781    class definitions.  */
782 
783 #undef TARGET_CLASS_MAX_NREGS
784 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
785 
786 static unsigned char
787 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
788 {
789   int rn;
790   unsigned char max = 0;
791 
792   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
793     if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
794       {
795 	unsigned char n = m32c_hard_regno_nregs (rn, mode);
796 	if (max < n)
797 	  max = n;
798       }
799   return max;
800 }
801 
802 /* Implements TARGET_CAN_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
803    QI (r0l, r1l) because the chip doesn't support QI ops on other
804    registers (well, it does on a0/a1 but if we let gcc do that, reload
805    suffers).  Otherwise, we allow changes to larger modes.  */
806 static bool
807 m32c_can_change_mode_class (machine_mode from,
808 			    machine_mode to, reg_class_t rclass)
809 {
810   int rn;
811 #if DEBUG0
812   fprintf (stderr, "can change from %s to %s in %s\n",
813 	   mode_name[from], mode_name[to], class_names[rclass]);
814 #endif
815 
816   /* If the larger mode isn't allowed in any of these registers, we
817      can't allow the change.  */
818   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
819     if (class_contents[rclass][0] & (1 << rn))
820       if (! m32c_hard_regno_mode_ok (rn, to))
821 	return false;
822 
823   if (to == QImode)
824     return (class_contents[rclass][0] & 0x1ffa) == 0;
825 
826   if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
827       && GET_MODE_SIZE (from) > 1)
828     return true;
829   if (GET_MODE_SIZE (from) > 2)	/* all other regs */
830     return true;
831 
832   return false;
833 }
834 
835 /* Helpers for the rest of the file.  */
836 /* TRUE if the rtx is a REG rtx for the given register.  */
837 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
838 			   && REGNO (rtx) == regno)
839 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
840    base register in address calculations (hence the "strict"
841    argument).  */
842 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
843 			       && (REGNO (rtx) == AP_REGNO \
844 				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
845 
846 #define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
847 
848 /* Implements matching for constraints (see next function too).  'S' is
849    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
850    call return values.  */
851 bool
852 m32c_matches_constraint_p (rtx value, int constraint)
853 {
854   encode_pattern (value);
855 
856   switch (constraint) {
857   case CONSTRAINT_SF:
858     return (far_addr_space_p (value)
859 	    && ((RTX_IS ("mr")
860 		 && A0_OR_PSEUDO (patternr[1])
861 		 && GET_MODE (patternr[1]) == SImode)
862 		|| (RTX_IS ("m+^Sri")
863 		    && A0_OR_PSEUDO (patternr[4])
864 		    && GET_MODE (patternr[4]) == HImode)
865 		|| (RTX_IS ("m+^Srs")
866 		    && A0_OR_PSEUDO (patternr[4])
867 		    && GET_MODE (patternr[4]) == HImode)
868 		|| (RTX_IS ("m+^S+ris")
869 		    && A0_OR_PSEUDO (patternr[5])
870 		    && GET_MODE (patternr[5]) == HImode)
871 		|| RTX_IS ("ms")));
872   case CONSTRAINT_Sd:
873     {
874       /* This is the common "src/dest" address */
875       rtx r;
876       if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
877 	return true;
878       if (RTX_IS ("ms") || RTX_IS ("m+si"))
879 	return true;
880       if (RTX_IS ("m++rii"))
881 	{
882 	  if (REGNO (patternr[3]) == FB_REGNO
883 	      && INTVAL (patternr[4]) == 0)
884 	    return true;
885 	}
886       if (RTX_IS ("mr"))
887 	r = patternr[1];
888       else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
889 	r = patternr[2];
890       else
891 	return false;
892       if (REGNO (r) == SP_REGNO)
893 	return false;
894       return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
895     }
896   case CONSTRAINT_Sa:
897     {
898       rtx r;
899       if (RTX_IS ("mr"))
900 	r = patternr[1];
901       else if (RTX_IS ("m+ri"))
902 	r = patternr[2];
903       else
904 	return false;
905       return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
906     }
907   case CONSTRAINT_Si:
908     return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
909   case CONSTRAINT_Ss:
910     return ((RTX_IS ("mr")
911 	     && (IS_REG (patternr[1], SP_REGNO)))
912 	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
913   case CONSTRAINT_Sf:
914     return ((RTX_IS ("mr")
915 	     && (IS_REG (patternr[1], FB_REGNO)))
916 	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
917   case CONSTRAINT_Sb:
918     return ((RTX_IS ("mr")
919 	     && (IS_REG (patternr[1], SB_REGNO)))
920 	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
921   case CONSTRAINT_Sp:
922     /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
923     return (RTX_IS ("mi")
924 	    && !(INTVAL (patternr[1]) & ~0x1fff));
925   case CONSTRAINT_S1:
926     return r1h_operand (value, QImode);
927   case CONSTRAINT_Rpa:
928     return GET_CODE (value) == PARALLEL;
929   default:
930     return false;
931   }
932 }
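/* For example (illustrative): an operand such as
   (mem:HI (plus (reg SB) (const_int 10))) encodes as "m+ri" and so
   satisfies the "Sb" constraint above, while the same form based on
   $sp satisfies "Ss" and one based on $fb satisfies "Sf".  */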
933 
934 /* STACK AND CALLING */
935 
936 /* Frame Layout */
937 
938 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
939    (yes, THREE bytes) onto the stack for the return address, but we
940    don't support pointers bigger than 16 bits on those chips.  This
941    will likely wreak havoc with exception unwinding.  FIXME.  */
942 rtx
943 m32c_return_addr_rtx (int count)
944 {
945   machine_mode mode;
946   int offset;
947   rtx ra_mem;
948 
949   if (count)
950     return NULL_RTX;
951   /* we want 2[$fb] */
952 
953   if (TARGET_A24)
954     {
955       /* It's four bytes */
956       mode = PSImode;
957       offset = 4;
958     }
959   else
960     {
961       /* FIXME: it's really 3 bytes */
962       mode = HImode;
963       offset = 2;
964     }
965 
966   ra_mem =
967     gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
968 				      offset));
969   return copy_to_mode_reg (mode, ra_mem);
970 }
971 
972 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
973 rtx
974 m32c_incoming_return_addr_rtx (void)
975 {
976   /* we want [sp] */
977   return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
978 }
979 
980 /* Exception Handling Support */
981 
982 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
983    pointers.  */
984 int
985 m32c_eh_return_data_regno (int n)
986 {
987   switch (n)
988     {
989     case 0:
990       return MEM0_REGNO;
991     case 1:
992       return MEM0_REGNO+4;
993     default:
994       return INVALID_REGNUM;
995     }
996 }
997 
998 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
999    m32c_emit_eh_epilogue.  */
1000 rtx
1001 m32c_eh_return_stackadj_rtx (void)
1002 {
1003   if (!cfun->machine->eh_stack_adjust)
1004     {
1005       rtx sa;
1006 
1007       sa = gen_rtx_REG (Pmode, R0_REGNO);
1008       cfun->machine->eh_stack_adjust = sa;
1009     }
1010   return cfun->machine->eh_stack_adjust;
1011 }
1012 
1013 /* Registers That Address the Stack Frame */
1014 
1015 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1016    the original spec called for dwarf numbers to vary with register
1017    width as well, for example, r0l, r0, and r2r0 would each have
1018    different dwarf numbers.  GCC doesn't support this, and we don't do
1019    it, and gdb seems to like it this way anyway.  */
1020 unsigned int
1021 m32c_dwarf_frame_regnum (int n)
1022 {
1023   switch (n)
1024     {
1025     case R0_REGNO:
1026       return 5;
1027     case R1_REGNO:
1028       return 6;
1029     case R2_REGNO:
1030       return 7;
1031     case R3_REGNO:
1032       return 8;
1033     case A0_REGNO:
1034       return 9;
1035     case A1_REGNO:
1036       return 10;
1037     case FB_REGNO:
1038       return 11;
1039     case SB_REGNO:
1040       return 19;
1041 
1042     case SP_REGNO:
1043       return 12;
1044     case PC_REGNO:
1045       return 13;
1046     default:
1047       return DWARF_FRAME_REGISTERS + 1;
1048     }
1049 }
1050 
1051 /* The frame looks like this:
1052 
1053    ap -> +------------------------------
1054          | Return address (3 or 4 bytes)
1055 	 | Saved FB (2 or 4 bytes)
1056    fb -> +------------------------------
1057 	 | local vars
1058          | register saves fb
1059 	 |        through r0 as needed
1060    sp -> +------------------------------
1061 */
1062 
1063 /* We use this to wrap all emitted insns in the prologue.  */
1064 static rtx
1065 F (rtx x)
1066 {
1067   RTX_FRAME_RELATED_P (x) = 1;
1068   return x;
1069 }
1070 
1071 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1072    how much the stack pointer moves for each, for each cpu family.  */
1073 static struct
1074 {
1075   int reg1;
1076   int bit;
1077   int a16_bytes;
1078   int a24_bytes;
1079 } pushm_info[] =
1080 {
1081   /* These are in reverse push (nearest-to-sp) order.  */
1082   { R0_REGNO, 0x80, 2, 2 },
1083   { R1_REGNO, 0x40, 2, 2 },
1084   { R2_REGNO, 0x20, 2, 2 },
1085   { R3_REGNO, 0x10, 2, 2 },
1086   { A0_REGNO, 0x08, 2, 4 },
1087   { A1_REGNO, 0x04, 2, 4 },
1088   { SB_REGNO, 0x02, 2, 4 },
1089   { FB_REGNO, 0x01, 2, 4 }
1090 };
1091 
1092 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
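/* For example (illustrative): saving r0, r1 and fb corresponds to the
   mask 0x80|0x40|0x01 = 0xc1, and moves $sp by 2+2+2 = 6 bytes on A16
   parts or 2+2+4 = 8 bytes on A24 parts, per the table above.  */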
1093 
1094 /* Returns TRUE if we need to save/restore the given register.  We
1095    save everything for exception handlers, so that any register can be
1096    unwound.  For interrupt handlers, we save everything if the handler
1097    calls something else (because we don't know what *that* function
1098    might do), but try to be a bit smarter if the handler is a leaf
1099    function.  We always save $a0, though, because we use that in the
1100    epilogue to copy $fb to $sp.  */
1101 static int
1102 need_to_save (int regno)
1103 {
1104   if (fixed_regs[regno])
1105     return 0;
1106   if (crtl->calls_eh_return)
1107     return 1;
1108   if (regno == FP_REGNO)
1109     return 0;
1110   if (cfun->machine->is_interrupt
1111       && (!cfun->machine->is_leaf
1112 	  || (regno == A0_REGNO
1113 	      && m32c_function_needs_enter ())
1114 	  ))
1115     return 1;
1116   if (df_regs_ever_live_p (regno)
1117       && (!call_used_or_fixed_reg_p (regno) || cfun->machine->is_interrupt))
1118     return 1;
1119   return 0;
1120 }
1121 
1122 /* This function contains all the intelligence about saving and
1123    restoring registers.  It always figures out the register save set.
1124    When called with PP_justcount, it merely returns the size of the
1125    save set (for eliminating the frame pointer, for example).  When
1126    called with PP_pushm or PP_popm, it emits the appropriate
1127    instructions for saving (pushm) or restoring (popm) the
1128    registers.  */
1129 static int
1130 m32c_pushm_popm (Push_Pop_Type ppt)
1131 {
1132   int reg_mask = 0;
1133   int byte_count = 0, bytes;
1134   int i;
1135   rtx dwarf_set[PUSHM_N];
1136   int n_dwarfs = 0;
1137   int nosave_mask = 0;
1138 
1139   if (crtl->return_rtx
1140       && GET_CODE (crtl->return_rtx) == PARALLEL
1141       && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1142     {
1143       rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1144       rtx rv = XEXP (exp, 0);
1145       int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1146 
1147       if (rv_bytes > 2)
1148 	nosave_mask |= 0x20;	/* PSI, SI */
1149       else
1150 	nosave_mask |= 0xf0;	/* DF */
1151       if (rv_bytes > 4)
1152 	nosave_mask |= 0x50;	/* DI */
1153     }
1154 
1155   for (i = 0; i < (int) PUSHM_N; i++)
1156     {
1157       /* Skip if neither register needs saving.  */
1158       if (!need_to_save (pushm_info[i].reg1))
1159 	continue;
1160 
1161       if (pushm_info[i].bit & nosave_mask)
1162 	continue;
1163 
1164       reg_mask |= pushm_info[i].bit;
1165       bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1166 
1167       if (ppt == PP_pushm)
1168 	{
1169 	  machine_mode mode = (bytes == 2) ? HImode : SImode;
1170 	  rtx addr;
1171 
1172 	  /* Always use stack_pointer_rtx instead of calling
1173 	     gen_rtx_REG ourselves.  Code elsewhere in GCC assumes
1174 	     that there is a single rtx representing the stack pointer,
1175 	     namely stack_pointer_rtx, and uses == to recognize it.  */
1176 	  addr = stack_pointer_rtx;
1177 
1178 	  if (byte_count != 0)
1179 	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1180 
1181 	  dwarf_set[n_dwarfs++] =
1182 	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
1183 			 gen_rtx_REG (mode, pushm_info[i].reg1));
1184 	  F (dwarf_set[n_dwarfs - 1]);
1185 
1186 	}
1187       byte_count += bytes;
1188     }
1189 
1190   if (cfun->machine->is_interrupt)
1191     {
1192       cfun->machine->intr_pushm = reg_mask & 0xfe;
1193       reg_mask = 0;
1194       byte_count = 0;
1195     }
1196 
1197   if (cfun->machine->is_interrupt)
1198     for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1199       if (need_to_save (i))
1200 	{
1201 	  byte_count += 2;
1202 	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1203 	}
1204 
1205   if (ppt == PP_pushm && byte_count)
1206     {
1207       rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1208       rtx pushm;
1209 
1210       if (reg_mask)
1211 	{
1212 	  XVECEXP (note, 0, 0)
1213 	    = gen_rtx_SET (stack_pointer_rtx,
1214 			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1215 					 stack_pointer_rtx,
1216 					 GEN_INT (-byte_count)));
1217 	  F (XVECEXP (note, 0, 0));
1218 
1219 	  for (i = 0; i < n_dwarfs; i++)
1220 	    XVECEXP (note, 0, i + 1) = dwarf_set[i];
1221 
1222 	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1223 
1224 	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
1225 	}
1226 
1227       if (cfun->machine->is_interrupt)
1228 	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1229 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1230 	    {
1231 	      if (TARGET_A16)
1232 		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1233 	      else
1234 		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1235 	      F (pushm);
1236 	    }
1237     }
1238   if (ppt == PP_popm && byte_count)
1239     {
1240       if (cfun->machine->is_interrupt)
1241 	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1242 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1243 	    {
1244 	      if (TARGET_A16)
1245 		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1246 	      else
1247 		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1248 	    }
1249       if (reg_mask)
1250 	emit_insn (gen_popm (GEN_INT (reg_mask)));
1251     }
1252 
1253   return byte_count;
1254 }
1255 
1256 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1257    diagrams our call frame.  */
1258 int
1259 m32c_initial_elimination_offset (int from, int to)
1260 {
1261   int ofs = 0;
1262 
1263   if (from == AP_REGNO)
1264     {
1265       if (TARGET_A16)
1266 	ofs += 5;
1267       else
1268 	ofs += 8;
1269     }
1270 
1271   if (to == SP_REGNO)
1272     {
1273       ofs += m32c_pushm_popm (PP_justcount);
1274       ofs += get_frame_size ();
1275     }
1276 
1277   /* Account for push rounding.  */
1278   if (TARGET_A24)
1279     ofs = (ofs + 1) & ~1;
1280 #if DEBUG0
1281   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1282 	   to, ofs);
1283 #endif
1284   return ofs;
1285 }
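/* Worked example (illustrative): an A16 function that saves r1 and r2
   via pushm (4 bytes) and has 10 bytes of locals eliminates $ap to $sp
   with an offset of 5 + 4 + 10 = 19 bytes, while $ap to $fb is just
   the fixed 5 bytes of return address plus saved $fb.  */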
1286 
1287 /* Passing Function Arguments on the Stack */
1288 
1289 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1290    M32C has word stacks.  */
1291 poly_int64
1292 m32c_push_rounding (poly_int64 n)
1293 {
1294   if (TARGET_R8C || TARGET_M16C)
1295     return n;
1296   return (n + 1) & ~1;
1297 }
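/* For example (illustrative): pushing a single byte reserves 1 byte on
   the byte-aligned R8C/M16C stack but is rounded up to 2 bytes on the
   word-aligned M32C stack.  */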
1298 
1299 /* Passing Arguments in Registers */
1300 
1301 /* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
1302    registers, partly on stack.  If our function returns a struct, a
1303    pointer to a buffer for it is at the top of the stack (last thing
1304    pushed).  The first few real arguments may be in registers as
1305    follows:
1306 
1307    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1308 		arg2 in r2 if it's HI (else pushed on stack)
1309 		rest on stack
1310    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1311 		rest on stack
1312 
1313    Structs are not passed in registers, even if they fit.  Only
1314    integer and pointer types are passed in registers.
1315 
1316    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1317    r2 if it fits.  */
1318 #undef TARGET_FUNCTION_ARG
1319 #define TARGET_FUNCTION_ARG m32c_function_arg
1320 static rtx
1321 m32c_function_arg (cumulative_args_t ca_v, const function_arg_info &arg)
1322 {
1323   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1324 
1325   /* Can return a reg, parallel, or 0 for stack */
1326   rtx rv = NULL_RTX;
1327 #if DEBUG0
1328   fprintf (stderr, "func_arg %d (%s, %d)\n",
1329 	   ca->parm_num, mode_name[arg.mode], arg.named);
1330   debug_tree (arg.type);
1331 #endif
1332 
1333   if (arg.end_marker_p ())
1334     return GEN_INT (0);
1335 
1336   if (ca->force_mem || !arg.named)
1337     {
1338 #if DEBUG0
1339       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1340 	       arg.named);
1341 #endif
1342       return NULL_RTX;
1343     }
1344 
1345   if (arg.type && ! INTEGRAL_TYPE_P (arg.type) && ! POINTER_TYPE_P (arg.type))
1346     return NULL_RTX;
1347 
1348   if (arg.aggregate_type_p ())
1349     return NULL_RTX;
1350 
1351   switch (ca->parm_num)
1352     {
1353     case 1:
1354       if (GET_MODE_SIZE (arg.mode) == 1 || GET_MODE_SIZE (arg.mode) == 2)
1355 	rv = gen_rtx_REG (arg.mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1356       break;
1357 
1358     case 2:
1359       if (TARGET_A16 && GET_MODE_SIZE (arg.mode) == 2)
1360 	rv = gen_rtx_REG (arg.mode, R2_REGNO);
1361       break;
1362     }
1363 
1364 #if DEBUG0
1365   debug_rtx (rv);
1366 #endif
1367   return rv;
1368 }
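/* For example (illustrative): for a function taking (int a, int b,
   long c), the R8C/M16C ABI passes a in r1 and b in r2 with c on the
   stack, while the M32C ABI passes only a in a register (r0) and
   pushes the rest.  */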
1369 
1370 #undef TARGET_PASS_BY_REFERENCE
1371 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1372 static bool
1373 m32c_pass_by_reference (cumulative_args_t, const function_arg_info &)
1374 {
1375   return 0;
1376 }
1377 
1378 /* Implements INIT_CUMULATIVE_ARGS.  */
1379 void
1380 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1381 			   tree fntype,
1382 			   rtx libname ATTRIBUTE_UNUSED,
1383 			   tree fndecl,
1384 			   int n_named_args ATTRIBUTE_UNUSED)
1385 {
1386   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1387     ca->force_mem = 1;
1388   else
1389     ca->force_mem = 0;
1390   ca->parm_num = 1;
1391 }
1392 
1393 /* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
1394    functions returning structures, so we always reset that.  Otherwise,
1395    we only need to know the sequence number of the argument to know what
1396    to do with it.  */
1397 #undef TARGET_FUNCTION_ARG_ADVANCE
1398 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1399 static void
1400 m32c_function_arg_advance (cumulative_args_t ca_v,
1401 			   const function_arg_info &)
1402 {
1403   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1404 
1405   if (ca->force_mem)
1406     ca->force_mem = 0;
1407   else
1408     ca->parm_num++;
1409 }
1410 
1411 /* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
1412 #undef TARGET_FUNCTION_ARG_BOUNDARY
1413 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1414 static unsigned int
1415 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1416 			    const_tree type ATTRIBUTE_UNUSED)
1417 {
1418   return (TARGET_A16 ? 8 : 16);
1419 }
1420 
1421 /* Implements FUNCTION_ARG_REGNO_P.  */
1422 int
1423 m32c_function_arg_regno_p (int r)
1424 {
1425   if (TARGET_A24)
1426     return (r == R0_REGNO);
1427   return (r == R1_REGNO || r == R2_REGNO);
1428 }
1429 
1430 /* HImode and PSImode are the two "native" modes as far as GCC is
1431    concerned, but the chips also support a 32-bit mode which is used
1432    for some opcodes in R8C/M16C and for reset vectors and such.  */
1433 #undef TARGET_VALID_POINTER_MODE
1434 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1435 static bool
1436 m32c_valid_pointer_mode (scalar_int_mode mode)
1437 {
1438   if (mode == HImode
1439       || mode == PSImode
1440       || mode == SImode
1441       )
1442     return 1;
1443   return 0;
1444 }
1445 
1446 /* How Scalar Function Values Are Returned */
1447 
1448 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
1449    combination of registers starting there (r2r0 for longs, r3r1r2r0
1450    for long long, r3r2r1r0 for doubles), except that that ABI
1451    currently doesn't work because it ends up using all available
1452    general registers and gcc often can't compile it.  So, instead, we
1453    return anything bigger than 16 bits in "mem0" (effectively, a
1454    memory location).  */
1455 
1456 #undef TARGET_LIBCALL_VALUE
1457 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1458 
1459 static rtx
1460 m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1461 {
1462   /* return reg or parallel */
1463 #if 0
1464   /* FIXME: GCC has difficulty returning large values in registers,
1465      because that ties up most of the general registers and gives the
1466      register allocator little to work with.  Until we can resolve
1467      this, large values are returned in memory.  */
1468   if (mode == DFmode)
1469     {
1470       rtx rv;
1471 
1472       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1473       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1474 					      gen_rtx_REG (HImode,
1475 							   R0_REGNO),
1476 					      GEN_INT (0));
1477       XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1478 					      gen_rtx_REG (HImode,
1479 							   R1_REGNO),
1480 					      GEN_INT (2));
1481       XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1482 					      gen_rtx_REG (HImode,
1483 							   R2_REGNO),
1484 					      GEN_INT (4));
1485       XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1486 					      gen_rtx_REG (HImode,
1487 							   R3_REGNO),
1488 					      GEN_INT (6));
1489       return rv;
1490     }
1491 
1492   if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1493     {
1494       rtx rv;
1495 
1496       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1497       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1498 					      gen_rtx_REG (mode,
1499 							   R0_REGNO),
1500 					      GEN_INT (0));
1501       return rv;
1502     }
1503 #endif
1504 
1505   if (GET_MODE_SIZE (mode) > 2)
1506     return gen_rtx_REG (mode, MEM0_REGNO);
1507   return gen_rtx_REG (mode, R0_REGNO);
1508 }
1509 
1510 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1511    conventions.  */
1512 
1513 #undef TARGET_FUNCTION_VALUE
1514 #define TARGET_FUNCTION_VALUE m32c_function_value
1515 
1516 static rtx
1517 m32c_function_value (const_tree valtype,
1518 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1519 		     bool outgoing ATTRIBUTE_UNUSED)
1520 {
1521   /* return reg or parallel */
1522   const machine_mode mode = TYPE_MODE (valtype);
1523   return m32c_libcall_value (mode, NULL_RTX);
1524 }
1525 
1526 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */
1527 
1528 #undef TARGET_FUNCTION_VALUE_REGNO_P
1529 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1530 
1531 static bool
1532 m32c_function_value_regno_p (const unsigned int regno)
1533 {
1534   return (regno == R0_REGNO || regno == MEM0_REGNO);
1535 }
1536 
1537 /* How Large Values Are Returned */
1538 
1539 /* We return structures by pushing the address on the stack, even if
1540    we use registers for the first few "real" arguments.  */
1541 #undef TARGET_STRUCT_VALUE_RTX
1542 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1543 static rtx
1544 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1545 		       int incoming ATTRIBUTE_UNUSED)
1546 {
1547   return 0;
1548 }
1549 
1550 /* Function Entry and Exit */
1551 
1552 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1553 int
1554 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1555 {
1556   if (cfun->machine->is_interrupt)
1557     return 1;
1558   return 0;
1559 }
1560 
1561 /* Implementing the Varargs Macros */
1562 
1563 #undef TARGET_STRICT_ARGUMENT_NAMING
1564 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1565 static bool
1566 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1567 {
1568   return 1;
1569 }
1570 
1571 /* Trampolines for Nested Functions */
1572 
1573 /*
1574    m16c:
1575    1 0000 75C43412              mov.w   #0x1234,a0
1576    2 0004 FC000000              jmp.a   label
1577 
1578    m32c:
1579    1 0000 BC563412              mov.l:s #0x123456,a0
1580    2 0004 CC000000              jmp.a   label
1581 */
1582 
1583 /* Implements TRAMPOLINE_SIZE.  */
1584 int
1585 m32c_trampoline_size (void)
1586 {
1587   /* Allocate extra space so we can avoid the messy shifts when we
1588      initialize the trampoline; we just write past the end of the
1589      opcode.  */
1590   return TARGET_A16 ? 8 : 10;
1591 }
1592 
1593 /* Implements TRAMPOLINE_ALIGNMENT.  */
1594 int
1595 m32c_trampoline_alignment (void)
1596 {
1597   return 2;
1598 }
1599 
1600 /* Implements TARGET_TRAMPOLINE_INIT.  */
1601 
1602 #undef TARGET_TRAMPOLINE_INIT
1603 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1604 static void
1605 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1606 {
1607   rtx function = XEXP (DECL_RTL (fndecl), 0);
1608 
1609 #define A0(m,i) adjust_address (m_tramp, m, i)
1610   if (TARGET_A16)
1611     {
1612       /* Note: we subtract a "word" because the moves want signed
1613 	 constants, not unsigned constants.  */
1614       emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1615       emit_move_insn (A0 (HImode, 2), chainval);
1616       emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1617       /* We use 16-bit addresses here, but store the zero to turn it
1618 	 into a 24-bit offset.  */
1619       emit_move_insn (A0 (HImode, 5), function);
1620       emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1621     }
1622   else
1623     {
1624       /* Note that the PSI moves actually write 4 bytes.  Make sure we
1625 	 write stuff out in the right order, and leave room for the
1626 	 extra byte at the end.  */
1627       emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1628       emit_move_insn (A0 (PSImode, 1), chainval);
1629       emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1630       emit_move_insn (A0 (PSImode, 5), function);
1631     }
1632 #undef A0
1633 }
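/* Illustrative byte image (little-endian, as laid down above) of an
   initialized A16 trampoline with static chain 0x1234 and target
   0x00c0f0:  75 c4 34 12  fc f0 c0 00  -- i.e. "mov.w #0x1234,a0"
   followed by "jmp.a 0x00c0f0".  */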
1634 
1635 #undef TARGET_LRA_P
1636 #define TARGET_LRA_P hook_bool_void_false
1637 
1638 /* Addressing Modes */
1639 
1640 /* The r8c/m32c family supports a wide range of non-orthogonal
1641    addressing modes, including the ability to double-indirect on *some*
1642    of them.  Not all insns support all modes, either, but we rely on
1643    predicates and constraints to deal with that.  */
1644 #undef TARGET_LEGITIMATE_ADDRESS_P
1645 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1646 bool
1647 m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1648 {
1649   int mode_adjust;
1650   if (CONSTANT_P (x))
1651     return 1;
1652 
1653   if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
1654     return 0;
1655   if (TARGET_A24 && GET_MODE (x) != PSImode)
1656     return 0;
1657 
1658   /* Wide references to memory will be split after reload, so we must
1659      ensure that all parts of such splits remain legitimate
1660      addresses.  */
1661   mode_adjust = GET_MODE_SIZE (mode) - 1;
1662 
1663   /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1664   if (GET_CODE (x) == PRE_DEC
1665       || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1666     {
1667       return (GET_CODE (XEXP (x, 0)) == REG
1668 	      && REGNO (XEXP (x, 0)) == SP_REGNO);
1669     }
1670 
1671 #if 0
1672   /* This is the double indirection detection, but it currently
1673      doesn't work as cleanly as this code implies, so until we've had
1674      a chance to debug it, leave it disabled.  */
1675   if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1676     {
1677 #if DEBUG_DOUBLE
1678       fprintf (stderr, "double indirect\n");
1679 #endif
1680       x = XEXP (x, 0);
1681     }
1682 #endif
1683 
1684   encode_pattern (x);
1685   if (RTX_IS ("r"))
1686     {
1687       /* Most indexable registers can be used without displacements,
1688 	 although some of them will be emitted with an explicit zero
1689 	 to please the assembler.  */
1690       switch (REGNO (patternr[0]))
1691 	{
1692 	case A1_REGNO:
1693 	case SB_REGNO:
1694 	case FB_REGNO:
1695 	case SP_REGNO:
1696 	  if (TARGET_A16 && GET_MODE (x) == SImode)
1697 	    return 0;
1698 	  /* FALLTHRU */
1699 	case A0_REGNO:
1700 	  return 1;
1701 
1702 	default:
1703 	  if (IS_PSEUDO (patternr[0], strict))
1704 	    return 1;
1705 	  return 0;
1706 	}
1707     }
1708 
1709   if (TARGET_A16 && GET_MODE (x) == SImode)
1710     return 0;
1711 
1712   if (RTX_IS ("+ri"))
1713     {
1714       /* This is more interesting, because different base registers
1715 	 allow for different displacements - both range and signedness
1716 	 - and it differs from chip series to chip series too.  */
1717       int rn = REGNO (patternr[1]);
1718       HOST_WIDE_INT offs = INTVAL (patternr[2]);
1719       switch (rn)
1720 	{
1721 	case A0_REGNO:
1722 	case A1_REGNO:
1723 	case SB_REGNO:
1724 	  /* The syntax only allows positive offsets, but when the
1725 	     offsets span the entire memory range, we can simulate
1726 	     negative offsets by wrapping.  */
1727 	  if (TARGET_A16)
1728 	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
1729 	  if (rn == SB_REGNO)
1730 	    return (offs >= 0 && offs <= 65535 - mode_adjust);
1731 	  /* A0 or A1 */
1732 	  return (offs >= -16777216 && offs <= 16777215);
1733 
1734 	case FB_REGNO:
1735 	  if (TARGET_A16)
1736 	    return (offs >= -128 && offs <= 127 - mode_adjust);
1737 	  return (offs >= -65536 && offs <= 65535 - mode_adjust);
1738 
1739 	case SP_REGNO:
1740 	  return (offs >= -128 && offs <= 127 - mode_adjust);
1741 
1742 	default:
1743 	  if (IS_PSEUDO (patternr[1], strict))
1744 	    return 1;
1745 	  return 0;
1746 	}
1747     }
1748   if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1749     {
1750       rtx reg = patternr[1];
1751 
1752       /* We don't know where the symbol is, so only allow base
1753 	 registers which support displacements spanning the whole
1754 	 address range.  */
1755       switch (REGNO (reg))
1756 	{
1757 	case A0_REGNO:
1758 	case A1_REGNO:
1759 	  /* $sb needs a secondary reload, but since it's involved in
1760 	     memory address reloads too, we don't deal with it very
1761 	     well.  */
1762 	  /*    case SB_REGNO: */
1763 	  return 1;
1764 	default:
1765 	  if (GET_CODE (reg) == SUBREG)
1766 	    return 0;
1767 	  if (IS_PSEUDO (reg, strict))
1768 	    return 1;
1769 	  return 0;
1770 	}
1771     }
1772   return 0;
1773 }
1774 
1775 /* Implements REG_OK_FOR_BASE_P.  */
1776 int
1777 m32c_reg_ok_for_base_p (rtx x, int strict)
1778 {
1779   if (GET_CODE (x) != REG)
1780     return 0;
1781   switch (REGNO (x))
1782     {
1783     case A0_REGNO:
1784     case A1_REGNO:
1785     case SB_REGNO:
1786     case FB_REGNO:
1787     case SP_REGNO:
1788       return 1;
1789     default:
1790       if (IS_PSEUDO (x, strict))
1791 	return 1;
1792       return 0;
1793     }
1794 }
1795 
1796 /* We have three choices of fb->aN offset.  If we choose -128,
1797    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1798    like this:
1799        EB 4B FF    mova    -128[$fb],$a0
1800        D8 0C FF FF mov.w:Q #0,-1[$a0]
1801 
1802    Alternately, we subtract the frame size, and hopefully use 8-bit aN
1803    displacements:
1804        7B F4       stc $fb,$a0
1805        77 54 00 01 sub #256,$a0
1806        D8 08 01    mov.w:Q #0,1[$a0]
1807 
1808    If we don't offset (i.e. offset by zero), we end up with:
1809        7B F4       stc $fb,$a0
1810        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1811 
1812    We have to subtract *something* so that we have a PLUS rtx to mark
1813    that we've done this reload.  The -128 offset will never result in
1814    an 8-bit aN offset, and the payoff for the second case is five
1815    loads *if* those loads are within 256 bytes of the other end of the
1816    frame, so the third case seems best.  Note that we subtract the
1817    zero, but detect that in the addhi3 pattern.  */
1818 
1819 #define BIG_FB_ADJ 0
1820 
1821 /* Implements LEGITIMIZE_ADDRESS.  The only addresses we really have to
1822    worry about are frame base offsets, as $fb has a limited
1823    displacement range.  We deal with this by attempting to reload $fb
1824    itself into an address register; that seems to result in the best
1825    code.  */
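/* A sketch of the transformation below, assuming a frame offset
   outside the signed 8-bit range that is checked for:

       (plus (reg fb) (const_int 200))
   becomes
       (set (reg tmp) (reg fb))
       (plus (reg tmp) (const_int 200))

   i.e. $fb is copied into a fresh pseudo (later allocated to an
   address register) and the displacement itself is left alone.  */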
1826 #undef TARGET_LEGITIMIZE_ADDRESS
1827 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1828 static rtx
1829 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1830 			 machine_mode mode)
1831 {
1832 #if DEBUG0
1833   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1834   debug_rtx (x);
1835   fprintf (stderr, "\n");
1836 #endif
1837 
1838   if (GET_CODE (x) == PLUS
1839       && GET_CODE (XEXP (x, 0)) == REG
1840       && REGNO (XEXP (x, 0)) == FB_REGNO
1841       && GET_CODE (XEXP (x, 1)) == CONST_INT
1842       && (INTVAL (XEXP (x, 1)) < -128
1843 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1844     {
1845       /* reload FB to A_REGS */
1846       rtx temp = gen_reg_rtx (Pmode);
1847       x = copy_rtx (x);
1848       emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1849       XEXP (x, 0) = temp;
1850     }
1851 
1852   return x;
1853 }
1854 
1855 /* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
1856 int
1857 m32c_legitimize_reload_address (rtx * x,
1858 				machine_mode mode,
1859 				int opnum,
1860 				int type, int ind_levels ATTRIBUTE_UNUSED)
1861 {
1862 #if DEBUG0
1863   fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1864 	   mode_name[mode]);
1865   debug_rtx (*x);
1866 #endif
1867 
1868   /* At one point, this function tried to get $fb copied to an address
1869      register, which in theory would maximize sharing, but gcc was
1870      *also* still trying to reload the whole address, and we'd run out
1871      of address registers.  So we let gcc do the naive (but safe)
1872      reload instead, when the above function doesn't handle it for
1873      us.
1874 
1875      The code below is a second attempt at the above.  */
1876 
1877   if (GET_CODE (*x) == PLUS
1878       && GET_CODE (XEXP (*x, 0)) == REG
1879       && REGNO (XEXP (*x, 0)) == FB_REGNO
1880       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1881       && (INTVAL (XEXP (*x, 1)) < -128
1882 	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1883     {
1884       rtx sum;
1885       int offset = INTVAL (XEXP (*x, 1));
1886       int adjustment = -BIG_FB_ADJ;
1887 
1888       sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1889 			  GEN_INT (adjustment));
1890       *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1891       if (type == RELOAD_OTHER)
1892 	type = RELOAD_FOR_OTHER_ADDRESS;
1893       push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1894 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1895 		   (enum reload_type) type);
1896       return 1;
1897     }
1898 
1899   if (GET_CODE (*x) == PLUS
1900       && GET_CODE (XEXP (*x, 0)) == PLUS
1901       && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
1902       && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
1903       && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
1904       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1905       )
1906     {
1907       if (type == RELOAD_OTHER)
1908 	type = RELOAD_FOR_OTHER_ADDRESS;
1909       push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
1910 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1911 		   (enum reload_type) type);
1912       return 1;
1913     }
1914 
1915   if (TARGET_A24 && GET_MODE (*x) == PSImode)
1916     {
1917       push_reload (*x, NULL_RTX, x, NULL,
1918 		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
1919 		   (enum reload_type) type);
1920       return 1;
1921     }
1922 
1923   return 0;
1924 }
1925 
1926 /* Return the appropriate mode for a named address pointer.  */
1927 #undef TARGET_ADDR_SPACE_POINTER_MODE
1928 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1929 static scalar_int_mode
1930 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1931 {
1932   switch (addrspace)
1933     {
1934     case ADDR_SPACE_GENERIC:
1935       return TARGET_A24 ? PSImode : HImode;
1936     case ADDR_SPACE_FAR:
1937       return SImode;
1938     default:
1939       gcc_unreachable ();
1940     }
1941 }
1942 
1943 /* Return the appropriate mode for a named address address.  */
1944 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1945 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1946 static scalar_int_mode
1947 m32c_addr_space_address_mode (addr_space_t addrspace)
1948 {
1949   switch (addrspace)
1950     {
1951     case ADDR_SPACE_GENERIC:
1952       return TARGET_A24 ? PSImode : HImode;
1953     case ADDR_SPACE_FAR:
1954       return SImode;
1955     default:
1956       gcc_unreachable ();
1957     }
1958 }
1959 
1960 /* Like m32c_legitimate_address_p, except with named addresses.  */
1961 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1962 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1963   m32c_addr_space_legitimate_address_p
1964 static bool
1965 m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
1966 				      bool strict, addr_space_t as)
1967 {
1968   if (as == ADDR_SPACE_FAR)
1969     {
1970       if (TARGET_A24)
1971 	return 0;
1972       encode_pattern (x);
1973       if (RTX_IS ("r"))
1974 	{
1975 	  if (GET_MODE (x) != SImode)
1976 	    return 0;
1977 	  switch (REGNO (patternr[0]))
1978 	    {
1979 	    case A0_REGNO:
1980 	      return 1;
1981 
1982 	    default:
1983 	      if (IS_PSEUDO (patternr[0], strict))
1984 		return 1;
1985 	      return 0;
1986 	    }
1987 	}
1988       if (RTX_IS ("+^Sri"))
1989 	{
1990 	  int rn = REGNO (patternr[3]);
1991 	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
1992 	  if (GET_MODE (patternr[3]) != HImode)
1993 	    return 0;
1994 	  switch (rn)
1995 	    {
1996 	    case A0_REGNO:
1997 	      return (offs >= 0 && offs <= 0xfffff);
1998 
1999 	    default:
2000 	      if (IS_PSEUDO (patternr[3], strict))
2001 		return 1;
2002 	      return 0;
2003 	    }
2004 	}
2005       if (RTX_IS ("+^Srs"))
2006 	{
2007 	  int rn = REGNO (patternr[3]);
2008 	  if (GET_MODE (patternr[3]) != HImode)
2009 	    return 0;
2010 	  switch (rn)
2011 	    {
2012 	    case A0_REGNO:
2013 	      return 1;
2014 
2015 	    default:
2016 	      if (IS_PSEUDO (patternr[3], strict))
2017 		return 1;
2018 	      return 0;
2019 	    }
2020 	}
2021       if (RTX_IS ("+^S+ris"))
2022 	{
2023 	  int rn = REGNO (patternr[4]);
2024 	  if (GET_MODE (patternr[4]) != HImode)
2025 	    return 0;
2026 	  switch (rn)
2027 	    {
2028 	    case A0_REGNO:
2029 	      return 1;
2030 
2031 	    default:
2032 	      if (IS_PSEUDO (patternr[4], strict))
2033 		return 1;
2034 	      return 0;
2035 	    }
2036 	}
2037       if (RTX_IS ("s"))
2038 	{
2039 	  return 1;
2040 	}
2041       return 0;
2042     }
2043 
2044   else if (as != ADDR_SPACE_GENERIC)
2045     gcc_unreachable ();
2046 
2047   return m32c_legitimate_address_p (mode, x, strict);
2048 }
2049 
2050 /* Like m32c_legitimate_address, except with named address support.  */
2051 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2052 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2053 static rtx
2054 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2055 				    addr_space_t as)
2056 {
2057   if (as != ADDR_SPACE_GENERIC)
2058     {
2059 #if DEBUG0
2060       fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2061       debug_rtx (x);
2062       fprintf (stderr, "\n");
2063 #endif
2064 
2065       if (GET_CODE (x) != REG)
2066 	{
2067 	  x = force_reg (SImode, x);
2068 	}
2069       return x;
2070     }
2071 
2072   return m32c_legitimize_address (x, oldx, mode);
2073 }
2074 
2075 /* Determine if one named address space is a subset of another.  */
2076 #undef TARGET_ADDR_SPACE_SUBSET_P
2077 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2078 static bool
2079 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2080 {
2081   gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2082   gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2083 
2084   if (subset == superset)
2085     return true;
2086 
2087   else
2088     return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2089 }
2090 
2091 #undef TARGET_ADDR_SPACE_CONVERT
2092 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2093 /* Convert from one address space to another.  */
2094 static rtx
2095 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2096 {
2097   addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2098   addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2099   rtx result;
2100 
2101   gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2102   gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2103 
2104   if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2105     {
2106       /* This is unpredictable, as we're truncating off usable address
2107 	 bits.  */
2108 
2109       result = gen_reg_rtx (HImode);
2110       emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2111       return result;
2112     }
2113   else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2114     {
2115       /* This always works.  */
2116       result = gen_reg_rtx (SImode);
2117       emit_insn (gen_zero_extendhisi2 (result, op));
2118       return result;
2119     }
2120   else
2121     gcc_unreachable ();
2122 }
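
/* A user-level illustration (not part of this file): given

       char __far *fp;
       char *np;

   the assignment fp = np takes the zero-extend branch above (HImode
   to SImode), while casting fp back to a generic pointer takes the
   truncating branch and is only predictable when the far address
   happens to fit in 16 bits.  */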
2123 
2124 /* Condition Code Status */
2125 
2126 #undef TARGET_FIXED_CONDITION_CODE_REGS
2127 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2128 static bool
2129 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2130 {
2131   *p1 = FLG_REGNO;
2132   *p2 = INVALID_REGNUM;
2133   return true;
2134 }
2135 
2136 /* Describing Relative Costs of Operations */
2137 
2138 /* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
2139    prohibitively expensive, like trying to put QIs in r2/r3 (there are
2140    no opcodes to do that).  We also discourage use of mem* registers
2141    since they're really memory.  */
2142 
2143 #undef TARGET_REGISTER_MOVE_COST
2144 #define TARGET_REGISTER_MOVE_COST m32c_register_move_cost
2145 
2146 static int
2147 m32c_register_move_cost (machine_mode mode, reg_class_t from,
2148 			 reg_class_t to)
2149 {
2150   int cost = COSTS_N_INSNS (3);
2151   HARD_REG_SET cc;
2152 
2153 /* FIXME: pick real values, but not 2 for now.  */
2154   cc = reg_class_contents[from] | reg_class_contents[(int) to];
2155 
2156   if (mode == QImode
2157       && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
2158     {
2159       if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
2160 	cost = COSTS_N_INSNS (1000);
2161       else
2162 	cost = COSTS_N_INSNS (80);
2163     }
2164 
2165   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2166     cost = COSTS_N_INSNS (1000);
2167 
2168   if (reg_classes_intersect_p (from, CR_REGS))
2169     cost += COSTS_N_INSNS (5);
2170 
2171   if (reg_classes_intersect_p (to, CR_REGS))
2172     cost += COSTS_N_INSNS (5);
2173 
2174   if (from == MEM_REGS || to == MEM_REGS)
2175     cost += COSTS_N_INSNS (50);
2176   else if (reg_classes_intersect_p (from, MEM_REGS)
2177 	   || reg_classes_intersect_p (to, MEM_REGS))
2178     cost += COSTS_N_INSNS (10);
2179 
2180 #if DEBUG0
2181   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2182 	   mode_name[mode], class_names[(int) from], class_names[(int) to],
2183 	   cost);
2184 #endif
2185   return cost;
2186 }
2187 
2188 /*  Implements TARGET_MEMORY_MOVE_COST.  */
2189 
2190 #undef TARGET_MEMORY_MOVE_COST
2191 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2192 
2193 static int
2194 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2195 		       reg_class_t rclass ATTRIBUTE_UNUSED,
2196 		       bool in ATTRIBUTE_UNUSED)
2197 {
2198   /* FIXME: pick real values.  */
2199   return COSTS_N_INSNS (10);
2200 }
2201 
2202 /* Here we try to describe when we use multiple opcodes for one RTX so
2203    that gcc knows when to use them.  */
2204 #undef TARGET_RTX_COSTS
2205 #define TARGET_RTX_COSTS m32c_rtx_costs
2206 static bool
2207 m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
2208 		int opno ATTRIBUTE_UNUSED,
2209 		int *total, bool speed ATTRIBUTE_UNUSED)
2210 {
2211   int code = GET_CODE (x);
2212   switch (code)
2213     {
2214     case REG:
2215       if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2216 	*total += COSTS_N_INSNS (500);
2217       else
2218 	*total += COSTS_N_INSNS (1);
2219       return true;
2220 
2221     case ASHIFT:
2222     case LSHIFTRT:
2223     case ASHIFTRT:
2224       if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2225 	{
2226 	  /* mov.b r1l, r1h */
2227 	  *total +=  COSTS_N_INSNS (1);
2228 	  return true;
2229 	}
2230       if (INTVAL (XEXP (x, 1)) > 8
2231 	  || INTVAL (XEXP (x, 1)) < -8)
2232 	{
2233 	  /* mov.b #N, r1l */
2234 	  /* mov.b r1l, r1h */
2235 	  *total +=  COSTS_N_INSNS (2);
2236 	  return true;
2237 	}
2238       return true;
2239 
2240     case LE:
2241     case LEU:
2242     case LT:
2243     case LTU:
2244     case GT:
2245     case GTU:
2246     case GE:
2247     case GEU:
2248     case NE:
2249     case EQ:
2250       if (outer_code == SET)
2251 	{
2252 	  *total += COSTS_N_INSNS (2);
2253 	  return true;
2254 	}
2255       break;
2256 
2257     case ZERO_EXTRACT:
2258       {
2259 	rtx dest = XEXP (x, 0);
2260 	rtx addr = XEXP (dest, 0);
2261 	switch (GET_CODE (addr))
2262 	  {
2263 	  case CONST_INT:
2264 	    *total += COSTS_N_INSNS (1);
2265 	    break;
2266 	  case SYMBOL_REF:
2267 	    *total += COSTS_N_INSNS (3);
2268 	    break;
2269 	  default:
2270 	    *total += COSTS_N_INSNS (2);
2271 	    break;
2272 	  }
2273 	return true;
2274       }
2275       break;
2276 
2277     default:
2278       /* Reasonable default.  */
2279       if (TARGET_A16 && mode == SImode)
2280 	*total += COSTS_N_INSNS (2);
2281       break;
2282     }
2283   return false;
2284 }
2285 
2286 #undef TARGET_ADDRESS_COST
2287 #define TARGET_ADDRESS_COST m32c_address_cost
2288 static int
2289 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2290 		   addr_space_t as ATTRIBUTE_UNUSED,
2291 		   bool speed ATTRIBUTE_UNUSED)
2292 {
2293   int i;
2294   /*  fprintf(stderr, "\naddress_cost\n");
2295       debug_rtx(addr);*/
2296   switch (GET_CODE (addr))
2297     {
2298     case CONST_INT:
2299       i = INTVAL (addr);
2300       if (i == 0)
2301 	return COSTS_N_INSNS(1);
2302       if (i > 0 && i <= 255)
2303 	return COSTS_N_INSNS(2);
2304       if (i > 0 && i <= 65535)
2305 	return COSTS_N_INSNS(3);
2306       return COSTS_N_INSNS(4);
2307     case SYMBOL_REF:
2308       return COSTS_N_INSNS(4);
2309     case REG:
2310       return COSTS_N_INSNS(1);
2311     case PLUS:
2312       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2313 	{
2314 	  i = INTVAL (XEXP (addr, 1));
2315 	  if (i == 0)
2316 	    return COSTS_N_INSNS(1);
2317 	  if (i > 0 && i <= 255)
2318 	    return COSTS_N_INSNS(2);
2319 	  if (i > 0 && i <= 65535)
2320 	    return COSTS_N_INSNS(3);
2321 	}
2322       return COSTS_N_INSNS(4);
2323     default:
2324       return 0;
2325     }
2326 }
2327 
2328 /* Defining the Output Assembler Language */
2329 
2330 /* Output of Data */
2331 
2332 /* We may have 24-bit sizes, since that is the native address size.
2333    Currently unused, but provided for completeness.  */
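/* For example, a 3-byte (native pointer sized) initializer would be
   emitted by the hook below as

       .3byte  sym

   where "sym" is just a placeholder; other sizes fall back to
   default_assemble_integer.  */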
2334 #undef TARGET_ASM_INTEGER
2335 #define TARGET_ASM_INTEGER m32c_asm_integer
2336 static bool
2337 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2338 {
2339   switch (size)
2340     {
2341     case 3:
2342       fprintf (asm_out_file, "\t.3byte\t");
2343       output_addr_const (asm_out_file, x);
2344       fputc ('\n', asm_out_file);
2345       return true;
2346     case 4:
2347       if (GET_CODE (x) == SYMBOL_REF)
2348 	{
2349 	  fprintf (asm_out_file, "\t.long\t");
2350 	  output_addr_const (asm_out_file, x);
2351 	  fputc ('\n', asm_out_file);
2352 	  return true;
2353 	}
2354       break;
2355     }
2356   return default_assemble_integer (x, size, aligned_p);
2357 }
2358 
2359 /* Output of Assembler Instructions */
2360 
2361 /* We use a lookup table because the addressing modes are non-orthogonal.  */
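/* How to read the table below (illustrative; m32c_print_operand is
   the interpreter): an entry such as

       { 0, "m+ri", "3[2]" }

   applies when there is no operand-code modifier and the encoded
   pattern is a MEM of (PLUS reg int); digits in the format string
   index patternr[], so patternr[3] (the integer) is printed, then a
   literal '[', then patternr[2] (the register), giving e.g. "5[a0]".  */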
2362 
2363 static struct
2364 {
2365   char code;
2366   char const *pattern;
2367   char const *format;
2368 }
2369 const conversions[] = {
2370   { 0, "r", "0" },
2371 
2372   { 0, "mr", "z[1]" },
2373   { 0, "m+ri", "3[2]" },
2374   { 0, "m+rs", "3[2]" },
2375   { 0, "m+^Zrs", "5[4]" },
2376   { 0, "m+^Zri", "5[4]" },
2377   { 0, "m+^Z+ris", "7+6[5]" },
2378   { 0, "m+^Srs", "5[4]" },
2379   { 0, "m+^Sri", "5[4]" },
2380   { 0, "m+^S+ris", "7+6[5]" },
2381   { 0, "m+r+si", "4+5[2]" },
2382   { 0, "ms", "1" },
2383   { 0, "mi", "1" },
2384   { 0, "m+si", "2+3" },
2385 
2386   { 0, "mmr", "[z[2]]" },
2387   { 0, "mm+ri", "[4[3]]" },
2388   { 0, "mm+rs", "[4[3]]" },
2389   { 0, "mm+r+si", "[5+6[3]]" },
2390   { 0, "mms", "[[2]]" },
2391   { 0, "mmi", "[[2]]" },
2392   { 0, "mm+si", "[4[3]]" },
2393 
2394   { 0, "i", "#0" },
2395   { 0, "s", "#0" },
2396   { 0, "+si", "#1+2" },
2397   { 0, "l", "#0" },
2398 
2399   { 'l', "l", "0" },
2400   { 'd', "i", "0" },
2401   { 'd', "s", "0" },
2402   { 'd', "+si", "1+2" },
2403   { 'D', "i", "0" },
2404   { 'D', "s", "0" },
2405   { 'D', "+si", "1+2" },
2406   { 'x', "i", "#0" },
2407   { 'X', "i", "#0" },
2408   { 'm', "i", "#0" },
2409   { 'b', "i", "#0" },
2410   { 'B', "i", "0" },
2411   { 'p', "i", "0" },
2412 
2413   { 0, 0, 0 }
2414 };
2415 
2416 /* This is ordered according to the bitfield that pushm/popm use.  */
2417 static char const *pushm_regs[] = {
2418   "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2419 };
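
/* Worked example for the 'p' operand code in m32c_print_operand: a
   mask of 0xc0 has bits 7 and 6 set, indexing pushm_regs[7] and
   pushm_regs[6], so the operand prints as "r0,r1" -- the order that
   pushm/popm expect.  */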
2420 
2421 /* Implements TARGET_PRINT_OPERAND.  */
2422 
2423 #undef TARGET_PRINT_OPERAND
2424 #define TARGET_PRINT_OPERAND m32c_print_operand
2425 
2426 static void
2427 m32c_print_operand (FILE * file, rtx x, int code)
2428 {
2429   int i, j, b;
2430   const char *comma;
2431   HOST_WIDE_INT ival;
2432   int unsigned_const = 0;
2433   int force_sign;
2434 
2435   /* Multiplies; constants are converted to sign-extended format but
2436    we need unsigned, so 'u' and 'U' tell us what size unsigned we
2437    need.  */
2438   if (code == 'u')
2439     {
2440       unsigned_const = 2;
2441       code = 0;
2442     }
2443   if (code == 'U')
2444     {
2445       unsigned_const = 1;
2446       code = 0;
2447     }
2448   /* This one is only for debugging; you can put it in a pattern to
2449      force this error.  */
2450   if (code == '!')
2451     {
2452       fprintf (stderr, "dj: unreviewed pattern:");
2453       if (current_output_insn)
2454 	debug_rtx (current_output_insn);
2455       gcc_unreachable ();
2456     }
2457   /* PSImode operations are either .w or .l depending on the target.  */
2458   if (code == '&')
2459     {
2460       if (TARGET_A16)
2461 	fprintf (file, "w");
2462       else
2463 	fprintf (file, "l");
2464       return;
2465     }
2466   /* Inverted conditionals.  */
2467   if (code == 'C')
2468     {
2469       switch (GET_CODE (x))
2470 	{
2471 	case LE:
2472 	  fputs ("gt", file);
2473 	  break;
2474 	case LEU:
2475 	  fputs ("gtu", file);
2476 	  break;
2477 	case LT:
2478 	  fputs ("ge", file);
2479 	  break;
2480 	case LTU:
2481 	  fputs ("geu", file);
2482 	  break;
2483 	case GT:
2484 	  fputs ("le", file);
2485 	  break;
2486 	case GTU:
2487 	  fputs ("leu", file);
2488 	  break;
2489 	case GE:
2490 	  fputs ("lt", file);
2491 	  break;
2492 	case GEU:
2493 	  fputs ("ltu", file);
2494 	  break;
2495 	case NE:
2496 	  fputs ("eq", file);
2497 	  break;
2498 	case EQ:
2499 	  fputs ("ne", file);
2500 	  break;
2501 	default:
2502 	  gcc_unreachable ();
2503 	}
2504       return;
2505     }
2506   /* Regular conditionals.  */
2507   if (code == 'c')
2508     {
2509       switch (GET_CODE (x))
2510 	{
2511 	case LE:
2512 	  fputs ("le", file);
2513 	  break;
2514 	case LEU:
2515 	  fputs ("leu", file);
2516 	  break;
2517 	case LT:
2518 	  fputs ("lt", file);
2519 	  break;
2520 	case LTU:
2521 	  fputs ("ltu", file);
2522 	  break;
2523 	case GT:
2524 	  fputs ("gt", file);
2525 	  break;
2526 	case GTU:
2527 	  fputs ("gtu", file);
2528 	  break;
2529 	case GE:
2530 	  fputs ("ge", file);
2531 	  break;
2532 	case GEU:
2533 	  fputs ("geu", file);
2534 	  break;
2535 	case NE:
2536 	  fputs ("ne", file);
2537 	  break;
2538 	case EQ:
2539 	  fputs ("eq", file);
2540 	  break;
2541 	default:
2542 	  gcc_unreachable ();
2543 	}
2544       return;
2545     }
2546   /* Used in negsi2 to do HImode ops on the two parts of an SImode
2547      operand.  */
2548   if (code == 'h' && GET_MODE (x) == SImode)
2549     {
2550       x = m32c_subreg (HImode, x, SImode, 0);
2551       code = 0;
2552     }
2553   if (code == 'H' && GET_MODE (x) == SImode)
2554     {
2555       x = m32c_subreg (HImode, x, SImode, 2);
2556       code = 0;
2557     }
2558   if (code == 'h' && GET_MODE (x) == HImode)
2559     {
2560       x = m32c_subreg (QImode, x, HImode, 0);
2561       code = 0;
2562     }
2563   if (code == 'H' && GET_MODE (x) == HImode)
2564     {
2565       /* We can't actually represent this as an rtx.  Do it here.  */
2566       if (GET_CODE (x) == REG)
2567 	{
2568 	  switch (REGNO (x))
2569 	    {
2570 	    case R0_REGNO:
2571 	      fputs ("r0h", file);
2572 	      return;
2573 	    case R1_REGNO:
2574 	      fputs ("r1h", file);
2575 	      return;
2576 	    default:
2577 	      gcc_unreachable();
2578 	    }
2579 	}
2580       /* This should be a MEM.  */
2581       x = m32c_subreg (QImode, x, HImode, 1);
2582       code = 0;
2583     }
2584   /* This is for BMcond, which always wants word register names.  */
2585   if (code == 'h' && GET_MODE (x) == QImode)
2586     {
2587       if (GET_CODE (x) == REG)
2588 	x = gen_rtx_REG (HImode, REGNO (x));
2589       code = 0;
2590     }
2591   /* 'x' and 'X' need to be ignored for non-immediates.  */
2592   if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2593     code = 0;
2594 
2595   encode_pattern (x);
2596   force_sign = 0;
2597   for (i = 0; conversions[i].pattern; i++)
2598     if (conversions[i].code == code
2599 	&& streq (conversions[i].pattern, pattern))
2600       {
2601 	for (j = 0; conversions[i].format[j]; j++)
2602 	  /* backslash quotes the next character in the output pattern.  */
2603 	  if (conversions[i].format[j] == '\\')
2604 	    {
2605 	      fputc (conversions[i].format[j + 1], file);
2606 	      j++;
2607 	    }
2608 	  /* Digits in the output pattern indicate that the
2609 	     corresponding RTX is to be output at that point.  */
2610 	  else if (ISDIGIT (conversions[i].format[j]))
2611 	    {
2612 	      rtx r = patternr[conversions[i].format[j] - '0'];
2613 	      switch (GET_CODE (r))
2614 		{
2615 		case REG:
2616 		  fprintf (file, "%s",
2617 			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
2618 		  break;
2619 		case CONST_INT:
2620 		  switch (code)
2621 		    {
2622 		    case 'b':
2623 		    case 'B':
2624 		      {
2625 			int v = INTVAL (r);
2626 			int i = (int) exact_log2 (v);
2627 			if (i == -1)
2628 			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2629 			if (i == -1)
2630 			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2631 			/* Bit position.  */
2632 			fprintf (file, "%d", i);
2633 		      }
2634 		      break;
2635 		    case 'x':
2636 		      /* Unsigned byte.  */
2637 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2638 			       INTVAL (r) & 0xff);
2639 		      break;
2640 		    case 'X':
2641 		      /* Unsigned word.  */
2642 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2643 			       INTVAL (r) & 0xffff);
2644 		      break;
2645 		    case 'p':
2646 		      /* pushm and popm encode a register set into a single byte.  */
2647 		      comma = "";
2648 		      for (b = 7; b >= 0; b--)
2649 			if (INTVAL (r) & (1 << b))
2650 			  {
2651 			    fprintf (file, "%s%s", comma, pushm_regs[b]);
2652 			    comma = ",";
2653 			  }
2654 		      break;
2655 		    case 'm':
2656 		      /* "Minus".  Output -X  */
2657 		      ival = (-INTVAL (r) & 0xffff);
2658 		      if (ival & 0x8000)
2659 			ival = ival - 0x10000;
2660 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2661 		      break;
2662 		    default:
2663 		      ival = INTVAL (r);
2664 		      if (conversions[i].format[j + 1] == '[' && ival < 0)
2665 			{
2666 			  /* We can simulate negative displacements by
2667 			     taking advantage of address space
2668 			     wrapping when the offset can span the
2669 			     entire address range.  */
2670 			  rtx base =
2671 			    patternr[conversions[i].format[j + 2] - '0'];
2672 			  if (GET_CODE (base) == REG)
2673 			    switch (REGNO (base))
2674 			      {
2675 			      case A0_REGNO:
2676 			      case A1_REGNO:
2677 				if (TARGET_A24)
2678 				  ival = 0x1000000 + ival;
2679 				else
2680 				  ival = 0x10000 + ival;
2681 				break;
2682 			      case SB_REGNO:
2683 				if (TARGET_A16)
2684 				  ival = 0x10000 + ival;
2685 				break;
2686 			      }
2687 			}
2688 		      else if (code == 'd' && ival < 0 && j == 0)
2689 			/* The "mova" opcode is used to do addition by
2690 			   computing displacements, but again, we need
2691 			   displacements to be unsigned *if* they're
2692 			   the only component of the displacement
2693 			   (i.e. no "symbol-4" type displacement).  */
2694 			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2695 
2696 		      if (conversions[i].format[j] == '0')
2697 			{
2698 			  /* More conversions to unsigned.  */
2699 			  if (unsigned_const == 2)
2700 			    ival &= 0xffff;
2701 			  if (unsigned_const == 1)
2702 			    ival &= 0xff;
2703 			}
2704 		      if (streq (conversions[i].pattern, "mi")
2705 			  || streq (conversions[i].pattern, "mmi"))
2706 			{
2707 			  /* Integers used as addresses are unsigned.  */
2708 			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2709 			}
2710 		      if (force_sign && ival >= 0)
2711 			fputc ('+', file);
2712 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2713 		      break;
2714 		    }
2715 		  break;
2716 		case CONST_DOUBLE:
2717 		  /* We don't have const_double constants.  If it
2718 		     happens, make it obvious.  */
2719 		  fprintf (file, "[const_double 0x%lx]",
2720 			   (unsigned long) CONST_DOUBLE_HIGH (r));
2721 		  break;
2722 		case SYMBOL_REF:
2723 		  assemble_name (file, XSTR (r, 0));
2724 		  break;
2725 		case LABEL_REF:
2726 		  output_asm_label (r);
2727 		  break;
2728 		default:
2729 		  fprintf (stderr, "don't know how to print this operand:");
2730 		  debug_rtx (r);
2731 		  gcc_unreachable ();
2732 		}
2733 	    }
2734 	  else
2735 	    {
2736 	      if (conversions[i].format[j] == 'z')
2737 		{
2738 		  /* Some addressing modes *must* have a displacement,
2739 		     so insert a zero here if needed.  */
2740 		  int k;
2741 		  for (k = j + 1; conversions[i].format[k]; k++)
2742 		    if (ISDIGIT (conversions[i].format[k]))
2743 		      {
2744 			rtx reg = patternr[conversions[i].format[k] - '0'];
2745 			if (GET_CODE (reg) == REG
2746 			    && (REGNO (reg) == SB_REGNO
2747 				|| REGNO (reg) == FB_REGNO
2748 				|| REGNO (reg) == SP_REGNO))
2749 			  fputc ('0', file);
2750 		      }
2751 		  continue;
2752 		}
2753 	      /* Signed displacements off symbols need to have signs
2754 		 blended cleanly.  */
2755 	      if (conversions[i].format[j] == '+'
2756 		  && (!code || code == 'D' || code == 'd')
2757 		  && ISDIGIT (conversions[i].format[j + 1])
2758 		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2759 		      == CONST_INT))
2760 		{
2761 		  force_sign = 1;
2762 		  continue;
2763 		}
2764 	      fputc (conversions[i].format[j], file);
2765 	    }
2766 	break;
2767       }
2768   if (!conversions[i].pattern)
2769     {
2770       fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2771 	       pattern);
2772       debug_rtx (x);
2773       fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2774     }
2775 
2776   return;
2777 }
2778 
2779 /* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.
2780 
2781    See m32c_print_operand above for descriptions of what these do.  */
2782 
2783 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
2784 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p
2785 
2786 static bool
2787 m32c_print_operand_punct_valid_p (unsigned char c)
2788 {
2789   if (c == '&' || c == '!')
2790     return true;
2791 
2792   return false;
2793 }
2794 
2795 /* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2796 
2797 #undef TARGET_PRINT_OPERAND_ADDRESS
2798 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2799 
2800 static void
2801 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2802 {
2803   if (GET_CODE (address) == MEM)
2804     address = XEXP (address, 0);
2805   else
2806     /* cf: gcc.dg/asm-4.c.  */
2807     gcc_assert (GET_CODE (address) == REG);
2808 
2809   m32c_print_operand (stream, address, 0);
2810 }
2811 
2812 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2813    differently than general registers.  */
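/* For example, pushing $flg emits "pushc flg", while pushing a
   two-byte general register such as $r1 emits "push.w r1", the size
   letter being picked out of " bwll" by reg_push_size().  */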
2814 void
2815 m32c_output_reg_push (FILE * s, int regno)
2816 {
2817   if (regno == FLG_REGNO)
2818     fprintf (s, "\tpushc\tflg\n");
2819   else
2820     fprintf (s, "\tpush.%c\t%s\n",
2821 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2822 }
2823 
2824 /* Likewise for ASM_OUTPUT_REG_POP.  */
2825 void
2826 m32c_output_reg_pop (FILE * s, int regno)
2827 {
2828   if (regno == FLG_REGNO)
2829     fprintf (s, "\tpopc\tflg\n");
2830   else
2831     fprintf (s, "\tpop.%c\t%s\n",
2832 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2833 }
2834 
2835 /* Defining target-specific uses of `__attribute__' */
2836 
2837 /* Used to simplify the logic below.  Find the attributes wherever
2838    they may be.  */
2839 #define M32C_ATTRIBUTES(decl) \
2840   (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2841                 : DECL_ATTRIBUTES (decl) \
2842                   ? (DECL_ATTRIBUTES (decl)) \
2843 		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2844 
2845 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2846 static int
2847 interrupt_p (tree node ATTRIBUTE_UNUSED)
2848 {
2849   tree list = M32C_ATTRIBUTES (node);
2850   while (list)
2851     {
2852       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2853 	return 1;
2854       list = TREE_CHAIN (list);
2855     }
2856   return fast_interrupt_p (node);
2857 }
2858 
2859 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
2860 static int
2861 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2862 {
2863   tree list = M32C_ATTRIBUTES (node);
2864   while (list)
2865     {
2866       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2867 	return 1;
2868       list = TREE_CHAIN (list);
2869     }
2870   return 0;
2871 }
2872 
2873 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
2874 static int
2875 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2876 {
2877   tree list = M32C_ATTRIBUTES (node);
2878   while (list)
2879     {
2880       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2881 	return 1;
2882       list = TREE_CHAIN (list);
2883     }
2884   return 0;
2885 }
2886 
2887 static tree
2888 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2889 		   tree name ATTRIBUTE_UNUSED,
2890 		   tree args ATTRIBUTE_UNUSED,
2891 		   int flags ATTRIBUTE_UNUSED,
2892 		   bool * no_add_attrs ATTRIBUTE_UNUSED)
2893 {
2894   return NULL_TREE;
2895 }
2896 
2897 /* Returns TRUE if the given tree has the "function_vector" attribute.  */
2898 int
2899 m32c_special_page_vector_p (tree func)
2900 {
2901   tree list;
2902 
2903   if (TREE_CODE (func) != FUNCTION_DECL)
2904     return 0;
2905 
2906   list = M32C_ATTRIBUTES (func);
2907   while (list)
2908     {
2909       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2910         return 1;
2911       list = TREE_CHAIN (list);
2912     }
2913   return 0;
2914 }
2915 
2916 static tree
2917 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2918                          tree name ATTRIBUTE_UNUSED,
2919                          tree args ATTRIBUTE_UNUSED,
2920                          int flags ATTRIBUTE_UNUSED,
2921                          bool * no_add_attrs ATTRIBUTE_UNUSED)
2922 {
2923   if (TARGET_R8C)
2924     {
2925       /* The attribute is not supported for the R8C target.  */
2926       warning (OPT_Wattributes,
2927                 "%qE attribute is not supported for R8C target",
2928                 name);
2929       *no_add_attrs = true;
2930     }
2931   else if (TREE_CODE (*node) != FUNCTION_DECL)
2932     {
2933       /* The attribute must be applied to functions only.  */
2934       warning (OPT_Wattributes,
2935                 "%qE attribute applies only to functions",
2936                 name);
2937       *no_add_attrs = true;
2938     }
2939   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2940     {
2941       /* The argument must be a constant integer.  */
2942       warning (OPT_Wattributes,
2943                 "%qE attribute argument not an integer constant",
2944                 name);
2945       *no_add_attrs = true;
2946     }
2947   else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2948            || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2949     {
2950       /* The argument value must be between 18 and 255.  */
2951       warning (OPT_Wattributes,
2952                 "%qE attribute argument should be between 18 and 255",
2953                 name);
2954       *no_add_attrs = true;
2955     }
2956   return NULL_TREE;
2957 }
2958 
2959 /* If the function referenced by X is assigned the 'function_vector'
2960    attribute, return its vector number; otherwise return zero.  */
2961 int
2962 current_function_special_page_vector (rtx x)
2963 {
2964   int num;
2965 
2966   if ((GET_CODE(x) == SYMBOL_REF)
2967       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2968     {
2969       tree list;
2970       tree t = SYMBOL_REF_DECL (x);
2971 
2972       if (TREE_CODE (t) != FUNCTION_DECL)
2973         return 0;
2974 
2975       list = M32C_ATTRIBUTES (t);
2976       while (list)
2977         {
2978           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2979             {
2980               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2981               return num;
2982             }
2983 
2984           list = TREE_CHAIN (list);
2985         }
2986 
2987       return 0;
2988     }
2989   else
2990     return 0;
2991 }
2992 
2993 #undef TARGET_ATTRIBUTE_TABLE
2994 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2995 static const struct attribute_spec m32c_attribute_table[] = {
2996   /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
2997        affects_type_identity, handler, exclude } */
2998   { "interrupt", 0, 0, false, false, false, false, interrupt_handler, NULL },
2999   { "bank_switch", 0, 0, false, false, false, false, interrupt_handler, NULL },
3000   { "fast_interrupt", 0, 0, false, false, false, false,
3001     interrupt_handler, NULL },
3002   { "function_vector", 1, 1, true,  false, false, false,
3003     function_vector_handler, NULL },
3004   { NULL, 0, 0, false, false, false, false, NULL, NULL }
3005 };
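
/* For illustration only (user-level usage, not part of this file),
   the attributes registered above are written as, e.g.,

       void __attribute__((interrupt)) timer_isr (void);
       void __attribute__((function_vector(20))) helper (void);

   with function_vector_handler rejecting the latter on R8C, on
   non-functions, and for vector numbers outside 18..255.  */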
3006 
3007 #undef TARGET_COMP_TYPE_ATTRIBUTES
3008 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3009 static int
3010 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3011 			   const_tree type2 ATTRIBUTE_UNUSED)
3012 {
3013   /* 0=incompatible 1=compatible 2=warning */
3014   return 1;
3015 }
3016 
3017 #undef TARGET_INSERT_ATTRIBUTES
3018 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3019 static void
3020 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3021 			tree * attr_ptr ATTRIBUTE_UNUSED)
3022 {
3023   unsigned addr;
3024   /* See if we need to make #pragma address variables volatile.  */
3025 
3026   if (TREE_CODE (node) == VAR_DECL)
3027     {
3028       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3029       if (m32c_get_pragma_address  (name, &addr))
3030 	{
3031 	  TREE_THIS_VOLATILE (node) = true;
3032 	}
3033     }
3034 }
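
/* For illustration only: the pragma machinery below is driven by
   user code along the lines of

       #pragma ADDRESS port0 0x340
       char port0;

   (see the m32c pragma support for the exact syntax).
   m32c_note_pragma_address records the name/address pair,
   m32c_insert_attributes marks the variable volatile, and
   m32c_output_aligned_common later emits an absolute assignment
   instead of a .comm directive.  */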
3035 
3036 /* Hash table of pragma info.  */
3037 static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
3038 
3039 void
3040 m32c_note_pragma_address (const char *varname, unsigned address)
3041 {
3042   if (!pragma_htab)
3043     pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);
3044 
3045   const char *name = ggc_strdup (varname);
3046   unsigned int *slot = &pragma_htab->get_or_insert (name);
3047   *slot = address;
3048 }
3049 
3050 static bool
3051 m32c_get_pragma_address (const char *varname, unsigned *address)
3052 {
3053   if (!pragma_htab)
3054     return false;
3055 
3056   unsigned int *slot = pragma_htab->get (varname);
3057   if (slot)
3058     {
3059       *address = *slot;
3060       return true;
3061     }
3062   return false;
3063 }
3064 
3065 void
3066 m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3067 			    const char *name,
3068 			    int size, int align, int global)
3069 {
3070   unsigned address;
3071 
3072   if (m32c_get_pragma_address (name, &address))
3073     {
3074       /* We never output these as global.  */
3075       assemble_name (stream, name);
3076       fprintf (stream, " = 0x%04x\n", address);
3077       return;
3078     }
3079   if (!global)
3080     {
3081       fprintf (stream, "\t.local\t");
3082       assemble_name (stream, name);
3083       fprintf (stream, "\n");
3084     }
3085   fprintf (stream, "\t.comm\t");
3086   assemble_name (stream, name);
3087   fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3088 }
3089 
3090 /* Predicates */
3091 
3092 /* This is a list of legal subregs of hard regs.  */
3093 static const struct {
3094   unsigned char outer_mode_size;
3095   unsigned char inner_mode_size;
3096   unsigned char byte_mask;
3097   unsigned char legal_when;
3098   unsigned int regno;
3099 } legal_subregs[] = {
3100   {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
3101   {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
3102   {1, 2, 0x01, 1, A0_REGNO},
3103   {1, 2, 0x01, 1, A1_REGNO},
3104 
3105   {1, 4, 0x01, 1, A0_REGNO},
3106   {1, 4, 0x01, 1, A1_REGNO},
3107 
3108   {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
3109   {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
3110   {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
3111   {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
3112   {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
3113 
3114   {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
3115 };
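
/* Reading the table above, by way of example: {2, 4, 0x05, 1, R0_REGNO}
   says an HImode (2-byte) subreg of the SImode (4-byte) value based at
   r0 is legal at byte offsets 0 and 2 (mask 0x05), i.e. r0 and r2,
   regardless of target; entries with legal_when of 16 or 24 apply only
   when TARGET_A16 or TARGET_A24 holds, respectively.  */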
3116 
3117 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3118    support.  We also bail on MEMs with illegal addresses.  */
3119 bool
3120 m32c_illegal_subreg_p (rtx op)
3121 {
3122   int offset;
3123   unsigned int i;
3124   machine_mode src_mode, dest_mode;
3125 
3126   if (GET_CODE (op) == MEM
3127       && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3128     {
3129       return true;
3130     }
3131 
3132   if (GET_CODE (op) != SUBREG)
3133     return false;
3134 
3135   dest_mode = GET_MODE (op);
3136   offset = SUBREG_BYTE (op);
3137   op = SUBREG_REG (op);
3138   src_mode = GET_MODE (op);
3139 
3140   if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3141     return false;
3142   if (GET_CODE (op) != REG)
3143     return false;
3144   if (REGNO (op) >= MEM0_REGNO)
3145     return false;
3146 
3147   offset = (1 << offset);
3148 
3149   for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3150     if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3151 	&& legal_subregs[i].regno == REGNO (op)
3152 	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3153 	&& legal_subregs[i].byte_mask & offset)
3154       {
3155 	switch (legal_subregs[i].legal_when)
3156 	  {
3157 	  case 1:
3158 	    return false;
3159 	  case 16:
3160 	    if (TARGET_A16)
3161 	      return false;
3162 	    break;
3163 	  case 24:
3164 	    if (TARGET_A24)
3165 	      return false;
3166 	    break;
3167 	  }
3168       }
3169   return true;
3170 }
3171 
3172 /* Returns TRUE if we support a move between the first two operands.
3173    At the moment, we just want to discourage mem to mem moves until
3174    after reload, because reload has a hard time with our limited
3175    number of address registers, and we can get into a situation where
3176    we need three of them when we only have two.  */
3177 bool
3178 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3179 {
3180   rtx op0 = operands[0];
3181   rtx op1 = operands[1];
3182 
3183   if (TARGET_A24)
3184     return true;
3185 
3186 #define DEBUG_MOV_OK 0
3187 #if DEBUG_MOV_OK
3188   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3189   debug_rtx (op0);
3190   debug_rtx (op1);
3191 #endif
3192 
3193   if (GET_CODE (op0) == SUBREG)
3194     op0 = XEXP (op0, 0);
3195   if (GET_CODE (op1) == SUBREG)
3196     op1 = XEXP (op1, 0);
3197 
3198   if (GET_CODE (op0) == MEM
3199       && GET_CODE (op1) == MEM
3200       && ! reload_completed)
3201     {
3202 #if DEBUG_MOV_OK
3203       fprintf (stderr, " - no, mem to mem\n");
3204 #endif
3205       return false;
3206     }
3207 
3208 #if DEBUG_MOV_OK
3209   fprintf (stderr, " - ok\n");
3210 #endif
3211   return true;
3212 }
3213 
3214 /* Returns TRUE if two consecutive HImode mov instructions, generated
3215    to move an immediate double-word value into a double-word variable,
3216    can be combined into a single SImode mov instruction.  */
3217 bool
3218 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3219 		   machine_mode mode ATTRIBUTE_UNUSED)
3220 {
3221   /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3222      flags.  */
3223   return false;
3224 }
3225 
3226 /* Expanders */
3227 
3228 /* Subregs are non-orthogonal for us, because our registers are all
3229    different sizes.  */
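/* For example, asking m32c_subreg for the HImode piece at byte 2 of
   r0 viewed as SImode yields r2, since SImode values live in the
   r2r0 pair rather than in consecutively numbered registers; a plain
   simplify_gen_subreg would not know that.  */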
3230 static rtx
3231 m32c_subreg (machine_mode outer,
3232 	     rtx x, machine_mode inner, int byte)
3233 {
3234   int r, nr = -1;
3235 
3236   /* When converting MEMs to a different mode of the same size, we
3237      just rewrite them.  */
3238   if (GET_CODE (x) == SUBREG
3239       && SUBREG_BYTE (x) == 0
3240       && GET_CODE (SUBREG_REG (x)) == MEM
3241       && (GET_MODE_SIZE (GET_MODE (x))
3242 	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3243     {
3244       rtx oldx = x;
3245       x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3246       MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3247     }
3248 
3249   /* Push/pop get done as smaller push/pops.  */
3250   if (GET_CODE (x) == MEM
3251       && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3252 	  || GET_CODE (XEXP (x, 0)) == POST_INC))
3253     return gen_rtx_MEM (outer, XEXP (x, 0));
3254   if (GET_CODE (x) == SUBREG
3255       && GET_CODE (XEXP (x, 0)) == MEM
3256       && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3257 	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3258     return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3259 
3260   if (GET_CODE (x) != REG)
3261     {
3262       rtx r = simplify_gen_subreg (outer, x, inner, byte);
3263       if (GET_CODE (r) == SUBREG
3264 	  && GET_CODE (x) == MEM
3265 	  && MEM_VOLATILE_P (x))
3266 	{
3267 	  /* Volatile MEMs don't get simplified, but we need them to
3268 	     be.  We are little endian, so the subreg byte is the
3269 	     offset.  */
3270 	  r = adjust_address_nv (x, outer, byte);
3271 	}
3272       return r;
3273     }
3274 
3275   r = REGNO (x);
3276   if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3277     return simplify_gen_subreg (outer, x, inner, byte);
3278 
3279   if (IS_MEM_REGNO (r))
3280     return simplify_gen_subreg (outer, x, inner, byte);
3281 
3282   /* This is where the complexities of our register layout are
3283      described.  */
3284   if (byte == 0)
3285     nr = r;
3286   else if (outer == HImode)
3287     {
3288       if (r == R0_REGNO && byte == 2)
3289 	nr = R2_REGNO;
3290       else if (r == R0_REGNO && byte == 4)
3291 	nr = R1_REGNO;
3292       else if (r == R0_REGNO && byte == 6)
3293 	nr = R3_REGNO;
3294       else if (r == R1_REGNO && byte == 2)
3295 	nr = R3_REGNO;
3296       else if (r == A0_REGNO && byte == 2)
3297 	nr = A1_REGNO;
3298     }
3299   else if (outer == SImode)
3300     {
3301       if (r == R0_REGNO && byte == 0)
3302 	nr = R0_REGNO;
3303       else if (r == R0_REGNO && byte == 4)
3304 	nr = R1_REGNO;
3305     }
3306   if (nr == -1)
3307     {
3308       fprintf (stderr, "m32c_subreg %s %s %d\n",
3309 	       mode_name[outer], mode_name[inner], byte);
3310       debug_rtx (x);
3311       gcc_unreachable ();
3312     }
3313   return gen_rtx_REG (outer, nr);
3314 }
3315 
3316 /* Used to emit move instructions.  We split some moves,
3317    and avoid mem-mem moves.  */
3318 int
3319 m32c_prepare_move (rtx * operands, machine_mode mode)
3320 {
3321   if (far_addr_space_p (operands[0])
3322       && CONSTANT_P (operands[1]))
3323     {
3324       operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3325     }
3326   if (TARGET_A16 && mode == PSImode)
3327     return m32c_split_move (operands, mode, 1);
3328   if ((GET_CODE (operands[0]) == MEM)
3329       && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3330     {
3331       rtx pmv = XEXP (operands[0], 0);
3332       rtx dest_reg = XEXP (pmv, 0);
3333       rtx dest_mod = XEXP (pmv, 1);
3334 
3335       emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3336       operands[0] = gen_rtx_MEM (mode, dest_reg);
3337     }
3338   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3339     operands[1] = copy_to_mode_reg (mode, operands[1]);
3340   return 0;
3341 }
3342 
3343 #define DEBUG_SPLIT 0
3344 
3345 /* Returns TRUE if the given PSImode move should be split.  We split
3346    all r8c/m16c moves, since those chips don't support them, and
3347    POP.L, as we can only *push* SImode.  */
3348 int
3349 m32c_split_psi_p (rtx * operands)
3350 {
3351 #if DEBUG_SPLIT
3352   fprintf (stderr, "\nm32c_split_psi_p\n");
3353   debug_rtx (operands[0]);
3354   debug_rtx (operands[1]);
3355 #endif
3356   if (TARGET_A16)
3357     {
3358 #if DEBUG_SPLIT
3359       fprintf (stderr, "yes, A16\n");
3360 #endif
3361       return 1;
3362     }
3363   if (GET_CODE (operands[1]) == MEM
3364       && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3365     {
3366 #if DEBUG_SPLIT
3367       fprintf (stderr, "yes, pop.l\n");
3368 #endif
3369       return 1;
3370     }
3371 #if DEBUG_SPLIT
3372   fprintf (stderr, "no, default\n");
3373 #endif
3374   return 0;
3375 }
3376 
3377 /* Split the given move.  SPLIT_ALL is 0 if splitting is optional
3378    (define_expand), 1 if it is not optional (define_insn_and_split),
3379    and 3 for define_split (alternate api). */
3380 int
3381 m32c_split_move (rtx * operands, machine_mode mode, int split_all)
3382 {
3383   rtx s[4], d[4];
3384   int parts, si, di, rev = 0;
3385   int rv = 0, opi = 2;
3386   machine_mode submode = HImode;
3387   rtx *ops, local_ops[10];
3388 
3389   /* define_split modifies the existing operands, but the other two
3390      emit new insns.  OPS is where we store the operand pairs, which
3391      we emit later.  */
3392   if (split_all == 3)
3393     ops = operands;
3394   else
3395     ops = local_ops;
3396 
3397   /* DImode splits into SImode halves; everything else into HImode.  */
3398   if (mode == DImode)
3399     submode = SImode;
3400 
3401   /* Before splitting mem-mem moves, force one operand into a
3402      register.  */
3403   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3404     {
3405 #if DEBUG0
3406       fprintf (stderr, "force_reg...\n");
3407       debug_rtx (operands[1]);
3408 #endif
3409       operands[1] = force_reg (mode, operands[1]);
3410 #if DEBUG0
3411       debug_rtx (operands[1]);
3412 #endif
3413     }
3414 
3415   parts = 2;
3416 
3417 #if DEBUG_SPLIT
3418   fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3419 	   split_all);
3420   debug_rtx (operands[0]);
3421   debug_rtx (operands[1]);
3422 #endif
3423 
3424   /* Note that split_all is not used to select the api after this
3425      point, so it's safe to set it to 3 even with define_insn.  */
3426   /* None of the chips can move SI operands to sp-relative addresses,
3427      so we always split those.  */
3428   if (satisfies_constraint_Ss (operands[0]))
3429     split_all = 3;
3430 
3431   if (TARGET_A16
3432       && (far_addr_space_p (operands[0])
3433 	  || far_addr_space_p (operands[1])))
3434     split_all |= 1;
3435 
3436   /* We don't need to split these.  */
3437   if (TARGET_A24
3438       && split_all != 3
3439       && (mode == SImode || mode == PSImode)
3440       && !(GET_CODE (operands[1]) == MEM
3441 	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3442     return 0;
3443 
3444   /* First, enumerate the subregs we'll be dealing with.  */
3445   for (si = 0; si < parts; si++)
3446     {
3447       d[si] =
3448 	m32c_subreg (submode, operands[0], mode,
3449 		     si * GET_MODE_SIZE (submode));
3450       s[si] =
3451 	m32c_subreg (submode, operands[1], mode,
3452 		     si * GET_MODE_SIZE (submode));
3453     }
3454 
3455   /* Split pushes by emitting a sequence of smaller pushes.  */
3456   if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3457     {
3458       for (si = parts - 1; si >= 0; si--)
3459 	{
3460 	  ops[opi++] = gen_rtx_MEM (submode,
3461 				    gen_rtx_PRE_DEC (Pmode,
3462 						     gen_rtx_REG (Pmode,
3463 								  SP_REGNO)));
3464 	  ops[opi++] = s[si];
3465 	}
3466 
3467       rv = 1;
3468     }
3469   /* Likewise for pops.  */
3470   else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3471     {
3472       for (di = 0; di < parts; di++)
3473 	{
3474 	  ops[opi++] = d[di];
3475 	  ops[opi++] = gen_rtx_MEM (submode,
3476 				    gen_rtx_POST_INC (Pmode,
3477 						      gen_rtx_REG (Pmode,
3478 								   SP_REGNO)));
3479 	}
3480       rv = 1;
3481     }
3482   else if (split_all)
3483     {
3484       /* if d[di] == s[si] for any di < si, we'll early clobber. */
3485       for (di = 0; di < parts - 1; di++)
3486 	for (si = di + 1; si < parts; si++)
3487 	  if (reg_mentioned_p (d[di], s[si]))
3488 	    rev = 1;
3489 
3490       if (rev)
3491 	for (si = 0; si < parts; si++)
3492 	  {
3493 	    ops[opi++] = d[si];
3494 	    ops[opi++] = s[si];
3495 	  }
3496       else
3497 	for (si = parts - 1; si >= 0; si--)
3498 	  {
3499 	    ops[opi++] = d[si];
3500 	    ops[opi++] = s[si];
3501 	  }
3502       rv = 1;
3503     }
3504   /* Now emit any moves we may have accumulated.  */
3505   if (rv && split_all != 3)
3506     {
3507       int i;
3508       for (i = 2; i < opi; i += 2)
3509 	emit_move_insn (ops[i], ops[i + 1]);
3510     }
3511   return rv;
3512 }
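
/* A rough, editorial illustration of the splitting above: an SImode
   push such as

       (set (mem:SI (pre_dec (reg sp))) (reg:SI x))

   becomes two HImode pushes, high half first, so the value keeps its
   little-endian layout on the stack:

       (set (mem:HI (pre_dec (reg sp))) (subreg:HI (reg:SI x) 2))
       (set (mem:HI (pre_dec (reg sp))) (subreg:HI (reg:SI x) 0))

   "x" is only a placeholder; the real pieces come from m32c_subreg and
   need not be literal SUBREGs.  */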
3513 
3514 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3515    the like.  For the R8C they expect one of the addresses to be in
3516    R1L:An so we need to arrange for that.  Otherwise, it's just a
3517    matter of picking out the operands we want and emitting the right
3518    pattern for them.  All these expanders, which correspond to
3519    patterns in blkmov.md, must return nonzero if they expand the insn,
3520    or zero if they should FAIL.  */
3521 
3522 /* This is a memset() opcode.  All operands are implied, so we need to
3523    arrange for them to be in the right registers.  The opcode wants
3524    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3525    the count (HI), and $2 the value (QI).  */
3526 int
3527 m32c_expand_setmemhi(rtx *operands)
3528 {
3529   rtx desta, count, val;
3530   rtx desto, counto;
3531 
3532   desta = XEXP (operands[0], 0);
3533   count = operands[1];
3534   val = operands[2];
3535 
3536   desto = gen_reg_rtx (Pmode);
3537   counto = gen_reg_rtx (HImode);
3538 
3539   if (GET_CODE (desta) != REG
3540       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3541     desta = copy_to_mode_reg (Pmode, desta);
3542 
3543   /* This looks like an arbitrary restriction, but this is by far the
3544      most common case.  For counts 8..14 this actually results in
3545      smaller code with no speed penalty because the half-sized
3546      constant can be loaded with a shorter opcode.  */
3547   if (GET_CODE (count) == CONST_INT
3548       && GET_CODE (val) == CONST_INT
3549       && ! (INTVAL (count) & 1)
3550       && (INTVAL (count) > 1)
3551       && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3552     {
3553       unsigned v = INTVAL (val) & 0xff;
3554       v = v | (v << 8);
3555       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3556       val = copy_to_mode_reg (HImode, GEN_INT (v));
3557       if (TARGET_A16)
3558 	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3559       else
3560 	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3561       return 1;
3562     }
3563 
3564   /* This is the generalized memset() case.  */
3565   if (GET_CODE (val) != REG
3566       || REGNO (val) < FIRST_PSEUDO_REGISTER)
3567     val = copy_to_mode_reg (QImode, val);
3568 
3569   if (GET_CODE (count) != REG
3570       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3571     count = copy_to_mode_reg (HImode, count);
3572 
3573   if (TARGET_A16)
3574     emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3575   else
3576     emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3577 
3578   return 1;
3579 }
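
/* Worked example (editorial): for memset (buf, 5, 6) the fast path
   above fires because the count is even and the value fits in -8..7.
   The byte value is doubled into a word, 0x05 -> 0x0505, and the count
   is halved, 6 -> 3, so the hardware loop stores three 16-bit words
   instead of six bytes.  Odd counts, larger values, or non-constant
   operands fall through to the byte-wide variant.  */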
3580 
3581 /* This is a memcpy() opcode.  All operands are implied, so we need to
3582    arrange for them to be in the right registers.  The opcode wants
3583    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3584    is the source (MEM:BLK), and $2 the count (HI).  */
3585 int
3586 m32c_expand_cpymemhi(rtx *operands)
3587 {
3588   rtx desta, srca, count;
3589   rtx desto, srco, counto;
3590 
3591   desta = XEXP (operands[0], 0);
3592   srca = XEXP (operands[1], 0);
3593   count = operands[2];
3594 
3595   desto = gen_reg_rtx (Pmode);
3596   srco = gen_reg_rtx (Pmode);
3597   counto = gen_reg_rtx (HImode);
3598 
3599   if (GET_CODE (desta) != REG
3600       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3601     desta = copy_to_mode_reg (Pmode, desta);
3602 
3603   if (GET_CODE (srca) != REG
3604       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3605     srca = copy_to_mode_reg (Pmode, srca);
3606 
3607   /* Similar to setmem, but we don't need to check the value.  */
3608   if (GET_CODE (count) == CONST_INT
3609       && ! (INTVAL (count) & 1)
3610       && (INTVAL (count) > 1))
3611     {
3612       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3613       if (TARGET_A16)
3614 	emit_insn (gen_cpymemhi_whi_op (desto, srco, counto, desta, srca, count));
3615       else
3616 	emit_insn (gen_cpymemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3617       return 1;
3618     }
3619 
3620   /* This is the generalized memcpy() case.  */
3621   if (GET_CODE (count) != REG
3622       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3623     count = copy_to_mode_reg (HImode, count);
3624 
3625   if (TARGET_A16)
3626     emit_insn (gen_cpymemhi_bhi_op (desto, srco, counto, desta, srca, count));
3627   else
3628     emit_insn (gen_cpymemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3629 
3630   return 1;
3631 }
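
/* Likewise for copies (editorial illustration): memcpy (dst, src, 10)
   has an even constant count, so the expander above halves it to 5 and
   emits the word-wide block-move pattern, while memcpy (dst, src, 7)
   keeps the count in a register and uses the byte-wide pattern.  */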
3632 
3633 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3634    the copy, which should point to the NUL at the end of the string,
3635    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3636    Since our opcode leaves the destination pointing *after* the NUL,
3637    we must emit an adjustment.  */
3638 int
3639 m32c_expand_movstr(rtx *operands)
3640 {
3641   rtx desta, srca;
3642   rtx desto, srco;
3643 
3644   desta = XEXP (operands[1], 0);
3645   srca = XEXP (operands[2], 0);
3646 
3647   desto = gen_reg_rtx (Pmode);
3648   srco = gen_reg_rtx (Pmode);
3649 
3650   if (GET_CODE (desta) != REG
3651       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3652     desta = copy_to_mode_reg (Pmode, desta);
3653 
3654   if (GET_CODE (srca) != REG
3655       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3656     srca = copy_to_mode_reg (Pmode, srca);
3657 
3658   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3659   /* desto ends up being a1, which allows this type of add through MOVA.  */
3660   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3661 
3662   return 1;
3663 }
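
/* Editorial illustration: the hardware string move leaves the
   destination pointer one byte past the NUL, so the add of -1 above
   backs it up onto the NUL itself.  Copying "hi", for example,
   advances the pointer by three bytes; operands[0] then receives
   dst + 2, which is what stpcpy() must return.  */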
3664 
3665 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3666    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3667    $2 is the other (MEM:BLK).  We must do the comparison, and then
3668    convert the flags to a signed integer result.  */
3669 int
3670 m32c_expand_cmpstr(rtx *operands)
3671 {
3672   rtx src1a, src2a;
3673 
3674   src1a = XEXP (operands[1], 0);
3675   src2a = XEXP (operands[2], 0);
3676 
3677   if (GET_CODE (src1a) != REG
3678       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3679     src1a = copy_to_mode_reg (Pmode, src1a);
3680 
3681   if (GET_CODE (src2a) != REG
3682       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3683     src2a = copy_to_mode_reg (Pmode, src2a);
3684 
3685   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3686   emit_insn (gen_cond_to_int (operands[0]));
3687 
3688   return 1;
3689 }
3690 
3691 
3692 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3693 
3694 static shift_gen_func
3695 shift_gen_func_for (int mode, int code)
3696 {
3697 #define GFF(m,c,f) if (mode == m && code == c) return f
3698   GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
3699   GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
3700   GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
3701   GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
3702   GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
3703   GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
3704   GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
3705   GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3706   GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3707   GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3708   GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3709   GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3710 #undef GFF
3711   gcc_unreachable ();
3712 }
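
/* Editorial illustration: shift_gen_func_for (HImode, ASHIFT) returns
   gen_ashlhi3_i, while SImode requests pick the 16- or 24-bit-address
   variant according to TARGET_A16.  m32c_prepare_shift below calls the
   returned generator with a signed count, e.g. GEN_INT (-8) for an
   eight-bit right shift.  */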
3713 
3714 /* The m32c only has one shift, but it takes a signed count.  GCC
3715    doesn't want this, so we fake it by negating any shift count when
3716    we're pretending to shift the other way.  Also, the shift count is
3717    limited to -8..8.  It's slightly better to use two shifts for 9..15
3718    than to load the count into r1h, so we do that too.  */
3719 int
3720 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3721 {
3722   machine_mode mode = GET_MODE (operands[0]);
3723   shift_gen_func func = shift_gen_func_for (mode, shift_code);
3724   rtx temp;
3725 
3726   if (GET_CODE (operands[2]) == CONST_INT)
3727     {
3728       int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3729       int count = INTVAL (operands[2]) * scale;
3730 
3731       while (count > maxc)
3732 	{
3733 	  temp = gen_reg_rtx (mode);
3734 	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3735 	  operands[1] = temp;
3736 	  count -= maxc;
3737 	}
3738       while (count < -maxc)
3739 	{
3740 	  temp = gen_reg_rtx (mode);
3741 	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3742 	  operands[1] = temp;
3743 	  count += maxc;
3744 	}
3745       emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3746       return 1;
3747     }
3748 
3749   temp = gen_reg_rtx (QImode);
3750   if (scale < 0)
3751     /* The pattern has a NEG that corresponds to this. */
3752     emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3753   else if (TARGET_A16 && mode == SImode)
3754     /* Copy the count, because the code below may modify it and we
3755        don't want to modify the original value.  */
3756     emit_move_insn (temp, operands[2]);
3757   else
3758     /* We'll only use it for the shift, no point emitting a move.  */
3759     temp = operands[2];
3760 
3761   if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3762     {
3763       /* The m16c has a limit of -16..16 for SI shifts, even when the
3764 	 shift count is in a register.  Since there are so many targets
3765 	 of these shifts, it's better to expand the RTL here than to
3766 	 call a helper function.
3767 
3768 	 The resulting code looks something like this:
3769 
3770 		cmp.b	r1h,-16
3771 		jge.b	1f
3772 		shl.l	-16,dest
3773 		add.b	r1h,16
3774 	1f:	cmp.b	r1h,16
3775 		jle.b	1f
3776 		shl.l	16,dest
3777 		sub.b	r1h,16
3778 	1f:	shl.l	r1h,dest
3779 
3780 	 We take advantage of the fact that "negative" shifts are
3781 	 undefined to skip one of the comparisons.  */
3782 
3783       rtx count;
3784       rtx tempvar;
3785       rtx_insn *insn;
3786 
3787       emit_move_insn (operands[0], operands[1]);
3788 
3789       count = temp;
3790       rtx_code_label *label = gen_label_rtx ();
3791       LABEL_NUSES (label) ++;
3792 
3793       tempvar = gen_reg_rtx (mode);
3794 
3795       if (shift_code == ASHIFT)
3796 	{
3797 	  /* This is a left shift.  We only need to check positive counts.  */
3798 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3799 					  count, GEN_INT (16), label));
3800 	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3801 	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3802 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3803 	  emit_label_after (label, insn);
3804 	}
3805       else
3806 	{
3807 	  /* This is a right shift.  We only need to check negative counts.  */
3808 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3809 					  count, GEN_INT (-16), label));
3810 	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3811 	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3812 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3813 	  emit_label_after (label, insn);
3814 	}
3815       operands[1] = operands[0];
3816       emit_insn (func (operands[0], operands[0], count));
3817       return 1;
3818     }
3819 
3820   operands[2] = temp;
3821   return 0;
3822 }
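
/* Worked example (editorial): an HImode left shift by 12 has
   maxc == 8, so the constant loop above emits a shift by 8 into a
   fresh pseudo and then the final shift by 4 into the destination,
   matching the comment's claim that two shifts beat loading the count
   into r1h for counts 9..15.  A right shift by 12 takes the same path
   with counts -8 and -4.  */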
3823 
3824 /* The m32c has a limited range of operations that work on PSImode
3825    values; we have to expand to SI, do the math, and truncate back to
3826    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
3827    those cases.  */
3828 void
3829 m32c_expand_neg_mulpsi3 (rtx * operands)
3830 {
3831   /* operands: a = b * i */
3832   rtx temp1; /* b as SI */
3833   rtx scale /* i as SI */;
3834   rtx temp2; /* a*b as SI */
3835 
3836   temp1 = gen_reg_rtx (SImode);
3837   temp2 = gen_reg_rtx (SImode);
3838   if (GET_CODE (operands[2]) != CONST_INT)
3839     {
3840       scale = gen_reg_rtx (SImode);
3841       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3842     }
3843   else
3844     scale = copy_to_mode_reg (SImode, operands[2]);
3845 
3846   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3847   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3848   emit_insn (gen_truncsipsi2 (operands[0], temp2));
3849 }
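
/* Editorial illustration: a PSImode product a = b * c is expanded
   above as: zero-extend b (and c, when it is not a constant) to
   SImode, form the 32-bit product with expand_simple_binop, then
   truncate the result back to PSImode for a; exactly the widen,
   multiply, truncate sequence promised by the comment before the
   function.  */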
3850 
3851 /* Pattern Output Functions */
3852 
3853 int
3854 m32c_expand_movcc (rtx *operands)
3855 {
3856   rtx rel = operands[1];
3857 
3858   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3859     return 1;
3860   if (GET_CODE (operands[2]) != CONST_INT
3861       || GET_CODE (operands[3]) != CONST_INT)
3862     return 1;
3863   if (GET_CODE (rel) == NE)
3864     {
3865       rtx tmp = operands[2];
3866       operands[2] = operands[3];
3867       operands[3] = tmp;
3868       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3869     }
3870 
3871   emit_move_insn (operands[0],
3872 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3873 					rel,
3874 					operands[2],
3875 					operands[3]));
3876   return 0;
3877 }
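
/* Editorial illustration: for x = (a == b) ? 3 : 7 the expander keeps
   the EQ and emits the IF_THEN_ELSE directly; for x = (a != b) ? 3 : 7
   it swaps the two constant arms and rewrites the test as EQ first, so
   the machine pattern only ever sees the EQ form.  */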
3878 
3879 /* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
3880 int
3881 m32c_expand_insv (rtx *operands)
3882 {
3883   rtx op0, src0, p;
3884   int mask;
3885 
3886   if (INTVAL (operands[1]) != 1)
3887     return 1;
3888 
3889   /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
3890   if (GET_CODE (operands[3]) != CONST_INT)
3891     return 1;
3892   if (INTVAL (operands[3]) != 0
3893       && INTVAL (operands[3]) != 1
3894       && INTVAL (operands[3]) != -1)
3895     return 1;
3896 
3897   mask = 1 << INTVAL (operands[2]);
3898 
3899   op0 = operands[0];
3900   if (GET_CODE (op0) == SUBREG
3901       && SUBREG_BYTE (op0) == 0)
3902     {
3903       rtx sub = SUBREG_REG (op0);
3904       if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3905 	op0 = sub;
3906     }
3907 
3908   if (!can_create_pseudo_p ()
3909       || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3910     src0 = op0;
3911   else
3912     {
3913       src0 = gen_reg_rtx (GET_MODE (op0));
3914       emit_move_insn (src0, op0);
3915     }
3916 
3917   if (GET_MODE (op0) == HImode
3918       && INTVAL (operands[2]) >= 8
3919       && GET_CODE (op0) == MEM)
3920     {
3921       /* We are little endian.  */
3922       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
3923 							XEXP (op0, 0), 1));
3924       MEM_COPY_ATTRIBUTES (new_mem, op0);
3925       mask >>= 8;
3926     }
3927 
3928   /* First, we generate a mask with the correct polarity.  If we are
3929      storing a zero, we want an AND mask, so invert it.  */
3930   if (INTVAL (operands[3]) == 0)
3931     {
3932       /* Storing a zero, use an AND mask */
3933       if (GET_MODE (op0) == HImode)
3934 	mask ^= 0xffff;
3935       else
3936 	mask ^= 0xff;
3937     }
3938   /* Now we need to properly sign-extend the mask in case we need to
3939      fall back to an AND or OR opcode.  */
3940   if (GET_MODE (op0) == HImode)
3941     {
3942       if (mask & 0x8000)
3943 	mask -= 0x10000;
3944     }
3945   else
3946     {
3947       if (mask & 0x80)
3948 	mask -= 0x100;
3949     }
3950 
3951   switch (  (INTVAL (operands[3]) ? 4 : 0)
3952 	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
3953 	  + (TARGET_A24 ? 1 : 0))
3954     {
3955     case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3956     case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3957     case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3958     case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3959     case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3960     case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3961     case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3962     case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3963     default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
3964     }
3965 
3966   emit_insn (p);
3967   return 0;
3968 }
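
/* Worked example (editorial): inserting a 1 into bit 3 of a QImode
   value gives mask == 0x08 and selects one of the iorqi3 cases above;
   storing a 0 instead flips the mask to 0xf7, which is sign-extended
   to -9 so that it satisfies the constraints of the andqi3
   patterns.  */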
3969 
3970 const char *
3971 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3972 {
3973   static char buf[30];
3974   if (GET_CODE (operands[0]) == REG
3975       && REGNO (operands[0]) == R0_REGNO)
3976     {
3977       if (code == EQ)
3978 	return "stzx\t#1,#0,r0l";
3979       if (code == NE)
3980 	return "stzx\t#0,#1,r0l";
3981     }
3982   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3983   return buf;
3984 }
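
/* Editorial illustration: when the result lands in r0, (x == y)
   becomes a single "stzx #1,#0,r0l"; for any other destination the
   fallback template expands to something like

       bmeq	0,%h0
       and.b	#1,%0

   since GET_RTX_NAME (EQ) is "eq".  */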
3985 
3986 /* Encode symbol attributes of a SYMBOL_REF into its
3987    SYMBOL_REF_FLAGS. */
3988 static void
3989 m32c_encode_section_info (tree decl, rtx rtl, int first)
3990 {
3991   int extra_flags = 0;
3992 
3993   default_encode_section_info (decl, rtl, first);
3994   if (TREE_CODE (decl) == FUNCTION_DECL
3995       && m32c_special_page_vector_p (decl))
3996 
3997     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3998 
3999   if (extra_flags)
4000     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4001 }
4002 
4003 /* Returns TRUE if the current function is a leaf, and thus we can
4004    determine which registers an interrupt function really needs to
4005    save.  The logic below is mostly about finding the insn sequence
4006    that's the function, versus any sequence that might be open for the
4007    current insn.  */
4008 static int
4009 m32c_leaf_function_p (void)
4010 {
4011   int rv;
4012 
4013   push_topmost_sequence ();
4014   rv = leaf_function_p ();
4015   pop_topmost_sequence ();
4016   return rv;
4017 }
4018 
4019 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4020    opcodes.  If the function doesn't need the frame base or stack
4021    pointer, it can use the simpler RTS opcode.  */
4022 static bool
4023 m32c_function_needs_enter (void)
4024 {
4025   rtx_insn *insn;
4026   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4027   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4028 
4029   for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4030     if (NONDEBUG_INSN_P (insn))
4031       {
4032 	if (reg_mentioned_p (sp, insn))
4033 	  return true;
4034 	if (reg_mentioned_p (fb, insn))
4035 	  return true;
4036       }
4037   return false;
4038 }
4039 
4040 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4041    frame-related.  Return PAR.
4042 
4043    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4044    PARALLEL rtx other than the first if they do not have the
4045    FRAME_RELATED flag set on them.  So this function is handy for
4046    marking up 'enter' instructions.  */
4047 static rtx
4048 m32c_all_frame_related (rtx par)
4049 {
4050   int len = XVECLEN (par, 0);
4051   int i;
4052 
4053   for (i = 0; i < len; i++)
4054     F (XVECEXP (par, 0, i));
4055 
4056   return par;
4057 }
4058 
4059 /* Emits the prologue.  See the frame layout comment earlier in this
4060    file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
4061    that we manually update sp.  */
4062 void
4063 m32c_emit_prologue (void)
4064 {
4065   int frame_size, extra_frame_size = 0, reg_save_size;
4066   int complex_prologue = 0;
4067 
4068   cfun->machine->is_leaf = m32c_leaf_function_p ();
4069   if (interrupt_p (cfun->decl))
4070     {
4071       cfun->machine->is_interrupt = 1;
4072       complex_prologue = 1;
4073     }
4074   else if (bank_switch_p (cfun->decl))
4075     warning (OPT_Wattributes,
4076 	     "%<bank_switch%> has no effect on non-interrupt functions");
4077 
4078   reg_save_size = m32c_pushm_popm (PP_justcount);
4079 
4080   if (interrupt_p (cfun->decl))
4081     {
4082       if (bank_switch_p (cfun->decl))
4083 	emit_insn (gen_fset_b ());
4084       else if (cfun->machine->intr_pushm)
4085 	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4086     }
4087 
4088   frame_size =
4089     m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4090   if (frame_size == 0
4091       && !m32c_function_needs_enter ())
4092     cfun->machine->use_rts = 1;
4093 
4094   if (flag_stack_usage_info)
4095     current_function_static_stack_size = frame_size;
4096 
4097   if (frame_size > 254)
4098     {
4099       extra_frame_size = frame_size - 254;
4100       frame_size = 254;
4101     }
4102   if (cfun->machine->use_rts == 0)
4103     F (emit_insn (m32c_all_frame_related
4104 		  (TARGET_A16
4105 		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4106 		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4107 
4108   if (extra_frame_size)
4109     {
4110       complex_prologue = 1;
4111       if (TARGET_A16)
4112 	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4113 				  gen_rtx_REG (HImode, SP_REGNO),
4114 				  GEN_INT (-extra_frame_size))));
4115       else
4116 	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4117 				   gen_rtx_REG (PSImode, SP_REGNO),
4118 				   GEN_INT (-extra_frame_size))));
4119     }
4120 
4121   complex_prologue += m32c_pushm_popm (PP_pushm);
4122 
4123   /* This just emits a comment into the .s file for debugging.  */
4124   if (complex_prologue)
4125     emit_insn (gen_prologue_end ());
4126 }
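
/* Worked example (editorial): with 300 bytes of locals, the code above
   caps the ENTER portion at 254 bytes (the pattern operand also gets
   the 2- or 4-byte adjustment added) and subtracts the remaining 46
   bytes from the stack pointer separately, marking both insns
   frame-related for the unwinder.  */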
4127 
4128 /* Likewise, for the epilogue.  The only exception is that, for
4129    interrupts, we must manually unwind the frame as the REIT opcode
4130    doesn't do that.  */
4131 void
4132 m32c_emit_epilogue (void)
4133 {
4134   int popm_count = m32c_pushm_popm (PP_justcount);
4135 
4136   /* This just emits a comment into the .s file for debugging.  */
4137   if (popm_count > 0 || cfun->machine->is_interrupt)
4138     emit_insn (gen_epilogue_start ());
4139 
4140   if (popm_count > 0)
4141     m32c_pushm_popm (PP_popm);
4142 
4143   if (cfun->machine->is_interrupt)
4144     {
4145       machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4146 
4147       /* REIT clears B flag and restores $fp for us, but we still
4148 	 have to fix up the stack.  USE_RTS just means we didn't
4149 	 emit ENTER.  */
4150       if (!cfun->machine->use_rts)
4151 	{
4152 	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4153 			  gen_rtx_REG (spmode, FP_REGNO));
4154 	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4155 			  gen_rtx_REG (spmode, A0_REGNO));
4156 	  /* We can't just add this to the POPM because it would be in
4157 	     the wrong order, and wouldn't fix the stack if we're bank
4158 	     switching.  */
4159 	  if (TARGET_A16)
4160 	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4161 	  else
4162 	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4163 	}
4164       if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4165 	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4166 
4167       /* The FREIT (Fast REturn from InTerrupt) instruction should be
4168          generated only for M32C/M32CM targets (generate the REIT
4169          instruction otherwise).  */
4170       if (fast_interrupt_p (cfun->decl))
4171         {
4172           /* Check if the fast_interrupt attribute is set for M32C or M32CM.  */
4173           if (TARGET_A24)
4174             {
4175               emit_jump_insn (gen_epilogue_freit ());
4176             }
4177           /* If the fast_interrupt attribute is set for an R8C or M16C
4178              target, ignore it and generate the REIT instruction
4179              instead.  */
4180           else
4181 	    {
4182 	      warning (OPT_Wattributes,
4183 		       "%<fast_interrupt%> attribute directive ignored");
4184 	      emit_jump_insn (gen_epilogue_reit_16 ());
4185 	    }
4186         }
4187       else if (TARGET_A16)
4188 	emit_jump_insn (gen_epilogue_reit_16 ());
4189       else
4190 	emit_jump_insn (gen_epilogue_reit_24 ());
4191     }
4192   else if (cfun->machine->use_rts)
4193     emit_jump_insn (gen_epilogue_rts ());
4194   else if (TARGET_A16)
4195     emit_jump_insn (gen_epilogue_exitd_16 ());
4196   else
4197     emit_jump_insn (gen_epilogue_exitd_24 ());
4198 }
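
/* Editorial illustration: for an interrupt handler that used ENTER,
   the block above rebuilds the stack by copying the frame pointer into
   the stack pointer through a0 and popping the saved frame pointer,
   then pops any intr_pushm registers (unless the handler bank
   switches), and finally emits REIT, or FREIT when fast_interrupt is
   honoured on an A24 target.  */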
4199 
4200 void
4201 m32c_emit_eh_epilogue (rtx ret_addr)
4202 {
4203   /* R0[R2] has the stack adjustment.  R1[R3] has the address to
4204      return to.  We have to fudge the stack, pop everything, pop SP
4205      (fudged), and return (fudged).  This is actually easier to do in
4206      assembler, so punt to libgcc.  */
4207   emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4208   /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4209 }
4210 
4211 /* Indicate which flags must be properly set for a given conditional.  */
4212 static int
4213 flags_needed_for_conditional (rtx cond)
4214 {
4215   switch (GET_CODE (cond))
4216     {
4217     case LE:
4218     case GT:
4219       return FLAGS_OSZ;
4220     case LEU:
4221     case GTU:
4222       return FLAGS_ZC;
4223     case LT:
4224     case GE:
4225       return FLAGS_OS;
4226     case LTU:
4227     case GEU:
4228       return FLAGS_C;
4229     case EQ:
4230     case NE:
4231       return FLAGS_Z;
4232     default:
4233       return FLAGS_N;
4234     }
4235 }
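
/* Editorial illustration: an unsigned GTU branch only needs the Z and
   C flags to be valid, so a preceding insn that sets those two
   correctly can make the compare redundant, while a signed GT also
   needs O and S and is therefore harder to satisfy.  */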
4236 
4237 #define DEBUG_CMP 0
4238 
4239 /* Returns true if a compare insn is redundant because it would only
4240    set flags that are already set correctly.  */
4241 static bool
4242 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4243 {
4244   int flags_needed;
4245   int pflags;
4246   rtx_insn *prev;
4247   rtx pp, next;
4248   rtx op0, op1;
4249 #if DEBUG_CMP
4250   int prev_icode, i;
4251 #endif
4252 
4253   op0 = operands[0];
4254   op1 = operands[1];
4255 
4256 #if DEBUG_CMP
4257   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4258   debug_rtx(cmp);
4259   for (i=0; i<2; i++)
4260     {
4261       fprintf(stderr, "operands[%d] = ", i);
4262       debug_rtx(operands[i]);
4263     }
4264 #endif
4265 
4266   next = next_nonnote_insn (cmp);
4267   if (!next || !INSN_P (next))
4268     {
4269 #if DEBUG_CMP
4270       fprintf(stderr, "compare not followed by insn\n");
4271       debug_rtx(next);
4272 #endif
4273       return false;
4274     }
4275   if (GET_CODE (PATTERN (next)) == SET
4276       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4277     {
4278       next = XEXP (XEXP (PATTERN (next), 1), 0);
4279     }
4280   else if (GET_CODE (PATTERN (next)) == SET)
4281     {
4282       /* If this is a conditional, flags_needed will be something
4283 	 other than FLAGS_N, which we test below.  */
4284       next = XEXP (PATTERN (next), 1);
4285     }
4286   else
4287     {
4288 #if DEBUG_CMP
4289       fprintf(stderr, "compare not followed by conditional\n");
4290       debug_rtx(next);
4291 #endif
4292       return false;
4293     }
4294 #if DEBUG_CMP
4295   fprintf(stderr, "conditional is: ");
4296   debug_rtx(next);
4297 #endif
4298 
4299   flags_needed = flags_needed_for_conditional (next);
4300   if (flags_needed == FLAGS_N)
4301     {
4302 #if DEBUG_CMP
4303       fprintf(stderr, "compare not followed by conditional\n");
4304       debug_rtx(next);
4305 #endif
4306       return false;
4307     }
4308 
4309   /* Compare doesn't set overflow and carry the same way that
4310      arithmetic instructions do, so we can't replace those.  */
4311   if (flags_needed & FLAGS_OC)
4312     return false;
4313 
4314   prev = cmp;
4315   do {
4316     prev = prev_nonnote_insn (prev);
4317     if (!prev)
4318       {
4319 #if DEBUG_CMP
4320 	fprintf(stderr, "No previous insn.\n");
4321 #endif
4322 	return false;
4323       }
4324     if (!INSN_P (prev))
4325       {
4326 #if DEBUG_CMP
4327 	fprintf(stderr, "Previous insn is a non-insn.\n");
4328 #endif
4329 	return false;
4330       }
4331     pp = PATTERN (prev);
4332     if (GET_CODE (pp) != SET)
4333       {
4334 #if DEBUG_CMP
4335 	fprintf(stderr, "Previous insn is not a SET.\n");
4336 #endif
4337 	return false;
4338       }
4339     pflags = get_attr_flags (prev);
4340 
4341     /* Looking up attributes of previous insns corrupted the recog
4342        tables.  */
4343     INSN_UID (cmp) = -1;
4344     recog (PATTERN (cmp), cmp, 0);
4345 
4346     if (pflags == FLAGS_N
4347 	&& reg_mentioned_p (op0, pp))
4348       {
4349 #if DEBUG_CMP
4350 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
4351 	debug_rtx(prev);
4352 #endif
4353 	return false;
4354       }
4355 
4356     /* Check for comparisons against memory - between volatiles and
4357        aliases, we just can't risk this one.  */
4358     if (GET_CODE (operands[0]) == MEM
4359 	|| GET_CODE (operands[1]) == MEM)
4360       {
4361 #if DEBUG_CMP
4362 	fprintf(stderr, "comparisons with memory:\n");
4363 	debug_rtx(prev);
4364 #endif
4365 	return false;
4366       }
4367 
4368     /* Check for PREV changing a register that's used to compute a
4369        value in CMP, even if it doesn't otherwise change flags.  */
4370     if (GET_CODE (operands[0]) == REG
4371 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4372       {
4373 #if DEBUG_CMP
4374 	fprintf(stderr, "sub-value affected, op0:\n");
4375 	debug_rtx(prev);
4376 #endif
4377 	return false;
4378       }
4379     if (GET_CODE (operands[1]) == REG
4380 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4381       {
4382 #if DEBUG_CMP
4383 	fprintf(stderr, "sub-value affected, op1:\n");
4384 	debug_rtx(prev);
4385 #endif
4386 	return false;
4387       }
4388 
4389   } while (pflags == FLAGS_N);
4390 #if DEBUG_CMP
4391   fprintf(stderr, "previous flag-setting insn:\n");
4392   debug_rtx(prev);
4393   debug_rtx(pp);
4394 #endif
4395 
4396   if (GET_CODE (pp) == SET
4397       && GET_CODE (XEXP (pp, 0)) == REG
4398       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4399       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4400     {
4401       /* Adjacent cbranches must have the same operands to be
4402 	 redundant.  */
4403       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4404       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4405 #if DEBUG_CMP
4406       fprintf(stderr, "adjacent cbranches\n");
4407       debug_rtx(pop0);
4408       debug_rtx(pop1);
4409 #endif
4410       if (rtx_equal_p (op0, pop0)
4411 	  && rtx_equal_p (op1, pop1))
4412 	return true;
4413 #if DEBUG_CMP
4414       fprintf(stderr, "prev cmp not same\n");
4415 #endif
4416       return false;
4417     }
4418 
4419   /* Else the previous insn must be a SET, with either the source or
4420      dest equal to operands[0], and operands[1] must be zero.  */
4421 
4422   if (!rtx_equal_p (op1, const0_rtx))
4423     {
4424 #if DEBUG_CMP
4425       fprintf(stderr, "operands[1] not const0_rtx\n");
4426 #endif
4427       return false;
4428     }
4429   if (GET_CODE (pp) != SET)
4430     {
4431 #if DEBUG_CMP
4432       fprintf (stderr, "pp not set\n");
4433 #endif
4434       return false;
4435     }
4436   if (!rtx_equal_p (op0, SET_SRC (pp))
4437       && !rtx_equal_p (op0, SET_DEST (pp)))
4438     {
4439 #if DEBUG_CMP
4440       fprintf(stderr, "operands[0] not found in set\n");
4441 #endif
4442       return false;
4443     }
4444 
4445 #if DEBUG_CMP
4446   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4447 #endif
4448   if ((pflags & flags_needed) == flags_needed)
4449     return true;
4450 
4451   return false;
4452 }
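
/* Editorial illustration, assuming the move patterns are described as
   setting the S and Z flags: in a sequence like

       mov.w  foo,r0
       cmp.w  #0,r0
       jeq    .L1

   the branch only needs Z, the compare is against zero, and r0 appears
   as the destination of the previous SET, so the checks above declare
   the compare redundant and it is emitted only as a comment.  */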
4453 
4454 /* Return the pattern for a compare.  This will be commented out if
4455    the compare is redundant, else a normal pattern is returned.  Thus,
4456    the assembler output says where the compare would have been.  */
4457 char *
4458 m32c_output_compare (rtx_insn *insn, rtx *operands)
4459 {
4460   static char templ[] = ";cmp.b\t%1,%0";
4461   /*                             ^ 5  */
4462 
4463   templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4464   if (m32c_compare_redundant (insn, operands))
4465     {
4466 #if DEBUG_CMP
4467       fprintf(stderr, "cbranch: cmp not needed\n");
4468 #endif
4469       return templ;
4470     }
4471 
4472 #if DEBUG_CMP
4473   fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4474 #endif
4475   return templ + 1;
4476 }
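
/* Editorial illustration: for an HImode comparison the mode letter
   becomes 'w' (" bwll"[2]), so a needed compare prints as
   "cmp.w\t%1,%0" while a redundant one keeps the leading semicolon and
   appears in the .s file only as the comment ";cmp.w\t%1,%0".  */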
4477 
4478 #undef TARGET_ENCODE_SECTION_INFO
4479 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4480 
4481 /* If the frame pointer isn't used, we detect it manually.  But the
4482    stack pointer doesn't have as flexible addressing as the frame
4483    pointer, so we always assume we have it.  */
4484 
4485 #undef TARGET_FRAME_POINTER_REQUIRED
4486 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4487 
4488 #undef TARGET_HARD_REGNO_NREGS
4489 #define TARGET_HARD_REGNO_NREGS m32c_hard_regno_nregs
4490 #undef TARGET_HARD_REGNO_MODE_OK
4491 #define TARGET_HARD_REGNO_MODE_OK m32c_hard_regno_mode_ok
4492 #undef TARGET_MODES_TIEABLE_P
4493 #define TARGET_MODES_TIEABLE_P m32c_modes_tieable_p
4494 
4495 #undef TARGET_CAN_CHANGE_MODE_CLASS
4496 #define TARGET_CAN_CHANGE_MODE_CLASS m32c_can_change_mode_class
4497 
4498 /* The Global `targetm' Variable. */
4499 
4500 struct gcc_target targetm = TARGET_INITIALIZER;
4501 
4502 #include "gt-m32c.h"
4503