1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2    Copyright (C) 1996-2017 Free Software Foundation, Inc.
3    Contributed by Jeff Law (law@cygnus.com).
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3, or (at your option)
10    any later version.
11 
12    GCC is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "cfghooks.h"
29 #include "cfgloop.h"
30 #include "df.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "diagnostic-core.h"
38 #include "alias.h"
39 #include "stor-layout.h"
40 #include "varasm.h"
41 #include "calls.h"
42 #include "output.h"
43 #include "insn-attr.h"
44 #include "reload.h"
45 #include "explow.h"
46 #include "expr.h"
47 #include "tm-constrs.h"
48 #include "cfgrtl.h"
49 #include "dumpfile.h"
50 #include "builtins.h"
51 
52 /* This file should be included last.  */
53 #include "target-def.h"
54 
55 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
56    names are not prefixed by underscores, to tell whether to prefix a
57    label with a plus sign or not, so that the assembler can tell
58    symbol names from register names.  */
59 int mn10300_protect_label;
60 
61 /* Selected processor type for tuning.  */
62 enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
63 
64 #define CC_FLAG_Z	1
65 #define CC_FLAG_N	2
66 #define CC_FLAG_C	4
67 #define CC_FLAG_V	8
68 
69 static int cc_flags_for_mode(machine_mode);
70 static int cc_flags_for_code(enum rtx_code);
71 
72 /* Implement TARGET_OPTION_OVERRIDE.  */
73 static void
74 mn10300_option_override (void)
75 {
76   if (TARGET_AM33)
77     target_flags &= ~MASK_MULT_BUG;
78   else
79     {
80       /* Disable scheduling for the MN10300 as we do
81 	 not have timing information available for it.  */
82       flag_schedule_insns = 0;
83       flag_schedule_insns_after_reload = 0;
84 
85       /* Force enable splitting of wide types, as otherwise it is trivial
86 	 to run out of registers.  Indeed, this works so well that register
87 	 allocation problems are now more common *without* optimization,
88 	 when this flag is not enabled by default.  */
89       flag_split_wide_types = 1;
90     }
91 
92   if (mn10300_tune_string)
93     {
94       if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
95 	mn10300_tune_cpu = PROCESSOR_MN10300;
96       else if (strcasecmp (mn10300_tune_string, "am33") == 0)
97 	mn10300_tune_cpu = PROCESSOR_AM33;
98       else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
99 	mn10300_tune_cpu = PROCESSOR_AM33_2;
100       else if (strcasecmp (mn10300_tune_string, "am34") == 0)
101 	mn10300_tune_cpu = PROCESSOR_AM34;
102       else
103 	error ("-mtune= expects mn10300, am33, am33-2, or am34");
104     }
105 }
106 
107 static void
108 mn10300_file_start (void)
109 {
110   default_file_start ();
111 
112   if (TARGET_AM33_2)
113     fprintf (asm_out_file, "\t.am33_2\n");
114   else if (TARGET_AM33)
115     fprintf (asm_out_file, "\t.am33\n");
116 }
117 
118 /* Note: This list must match the liw_op attribute in mn10300.md.  */
119 
120 static const char *liw_op_names[] =
121 {
122   "add", "cmp", "sub", "mov",
123   "and", "or", "xor",
124   "asr", "lsr", "asl",
125   "none", "max"
126 };
127 
128 /* Print operand X using operand code CODE to assembly language output file
129    FILE.  */
130 
131 void
132 mn10300_print_operand (FILE *file, rtx x, int code)
133 {
134   switch (code)
135     {
136     case 'W':
137       {
138 	unsigned int liw_op = UINTVAL (x);
139 
140 	gcc_assert (TARGET_ALLOW_LIW);
141 	gcc_assert (liw_op < LIW_OP_MAX);
142 	fputs (liw_op_names[liw_op], file);
143 	break;
144       }
145 
146     case 'b':
147     case 'B':
148       {
149 	enum rtx_code cmp = GET_CODE (x);
150 	machine_mode mode = GET_MODE (XEXP (x, 0));
151 	const char *str;
152 	int have_flags;
153 
154 	if (code == 'B')
155 	  cmp = reverse_condition (cmp);
156 	have_flags = cc_flags_for_mode (mode);
157 
158 	switch (cmp)
159 	  {
160 	  case NE:
161 	    str = "ne";
162 	    break;
163 	  case EQ:
164 	    str = "eq";
165 	    break;
166 	  case GE:
167 	    /* bge is smaller than bnc.  */
168 	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
169 	    break;
170 	  case LT:
171 	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
172 	    break;
173 	  case GT:
174 	    str = "gt";
175 	    break;
176 	  case LE:
177 	    str = "le";
178 	    break;
179 	  case GEU:
180 	    str = "cc";
181 	    break;
182 	  case GTU:
183 	    str = "hi";
184 	    break;
185 	  case LEU:
186 	    str = "ls";
187 	    break;
188 	  case LTU:
189 	    str = "cs";
190 	    break;
191 	  case ORDERED:
192 	    str = "lge";
193 	    break;
194 	  case UNORDERED:
195 	    str = "uo";
196 	    break;
197 	  case LTGT:
198 	    str = "lg";
199 	    break;
200 	  case UNEQ:
201 	    str = "ue";
202 	    break;
203 	  case UNGE:
204 	    str = "uge";
205 	    break;
206 	  case UNGT:
207 	    str = "ug";
208 	    break;
209 	  case UNLE:
210 	    str = "ule";
211 	    break;
212 	  case UNLT:
213 	    str = "ul";
214 	    break;
215 	  default:
216 	    gcc_unreachable ();
217 	  }
218 
219 	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
220 	fputs (str, file);
221       }
222       break;
223 
224     case 'C':
225       /* This is used for the operand to a call instruction;
226 	 if it's a REG, enclose it in parens, else output
227 	 the operand normally.  */
228       if (REG_P (x))
229 	{
230 	  fputc ('(', file);
231 	  mn10300_print_operand (file, x, 0);
232 	  fputc (')', file);
233 	}
234       else
235 	mn10300_print_operand (file, x, 0);
236       break;
237 
238     case 'D':
239       switch (GET_CODE (x))
240 	{
241 	case MEM:
242 	  fputc ('(', file);
243 	  output_address (GET_MODE (x), XEXP (x, 0));
244 	  fputc (')', file);
245 	  break;
246 
247 	case REG:
248 	  fprintf (file, "fd%d", REGNO (x) - 18);
249 	  break;
250 
251 	default:
252 	  gcc_unreachable ();
253 	}
254       break;
255 
256       /* These are the least significant word in a 64bit value.  */
257     case 'L':
258       switch (GET_CODE (x))
259 	{
260 	case MEM:
261 	  fputc ('(', file);
262 	  output_address (GET_MODE (x), XEXP (x, 0));
263 	  fputc (')', file);
264 	  break;
265 
266 	case REG:
267 	  fprintf (file, "%s", reg_names[REGNO (x)]);
268 	  break;
269 
270 	case SUBREG:
271 	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
272 	  break;
273 
274 	case CONST_DOUBLE:
275 	  {
276 	    long val[2];
277 
278 	    switch (GET_MODE (x))
279 	      {
280 	      case DFmode:
281 		REAL_VALUE_TO_TARGET_DOUBLE
282 		  (*CONST_DOUBLE_REAL_VALUE (x), val);
283 		fprintf (file, "0x%lx", val[0]);
284 		break;;
285 	      case SFmode:
286 		REAL_VALUE_TO_TARGET_SINGLE
287 		  (*CONST_DOUBLE_REAL_VALUE (x), val[0]);
288 		fprintf (file, "0x%lx", val[0]);
289 		break;;
290 	      case VOIDmode:
291 	      case DImode:
292 		mn10300_print_operand_address (file,
293 					       GEN_INT (CONST_DOUBLE_LOW (x)));
294 		break;
295 	      default:
296 		break;
297 	      }
298 	    break;
299 	  }
300 
301 	case CONST_INT:
302 	  {
303 	    rtx low, high;
304 	    split_double (x, &low, &high);
305 	    fprintf (file, "%ld", (long)INTVAL (low));
306 	    break;
307 	    }
308 
309 	default:
310 	  gcc_unreachable ();
311 	}
312       break;
313 
314       /* Similarly, but for the most significant word.  */
315     case 'H':
316       switch (GET_CODE (x))
317 	{
318 	case MEM:
319 	  fputc ('(', file);
320 	  x = adjust_address (x, SImode, 4);
321 	  output_address (GET_MODE (x), XEXP (x, 0));
322 	  fputc (')', file);
323 	  break;
324 
325 	case REG:
326 	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
327 	  break;
328 
329 	case SUBREG:
330 	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
331 	  break;
332 
333 	case CONST_DOUBLE:
334 	  {
335 	    long val[2];
336 
337 	    switch (GET_MODE (x))
338 	      {
339 	      case DFmode:
340 		REAL_VALUE_TO_TARGET_DOUBLE
341 		  (*CONST_DOUBLE_REAL_VALUE (x), val);
342 		fprintf (file, "0x%lx", val[1]);
343 		break;;
344 	      case SFmode:
345 		gcc_unreachable ();
346 	      case VOIDmode:
347 	      case DImode:
348 		mn10300_print_operand_address (file,
349 					       GEN_INT (CONST_DOUBLE_HIGH (x)));
350 		break;
351 	      default:
352 		break;
353 	      }
354 	    break;
355 	  }
356 
357 	case CONST_INT:
358 	  {
359 	    rtx low, high;
360 	    split_double (x, &low, &high);
361 	    fprintf (file, "%ld", (long)INTVAL (high));
362 	    break;
363 	  }
364 
365 	default:
366 	  gcc_unreachable ();
367 	}
368       break;
369 
370     case 'A':
371       fputc ('(', file);
372       if (REG_P (XEXP (x, 0)))
373 	output_address (VOIDmode, gen_rtx_PLUS (SImode,
374 						XEXP (x, 0), const0_rtx));
375       else
376 	output_address (VOIDmode, XEXP (x, 0));
377       fputc (')', file);
378       break;
379 
380     case 'N':
381       gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
382       fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
383       break;
384 
385     case 'U':
386       gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
387       fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
388       break;
389 
390       /* For shift counts.  The hardware ignores the upper bits of
391 	 any immediate, but the assembler will flag an out of range
392 	 shift count as an error.  So we mask off the high bits
393 	 of the immediate here.  */
394     case 'S':
395       if (CONST_INT_P (x))
396 	{
397 	  fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
398 	  break;
399 	}
400       /* FALL THROUGH */
401 
402     default:
403       switch (GET_CODE (x))
404 	{
405 	case MEM:
406 	  fputc ('(', file);
407 	  output_address (GET_MODE (x), XEXP (x, 0));
408 	  fputc (')', file);
409 	  break;
410 
411 	case PLUS:
412 	  output_address (VOIDmode, x);
413 	  break;
414 
415 	case REG:
416 	  fprintf (file, "%s", reg_names[REGNO (x)]);
417 	  break;
418 
419 	case SUBREG:
420 	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
421 	  break;
422 
423 	  /* This will only be single precision....  */
424 	case CONST_DOUBLE:
425 	  {
426 	    unsigned long val;
427 
428 	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
429 	    fprintf (file, "0x%lx", val);
430 	    break;
431 	  }
432 
433 	case CONST_INT:
434 	case SYMBOL_REF:
435 	case CONST:
436 	case LABEL_REF:
437 	case CODE_LABEL:
438 	case UNSPEC:
439 	  mn10300_print_operand_address (file, x);
440 	  break;
441 	default:
442 	  gcc_unreachable ();
443 	}
444       break;
445     }
446 }
447 
448 /* Output assembly language output for the address ADDR to FILE.  */
449 
450 void
451 mn10300_print_operand_address (FILE *file, rtx addr)
452 {
453   switch (GET_CODE (addr))
454     {
455     case POST_INC:
456       mn10300_print_operand (file, XEXP (addr, 0), 0);
457       fputc ('+', file);
458       break;
459 
460     case POST_MODIFY:
461       mn10300_print_operand (file, XEXP (addr, 0), 0);
462       fputc ('+', file);
463       fputc (',', file);
464       mn10300_print_operand (file, XEXP (addr, 1), 0);
465       break;
466 
467     case REG:
468       mn10300_print_operand (file, addr, 0);
469       break;
470     case PLUS:
471       {
472 	rtx base = XEXP (addr, 0);
473 	rtx index = XEXP (addr, 1);
474 
475 	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
476 	  {
477 	    rtx x = base;
478 	    base = index;
479 	    index = x;
480 
481 	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
482 	  }
483 	gcc_assert (REG_OK_FOR_BASE_P (base));
484 
485 	mn10300_print_operand (file, index, 0);
486 	fputc (',', file);
487 	mn10300_print_operand (file, base, 0);
488 	break;
489       }
490     case SYMBOL_REF:
491       output_addr_const (file, addr);
492       break;
493     default:
494       output_addr_const (file, addr);
495       break;
496     }
497 }
498 
499 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
500 
501    Used for PIC-specific UNSPECs.  */
502 
503 static bool
504 mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
505 {
506   if (GET_CODE (x) == UNSPEC)
507     {
508       switch (XINT (x, 1))
509 	{
510 	case UNSPEC_PIC:
511 	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
512 	  output_addr_const (file, XVECEXP (x, 0, 0));
513 	  break;
514 	case UNSPEC_GOT:
515 	  output_addr_const (file, XVECEXP (x, 0, 0));
516 	  fputs ("@GOT", file);
517 	  break;
518 	case UNSPEC_GOTOFF:
519 	  output_addr_const (file, XVECEXP (x, 0, 0));
520 	  fputs ("@GOTOFF", file);
521 	  break;
522 	case UNSPEC_PLT:
523 	  output_addr_const (file, XVECEXP (x, 0, 0));
524 	  fputs ("@PLT", file);
525 	  break;
526 	case UNSPEC_GOTSYM_OFF:
527 	  assemble_name (file, GOT_SYMBOL_NAME);
528 	  fputs ("-(", file);
529 	  output_addr_const (file, XVECEXP (x, 0, 0));
530 	  fputs ("-.)", file);
531 	  break;
532 	default:
533 	  return false;
534 	}
535       return true;
536     }
537   else
538     return false;
539 }
540 
541 /* Count the number of FP registers that have to be saved.  */
542 static int
543 fp_regs_to_save (void)
544 {
545   int i, n = 0;
546 
547   if (! TARGET_AM33_2)
548     return 0;
549 
550   for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
551     if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
552       ++n;
553 
554   return n;
555 }
556 
557 /* Print a set of registers in the format required by "movm" and "ret".
558    Register K is saved if bit K of MASK is set.  The data and address
559    registers can be stored individually, but the extended registers cannot.
560    We assume that the mask already takes that into account.  For instance,
561    bits 14 to 17 must have the same value.  */
562 
563 void
564 mn10300_print_reg_list (FILE *file, int mask)
565 {
566   int need_comma;
567   int i;
568 
569   need_comma = 0;
570   fputc ('[', file);
571 
572   for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
573     if ((mask & (1 << i)) != 0)
574       {
575 	if (need_comma)
576 	  fputc (',', file);
577 	fputs (reg_names [i], file);
578 	need_comma = 1;
579       }
580 
581   if ((mask & 0x3c000) != 0)
582     {
583       gcc_assert ((mask & 0x3c000) == 0x3c000);
584       if (need_comma)
585 	fputc (',', file);
586       fputs ("exreg1", file);
587       need_comma = 1;
588     }
589 
590   fputc (']', file);
591 }
592 
593 /* If the MDR register is never clobbered, we can use the RETF instruction
594    which takes the address from the MDR register.  This is 3 cycles faster
595    than having to load the address from the stack.  */
596 
597 bool
598 mn10300_can_use_retf_insn (void)
599 {
600   /* Don't bother if we're not optimizing.  In this case we won't
601      have proper access to df_regs_ever_live_p.  */
602   if (!optimize)
603     return false;
604 
605   /* EH returns alter the saved return address; MDR is not current.  */
606   if (crtl->calls_eh_return)
607     return false;
608 
609   /* Obviously not if MDR is ever clobbered.  */
610   if (df_regs_ever_live_p (MDR_REG))
611     return false;
612 
613   /* ??? Careful not to use this during expand_epilogue etc.  */
614   gcc_assert (!in_sequence_p ());
615   return leaf_function_p ();
616 }
617 
618 bool
619 mn10300_can_use_rets_insn (void)
620 {
621   return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
622 }
623 
624 /* Returns the set of live, callee-saved registers as a bitmask.  The
625    callee-saved extended registers cannot be stored individually, so
626    all of them will be included in the mask if any one of them is used.
627    Also returns the number of bytes in the registers in the mask if
628    BYTES_SAVED is not NULL.  */
629 
630 unsigned int
631 mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
632 {
633   int mask;
634   int i;
635   unsigned int count;
636 
637   count = mask = 0;
638   for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
639     if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
640       {
641 	mask |= (1 << i);
642 	++ count;
643       }
644 
645   if ((mask & 0x3c000) != 0)
646     {
647       for (i = 0x04000; i < 0x40000; i <<= 1)
648 	if ((mask & i) == 0)
649 	  ++ count;
650 
651       mask |= 0x3c000;
652     }
653 
654   if (bytes_saved)
655     * bytes_saved = count * UNITS_PER_WORD;
656 
657   return mask;
658 }
659 
660 static rtx
661 F (rtx r)
662 {
663   RTX_FRAME_RELATED_P (r) = 1;
664   return r;
665 }
666 
667 /* Generate an instruction that pushes several registers onto the stack.
668    Register K will be saved if bit K in MASK is set.  The function does
669    nothing if MASK is zero.
670 
671    To be compatible with the "movm" instruction, the lowest-numbered
672    register must be stored in the lowest slot.  If MASK is the set
673    { R1,...,RN }, where R1...RN are ordered least first, the generated
674    instruction will have the form:
675 
676        (parallel
677          (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
678 	 (set (mem:SI (plus:SI (reg:SI 9)
679 	                       (const_int -1*4)))
680 	      (reg:SI RN))
681 	 ...
682 	 (set (mem:SI (plus:SI (reg:SI 9)
683 	                       (const_int -N*4)))
684 	      (reg:SI R1))) */
685 
686 static void
687 mn10300_gen_multiple_store (unsigned int mask)
688 {
689   /* The order in which registers are stored, from SP-4 through SP-N*4.  */
690   static const unsigned int store_order[8] = {
691     /* e2, e3: never saved */
692     FIRST_EXTENDED_REGNUM + 4,
693     FIRST_EXTENDED_REGNUM + 5,
694     FIRST_EXTENDED_REGNUM + 6,
695     FIRST_EXTENDED_REGNUM + 7,
696     /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
697     FIRST_DATA_REGNUM + 2,
698     FIRST_DATA_REGNUM + 3,
699     FIRST_ADDRESS_REGNUM + 2,
700     FIRST_ADDRESS_REGNUM + 3,
701     /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
702   };
703 
704   rtx x, elts[9];
705   unsigned int i;
706   int count;
707 
708   if (mask == 0)
709     return;
710 
711   for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
712     {
713       unsigned regno = store_order[i];
714 
715       if (((mask >> regno) & 1) == 0)
716 	continue;
717 
718       ++count;
719       x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
720       x = gen_frame_mem (SImode, x);
721       x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
722       elts[count] = F(x);
723 
724       /* Remove the register from the mask so that... */
725       mask &= ~(1u << regno);
726     }
727 
728   /* ... we can make sure that we didn't try to use a register
729      not listed in the store order.  */
730   gcc_assert (mask == 0);
731 
732   /* Create the instruction that updates the stack pointer.  */
733   x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
734   x = gen_rtx_SET (stack_pointer_rtx, x);
735   elts[0] = F(x);
736 
737   /* We need one PARALLEL element to update the stack pointer and
738      an additional element for each register that is stored.  */
739   x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
740   F (emit_insn (x));
741 }
742 
743 static inline unsigned int
744 popcount (unsigned int mask)
745 {
746   unsigned int count = 0;
747 
748   while (mask)
749     {
750       ++ count;
751       mask &= ~ (mask & - mask);
752     }
753   return count;
754 }
755 
756 void
757 mn10300_expand_prologue (void)
758 {
759   HOST_WIDE_INT size = mn10300_frame_size ();
760   unsigned int mask;
761 
762   mask = mn10300_get_live_callee_saved_regs (NULL);
763   /* If we use any of the callee-saved registers, save them now.  */
764   mn10300_gen_multiple_store (mask);
765 
766   if (flag_stack_usage_info)
767     current_function_static_stack_size = size + popcount (mask) * 4;
768 
769   if (TARGET_AM33_2 && fp_regs_to_save ())
770     {
771       int num_regs_to_save = fp_regs_to_save (), i;
772       HOST_WIDE_INT xsize;
773       enum
774       {
775 	save_sp_merge,
776 	save_sp_no_merge,
777 	save_sp_partial_merge,
778 	save_a0_merge,
779 	save_a0_no_merge
780       } strategy;
781       unsigned int strategy_size = (unsigned)-1, this_strategy_size;
782       rtx reg;
783 
784       if (flag_stack_usage_info)
785 	current_function_static_stack_size += num_regs_to_save * 4;
786 
787       /* We have several different strategies to save FP registers.
788 	 We can store them using SP offsets, which is beneficial if
789 	 there are just a few registers to save, or we can use `a0' in
790 	 post-increment mode (`a0' is the only call-clobbered address
791 	 register that is never used to pass information to a
792 	 function).  Furthermore, if we don't need a frame pointer, we
793 	 can merge the two SP adds into a single one, but this isn't
794 	 always beneficial; sometimes we can just split the two adds
795 	 so that we don't exceed a 16-bit constant size.  The code
796 	 below will select which strategy to use, so as to generate
797 	 smallest code.  Ties are broken in favor or shorter sequences
798 	 (in terms of number of instructions).  */
799 
800 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
801 			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
802 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
803 			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
804 
805 /* We add 0 * (S) in two places to promote to the type of S,
806    so that all arms of the conditional have the same type.  */
807 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
808   (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
809    : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
810 			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
811    : 0 * (S) + (ELSE))
812 #define SIZE_FMOV_SP_(S,N) \
813   (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
814                    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
815 				    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
816 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
817 
818       /* Consider alternative save_sp_merge only if we don't need the
819 	 frame pointer and size is nonzero.  */
820       if (! frame_pointer_needed && size)
821 	{
822 	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
823 	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
824 	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
825 	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
826 
827 	  if (this_strategy_size < strategy_size)
828 	    {
829 	      strategy = save_sp_merge;
830 	      strategy_size = this_strategy_size;
831 	    }
832 	}
833 
834       /* Consider alternative save_sp_no_merge unconditionally.  */
835       /* Insn: add -4 * num_regs_to_save, sp.  */
836       this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
837       /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
838       this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
839       if (size)
840 	{
841 	  /* Insn: add -size, sp.  */
842 	  this_strategy_size += SIZE_ADD_SP (-size);
843 	}
844 
845       if (this_strategy_size < strategy_size)
846 	{
847 	  strategy = save_sp_no_merge;
848 	  strategy_size = this_strategy_size;
849 	}
850 
851       /* Consider alternative save_sp_partial_merge only if we don't
852 	 need a frame pointer and size is reasonably large.  */
853       if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
854 	{
855 	  /* Insn: add -128, sp.  */
856 	  this_strategy_size = SIZE_ADD_SP (-128);
857 	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
858 	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
859 					      num_regs_to_save);
860 	  if (size)
861 	    {
862 	      /* Insn: add 128-size, sp.  */
863 	      this_strategy_size += SIZE_ADD_SP (128 - size);
864 	    }
865 
866 	  if (this_strategy_size < strategy_size)
867 	    {
868 	      strategy = save_sp_partial_merge;
869 	      strategy_size = this_strategy_size;
870 	    }
871 	}
872 
873       /* Consider alternative save_a0_merge only if we don't need a
874 	 frame pointer, size is nonzero and the user hasn't
875 	 changed the calling conventions of a0.  */
876       if (! frame_pointer_needed && size
877 	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
878 	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
879 	{
880 	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
881 	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
882 	  /* Insn: mov sp, a0.  */
883 	  this_strategy_size++;
884 	  if (size)
885 	    {
886 	      /* Insn: add size, a0.  */
887 	      this_strategy_size += SIZE_ADD_AX (size);
888 	    }
889 	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
890 	  this_strategy_size += 3 * num_regs_to_save;
891 
892 	  if (this_strategy_size < strategy_size)
893 	    {
894 	      strategy = save_a0_merge;
895 	      strategy_size = this_strategy_size;
896 	    }
897 	}
898 
899       /* Consider alternative save_a0_no_merge if the user hasn't
900 	 changed the calling conventions of a0.  */
901       if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
902 	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
903 	{
904 	  /* Insn: add -4 * num_regs_to_save, sp.  */
905 	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
906 	  /* Insn: mov sp, a0.  */
907 	  this_strategy_size++;
908 	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
909 	  this_strategy_size += 3 * num_regs_to_save;
910 	  if (size)
911 	    {
912 	      /* Insn: add -size, sp.  */
913 	      this_strategy_size += SIZE_ADD_SP (-size);
914 	    }
915 
916 	  if (this_strategy_size < strategy_size)
917 	    {
918 	      strategy = save_a0_no_merge;
919 	      strategy_size = this_strategy_size;
920 	    }
921 	}
922 
923       /* Emit the initial SP add, common to all strategies.  */
924       switch (strategy)
925 	{
926 	case save_sp_no_merge:
927 	case save_a0_no_merge:
928 	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
929 				    stack_pointer_rtx,
930 				    GEN_INT (-4 * num_regs_to_save))));
931 	  xsize = 0;
932 	  break;
933 
934 	case save_sp_partial_merge:
935 	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
936 				    stack_pointer_rtx,
937 				    GEN_INT (-128))));
938 	  xsize = 128 - 4 * num_regs_to_save;
939 	  size -= xsize;
940 	  break;
941 
942 	case save_sp_merge:
943 	case save_a0_merge:
944 	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
945 				    stack_pointer_rtx,
946 				    GEN_INT (-(size + 4 * num_regs_to_save)))));
947 	  /* We'll have to adjust FP register saves according to the
948 	     frame size.  */
949 	  xsize = size;
950 	  /* Since we've already created the stack frame, don't do it
951 	     again at the end of the function.  */
952 	  size = 0;
953 	  break;
954 
955 	default:
956 	  gcc_unreachable ();
957 	}
958 
959       /* Now prepare register a0, if we have decided to use it.  */
960       switch (strategy)
961 	{
962 	case save_sp_merge:
963 	case save_sp_no_merge:
964 	case save_sp_partial_merge:
965 	  reg = 0;
966 	  break;
967 
968 	case save_a0_merge:
969 	case save_a0_no_merge:
970 	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
971 	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
972 	  if (xsize)
973 	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
974 	  reg = gen_rtx_POST_INC (SImode, reg);
975 	  break;
976 
977 	default:
978 	  gcc_unreachable ();
979 	}
980 
981       /* Now actually save the FP registers.  */
982       for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
983 	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
984 	  {
985 	    rtx addr;
986 
987 	    if (reg)
988 	      addr = reg;
989 	    else
990 	      {
991 		/* If we aren't using `a0', use an SP offset.  */
992 		if (xsize)
993 		  {
994 		    addr = gen_rtx_PLUS (SImode,
995 					 stack_pointer_rtx,
996 					 GEN_INT (xsize));
997 		  }
998 		else
999 		  addr = stack_pointer_rtx;
1000 
1001 		xsize += 4;
1002 	      }
1003 
1004 	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
1005 				     gen_rtx_REG (SFmode, i))));
1006 	  }
1007     }
1008 
1009   /* Now put the frame pointer into the frame pointer register.  */
1010   if (frame_pointer_needed)
1011     F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
1012 
1013   /* Allocate stack for this frame.  */
1014   if (size)
1015     F (emit_insn (gen_addsi3 (stack_pointer_rtx,
1016 			      stack_pointer_rtx,
1017 			      GEN_INT (-size))));
1018 
1019   if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1020     emit_insn (gen_load_pic ());
1021 }
1022 
1023 void
1024 mn10300_expand_epilogue (void)
1025 {
1026   HOST_WIDE_INT size = mn10300_frame_size ();
1027   unsigned int reg_save_bytes;
1028 
1029   mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1030 
1031   if (TARGET_AM33_2 && fp_regs_to_save ())
1032     {
1033       int num_regs_to_save = fp_regs_to_save (), i;
1034       rtx reg = 0;
1035 
1036       /* We have several options to restore FP registers.  We could
1037 	 load them from SP offsets, but, if there are enough FP
1038 	 registers to restore, we win if we use a post-increment
1039 	 addressing mode.  */
1040 
1041       /* If we have a frame pointer, it's the best option, because we
1042 	 already know it has the value we want.  */
1043       if (frame_pointer_needed)
1044 	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1045       /* Otherwise, we may use `a1', since it's call-clobbered and
1046 	 it's never used for return values.  But only do so if it's
1047 	 smaller than using SP offsets.  */
1048       else
1049 	{
1050 	  enum { restore_sp_post_adjust,
1051 		 restore_sp_pre_adjust,
1052 		 restore_sp_partial_adjust,
1053 		 restore_a1 } strategy;
1054 	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1055 
1056 	  /* Consider using sp offsets before adjusting sp.  */
1057 	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
1058 	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1059 	  /* If size is too large, we'll have to adjust SP with an
1060 		 add.  */
1061 	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1062 	    {
1063 	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
1064 	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1065 	    }
1066 	  /* If we don't have to restore any non-FP registers,
1067 		 we'll be able to save one byte by using rets.  */
1068 	  if (! reg_save_bytes)
1069 	    this_strategy_size--;
1070 
1071 	  if (this_strategy_size < strategy_size)
1072 	    {
1073 	      strategy = restore_sp_post_adjust;
1074 	      strategy_size = this_strategy_size;
1075 	    }
1076 
1077 	  /* Consider using sp offsets after adjusting sp.  */
1078 	  /* Insn: add size, sp.  */
1079 	  this_strategy_size = SIZE_ADD_SP (size);
1080 	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
1081 	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1082 	  /* We're going to use ret to release the FP registers
1083 		 save area, so, no savings.  */
1084 
1085 	  if (this_strategy_size < strategy_size)
1086 	    {
1087 	      strategy = restore_sp_pre_adjust;
1088 	      strategy_size = this_strategy_size;
1089 	    }
1090 
1091 	  /* Consider using sp offsets after partially adjusting sp.
1092 	     When size is close to 32Kb, we may be able to adjust SP
1093 	     with an imm16 add instruction while still using fmov
1094 	     (d8,sp).  */
1095 	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1096 	    {
1097 	      /* Insn: add size + 4 * num_regs_to_save
1098 				+ reg_save_bytes - 252,sp.  */
1099 	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
1100 						+ (int) reg_save_bytes - 252);
1101 	      /* Insn: fmov (##,sp),fs#, fo each fs# to be restored.  */
1102 	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
1103 						  - 4 * num_regs_to_save,
1104 						  num_regs_to_save);
1105 	      /* We're going to use ret to release the FP registers
1106 		 save area, so, no savings.  */
1107 
1108 	      if (this_strategy_size < strategy_size)
1109 		{
1110 		  strategy = restore_sp_partial_adjust;
1111 		  strategy_size = this_strategy_size;
1112 		}
1113 	    }
1114 
1115 	  /* Consider using a1 in post-increment mode, as long as the
1116 	     user hasn't changed the calling conventions of a1.  */
1117 	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
1118 	      && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1119 	    {
1120 	      /* Insn: mov sp,a1.  */
1121 	      this_strategy_size = 1;
1122 	      if (size)
1123 		{
1124 		  /* Insn: add size,a1.  */
1125 		  this_strategy_size += SIZE_ADD_AX (size);
1126 		}
1127 	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
1128 	      this_strategy_size += 3 * num_regs_to_save;
1129 	      /* If size is large enough, we may be able to save a
1130 		 couple of bytes.  */
1131 	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1132 		{
1133 		  /* Insn: mov a1,sp.  */
1134 		  this_strategy_size += 2;
1135 		}
1136 	      /* If we don't have to restore any non-FP registers,
1137 		 we'll be able to save one byte by using rets.  */
1138 	      if (! reg_save_bytes)
1139 		this_strategy_size--;
1140 
1141 	      if (this_strategy_size < strategy_size)
1142 		{
1143 		  strategy = restore_a1;
1144 		  strategy_size = this_strategy_size;
1145 		}
1146 	    }
1147 
1148 	  switch (strategy)
1149 	    {
1150 	    case restore_sp_post_adjust:
1151 	      break;
1152 
1153 	    case restore_sp_pre_adjust:
1154 	      emit_insn (gen_addsi3 (stack_pointer_rtx,
1155 				     stack_pointer_rtx,
1156 				     GEN_INT (size)));
1157 	      size = 0;
1158 	      break;
1159 
1160 	    case restore_sp_partial_adjust:
1161 	      emit_insn (gen_addsi3 (stack_pointer_rtx,
1162 				     stack_pointer_rtx,
1163 				     GEN_INT (size + 4 * num_regs_to_save
1164 					      + reg_save_bytes - 252)));
1165 	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
1166 	      break;
1167 
1168 	    case restore_a1:
1169 	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1170 	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
1171 	      if (size)
1172 		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1173 	      break;
1174 
1175 	    default:
1176 	      gcc_unreachable ();
1177 	    }
1178 	}
1179 
1180       /* Adjust the selected register, if any, for post-increment.  */
1181       if (reg)
1182 	reg = gen_rtx_POST_INC (SImode, reg);
1183 
1184       for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1185 	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1186 	  {
1187 	    rtx addr;
1188 
1189 	    if (reg)
1190 	      addr = reg;
1191 	    else if (size)
1192 	      {
1193 		/* If we aren't using a post-increment register, use an
1194 		   SP offset.  */
1195 		addr = gen_rtx_PLUS (SImode,
1196 				     stack_pointer_rtx,
1197 				     GEN_INT (size));
1198 	      }
1199 	    else
1200 	      addr = stack_pointer_rtx;
1201 
1202 	    size += 4;
1203 
1204 	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1205 				  gen_rtx_MEM (SFmode, addr)));
1206 	  }
1207 
1208       /* If we were using the restore_a1 strategy and the number of
1209 	 bytes to be released won't fit in the `ret' byte, copy `a1'
1210 	 to `sp', to avoid having to use `add' to adjust it.  */
1211       if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
1212 	{
1213 	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1214 	  size = 0;
1215 	}
1216     }
1217 
1218   /* Maybe cut back the stack, except for the register save area.
1219 
1220      If the frame pointer exists, then use the frame pointer to
1221      cut back the stack.
1222 
1223      If the stack size + register save area is more than 255 bytes,
1224      then the stack must be cut back here since the size + register
1225      save size is too big for a ret/retf instruction.
1226 
1227      Else leave it alone, it will be cut back as part of the
1228      ret/retf instruction, or there wasn't any stack to begin with.
1229 
1230      Under no circumstances should the register save area be
1231      deallocated here, that would leave a window where an interrupt
1232      could occur and trash the register save area.  */
1233   if (frame_pointer_needed)
1234     {
1235       emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
1236       size = 0;
1237     }
1238   else if (size + reg_save_bytes > 255)
1239     {
1240       emit_insn (gen_addsi3 (stack_pointer_rtx,
1241 			     stack_pointer_rtx,
1242 			     GEN_INT (size)));
1243       size = 0;
1244     }
1245 
1246   /* Adjust the stack and restore callee-saved registers, if any.  */
1247   if (mn10300_can_use_rets_insn ())
1248     emit_jump_insn (ret_rtx);
1249   else
1250     emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
1251 }
1252 
1253 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1254    This function is for MATCH_PARALLEL and so assumes OP is known to be
1255    parallel.  If OP is a multiple store, return a mask indicating which
1256    registers it saves.  Return 0 otherwise.  */
1257 
1258 unsigned int
1259 mn10300_store_multiple_regs (rtx op)
1260 {
1261   int count;
1262   int mask;
1263   int i;
1264   unsigned int last;
1265   rtx elt;
1266 
1267   count = XVECLEN (op, 0);
1268   if (count < 2)
1269     return 0;
1270 
1271   /* Check that first instruction has the form (set (sp) (plus A B)) */
1272   elt = XVECEXP (op, 0, 0);
1273   if (GET_CODE (elt) != SET
1274       || (! REG_P (SET_DEST (elt)))
1275       || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1276       || GET_CODE (SET_SRC (elt)) != PLUS)
1277     return 0;
1278 
1279   /* Check that A is the stack pointer and B is the expected stack size.
1280      For OP to match, each subsequent instruction should push a word onto
1281      the stack.  We therefore expect the first instruction to create
1282      COUNT-1 stack slots.  */
1283   elt = SET_SRC (elt);
1284   if ((! REG_P (XEXP (elt, 0)))
1285       || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1286       || (! CONST_INT_P (XEXP (elt, 1)))
1287       || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1288     return 0;
1289 
1290   mask = 0;
1291   for (i = 1; i < count; i++)
1292     {
1293       /* Check that element i is a (set (mem M) R).  */
1294       /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1295 	 Remember: the ordering is *not* monotonic.  */
1296       elt = XVECEXP (op, 0, i);
1297       if (GET_CODE (elt) != SET
1298 	  || (! MEM_P (SET_DEST (elt)))
1299 	  || (! REG_P (SET_SRC (elt))))
1300 	return 0;
1301 
1302       /* Remember which registers are to be saved.  */
1303       last = REGNO (SET_SRC (elt));
1304       mask |= (1 << last);
1305 
1306       /* Check that M has the form (plus (sp) (const_int -I*4)) */
1307       elt = XEXP (SET_DEST (elt), 0);
1308       if (GET_CODE (elt) != PLUS
1309 	  || (! REG_P (XEXP (elt, 0)))
1310 	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1311 	  || (! CONST_INT_P (XEXP (elt, 1)))
1312 	  || INTVAL (XEXP (elt, 1)) != -i * 4)
1313 	return 0;
1314     }
1315 
1316   /* All or none of the callee-saved extended registers must be in the set.  */
1317   if ((mask & 0x3c000) != 0
1318       && (mask & 0x3c000) != 0x3c000)
1319     return 0;
1320 
1321   return mask;
1322 }
1323 
1324 /* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
1325 
1326 static reg_class_t
1327 mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1328 {
1329   if (x == stack_pointer_rtx && rclass != SP_REGS)
1330     return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1331   else if (MEM_P (x)
1332 	   || (REG_P (x)
1333 	       && !HARD_REGISTER_P (x))
1334 	   || (GET_CODE (x) == SUBREG
1335 	       && REG_P (SUBREG_REG (x))
1336 	       && !HARD_REGISTER_P (SUBREG_REG (x))))
1337     return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1338   else
1339     return rclass;
1340 }
1341 
1342 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
1343 
1344 static reg_class_t
1345 mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1346 {
1347   if (x == stack_pointer_rtx && rclass != SP_REGS)
1348     return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1349   return rclass;
1350 }
1351 
1352 /* Implement TARGET_SECONDARY_RELOAD.  */
1353 
1354 static reg_class_t
1355 mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1356 			  machine_mode mode, secondary_reload_info *sri)
1357 {
1358   enum reg_class rclass = (enum reg_class) rclass_i;
1359   enum reg_class xclass = NO_REGS;
1360   unsigned int xregno = INVALID_REGNUM;
1361 
1362   if (REG_P (x))
1363     {
1364       xregno = REGNO (x);
1365       if (xregno >= FIRST_PSEUDO_REGISTER)
1366 	xregno = true_regnum (x);
1367       if (xregno != INVALID_REGNUM)
1368 	xclass = REGNO_REG_CLASS (xregno);
1369     }
1370 
1371   if (!TARGET_AM33)
1372     {
1373       /* Memory load/stores less than a full word wide can't have an
1374          address or stack pointer destination.  They must use a data
1375          register as an intermediate register.  */
1376       if (rclass != DATA_REGS
1377 	  && (mode == QImode || mode == HImode)
1378 	  && xclass == NO_REGS)
1379 	return DATA_REGS;
1380 
1381       /* We can only move SP to/from an address register.  */
1382       if (in_p
1383 	  && rclass == SP_REGS
1384 	  && xclass != ADDRESS_REGS)
1385 	return ADDRESS_REGS;
1386       if (!in_p
1387 	  && xclass == SP_REGS
1388 	  && rclass != ADDRESS_REGS
1389 	  && rclass != SP_OR_ADDRESS_REGS)
1390 	return ADDRESS_REGS;
1391     }
1392 
1393   /* We can't directly load sp + const_int into a register;
1394      we must use an address register as an scratch.  */
1395   if (in_p
1396       && rclass != SP_REGS
1397       && rclass != SP_OR_ADDRESS_REGS
1398       && rclass != SP_OR_GENERAL_REGS
1399       && GET_CODE (x) == PLUS
1400       && (XEXP (x, 0) == stack_pointer_rtx
1401 	  || XEXP (x, 1) == stack_pointer_rtx))
1402     {
1403       sri->icode = CODE_FOR_reload_plus_sp_const;
1404       return NO_REGS;
1405     }
1406 
1407   /* We can only move MDR to/from a data register.  */
1408   if (rclass == MDR_REGS && xclass != DATA_REGS)
1409     return DATA_REGS;
1410   if (xclass == MDR_REGS && rclass != DATA_REGS)
1411     return DATA_REGS;
1412 
1413   /* We can't load/store an FP register from a constant address.  */
1414   if (TARGET_AM33_2
1415       && (rclass == FP_REGS || xclass == FP_REGS)
1416       && (xclass == NO_REGS || rclass == NO_REGS))
1417     {
1418       rtx addr = NULL;
1419 
1420       if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1421 	{
1422 	  addr = reg_equiv_mem (xregno);
1423 	  if (addr)
1424 	    addr = XEXP (addr, 0);
1425 	}
1426       else if (MEM_P (x))
1427 	addr = XEXP (x, 0);
1428 
1429       if (addr && CONSTANT_ADDRESS_P (addr))
1430 	return GENERAL_REGS;
1431     }
1432   /* Otherwise assume no secondary reloads are needed.  */
1433   return NO_REGS;
1434 }
1435 
1436 int
1437 mn10300_frame_size (void)
1438 {
1439   /* size includes the fixed stack space needed for function calls.  */
1440   int size = get_frame_size () + crtl->outgoing_args_size;
1441 
1442   /* And space for the return pointer.  */
1443   size += crtl->outgoing_args_size ? 4 : 0;
1444 
1445   return size;
1446 }
1447 
1448 int
1449 mn10300_initial_offset (int from, int to)
1450 {
1451   int diff = 0;
1452 
1453   gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1454   gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1455 
1456   if (to == STACK_POINTER_REGNUM)
1457     diff = mn10300_frame_size ();
1458 
1459   /* The difference between the argument pointer and the frame pointer
1460      is the size of the callee register save area.  */
1461   if (from == ARG_POINTER_REGNUM)
1462     {
1463       unsigned int reg_save_bytes;
1464 
1465       mn10300_get_live_callee_saved_regs (& reg_save_bytes);
1466       diff += reg_save_bytes;
1467       diff += 4 * fp_regs_to_save ();
1468     }
1469 
1470   return diff;
1471 }
1472 
1473 /* Worker function for TARGET_RETURN_IN_MEMORY.  */
1474 
1475 static bool
1476 mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1477 {
1478   /* Return values > 8 bytes in length in memory.  */
1479   return (int_size_in_bytes (type) > 8
1480 	  || int_size_in_bytes (type) == 0
1481 	  || TYPE_MODE (type) == BLKmode);
1482 }
1483 
1484 /* Flush the argument registers to the stack for a stdarg function;
1485    return the new argument pointer.  */
1486 static rtx
1487 mn10300_builtin_saveregs (void)
1488 {
1489   rtx offset, mem;
1490   tree fntype = TREE_TYPE (current_function_decl);
1491   int argadj = ((!stdarg_p (fntype))
1492                 ? UNITS_PER_WORD : 0);
1493   alias_set_type set = get_varargs_alias_set ();
1494 
1495   if (argadj)
1496     offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
1497   else
1498     offset = crtl->args.arg_offset_rtx;
1499 
1500   mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
1501   set_mem_alias_set (mem, set);
1502   emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1503 
1504   mem = gen_rtx_MEM (SImode,
1505 		     plus_constant (Pmode,
1506 				    crtl->args.internal_arg_pointer, 4));
1507   set_mem_alias_set (mem, set);
1508   emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1509 
1510   return copy_to_reg (expand_binop (Pmode, add_optab,
1511 				    crtl->args.internal_arg_pointer,
1512 				    offset, 0, 0, OPTAB_LIB_WIDEN));
1513 }
1514 
1515 static void
1516 mn10300_va_start (tree valist, rtx nextarg)
1517 {
1518   nextarg = expand_builtin_saveregs ();
1519   std_expand_builtin_va_start (valist, nextarg);
1520 }
1521 
1522 /* Return true when a parameter should be passed by reference.  */
1523 
1524 static bool
1525 mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
1526 			   machine_mode mode, const_tree type,
1527 			   bool named ATTRIBUTE_UNUSED)
1528 {
1529   unsigned HOST_WIDE_INT size;
1530 
1531   if (type)
1532     size = int_size_in_bytes (type);
1533   else
1534     size = GET_MODE_SIZE (mode);
1535 
1536   return (size > 8 || size == 0);
1537 }
1538 
1539 /* Return an RTX to represent where a value with mode MODE will be returned
1540    from a function.  If the result is NULL_RTX, the argument is pushed.  */
1541 
1542 static rtx
1543 mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
1544 		      const_tree type, bool named ATTRIBUTE_UNUSED)
1545 {
1546   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1547   rtx result = NULL_RTX;
1548   int size;
1549 
1550   /* We only support using 2 data registers as argument registers.  */
1551   int nregs = 2;
1552 
1553   /* Figure out the size of the object to be passed.  */
1554   if (mode == BLKmode)
1555     size = int_size_in_bytes (type);
1556   else
1557     size = GET_MODE_SIZE (mode);
1558 
1559   cum->nbytes = (cum->nbytes + 3) & ~3;
1560 
1561   /* Don't pass this arg via a register if all the argument registers
1562      are used up.  */
1563   if (cum->nbytes > nregs * UNITS_PER_WORD)
1564     return result;
1565 
1566   /* Don't pass this arg via a register if it would be split between
1567      registers and memory.  */
1568   if (type == NULL_TREE
1569       && cum->nbytes + size > nregs * UNITS_PER_WORD)
1570     return result;
1571 
1572   switch (cum->nbytes / UNITS_PER_WORD)
1573     {
1574     case 0:
1575       result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
1576       break;
1577     case 1:
1578       result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
1579       break;
1580     default:
1581       break;
1582     }
1583 
1584   return result;
1585 }
1586 
1587 /* Update the data in CUM to advance over an argument
1588    of mode MODE and data type TYPE.
1589    (TYPE is null for libcalls where that information may not be available.)  */
1590 
1591 static void
1592 mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
1593 			      const_tree type, bool named ATTRIBUTE_UNUSED)
1594 {
1595   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1596 
1597   cum->nbytes += (mode != BLKmode
1598 		  ? (GET_MODE_SIZE (mode) + 3) & ~3
1599 		  : (int_size_in_bytes (type) + 3) & ~3);
1600 }
1601 
1602 /* Return the number of bytes of registers to use for an argument passed
1603    partially in registers and partially in memory.  */
1604 
1605 static int
1606 mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
1607 			   tree type, bool named ATTRIBUTE_UNUSED)
1608 {
1609   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1610   int size;
1611 
1612   /* We only support using 2 data registers as argument registers.  */
1613   int nregs = 2;
1614 
1615   /* Figure out the size of the object to be passed.  */
1616   if (mode == BLKmode)
1617     size = int_size_in_bytes (type);
1618   else
1619     size = GET_MODE_SIZE (mode);
1620 
1621   cum->nbytes = (cum->nbytes + 3) & ~3;
1622 
1623   /* Don't pass this arg via a register if all the argument registers
1624      are used up.  */
1625   if (cum->nbytes > nregs * UNITS_PER_WORD)
1626     return 0;
1627 
1628   if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1629     return 0;
1630 
1631   /* Don't pass this arg via a register if it would be split between
1632      registers and memory.  */
1633   if (type == NULL_TREE
1634       && cum->nbytes + size > nregs * UNITS_PER_WORD)
1635     return 0;
1636 
1637   return nregs * UNITS_PER_WORD - cum->nbytes;
1638 }
1639 
1640 /* Return the location of the function's value.  This will be either
1641    $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1642    $d0 and $a0 if the -mreturn-pointer-on-do flag is set.  Note that
1643    we only return the PARALLEL for outgoing values; we do not want
1644    callers relying on this extra copy.  */
1645 
1646 static rtx
1647 mn10300_function_value (const_tree valtype,
1648 			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1649 			bool outgoing)
1650 {
1651   rtx rv;
1652   machine_mode mode = TYPE_MODE (valtype);
1653 
1654   if (! POINTER_TYPE_P (valtype))
1655     return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1656   else if (! TARGET_PTR_A0D0 || ! outgoing
1657 	   || cfun->returns_struct)
1658     return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1659 
1660   rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1661   XVECEXP (rv, 0, 0)
1662     = gen_rtx_EXPR_LIST (VOIDmode,
1663 			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1664 			 GEN_INT (0));
1665 
1666   XVECEXP (rv, 0, 1)
1667     = gen_rtx_EXPR_LIST (VOIDmode,
1668 			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1669 			 GEN_INT (0));
1670   return rv;
1671 }
1672 
1673 /* Implements TARGET_LIBCALL_VALUE.  */
1674 
1675 static rtx
1676 mn10300_libcall_value (machine_mode mode,
1677 		       const_rtx fun ATTRIBUTE_UNUSED)
1678 {
1679   return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1680 }
1681 
1682 /* Implements FUNCTION_VALUE_REGNO_P.  */
1683 
1684 bool
1685 mn10300_function_value_regno_p (const unsigned int regno)
1686 {
1687  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1688 }
1689 
1690 /* Output an addition operation.  */
1691 
1692 const char *
1693 mn10300_output_add (rtx operands[3], bool need_flags)
1694 {
1695   rtx dest, src1, src2;
1696   unsigned int dest_regnum, src1_regnum, src2_regnum;
1697   enum reg_class src1_class, src2_class, dest_class;
1698 
1699   dest = operands[0];
1700   src1 = operands[1];
1701   src2 = operands[2];
1702 
1703   dest_regnum = true_regnum (dest);
1704   src1_regnum = true_regnum (src1);
1705 
1706   dest_class = REGNO_REG_CLASS (dest_regnum);
1707   src1_class = REGNO_REG_CLASS (src1_regnum);
1708 
1709   if (CONST_INT_P (src2))
1710     {
1711       gcc_assert (dest_regnum == src1_regnum);
1712 
1713       if (src2 == const1_rtx && !need_flags)
1714 	return "inc %0";
1715       if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1716         return "inc4 %0";
1717 
1718       gcc_assert (!need_flags || dest_class != SP_REGS);
1719       return "add %2,%0";
1720     }
1721   else if (CONSTANT_P (src2))
1722     return "add %2,%0";
1723 
1724   src2_regnum = true_regnum (src2);
1725   src2_class = REGNO_REG_CLASS (src2_regnum);
1726 
1727   if (dest_regnum == src1_regnum)
1728     return "add %2,%0";
1729   if (dest_regnum == src2_regnum)
1730     return "add %1,%0";
1731 
1732   /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
1733      this directly, as below, but when optimizing for space we can sometimes
1734      do better by using a mov+add.  For MN103, we claimed that we could
1735      implement a three-operand add because the various move and add insns
1736      change sizes across register classes, and we can often do better than
1737      reload in choosing which operand to move.  */
1738   if (TARGET_AM33 && optimize_insn_for_speed_p ())
1739     return "add %2,%1,%0";
1740 
1741   /* Catch cases where no extended register was used.  */
1742   if (src1_class != EXTENDED_REGS
1743       && src2_class != EXTENDED_REGS
1744       && dest_class != EXTENDED_REGS)
1745     {
1746       /* We have to copy one of the sources into the destination, then
1747          add the other source to the destination.
1748 
1749          Carefully select which source to copy to the destination; a
1750          naive implementation will waste a byte when the source classes
1751          are different and the destination is an address register.
1752          Selecting the lowest cost register copy will optimize this
1753          sequence.  */
1754       if (src1_class == dest_class)
1755         return "mov %1,%0\n\tadd %2,%0";
1756       else
1757 	return "mov %2,%0\n\tadd %1,%0";
1758     }
1759 
1760   /* At least one register is an extended register.  */
1761 
1762   /* The three operand add instruction on the am33 is a win iff the
1763      output register is an extended register, or if both source
1764      registers are extended registers.  */
1765   if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1766     return "add %2,%1,%0";
1767 
1768   /* It is better to copy one of the sources to the destination, then
1769      perform a 2 address add.  The destination in this case must be
1770      an address or data register and one of the sources must be an
1771      extended register and the remaining source must not be an extended
1772      register.
1773 
1774      The best code for this case is to copy the extended reg to the
1775      destination, then emit a two address add.  */
1776   if (src1_class == EXTENDED_REGS)
1777     return "mov %1,%0\n\tadd %2,%0";
1778   else
1779     return "mov %2,%0\n\tadd %1,%0";
1780 }
1781 
1782 /* Return 1 if X contains a symbolic expression.  We know these
1783    expressions will have one of a few well defined forms, so
1784    we need only check those forms.  */
1785 
1786 int
1787 mn10300_symbolic_operand (rtx op,
1788 			  machine_mode mode ATTRIBUTE_UNUSED)
1789 {
1790   switch (GET_CODE (op))
1791     {
1792     case SYMBOL_REF:
1793     case LABEL_REF:
1794       return 1;
1795     case CONST:
1796       op = XEXP (op, 0);
1797       return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1798                || GET_CODE (XEXP (op, 0)) == LABEL_REF)
1799               && CONST_INT_P (XEXP (op, 1)));
1800     default:
1801       return 0;
1802     }
1803 }
1804 
1805 /* Try machine dependent ways of modifying an illegitimate address
1806    to be legitimate.  If we find one, return the new valid address.
1807    This macro is used in only one place: `memory_address' in explow.c.
1808 
1809    OLDX is the address as it was before break_out_memory_refs was called.
1810    In some cases it is useful to look at this to decide what needs to be done.
1811 
1812    Normally it is always safe for this macro to do nothing.  It exists to
1813    recognize opportunities to optimize the output.
1814 
1815    But on a few ports with segmented architectures and indexed addressing
1816    (mn10300, hppa) it is used to rewrite certain problematical addresses.  */
1817 
1818 static rtx
1819 mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1820 			    machine_mode mode ATTRIBUTE_UNUSED)
1821 {
1822   if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1823     x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1824 
1825   /* Uh-oh.  We might have an address for x[n-100000].  This needs
1826      special handling to avoid creating an indexed memory address
1827      with x-100000 as the base.  */
1828   if (GET_CODE (x) == PLUS
1829       && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1830     {
1831       /* Ugly.  We modify things here so that the address offset specified
1832          by the index expression is computed first, then added to x to form
1833          the entire address.  */
1834 
1835       rtx regx1, regy1, regy2, y;
1836 
1837       /* Strip off any CONST.  */
1838       y = XEXP (x, 1);
1839       if (GET_CODE (y) == CONST)
1840         y = XEXP (y, 0);
1841 
1842       if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1843 	{
1844 	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1845 	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1846 	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1847 	  regx1 = force_reg (Pmode,
1848 			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1849 					     regy2));
1850 	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1851 	}
1852     }
1853   return x;
1854 }
1855 
1856 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1857    @GOTOFF in `reg'.  */
1858 
1859 rtx
1860 mn10300_legitimize_pic_address (rtx orig, rtx reg)
1861 {
1862   rtx x;
1863   rtx_insn *insn;
1864 
1865   if (GET_CODE (orig) == LABEL_REF
1866       || (GET_CODE (orig) == SYMBOL_REF
1867 	  && (CONSTANT_POOL_ADDRESS_P (orig)
1868 	      || ! MN10300_GLOBAL_P (orig))))
1869     {
1870       if (reg == NULL)
1871 	reg = gen_reg_rtx (Pmode);
1872 
1873       x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1874       x = gen_rtx_CONST (SImode, x);
1875       emit_move_insn (reg, x);
1876 
1877       insn = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
1878     }
1879   else if (GET_CODE (orig) == SYMBOL_REF)
1880     {
1881       if (reg == NULL)
1882 	reg = gen_reg_rtx (Pmode);
1883 
1884       x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1885       x = gen_rtx_CONST (SImode, x);
1886       x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1887       x = gen_const_mem (SImode, x);
1888 
1889       insn = emit_move_insn (reg, x);
1890     }
1891   else
1892     return orig;
1893 
1894   set_unique_reg_note (insn, REG_EQUAL, orig);
1895   return reg;
1896 }
1897 
1898 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1899    isn't protected by a PIC unspec; nonzero otherwise.  */
1900 
1901 int
1902 mn10300_legitimate_pic_operand_p (rtx x)
1903 {
1904   const char *fmt;
1905   int i;
1906 
1907   if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1908     return 0;
1909 
1910   if (GET_CODE (x) == UNSPEC
1911       && (XINT (x, 1) == UNSPEC_PIC
1912 	  || XINT (x, 1) == UNSPEC_GOT
1913 	  || XINT (x, 1) == UNSPEC_GOTOFF
1914 	  || XINT (x, 1) == UNSPEC_PLT
1915 	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
1916       return 1;
1917 
1918   fmt = GET_RTX_FORMAT (GET_CODE (x));
1919   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1920     {
1921       if (fmt[i] == 'E')
1922 	{
1923 	  int j;
1924 
1925 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1926 	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
1927 	      return 0;
1928 	}
1929       else if (fmt[i] == 'e'
1930 	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
1931 	return 0;
1932     }
1933 
1934   return 1;
1935 }
1936 
1937 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1938    legitimate, and FALSE otherwise.
1939 
1940    On the mn10300, the value in the address register must be
1941    in the same memory space/segment as the effective address.
1942 
1943    This is problematical for reload since it does not understand
1944    that base+index != index+base in a memory reference.
1945 
1946    Note it is still possible to use reg+reg addressing modes,
1947    it's just much more difficult.  For a discussion of a possible
1948    workaround and solution, see the comments in pa.c before the
1949    function record_unscaled_index_insn_codes.  */
1950 
1951 static bool
1952 mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
1953 {
1954   rtx base, index;
1955 
1956   if (CONSTANT_ADDRESS_P (x))
1957     return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1958 
1959   if (RTX_OK_FOR_BASE_P (x, strict))
1960     return true;
1961 
1962   if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1963     {
1964       if (GET_CODE (x) == POST_INC)
1965 	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1966       if (GET_CODE (x) == POST_MODIFY)
1967 	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1968 		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
1969     }
1970 
1971   if (GET_CODE (x) != PLUS)
1972     return false;
1973 
1974   base = XEXP (x, 0);
1975   index = XEXP (x, 1);
1976 
1977   if (!REG_P (base))
1978     return false;
1979   if (REG_P (index))
1980     {
1981       /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1982 	 addressing is hard to satisfy.  */
1983       if (!TARGET_AM33)
1984 	return false;
1985 
1986       return (REGNO_GENERAL_P (REGNO (base), strict)
1987 	      && REGNO_GENERAL_P (REGNO (index), strict));
1988     }
1989 
1990   if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1991     return false;
1992 
1993   if (CONST_INT_P (index))
1994     return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1995 
1996   if (CONSTANT_ADDRESS_P (index))
1997     return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1998 
1999   return false;
2000 }
2001 
2002 bool
2003 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2004 {
2005   if (regno >= FIRST_PSEUDO_REGISTER)
2006     {
2007       if (!strict)
2008 	return true;
2009       if (!reg_renumber)
2010 	return false;
2011       regno = reg_renumber[regno];
2012       if (regno == INVALID_REGNUM)
2013 	return false;
2014     }
2015   return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2016 }
2017 
2018 rtx
2019 mn10300_legitimize_reload_address (rtx x,
2020 				   machine_mode mode ATTRIBUTE_UNUSED,
2021 				   int opnum, int type,
2022 				   int ind_levels ATTRIBUTE_UNUSED)
2023 {
2024   bool any_change = false;
2025 
2026   /* See above re disabling reg+reg addressing for MN103.  */
2027   if (!TARGET_AM33)
2028     return NULL_RTX;
2029 
2030   if (GET_CODE (x) != PLUS)
2031     return NULL_RTX;
2032 
2033   if (XEXP (x, 0) == stack_pointer_rtx)
2034     {
2035       push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2036 		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2037 		   opnum, (enum reload_type) type);
2038       any_change = true;
2039     }
2040   if (XEXP (x, 1) == stack_pointer_rtx)
2041     {
2042       push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2043 		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2044 		   opnum, (enum reload_type) type);
2045       any_change = true;
2046     }
2047 
2048   return any_change ? x : NULL_RTX;
2049 }
2050 
2051 /* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
2052    constant.  Note that some "constants" aren't valid, such as TLS
2053    symbols and unconverted GOT-based references, so we eliminate
2054    those here.  */
2055 
2056 static bool
2057 mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2058 {
2059   switch (GET_CODE (x))
2060     {
2061     case CONST:
2062       x = XEXP (x, 0);
2063 
2064       if (GET_CODE (x) == PLUS)
2065 	{
2066 	  if (! CONST_INT_P (XEXP (x, 1)))
2067 	    return false;
2068 	  x = XEXP (x, 0);
2069 	}
2070 
2071       /* Only some unspecs are valid as "constants".  */
2072       if (GET_CODE (x) == UNSPEC)
2073 	{
2074 	  switch (XINT (x, 1))
2075 	    {
2076 	    case UNSPEC_PIC:
2077 	    case UNSPEC_GOT:
2078 	    case UNSPEC_GOTOFF:
2079 	    case UNSPEC_PLT:
2080 	      return true;
2081 	    default:
2082 	      return false;
2083 	    }
2084 	}
2085 
2086       /* We must have drilled down to a symbol.  */
2087       if (! mn10300_symbolic_operand (x, Pmode))
2088 	return false;
2089       break;
2090 
2091     default:
2092       break;
2093     }
2094 
2095   return true;
2096 }
2097 
2098 /* Undo pic address legitimization for the benefit of debug info.  */
2099 
2100 static rtx
2101 mn10300_delegitimize_address (rtx orig_x)
2102 {
2103   rtx x = orig_x, ret, addend = NULL;
2104   bool need_mem;
2105 
2106   if (MEM_P (x))
2107     x = XEXP (x, 0);
2108   if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2109     return orig_x;
2110 
2111   if (XEXP (x, 0) == pic_offset_table_rtx)
2112     ;
2113   /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2114      some odd-looking "addresses" that were never valid in the first place.
2115      We need to look harder to avoid warnings being emitted.  */
2116   else if (GET_CODE (XEXP (x, 0)) == PLUS)
2117     {
2118       rtx x0 = XEXP (x, 0);
2119       rtx x00 = XEXP (x0, 0);
2120       rtx x01 = XEXP (x0, 1);
2121 
2122       if (x00 == pic_offset_table_rtx)
2123 	addend = x01;
2124       else if (x01 == pic_offset_table_rtx)
2125 	addend = x00;
2126       else
2127 	return orig_x;
2128 
2129     }
2130   else
2131     return orig_x;
2132   x = XEXP (x, 1);
2133 
2134   if (GET_CODE (x) != CONST)
2135     return orig_x;
2136   x = XEXP (x, 0);
2137   if (GET_CODE (x) != UNSPEC)
2138     return orig_x;
2139 
2140   ret = XVECEXP (x, 0, 0);
2141   if (XINT (x, 1) == UNSPEC_GOTOFF)
2142     need_mem = false;
2143   else if (XINT (x, 1) == UNSPEC_GOT)
2144     need_mem = true;
2145   else
2146     return orig_x;
2147 
2148   gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2149   if (need_mem != MEM_P (orig_x))
2150     return orig_x;
2151   if (need_mem && addend)
2152     return orig_x;
2153   if (addend)
2154     ret = gen_rtx_PLUS (Pmode, addend, ret);
2155   return ret;
2156 }
2157 
2158 /* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
2159    the 3-byte fully general instruction; for MN103 this is the 2-byte form
2160    with an address register.  */
2161 
2162 static int
2163 mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
2164 		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
2165 {
2166   HOST_WIDE_INT i;
2167   rtx base, index;
2168 
2169   switch (GET_CODE (x))
2170     {
2171     case CONST:
2172     case SYMBOL_REF:
2173     case LABEL_REF:
2174       /* We assume all of these require a 32-bit constant, even though
2175 	 some symbol and label references can be relaxed.  */
2176       return speed ? 1 : 4;
2177 
2178     case REG:
2179     case SUBREG:
2180     case POST_INC:
2181       return 0;
2182 
2183     case POST_MODIFY:
2184       /* Assume any symbolic offset is a 32-bit constant.  */
2185       i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2186       if (IN_RANGE (i, -128, 127))
2187 	return speed ? 0 : 1;
2188       if (speed)
2189 	return 1;
2190       if (IN_RANGE (i, -0x800000, 0x7fffff))
2191 	return 3;
2192       return 4;
2193 
2194     case PLUS:
2195       base = XEXP (x, 0);
2196       index = XEXP (x, 1);
2197       if (register_operand (index, SImode))
2198 	{
2199 	  /* Attempt to minimize the number of registers in the address.
2200 	     This is similar to what other ports do.  */
2201 	  if (register_operand (base, SImode))
2202 	    return 1;
2203 
2204 	  base = XEXP (x, 1);
2205 	  index = XEXP (x, 0);
2206 	}
2207 
2208       /* Assume any symbolic offset is a 32-bit constant.  */
2209       i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2210       if (IN_RANGE (i, -128, 127))
2211 	return speed ? 0 : 1;
2212       if (IN_RANGE (i, -32768, 32767))
2213 	return speed ? 0 : 2;
2214       return speed ? 2 : 6;
2215 
2216     default:
2217       return rtx_cost (x, Pmode, MEM, 0, speed);
2218     }
2219 }
2220 
2221 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2222 
2223    Recall that the base value of 2 is required by assumptions elsewhere
2224    in the body of the compiler, and that cost 2 is special-cased as an
2225    early exit from reload meaning no work is required.  */
2226 
2227 static int
2228 mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2229 			    reg_class_t ifrom, reg_class_t ito)
2230 {
2231   enum reg_class from = (enum reg_class) ifrom;
2232   enum reg_class to = (enum reg_class) ito;
2233   enum reg_class scratch, test;
2234 
2235   /* Simplify the following code by unifying the fp register classes.  */
2236   if (to == FP_ACC_REGS)
2237     to = FP_REGS;
2238   if (from == FP_ACC_REGS)
2239     from = FP_REGS;
2240 
2241   /* Diagnose invalid moves by costing them as two moves.  */
2242 
2243   scratch = NO_REGS;
2244   test = from;
2245   if (to == SP_REGS)
2246     scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2247   else if (to == MDR_REGS)
2248     scratch = DATA_REGS;
2249   else if (to == FP_REGS && to != from)
2250     scratch = GENERAL_REGS;
2251   else
2252     {
2253       test = to;
2254       if (from == SP_REGS)
2255 	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2256       else if (from == MDR_REGS)
2257 	scratch = DATA_REGS;
2258       else if (from == FP_REGS && to != from)
2259 	scratch = GENERAL_REGS;
2260     }
2261   if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2262     return (mn10300_register_move_cost (VOIDmode, from, scratch)
2263 	    + mn10300_register_move_cost (VOIDmode, scratch, to));
2264 
2265   /* From here on, all we need consider are legal combinations.  */
2266 
2267   if (optimize_size)
2268     {
2269       /* The scale here is bytes * 2.  */
2270 
2271       if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2272 	return 2;
2273 
2274       if (from == SP_REGS)
2275 	return (to == ADDRESS_REGS ? 2 : 6);
2276 
2277       /* For MN103, all remaining legal moves are two bytes.  */
2278       if (TARGET_AM33)
2279 	return 4;
2280 
2281       if (to == SP_REGS)
2282 	return (from == ADDRESS_REGS ? 4 : 6);
2283 
2284       if ((from == ADDRESS_REGS || from == DATA_REGS)
2285 	   && (to == ADDRESS_REGS || to == DATA_REGS))
2286 	return 4;
2287 
2288       if (to == EXTENDED_REGS)
2289 	return (to == from ? 6 : 4);
2290 
2291       /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
2292       return 6;
2293     }
2294   else
2295     {
2296       /* The scale here is cycles * 2.  */
2297 
2298       if (to == FP_REGS)
2299 	return 8;
2300       if (from == FP_REGS)
2301 	return 4;
2302 
2303       /* All legal moves between integral registers are single cycle.  */
2304       return 2;
2305     }
2306 }
2307 
2308 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2309 
2310    Given lack of the form of the address, this must be speed-relative,
2311    though we should never be less expensive than a size-relative register
2312    move cost above.  This is not a problem.  */
2313 
2314 static int
2315 mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2316 			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2317 {
2318   enum reg_class rclass = (enum reg_class) iclass;
2319 
2320   if (rclass == FP_REGS)
2321     return 8;
2322   return 6;
2323 }
2324 
2325 /* Implement the TARGET_RTX_COSTS hook.
2326 
2327    Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2328    to represent cycles.  Size-relative costs are in bytes.  */
2329 
2330 static bool
2331 mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
2332 		   int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
2333 {
2334   /* This value is used for SYMBOL_REF etc where we want to pretend
2335      we have a full 32-bit constant.  */
2336   HOST_WIDE_INT i = 0x12345678;
2337   int total;
2338   int code = GET_CODE (x);
2339 
2340   switch (code)
2341     {
2342     case CONST_INT:
2343       i = INTVAL (x);
2344     do_int_costs:
2345       if (speed)
2346 	{
2347 	  if (outer_code == SET)
2348 	    {
2349 	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
2350 	      if (IN_RANGE (i, -32768, 32767))
2351 		total = COSTS_N_INSNS (1);
2352 	      else
2353 		total = COSTS_N_INSNS (2);
2354 	    }
2355 	  else
2356 	    {
2357 	      /* 16-bit integer operands don't affect latency;
2358 		 24-bit and 32-bit operands add a cycle.  */
2359 	      if (IN_RANGE (i, -32768, 32767))
2360 		total = 0;
2361 	      else
2362 		total = COSTS_N_INSNS (1);
2363 	    }
2364 	}
2365       else
2366 	{
2367 	  if (outer_code == SET)
2368 	    {
2369 	      if (i == 0)
2370 		total = 1;
2371 	      else if (IN_RANGE (i, -128, 127))
2372 		total = 2;
2373 	      else if (IN_RANGE (i, -32768, 32767))
2374 		total = 3;
2375 	      else
2376 		total = 6;
2377 	    }
2378 	  else
2379 	    {
2380 	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
2381 	      if (IN_RANGE (i, -128, 127))
2382 		total = 0;
2383 	      else if (IN_RANGE (i, -32768, 32767))
2384 		total = 2;
2385 	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2386 		total = 3;
2387 	      else
2388 		total = 4;
2389 	    }
2390 	}
2391       goto alldone;
2392 
2393     case CONST:
2394     case LABEL_REF:
2395     case SYMBOL_REF:
2396     case CONST_DOUBLE:
2397       /* We assume all of these require a 32-bit constant, even though
2398 	 some symbol and label references can be relaxed.  */
2399       goto do_int_costs;
2400 
2401     case UNSPEC:
2402       switch (XINT (x, 1))
2403 	{
2404 	case UNSPEC_PIC:
2405 	case UNSPEC_GOT:
2406 	case UNSPEC_GOTOFF:
2407 	case UNSPEC_PLT:
2408 	case UNSPEC_GOTSYM_OFF:
2409 	  /* The PIC unspecs also resolve to a 32-bit constant.  */
2410 	  goto do_int_costs;
2411 
2412 	default:
2413 	  /* Assume any non-listed unspec is some sort of arithmetic.  */
2414 	  goto do_arith_costs;
2415 	}
2416 
2417     case PLUS:
2418       /* Notice the size difference of INC and INC4.  */
2419       if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2420 	{
2421 	  i = INTVAL (XEXP (x, 1));
2422 	  if (i == 1 || i == 4)
2423 	    {
2424 	      total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
2425 	      goto alldone;
2426 	    }
2427 	}
2428       goto do_arith_costs;
2429 
2430     case MINUS:
2431     case AND:
2432     case IOR:
2433     case XOR:
2434     case NOT:
2435     case NEG:
2436     case ZERO_EXTEND:
2437     case SIGN_EXTEND:
2438     case COMPARE:
2439     case BSWAP:
2440     case CLZ:
2441     do_arith_costs:
2442       total = (speed ? COSTS_N_INSNS (1) : 2);
2443       break;
2444 
2445     case ASHIFT:
2446       /* Notice the size difference of ASL2 and variants.  */
2447       if (!speed && CONST_INT_P (XEXP (x, 1)))
2448 	switch (INTVAL (XEXP (x, 1)))
2449 	  {
2450 	  case 1:
2451 	  case 2:
2452 	    total = 1;
2453 	    goto alldone;
2454 	  case 3:
2455 	  case 4:
2456 	    total = 2;
2457 	    goto alldone;
2458 	  }
2459       /* FALLTHRU */
2460 
2461     case ASHIFTRT:
2462     case LSHIFTRT:
2463       total = (speed ? COSTS_N_INSNS (1) : 3);
2464       goto alldone;
2465 
2466     case MULT:
2467       total = (speed ? COSTS_N_INSNS (3) : 2);
2468       break;
2469 
2470     case DIV:
2471     case UDIV:
2472     case MOD:
2473     case UMOD:
2474       total = (speed ? COSTS_N_INSNS (39)
2475 		/* Include space to load+retrieve MDR.  */
2476 		: code == MOD || code == UMOD ? 6 : 4);
2477       break;
2478 
2479     case MEM:
2480       total = mn10300_address_cost (XEXP (x, 0), mode,
2481 				    MEM_ADDR_SPACE (x), speed);
2482       if (speed)
2483 	total = COSTS_N_INSNS (2 + total);
2484       goto alldone;
2485 
2486     default:
2487       /* Probably not implemented.  Assume external call.  */
2488       total = (speed ? COSTS_N_INSNS (10) : 7);
2489       break;
2490     }
2491 
2492   *ptotal = total;
2493   return false;
2494 
2495  alldone:
2496   *ptotal = total;
2497   return true;
2498 }
2499 
2500 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2501    may access it using GOTOFF instead of GOT.  */
2502 
2503 static void
2504 mn10300_encode_section_info (tree decl, rtx rtl, int first)
2505 {
2506   rtx symbol;
2507 
2508   default_encode_section_info (decl, rtl, first);
2509 
2510   if (! MEM_P (rtl))
2511     return;
2512 
2513   symbol = XEXP (rtl, 0);
2514   if (GET_CODE (symbol) != SYMBOL_REF)
2515     return;
2516 
2517   if (flag_pic)
2518     SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2519 }
2520 
2521 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2522    and readonly data size.  So we crank up the case threshold value to
2523    encourage a series of if/else comparisons to implement many small switch
2524    statements.  In theory, this value could be increased much more if we
2525    were solely optimizing for space, but we keep it "reasonable" to avoid
2526    serious code efficiency lossage.  */
2527 
2528 static unsigned int
2529 mn10300_case_values_threshold (void)
2530 {
2531   return 6;
2532 }
2533 
2534 /* Worker function for TARGET_TRAMPOLINE_INIT.  */
2535 
2536 static void
2537 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2538 {
2539   rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2540 
2541   /* This is a strict alignment target, which means that we play
2542      some games to make sure that the locations at which we need
2543      to store <chain> and <disp> wind up at aligned addresses.
2544 
2545 	0x28 0x00			add 0,d0
2546 	          0xfc 0xdd		mov chain,a1
2547         <chain>
2548 	0xf8 0xed 0x00			btst 0,d1
2549 	               0xdc		jmp fnaddr
2550 	<disp>
2551 
2552      Note that the two extra insns are effectively nops; they
2553      clobber the flags but do not affect the contents of D0 or D1.  */
2554 
2555   disp = expand_binop (SImode, sub_optab, fnaddr,
2556 		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
2557 		       NULL_RTX, 1, OPTAB_DIRECT);
2558 
2559   mem = adjust_address (m_tramp, SImode, 0);
2560   emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2561   mem = adjust_address (m_tramp, SImode, 4);
2562   emit_move_insn (mem, chain_value);
2563   mem = adjust_address (m_tramp, SImode, 8);
2564   emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2565   mem = adjust_address (m_tramp, SImode, 12);
2566   emit_move_insn (mem, disp);
2567 }
2568 
2569 /* Output the assembler code for a C++ thunk function.
2570    THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2571    is the decl for the target function.  DELTA is an immediate constant
2572    offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
2573    the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2574    additionally added to THIS.  Finally jump to the entry point of
2575    FUNCTION.  */
2576 
2577 static void
2578 mn10300_asm_output_mi_thunk (FILE *        file,
2579 			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
2580 			     HOST_WIDE_INT delta,
2581 			     HOST_WIDE_INT vcall_offset,
2582 			     tree          function)
2583 {
2584   const char * _this;
2585 
2586   /* Get the register holding the THIS parameter.  Handle the case
2587      where there is a hidden first argument for a returned structure.  */
2588   if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2589     _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2590   else
2591     _this = reg_names [FIRST_ARGUMENT_REGNUM];
2592 
2593   fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2594 
2595   if (delta)
2596     fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2597 
2598   if (vcall_offset)
2599     {
2600       const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2601 
2602       fprintf (file, "\tmov %s, %s\n", _this, scratch);
2603       fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2604       fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2605       fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2606       fprintf (file, "\tadd %s, %s\n", scratch, _this);
2607     }
2608 
2609   fputs ("\tjmp ", file);
2610   assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2611   putc ('\n', file);
2612 }
2613 
2614 /* Return true if mn10300_output_mi_thunk would be able to output the
2615    assembler code for the thunk function specified by the arguments
2616    it is passed, and false otherwise.  */
2617 
2618 static bool
2619 mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
2620 			     HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
2621 			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2622 			     const_tree    function     ATTRIBUTE_UNUSED)
2623 {
2624   return true;
2625 }
2626 
2627 bool
2628 mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2629 {
2630   if (REGNO_REG_CLASS (regno) == FP_REGS
2631       || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2632     /* Do not store integer values in FP registers.  */
2633     return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2634 
2635   if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2636     return false;
2637 
2638   if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2639     return true;
2640 
2641   if (REGNO_REG_CLASS (regno) == DATA_REGS
2642       || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2643       || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2644     return GET_MODE_SIZE (mode) <= 4;
2645 
2646   return false;
2647 }
2648 
2649 bool
2650 mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
2651 {
2652   if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2653       && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2654     return false;
2655 
2656   if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2657       && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2658     return false;
2659 
2660   if (TARGET_AM33
2661       || mode1 == mode2
2662       || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2663     return true;
2664 
2665   return false;
2666 }
2667 
2668 static int
2669 cc_flags_for_mode (machine_mode mode)
2670 {
2671   switch (mode)
2672     {
2673     case CCmode:
2674       return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2675     case CCZNCmode:
2676       return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2677     case CCZNmode:
2678       return CC_FLAG_Z | CC_FLAG_N;
2679     case CC_FLOATmode:
2680       return -1;
2681     default:
2682       gcc_unreachable ();
2683     }
2684 }
2685 
2686 static int
2687 cc_flags_for_code (enum rtx_code code)
2688 {
2689   switch (code)
2690     {
2691     case EQ:	/* Z */
2692     case NE:	/* ~Z */
2693       return CC_FLAG_Z;
2694 
2695     case LT:	/* N */
2696     case GE:	/* ~N */
2697       return CC_FLAG_N;
2698 
2699     case GT:    /* ~(Z|(N^V)) */
2700     case LE:    /* Z|(N^V) */
2701       return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2702 
2703     case GEU:	/* ~C */
2704     case LTU:	/* C */
2705       return CC_FLAG_C;
2706 
2707     case GTU:	/* ~(C | Z) */
2708     case LEU:	/* C | Z */
2709       return CC_FLAG_Z | CC_FLAG_C;
2710 
2711     case ORDERED:
2712     case UNORDERED:
2713     case LTGT:
2714     case UNEQ:
2715     case UNGE:
2716     case UNGT:
2717     case UNLE:
2718     case UNLT:
2719       return -1;
2720 
2721     default:
2722       gcc_unreachable ();
2723     }
2724 }
2725 
2726 machine_mode
2727 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2728 {
2729   int req;
2730 
2731   if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2732     return CC_FLOATmode;
2733 
2734   req = cc_flags_for_code (code);
2735 
2736   if (req & CC_FLAG_V)
2737     return CCmode;
2738   if (req & CC_FLAG_C)
2739     return CCZNCmode;
2740   return CCZNmode;
2741 }
2742 
2743 static inline bool
2744 set_is_load_p (rtx set)
2745 {
2746   return MEM_P (SET_SRC (set));
2747 }
2748 
2749 static inline bool
2750 set_is_store_p (rtx set)
2751 {
2752   return MEM_P (SET_DEST (set));
2753 }
2754 
2755 /* Update scheduling costs for situations that cannot be
2756    described using the attributes and DFA machinery.
2757    DEP is the insn being scheduled.
2758    INSN is the previous insn.
2759    COST is the current cycle cost for DEP.  */
2760 
2761 static int
2762 mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
2763 			   int cost, unsigned int)
2764 {
2765   rtx insn_set;
2766   rtx dep_set;
2767   int timings;
2768 
2769   if (!TARGET_AM33)
2770     return 1;
2771 
2772   /* We are only interested in pairs of SET. */
2773   insn_set = single_set (insn);
2774   if (!insn_set)
2775     return cost;
2776 
2777   dep_set = single_set (dep);
2778   if (!dep_set)
2779     return cost;
2780 
2781   /* For the AM34 a load instruction that follows a
2782      store instruction incurs an extra cycle of delay.  */
2783   if (mn10300_tune_cpu == PROCESSOR_AM34
2784       && set_is_load_p (dep_set)
2785       && set_is_store_p (insn_set))
2786     cost += 1;
2787 
2788   /* For the AM34 a non-store, non-branch FPU insn that follows
2789      another FPU insn incurs a one cycle throughput increase.  */
2790   else if (mn10300_tune_cpu == PROCESSOR_AM34
2791       && ! set_is_store_p (insn_set)
2792       && ! JUMP_P (insn)
2793       && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
2794       && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
2795     cost += 1;
2796 
2797   /*  Resolve the conflict described in section 1-7-4 of
2798       Chapter 3 of the MN103E Series Instruction Manual
2799       where it says:
2800 
2801         "When the preceding instruction is a CPU load or
2802 	 store instruction, a following FPU instruction
2803 	 cannot be executed until the CPU completes the
2804 	 latency period even though there are no register
2805 	 or flag dependencies between them."  */
2806 
2807   /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
2808   if (! TARGET_AM33_2)
2809     return cost;
2810 
2811   /* If a data dependence already exists then the cost is correct.  */
2812   if (dep_type == 0)
2813     return cost;
2814 
2815   /* Check that the instruction about to scheduled is an FPU instruction.  */
2816   if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
2817     return cost;
2818 
2819   /* Now check to see if the previous instruction is a load or store.  */
2820   if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
2821     return cost;
2822 
2823   /* XXX: Verify: The text of 1-7-4 implies that the restriction
2824      only applies when an INTEGER load/store precedes an FPU
2825      instruction, but is this true ?  For now we assume that it is.  */
2826   if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
2827     return cost;
2828 
2829   /* Extract the latency value from the timings attribute.  */
2830   timings = get_attr_timings (insn);
2831   return timings < 100 ? (timings % 10) : (timings % 100);
2832 }
2833 
2834 static void
2835 mn10300_conditional_register_usage (void)
2836 {
2837   unsigned int i;
2838 
2839   if (!TARGET_AM33)
2840     {
2841       for (i = FIRST_EXTENDED_REGNUM;
2842 	   i <= LAST_EXTENDED_REGNUM; i++)
2843 	fixed_regs[i] = call_used_regs[i] = 1;
2844     }
2845   if (!TARGET_AM33_2)
2846     {
2847       for (i = FIRST_FP_REGNUM;
2848 	   i <= LAST_FP_REGNUM; i++)
2849 	fixed_regs[i] = call_used_regs[i] = 1;
2850     }
2851   if (flag_pic)
2852     fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2853     call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2854 }
2855 
2856 /* Worker function for TARGET_MD_ASM_ADJUST.
2857    We do this in the mn10300 backend to maintain source compatibility
2858    with the old cc0-based compiler.  */
2859 
2860 static rtx_insn *
2861 mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
2862 		       vec<const char *> &/*constraints*/,
2863 		       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
2864 {
2865   clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
2866   SET_HARD_REG_BIT (clobbered_regs, CC_REG);
2867   return NULL;
2868 }
2869 
2870 /* A helper function for splitting cbranch patterns after reload.  */
2871 
2872 void
2873 mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2874 {
2875   rtx flags, x;
2876 
2877   flags = gen_rtx_REG (cmp_mode, CC_REG);
2878   x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2879   x = gen_rtx_SET (flags, x);
2880   emit_insn (x);
2881 
2882   x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2883   x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2884   x = gen_rtx_SET (pc_rtx, x);
2885   emit_jump_insn (x);
2886 }
2887 
2888 /* A helper function for matching parallels that set the flags.  */
2889 
2890 bool
2891 mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
2892 {
2893   rtx op1, flags;
2894   machine_mode flags_mode;
2895 
2896   gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2897 
2898   op1 = XVECEXP (PATTERN (insn), 0, 1);
2899   gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2900 
2901   flags = SET_DEST (op1);
2902   flags_mode = GET_MODE (flags);
2903 
2904   if (GET_MODE (SET_SRC (op1)) != flags_mode)
2905     return false;
2906   if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2907     return false;
2908 
2909   /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
2910   if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2911     return false;
2912 
2913   return true;
2914 }
2915 
2916 /* This function is used to help split:
2917 
2918      (set (reg) (and (reg) (int)))
2919 
2920    into:
2921 
2922      (set (reg) (shift (reg) (int))
2923      (set (reg) (shift (reg) (int))
2924 
2925    where the shitfs will be shorter than the "and" insn.
2926 
2927    It returns the number of bits that should be shifted.  A positive
2928    values means that the low bits are to be cleared (and hence the
2929    shifts should be right followed by left) whereas a negative value
2930    means that the high bits are to be cleared (left followed by right).
2931    Zero is returned when it would not be economical to split the AND.  */
2932 
2933 int
2934 mn10300_split_and_operand_count (rtx op)
2935 {
2936   HOST_WIDE_INT val = INTVAL (op);
2937   int count;
2938 
2939   if (val < 0)
2940     {
2941       /* High bit is set, look for bits clear at the bottom.  */
2942       count = exact_log2 (-val);
2943       if (count < 0)
2944 	return 0;
2945       /* This is only size win if we can use the asl2 insn.  Otherwise we
2946 	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
2947       if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2948 	return 0;
2949       return count;
2950     }
2951   else
2952     {
2953       /* High bit is clear, look for bits set at the bottom.  */
2954       count = exact_log2 (val + 1);
2955       count = 32 - count;
2956       /* Again, this is only a size win with asl2.  */
2957       if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2958 	return 0;
2959       return -count;
2960     }
2961 }
2962 
2963 struct liw_data
2964 {
2965   enum attr_liw slot;
2966   enum attr_liw_op op;
2967   rtx dest;
2968   rtx src;
2969 };
2970 
2971 /* Decide if the given insn is a candidate for LIW bundling.  If it is then
2972    extract the operands and LIW attributes from the insn and use them to fill
2973    in the liw_data structure.  Return true upon success or false if the insn
2974    cannot be bundled.  */
2975 
2976 static bool
2977 extract_bundle (rtx_insn *insn, struct liw_data * pdata)
2978 {
2979   bool allow_consts = true;
2980   rtx p;
2981 
2982   gcc_assert (pdata != NULL);
2983 
2984   if (insn == NULL)
2985     return false;
2986   /* Make sure that we are dealing with a simple SET insn.  */
2987   p = single_set (insn);
2988   if (p == NULL_RTX)
2989     return false;
2990 
2991   /* Make sure that it could go into one of the LIW pipelines.  */
2992   pdata->slot = get_attr_liw (insn);
2993   if (pdata->slot == LIW_BOTH)
2994     return false;
2995 
2996   pdata->op = get_attr_liw_op (insn);
2997 
2998   switch (pdata->op)
2999     {
3000     case LIW_OP_MOV:
3001       pdata->dest = SET_DEST (p);
3002       pdata->src = SET_SRC (p);
3003       break;
3004     case LIW_OP_CMP:
3005       pdata->dest = XEXP (SET_SRC (p), 0);
3006       pdata->src = XEXP (SET_SRC (p), 1);
3007       break;
3008     case LIW_OP_NONE:
3009       return false;
3010     case LIW_OP_AND:
3011     case LIW_OP_OR:
3012     case LIW_OP_XOR:
3013       /* The AND, OR and XOR long instruction words only accept register arguments.  */
3014       allow_consts = false;
3015       /* Fall through.  */
3016     default:
3017       pdata->dest = SET_DEST (p);
3018       pdata->src = XEXP (SET_SRC (p), 1);
3019       break;
3020     }
3021 
3022   if (! REG_P (pdata->dest))
3023     return false;
3024 
3025   if (REG_P (pdata->src))
3026     return true;
3027 
3028   return allow_consts && satisfies_constraint_O (pdata->src);
3029 }
3030 
3031 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
3032    the instructions with the assumption that LIW1 would be executed before LIW2
3033    so we must check for overlaps between their sources and destinations.  */
3034 
3035 static bool
3036 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3037 {
3038   /* Check for slot conflicts.  */
3039   if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3040     return false;
3041 
3042   /* If either operation is a compare, then "dest" is really an input; the real
3043      destination is CC_REG.  So these instructions need different checks.  */
3044 
3045   /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3046      check its values prior to any changes made by OP.  */
3047   if (pliw1->op == LIW_OP_CMP)
3048     {
3049       /* Two sequential comparisons means dead code, which ought to
3050          have been eliminated given that bundling only happens with
3051          optimization.  We cannot bundle them in any case.  */
3052       gcc_assert (pliw1->op != pliw2->op);
3053       return true;
3054     }
3055 
3056   /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3057      is the destination of OP, as the CMP will look at the old value, not the new
3058      one.  */
3059   if (pliw2->op == LIW_OP_CMP)
3060     {
3061       if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3062 	return false;
3063 
3064       if (REG_P (pliw2->src))
3065 	return REGNO (pliw2->src) != REGNO (pliw1->dest);
3066 
3067       return true;
3068     }
3069 
3070   /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3071      same destination register.  */
3072   if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3073     return false;
3074 
3075   /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3076      of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
3077      we can replace the source in OP2 with the source of OP1.  */
3078   if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3079     {
3080       if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
3081 	{
3082 	  if (! REG_P (pliw1->src)
3083 	      && (pliw2->op == LIW_OP_AND
3084 		  || pliw2->op == LIW_OP_OR
3085 		  || pliw2->op == LIW_OP_XOR))
3086 	    return false;
3087 
3088 	  pliw2->src = pliw1->src;
3089 	  return true;
3090 	}
3091       return false;
3092     }
3093 
3094   /* Everything else is OK.  */
3095   return true;
3096 }
3097 
3098 /* Combine pairs of insns into LIW bundles.  */
3099 
3100 static void
3101 mn10300_bundle_liw (void)
3102 {
3103   rtx_insn *r;
3104 
3105   for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
3106     {
3107       rtx_insn *insn1, *insn2;
3108       struct liw_data liw1, liw2;
3109 
3110       insn1 = r;
3111       if (! extract_bundle (insn1, & liw1))
3112 	continue;
3113 
3114       insn2 = next_nonnote_nondebug_insn (insn1);
3115       if (! extract_bundle (insn2, & liw2))
3116 	continue;
3117 
3118       /* Check for source/destination overlap.  */
3119       if (! check_liw_constraints (& liw1, & liw2))
3120 	continue;
3121 
3122       if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3123 	{
3124 	  struct liw_data temp;
3125 
3126 	  temp = liw1;
3127 	  liw1 = liw2;
3128 	  liw2 = temp;
3129 	}
3130 
3131       delete_insn (insn2);
3132 
3133       rtx insn2_pat;
3134       if (liw1.op == LIW_OP_CMP)
3135 	insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3136 				 GEN_INT (liw2.op));
3137       else if (liw2.op == LIW_OP_CMP)
3138 	insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3139 				 GEN_INT (liw1.op));
3140       else
3141 	insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3142 			     GEN_INT (liw1.op), GEN_INT (liw2.op));
3143 
3144       insn2 = emit_insn_after (insn2_pat, insn1);
3145       delete_insn (insn1);
3146       r = insn2;
3147     }
3148 }
3149 
3150 #define DUMP(reason, insn)			\
3151   do						\
3152     {						\
3153       if (dump_file)				\
3154 	{					\
3155 	  fprintf (dump_file, reason "\n");	\
3156 	  if (insn != NULL_RTX)			\
3157 	    print_rtl_single (dump_file, insn);	\
3158 	  fprintf(dump_file, "\n");		\
3159 	}					\
3160     }						\
3161   while (0)
3162 
3163 /* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
3164    Insert a SETLB insn just before LABEL.  */
3165 
3166 static void
3167 mn10300_insert_setlb_lcc (rtx_insn *label, rtx_insn *branch)
3168 {
3169   rtx lcc, comparison, cmp_reg;
3170 
3171   if (LABEL_NUSES (label) > 1)
3172     {
3173       rtx_insn *insn;
3174 
3175       /* This label is used both as an entry point to the loop
3176 	 and as a loop-back point for the loop.  We need to separate
3177 	 these two functions so that the SETLB happens upon entry,
3178 	 but the loop-back does not go to the SETLB instruction.  */
3179       DUMP ("Inserting SETLB insn after:", label);
3180       insn = emit_insn_after (gen_setlb (), label);
3181       label = gen_label_rtx ();
3182       emit_label_after (label, insn);
3183       DUMP ("Created new loop-back label:", label);
3184     }
3185   else
3186     {
3187       DUMP ("Inserting SETLB insn before:", label);
3188       emit_insn_before (gen_setlb (), label);
3189     }
3190 
3191   comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
3192   cmp_reg = XEXP (comparison, 0);
3193   gcc_assert (REG_P (cmp_reg));
3194 
3195   /* If the comparison has not already been split out of the branch
3196      then do so now.  */
3197   gcc_assert (REGNO (cmp_reg) == CC_REG);
3198 
3199   if (GET_MODE (cmp_reg) == CC_FLOATmode)
3200     lcc = gen_FLcc (comparison, label);
3201   else
3202     lcc = gen_Lcc (comparison, label);
3203 
3204   rtx_insn *jump = emit_jump_insn_before (lcc, branch);
3205   mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
3206   JUMP_LABEL (jump) = label;
3207   DUMP ("Replacing branch insn...", branch);
3208   DUMP ("... with Lcc insn:", jump);
3209   delete_insn (branch);
3210 }
3211 
3212 static bool
3213 mn10300_block_contains_call (basic_block block)
3214 {
3215   rtx_insn *insn;
3216 
3217   FOR_BB_INSNS (block, insn)
3218     if (CALL_P (insn))
3219       return true;
3220 
3221   return false;
3222 }
3223 
3224 static bool
3225 mn10300_loop_contains_call_insn (loop_p loop)
3226 {
3227   basic_block * bbs;
3228   bool result = false;
3229   unsigned int i;
3230 
3231   bbs = get_loop_body (loop);
3232 
3233   for (i = 0; i < loop->num_nodes; i++)
3234     if (mn10300_block_contains_call (bbs[i]))
3235       {
3236 	result = true;
3237 	break;
3238       }
3239 
3240   free (bbs);
3241   return result;
3242 }
3243 
3244 static void
3245 mn10300_scan_for_setlb_lcc (void)
3246 {
3247   loop_p loop;
3248 
3249   DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);
3250 
3251   df_analyze ();
3252   compute_bb_for_insn ();
3253 
3254   /* Find the loops.  */
3255   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
3256 
3257   /* FIXME: For now we only investigate innermost loops.  In practice however
3258      if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
3259      be the case that its parent loop is suitable.  Thus we should check all
3260      loops, but work from the innermost outwards.  */
3261   FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
3262     {
3263       const char * reason = NULL;
3264 
3265       /* Check to see if we can modify this loop.  If we cannot
3266 	 then set 'reason' to describe why it could not be done.  */
3267       if (loop->latch == NULL)
3268 	reason = "it contains multiple latches";
3269       else if (loop->header != loop->latch)
3270 	/* FIXME: We could handle loops that span multiple blocks,
3271 	   but this requires a lot more work tracking down the branches
3272 	   that need altering, so for now keep things simple.  */
3273 	reason = "the loop spans multiple blocks";
3274       else if (mn10300_loop_contains_call_insn (loop))
3275 	reason = "it contains CALL insns";
3276       else
3277 	{
3278 	  rtx_insn *branch = BB_END (loop->latch);
3279 
3280 	  gcc_assert (JUMP_P (branch));
3281 	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
3282 	    /* We cannot optimize tablejumps and the like.  */
3283 	    /* FIXME: We could handle unconditional jumps.  */
3284 	    reason = "it is not a simple loop";
3285 	  else
3286 	    {
3287 	      rtx_insn *label;
3288 
3289 	      if (dump_file)
3290 		flow_loop_dump (loop, dump_file, NULL, 0);
3291 
3292 	      label = BB_HEAD (loop->header);
3293 	      gcc_assert (LABEL_P (label));
3294 
3295 	      mn10300_insert_setlb_lcc (label, branch);
3296 	    }
3297 	}
3298 
3299       if (dump_file && reason != NULL)
3300 	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
3301 		 INSN_UID (BB_HEAD (loop->header)),
3302 		 reason);
3303     }
3304 
3305   loop_optimizer_finalize ();
3306 
3307   df_finish_pass (false);
3308 
3309   DUMP ("SETLB scan complete", NULL_RTX);
3310 }
3311 
3312 static void
3313 mn10300_reorg (void)
3314 {
3315   /* These are optimizations, so only run them if optimizing.  */
3316   if (TARGET_AM33 && (optimize > 0 || optimize_size))
3317     {
3318       if (TARGET_ALLOW_SETLB)
3319 	mn10300_scan_for_setlb_lcc ();
3320 
3321       if (TARGET_ALLOW_LIW)
3322 	mn10300_bundle_liw ();
3323     }
3324 }
3325 
3326 /* Initialize the GCC target structure.  */
3327 
3328 #undef  TARGET_MACHINE_DEPENDENT_REORG
3329 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3330 
3331 #undef  TARGET_ASM_ALIGNED_HI_OP
3332 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3333 
3334 #undef  TARGET_LEGITIMIZE_ADDRESS
3335 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3336 
3337 #undef  TARGET_ADDRESS_COST
3338 #define TARGET_ADDRESS_COST  mn10300_address_cost
3339 #undef  TARGET_REGISTER_MOVE_COST
3340 #define TARGET_REGISTER_MOVE_COST  mn10300_register_move_cost
3341 #undef  TARGET_MEMORY_MOVE_COST
3342 #define TARGET_MEMORY_MOVE_COST  mn10300_memory_move_cost
3343 #undef  TARGET_RTX_COSTS
3344 #define TARGET_RTX_COSTS mn10300_rtx_costs
3345 
3346 #undef  TARGET_ASM_FILE_START
3347 #define TARGET_ASM_FILE_START mn10300_file_start
3348 #undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
3349 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3350 
3351 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3352 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3353 
3354 #undef  TARGET_OPTION_OVERRIDE
3355 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3356 
3357 #undef  TARGET_ENCODE_SECTION_INFO
3358 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3359 
3360 #undef  TARGET_PROMOTE_PROTOTYPES
3361 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3362 #undef  TARGET_RETURN_IN_MEMORY
3363 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3364 #undef  TARGET_PASS_BY_REFERENCE
3365 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3366 #undef  TARGET_CALLEE_COPIES
3367 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3368 #undef  TARGET_ARG_PARTIAL_BYTES
3369 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3370 #undef  TARGET_FUNCTION_ARG
3371 #define TARGET_FUNCTION_ARG mn10300_function_arg
3372 #undef  TARGET_FUNCTION_ARG_ADVANCE
3373 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3374 
3375 #undef  TARGET_EXPAND_BUILTIN_SAVEREGS
3376 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3377 #undef  TARGET_EXPAND_BUILTIN_VA_START
3378 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3379 
3380 #undef  TARGET_CASE_VALUES_THRESHOLD
3381 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3382 
3383 #undef TARGET_LRA_P
3384 #define TARGET_LRA_P hook_bool_void_false
3385 
3386 #undef  TARGET_LEGITIMATE_ADDRESS_P
3387 #define TARGET_LEGITIMATE_ADDRESS_P	mn10300_legitimate_address_p
3388 #undef  TARGET_DELEGITIMIZE_ADDRESS
3389 #define TARGET_DELEGITIMIZE_ADDRESS	mn10300_delegitimize_address
3390 #undef  TARGET_LEGITIMATE_CONSTANT_P
3391 #define TARGET_LEGITIMATE_CONSTANT_P	mn10300_legitimate_constant_p
3392 
3393 #undef  TARGET_PREFERRED_RELOAD_CLASS
3394 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3395 #undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3396 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3397   mn10300_preferred_output_reload_class
3398 #undef  TARGET_SECONDARY_RELOAD
3399 #define TARGET_SECONDARY_RELOAD  mn10300_secondary_reload
3400 
3401 #undef  TARGET_TRAMPOLINE_INIT
3402 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3403 
3404 #undef  TARGET_FUNCTION_VALUE
3405 #define TARGET_FUNCTION_VALUE mn10300_function_value
3406 #undef  TARGET_LIBCALL_VALUE
3407 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3408 
3409 #undef  TARGET_ASM_OUTPUT_MI_THUNK
3410 #define TARGET_ASM_OUTPUT_MI_THUNK      mn10300_asm_output_mi_thunk
3411 #undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
3412 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK  mn10300_can_output_mi_thunk
3413 
3414 #undef  TARGET_SCHED_ADJUST_COST
3415 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3416 
3417 #undef  TARGET_CONDITIONAL_REGISTER_USAGE
3418 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3419 
3420 #undef TARGET_MD_ASM_ADJUST
3421 #define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust
3422 
3423 #undef  TARGET_FLAGS_REGNUM
3424 #define TARGET_FLAGS_REGNUM  CC_REG
3425 
3426 struct gcc_target targetm = TARGET_INITIALIZER;
3427