/* Subroutines for insn-output.c for Motorola 68000 family.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "calls.h"
#include "stor-layout.h"
#include "varasm.h"
#include "rtl.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "expr.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "flags.h"
#include "df.h"
/* ??? Need to add a dependency between m68k.o and sched-int.h.  */
#include "sched-int.h"
#include "insn-codes.h"
#include "ggc.h"
#include "opts.h"
#include "optabs.h"

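/* Map each hard register number to its register class: eight data
   registers, eight address registers, eight floating-point registers,
   and the fake argument pointer, which is classed as an address
   register.  */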
enum reg_class regno_reg_class[] =
{
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ADDR_REGS
};


/* The minimum number of integer registers that we want to save with the
   movem instruction.  Using two movel instructions instead of a single
   moveml is about 15% faster for the 68020 and 68030 at no expense in
   code size.  */
#define MIN_MOVEM_REGS 3
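/* For instance, when only %d2 and %d3 need saving, m68k_expand_prologue
   below emits two separate stores (illustrative Motorola syntax):

	move.l %d3,-(%sp)
	move.l %d2,-(%sp)

   whereas three or more registers are saved with a single moveml.  */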

/* The minimum number of floating point registers that we want to save
   with the fmovem instruction.  */
#define MIN_FMOVEM_REGS 1

/* Structure describing stack frame layout.  */
struct m68k_frame
{
  /* Stack pointer to frame pointer offset.  */
  HOST_WIDE_INT offset;

  /* Offset of FPU registers.  */
  HOST_WIDE_INT foffset;

  /* Frame size in bytes (rounded up).  */
  HOST_WIDE_INT size;

  /* Saved data and address registers.  */
  int reg_no;
  unsigned int reg_mask;

  /* Saved FPU registers.  */
  int fpu_no;
  unsigned int fpu_mask;

  /* Offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* Function which the above information refers to.  */
  int funcdef_no;
};

/* Current frame information calculated by m68k_compute_frame_layout().  */
static struct m68k_frame current_frame;

/* Structure describing an m68k address.

   If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
   with null fields evaluating to 0.  Here:

   - BASE satisfies m68k_legitimate_base_reg_p
   - INDEX satisfies m68k_legitimate_index_reg_p
   - OFFSET satisfies m68k_legitimate_constant_address_p

   INDEX is either HImode or SImode.  The other fields are SImode.

   If CODE is PRE_DEC, the address is -(BASE).  If CODE is POST_INC,
   the address is (BASE)+.  */
struct m68k_address {
  enum rtx_code code;
  rtx base;
  rtx index;
  rtx offset;
  int scale;
};
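/* For example, the indexed address (8,%a0,%d1.l*4) corresponds to
   CODE == UNKNOWN with BASE = %a0, INDEX = %d1, SCALE = 4 and
   OFFSET = (const_int 8).  */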

static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);

static bool m68k_can_eliminate (const int, const int);
static void m68k_conditional_register_usage (void);
static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
static void m68k_option_override (void);
static void m68k_override_options_after_change (void);
static rtx find_addr_reg (rtx);
static const char *singlemove_string (rtx *);
static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static rtx m68k_struct_value_rtx (tree, int);
static tree m68k_handle_fndecl_attribute (tree *node, tree name,
					  tree args, int flags,
					  bool *no_add_attrs);
static void m68k_compute_frame_layout (void);
static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
static bool m68k_ok_for_sibcall_p (tree, tree);
static bool m68k_tls_symbol_p (rtx);
static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
static bool m68k_rtx_costs (rtx, int, int, int, int *, bool);
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool m68k_return_in_memory (const_tree, const_tree);
#endif
static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void m68k_trampoline_init (rtx, tree, rtx);
static int m68k_return_pops_args (tree, tree, int);
static rtx m68k_delegitimize_address (rtx);
static void m68k_function_arg_advance (cumulative_args_t, enum machine_mode,
				       const_tree, bool);
static rtx m68k_function_arg (cumulative_args_t, enum machine_mode,
			      const_tree, bool);
static bool m68k_cannot_force_const_mem (enum machine_mode mode, rtx x);
static bool m68k_output_addr_const_extra (FILE *, rtx);
static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;

/* Initialize the GCC target structure.  */

#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD	\
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P	m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

static const struct attribute_spec m68k_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt", 0, 0, true,  false, false, m68k_handle_fndecl_attribute,
    false },
  { "interrupt_handler", 0, 0, true,  false, false,
    m68k_handle_fndecl_attribute, false },
  { "interrupt_thread", 0, 0, true,  false, false,
    m68k_handle_fndecl_attribute, false },
  { NULL,                0, 0, false, false, false, NULL, false }
};
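/* For instance, a user marks an interrupt service routine (the handler
   name here is just illustrative) with:

       void __attribute__ ((interrupt_handler)) timer_isr (void);

   m68k_handle_fndecl_attribute below validates such uses.  */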

struct gcc_target targetm = TARGET_INITIALIZER;

/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};

/* Information about one of the -march, -mcpu or -mtune arguments.  */
struct m68k_target_selection
{
  /* The argument being described.  */
  const char *name;

  /* For -mcpu, this is the device selected by the option.
     For -mtune and -march, it is a representative device
     for the microarchitecture or ISA respectively.  */
  enum target_device device;

  /* The M68K_DEVICE fields associated with DEVICE.  See the comment
     in m68k-devices.def for details.  FAMILY is only valid for -mcpu.  */
  const char *family;
  enum uarch_type microarch;
  enum m68k_isa isa;
  unsigned long flags;
};

/* A list of all devices in m68k-devices.def.  Used for -mcpu selection.  */
static const struct m68k_target_selection all_devices[] =
{
#define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
  { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
#include "m68k-devices.def"
#undef M68K_DEVICE
  { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
};

/* A list of all ISAs, mapping each one to a representative device.
   Used for -march selection.  */
static const struct m68k_target_selection all_isas[] =
{
#define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-isas.def"
#undef M68K_ISA
  { NULL,       unk_device, NULL,  unk_arch, isa_max,   0 }
};

/* A list of all microarchitectures, mapping each one to a representative
   device.  Used for -mtune selection.  */
static const struct m68k_target_selection all_microarchs[] =
{
#define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
  { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
#include "m68k-microarchs.def"
#undef M68K_MICROARCH
  { NULL,       unk_device, NULL,  unk_arch,  isa_max, 0 }
};

/* The entries associated with the -mcpu, -march and -mtune settings,
   or null for options that have not been used.  */
const struct m68k_target_selection *m68k_cpu_entry;
const struct m68k_target_selection *m68k_arch_entry;
const struct m68k_target_selection *m68k_tune_entry;

/* Which CPU we are generating code for.  */
enum target_device m68k_cpu;

/* Which microarchitecture to tune for.  */
enum uarch_type m68k_tune;

/* Which FPU to use.  */
enum fpu_type m68k_fpu;

/* The set of FL_* flags that apply to the target processor.  */
unsigned int m68k_cpu_flags;

/* The set of FL_* flags that apply to the processor to be tuned for.  */
unsigned int m68k_tune_flags;

/* Asm templates for calling or jumping to an arbitrary symbolic address,
   or NULL if such calls or jumps are not supported.  The address is held
   in operand 0.  */
const char *m68k_symbolic_call;
const char *m68k_symbolic_jump;

/* Enum variable that corresponds to m68k_symbolic_call values.  */
enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;


/* Implement TARGET_OPTION_OVERRIDE.  */

static void
m68k_option_override (void)
{
  const struct m68k_target_selection *entry;
  unsigned long target_mask;

  if (global_options_set.x_m68k_arch_option)
    m68k_arch_entry = &all_isas[m68k_arch_option];

  if (global_options_set.x_m68k_cpu_option)
    m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];

  if (global_options_set.x_m68k_tune_option)
    m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];

  /* The user can choose:

     -mcpu=
     -march=
     -mtune=

     -march=ARCH should generate code that runs on any processor
     implementing architecture ARCH.  -mcpu=CPU should override -march
     and should generate code that runs on processor CPU, making free
     use of any instructions that CPU understands.  -mtune=UARCH applies
     on top of -mcpu or -march and optimizes the code for UARCH.  It does
     not change the target architecture.  */
  if (m68k_cpu_entry)
    {
      /* Complain if the -march setting is for a different microarchitecture,
	 or includes flags that the -mcpu setting doesn't.  */
      if (m68k_arch_entry
	  && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
	      || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
	warning (0, "-mcpu=%s conflicts with -march=%s",
		 m68k_cpu_entry->name, m68k_arch_entry->name);

      entry = m68k_cpu_entry;
    }
  else
    entry = m68k_arch_entry;

  if (!entry)
    entry = all_devices + TARGET_CPU_DEFAULT;

  m68k_cpu_flags = entry->flags;

  /* Use the architecture setting to derive default values for
     certain flags.  */
  target_mask = 0;

  /* ColdFire is lenient about alignment.  */
  if (!TARGET_COLDFIRE)
    target_mask |= MASK_STRICT_ALIGNMENT;

  if ((m68k_cpu_flags & FL_BITFIELD) != 0)
    target_mask |= MASK_BITFIELD;
  if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
    target_mask |= MASK_CF_HWDIV;
  if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
    target_mask |= MASK_HARD_FLOAT;
  target_flags |= target_mask & ~target_flags_explicit;

  /* Set the directly-usable versions of the -mcpu and -mtune settings.  */
  m68k_cpu = entry->device;
  if (m68k_tune_entry)
    {
      m68k_tune = m68k_tune_entry->microarch;
      m68k_tune_flags = m68k_tune_entry->flags;
    }
#ifdef M68K_DEFAULT_TUNE
  else if (!m68k_cpu_entry && !m68k_arch_entry)
    {
      enum target_device dev;
      dev = all_microarchs[M68K_DEFAULT_TUNE].device;
      m68k_tune_flags = all_devices[dev].flags;
    }
#endif
  else
    {
      m68k_tune = entry->microarch;
      m68k_tune_flags = entry->flags;
    }

  /* Set the type of FPU.  */
  m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
	      : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
	      : FPUTYPE_68881);

  /* Sanity check to ensure that -msep-data and -mid-shared-library are
     not both specified together.  Doing so simply doesn't make sense.  */
  if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
    error ("cannot specify both -msep-data and -mid-shared-library");

  /* If we're generating code for a separate A5 relative data segment,
     we've got to enable -fPIC as well.  This might be relaxable to
     -fpic but it hasn't been tested properly.  */
  if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
    flag_pic = 2;

  /* -mpcrel -fPIC uses 32-bit pc-relative displacements.  Raise an
     error if the target does not support them.  */
  if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
    error ("-mpcrel -fPIC is not currently supported on selected cpu");

  /* ??? A historic way of turning on pic, or is this intended to
     be an embedded thing that doesn't have the same name binding
     significance that it does on hosted ELF systems?  */
  if (TARGET_PCREL && flag_pic == 0)
    flag_pic = 1;

  if (!flag_pic)
    {
      m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;

      m68k_symbolic_jump = "jra %a0";
    }
  else if (TARGET_ID_SHARED_LIBRARY)
    /* All addresses must be loaded from the GOT.  */
    ;
  else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
    {
      if (TARGET_PCREL)
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
      else
	m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;

      if (TARGET_ISAC)
	/* No unconditional long branch */;
      else if (TARGET_PCREL)
	m68k_symbolic_jump = "bra%.l %c0";
      else
	m68k_symbolic_jump = "bra%.l %p0";
      /* Turn off function cse if we are doing PIC.  We always want
	 function calls to be done as `bsr foo@PLTPC'.  */
      /* ??? It's traditional to do this for -mpcrel too, but it isn't
	 clear how intentional that is.  */
      flag_no_function_cse = 1;
    }

  switch (m68k_symbolic_call_var)
    {
    case M68K_SYMBOLIC_CALL_JSR:
      m68k_symbolic_call = "jsr %a0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_C:
      m68k_symbolic_call = "bsr%.l %c0";
      break;

    case M68K_SYMBOLIC_CALL_BSR_P:
      m68k_symbolic_call = "bsr%.l %p0";
      break;

    case M68K_SYMBOLIC_CALL_NONE:
      gcc_assert (m68k_symbolic_call == NULL);
      break;

    default:
      gcc_unreachable ();
    }

#ifndef ASM_OUTPUT_ALIGN_WITH_NOP
  if (align_labels > 2)
    {
      warning (0, "-falign-labels=%d is not supported", align_labels);
      align_labels = 0;
    }
  if (align_loops > 2)
    {
      warning (0, "-falign-loops=%d is not supported", align_loops);
      align_loops = 0;
    }
#endif

  if (stack_limit_rtx != NULL_RTX && !TARGET_68020)
    {
      warning (0, "-fstack-limit- options are not supported on this cpu");
      stack_limit_rtx = NULL_RTX;
    }

  SUBTARGET_OVERRIDE_OPTIONS;

  /* Set up scheduling options.  */
  if (TUNE_CFV1)
    m68k_sched_cpu = CPU_CFV1;
  else if (TUNE_CFV2)
    m68k_sched_cpu = CPU_CFV2;
  else if (TUNE_CFV3)
    m68k_sched_cpu = CPU_CFV3;
  else if (TUNE_CFV4)
    m68k_sched_cpu = CPU_CFV4;
  else
    {
      m68k_sched_cpu = CPU_UNKNOWN;
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }

  if (m68k_sched_cpu != CPU_UNKNOWN)
    {
      if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
	m68k_sched_mac = MAC_CF_EMAC;
      else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
	m68k_sched_mac = MAC_CF_MAC;
      else
	m68k_sched_mac = MAC_NO;
    }
}

/* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  */

static void
m68k_override_options_after_change (void)
{
  if (m68k_sched_cpu == CPU_UNKNOWN)
    {
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;
      flag_modulo_sched = 0;
      flag_live_range_shrinkage = 0;
    }
}

/* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
   given argument and NAME is the argument passed to -mcpu.  Return NULL
   if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_ident (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
}
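/* For example, calling m68k_cpp_cpu_ident ("cf") under -mcpu=5206
   yields "__mcf_cpu_5206".  */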

/* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
   given argument and NAME is the name of the representative device for
   the -mcpu argument's family.  Return NULL if -mcpu was not passed.  */

const char *
m68k_cpp_cpu_family (const char *prefix)
{
  if (!m68k_cpu_entry)
    return NULL;
  return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
}
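/* For example, if the selected -mcpu device belongs to family "5208",
   m68k_cpp_cpu_family ("cf") yields "__mcf_family_5208".  */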

/* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
   "interrupt_handler" attribute and m68k_fk_interrupt_thread if FUNC
   has an "interrupt_thread" attribute.  Otherwise, return
   m68k_fk_normal_function.  */

enum m68k_function_kind
m68k_get_function_kind (tree func)
{
  tree a;

  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_handler;

  a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
  if (a != NULL_TREE)
    return m68k_fk_interrupt_thread;

  return m68k_fk_normal_function;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */
static tree
m68k_handle_fndecl_attribute (tree *node, tree name,
			      tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED,
			      bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
    {
      error ("multiple interrupt attributes not allowed");
      *no_add_attrs = true;
    }

  if (!TARGET_FIDOA
      && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
    {
      error ("interrupt_thread is available only on fido");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

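/* Compute the layout of the current function's stack frame and store
   the result in current_frame.  The information is cached per function
   once reload has completed.  */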
static void
m68k_compute_frame_layout (void)
{
  int regno, saved;
  unsigned int mask;
  enum m68k_function_kind func_kind =
    m68k_get_function_kind (current_function_decl);
  bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
  bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;

  /* Only compute the frame once per function.
     Don't cache information until reload has been completed.  */
  if (current_frame.funcdef_no == current_function_funcdef_no
      && reload_completed)
    return;

  current_frame.size = (get_frame_size () + 3) & -4;

  mask = saved = 0;

  /* An interrupt thread does not need to save any registers.  */
  if (!interrupt_thread)
    for (regno = 0; regno < 16; regno++)
      if (m68k_save_reg (regno, interrupt_handler))
	{
	  mask |= 1 << (regno - D0_REG);
	  saved++;
	}
  current_frame.offset = saved * 4;
  current_frame.reg_no = saved;
  current_frame.reg_mask = mask;

  current_frame.foffset = 0;
  mask = saved = 0;
  if (TARGET_HARD_FLOAT)
    {
      /* An interrupt thread does not need to save any registers.  */
      if (!interrupt_thread)
	for (regno = 16; regno < 24; regno++)
	  if (m68k_save_reg (regno, interrupt_handler))
	    {
	      mask |= 1 << (regno - FP0_REG);
	      saved++;
	    }
      current_frame.foffset = saved * TARGET_FP_REG_SIZE;
      current_frame.offset += current_frame.foffset;
    }
  current_frame.fpu_no = saved;
  current_frame.fpu_mask = mask;

  /* Remember what function this frame refers to.  */
  current_frame.funcdef_no = current_function_funcdef_no;
}

/* Worker function for TARGET_CAN_ELIMINATE.  */

bool
m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
}

HOST_WIDE_INT
m68k_initial_elimination_offset (int from, int to)
{
  int argptr_offset;
  /* The arg pointer points 8 bytes before the start of the arguments,
     as defined by FIRST_PARM_OFFSET.  This makes it coincident with the
     frame pointer in most frames.  */
  argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return argptr_offset;

  m68k_compute_frame_layout ();

  gcc_assert (to == STACK_POINTER_REGNUM);
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      return current_frame.offset + current_frame.size - argptr_offset;
    case FRAME_POINTER_REGNUM:
      return current_frame.offset + current_frame.size;
    default:
      gcc_unreachable ();
    }
}

/* Refer to the array `regs_ever_live' to determine which registers
   to save; `regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This function is responsible for
   knowing which registers should not be saved even if used.
   Return true if we need to save REGNO.  */

static bool
m68k_save_reg (unsigned int regno, bool interrupt_handler)
{
  if (flag_pic && regno == PIC_REG)
    {
      if (crtl->saves_all_registers)
	return true;
      if (crtl->uses_pic_offset_table)
	return true;
      /* Reload may introduce constant pool references into a function
	 that thitherto didn't need a PIC register.  Note that the test
	 above will not catch that case because we will only set
	 crtl->uses_pic_offset_table when emitting
	 the address reloads.  */
      if (crtl->uses_const_pool)
	return true;
    }

  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; ; i++)
	{
	  unsigned int test = EH_RETURN_DATA_REGNO (i);
	  if (test == INVALID_REGNUM)
	    break;
	  if (test == regno)
	    return true;
	}
    }

  /* Fixed regs we never touch.  */
  if (fixed_regs[regno])
    return false;

  /* The frame pointer (if it is such) is handled specially.  */
  if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
    return false;

  /* Interrupt handlers must also save call_used_regs
     if they are live or when calling nested functions.  */
  if (interrupt_handler)
    {
      if (df_regs_ever_live_p (regno))
	return true;

      if (!crtl->is_leaf && call_used_regs[regno])
	return true;
    }

  /* Never need to save registers that aren't touched.  */
  if (!df_regs_ever_live_p (regno))
    return false;

  /* Otherwise save everything that isn't call-clobbered.  */
  return !call_used_regs[regno];
}

/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is a pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.  */

static rtx
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  enum machine_mode mode;

  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
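/* As an illustration, storing %d2-%d4 with ADJUST_STACK_P true and
   OFFSET -12 builds a PARALLEL along the lines of:

       (parallel [(set (reg sp) (plus (reg sp) (const_int -12)))
		  (set (mem (plus (reg sp) (const_int -12))) (reg d2))
		  (set (mem (plus (reg sp) (const_int -8))) (reg d3))
		  (set (mem (plus (reg sp) (const_int -4))) (reg d4))])

   which is intended to match the movem patterns in m68k.md.  */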

/* Make INSN a frame-related instruction.  */

static void
m68k_set_frame_related (rtx insn)
{
  rtx body;
  int i;

  RTX_FRAME_RELATED_P (insn) = 1;
  body = PATTERN (insn);
  if (GET_CODE (body) == PARALLEL)
    for (i = 0; i < XVECLEN (body, 0); i++)
      RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
}

/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
        emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
			         stack_pointer_rtx, stack_limit_rtx,
			         const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
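/* As a rough illustration (Motorola syntax; the exact output depends on
   the target options), a non-ColdFire function that needs a frame
   pointer, 16 bytes of locals and saves of %d2/%d3 starts with
   something like:

       link.w %fp,#-16
       move.l %d3,-(%sp)
       move.l %d2,-(%sp)  */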

/* Return true if a simple (return) instruction is sufficient for this
   function (i.e. if no epilogue is needed).  */

bool
m68k_use_return_insn (void)
{
  if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
    return false;

  m68k_compute_frame_layout ();
  return current_frame.offset == 0;
}

/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME: crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we might as well use a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
        if (current_frame.reg_mask & (1 << i))
          {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}

/* Return true if X is a valid comparison operator for the dbcc
   instruction.

   Note it rejects floating point comparison operators.
   (In the future we could use Fdbcc).

   It also rejects some comparisons when CC_NO_OVERFLOW is set.  */

int
valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (x))
    {
      case EQ: case NE: case GTU: case LTU:
      case GEU: case LEU:
        return 1;

      /* Reject some when CC_NO_OVERFLOW is set.  This may be overly
         conservative.  */
      case GT: case LT: case GE: case LE:
        return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
      default:
        return 0;
    }
}

/* Return nonzero if flags are currently in the 68881 flag register.  */
int
flags_in_68881 (void)
{
  /* We could add support for these in the future.  */
  return cc_status.flags & CC_IN_68881;
}

/* Return true if PARALLEL contains register REGNO.  */
static bool
m68k_reg_present_p (const_rtx parallel, unsigned int regno)
{
  int i;

  if (REG_P (parallel) && REGNO (parallel) == regno)
    return true;

  if (GET_CODE (parallel) != PARALLEL)
    return false;

  for (i = 0; i < XVECLEN (parallel, 0); ++i)
    {
      const_rtx x;

      x = XEXP (XVECEXP (parallel, 0, i), 0);
      if (REG_P (x) && REGNO (x) == regno)
	return true;
    }

  return false;
}

/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to an A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is a superset of what the current function
	 returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}

/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t cum ATTRIBUTE_UNUSED,
		   enum machine_mode mode ATTRIBUTE_UNUSED,
		   const_tree type ATTRIBUTE_UNUSED,
		   bool named ATTRIBUTE_UNUSED)
{
  return NULL_RTX;
}

static void
m68k_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  *cum += (mode != BLKmode
	   ? (GET_MODE_SIZE (mode) + 3) & ~3
	   : (int_size_in_bytes (type) + 3) & ~3);
}
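/* Each argument therefore occupies a multiple of four bytes on the
   stack; passing a single char, for example, still advances CUM by 4.  */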

/* Convert X to a legitimate function call memory reference and return the
   result.  */

rtx
m68k_legitimize_call_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (call_operand (XEXP (x, 0), VOIDmode))
    return x;
  return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
}

/* Likewise for sibling calls.  */

rtx
m68k_legitimize_sibcall_address (rtx x)
{
  gcc_assert (MEM_P (x));
  if (sibcall_operand (XEXP (x, 0), VOIDmode))
    return x;

  emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
  return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
}

/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
          if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
	        {
	          COPY_ONCE (x);
	          x = force_operand (x, 0);
	        }
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}


/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).  We also
   do not handle alternative conditions when CC_NO_OVERFLOW is
   set.  It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
   kick those out before we get here.  */

void
output_dbcc_and_branch (rtx *operands)
{
  switch (GET_CODE (operands[3]))
    {
      case EQ:
	output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
	break;

      case NE:
	output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
	break;

      case GT:
	output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
	break;

      case GTU:
	output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
	break;

      case LT:
	output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
	break;

      case LTU:
	output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
	break;

      case GE:
	output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
	break;

      case GEU:
	output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
	break;

      case LE:
	output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
	break;

      case LEU:
	output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
	break;

      default:
	gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
      case SImode:
        output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
        break;

      case HImode:
        break;

      default:
        gcc_unreachable ();
    }
}
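/* For example, with an EQ condition and an SImode counter, the complete
   output is:

       dbeq %0,%l1
       jeq %l2
       clr%.w %0
       subq%.l #1,%0
       jpl %l1

   where the last three instructions compensate for dbeq decrementing
   only the low 16 bits of the counter.  */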

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
      case EQ:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("seq %5", loperands);
        break;

      case NE:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sne %5", loperands);
        break;

      case GT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("shi %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sgt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("shi %5", loperands);
        break;

      case LT:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scs %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("slt %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[6]));
        break;

      case LTU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scs %5", loperands);
        break;

      case GE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("scc %5\n\tjra %l6", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("sge %5", loperands);
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[6]));
        break;

      case GEU:
        (*targetm.asm_out.internal_label) (asm_out_file, "L",
					   CODE_LABEL_NUMBER (loperands[4]));
        output_asm_insn ("scc %5", loperands);
        break;

      case LE:
        loperands[6] = gen_label_rtx ();
        output_asm_insn ("sls %5\n\tjra %l6", loperands);
1724         (*targetm.asm_out.internal_label) (asm_out_file, "L",
1725 					   CODE_LABEL_NUMBER (loperands[4]));
1726         output_asm_insn ("sle %5", loperands);
1727         (*targetm.asm_out.internal_label) (asm_out_file, "L",
1728 					   CODE_LABEL_NUMBER (loperands[6]));
1729         break;
1730 
1731       case LEU:
1732         (*targetm.asm_out.internal_label) (asm_out_file, "L",
1733 					   CODE_LABEL_NUMBER (loperands[4]));
1734         output_asm_insn ("sls %5", loperands);
1735         break;
1736 
1737       default:
1738 	gcc_unreachable ();
1739     }
1740   return "";
1741 }
1742 
1743 const char *
1744 output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
1745 {
1746   operands[0] = countop;
1747   operands[1] = dataop;
1748 
1749   if (GET_CODE (countop) == CONST_INT)
1750     {
1751       register int count = INTVAL (countop);
1752 	      /* If COUNT is bigger than the size of the storage unit in use,
1753 		 advance to the containing unit of the same size.  */
1754       if (count > signpos)
1755 	{
1756 	  int offset = (count & ~signpos) / 8;
1757 	  count = count & signpos;
1758 	  operands[1] = dataop = adjust_address (dataop, QImode, offset);
1759 	}
1760       if (count == signpos)
1761 	cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1762       else
1763 	cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1764 
1765       /* These three statements used to use next_insns_test_no...
1766 	 but it appears that this should do the same job.  */
1767       if (count == 31
1768 	  && next_insn_tests_no_inequality (insn))
1769 	return "tst%.l %1";
1770       if (count == 15
1771 	  && next_insn_tests_no_inequality (insn))
1772 	return "tst%.w %1";
1773       if (count == 7
1774 	  && next_insn_tests_no_inequality (insn))
1775 	return "tst%.b %1";
1776       /* Try to use `movew to ccr' followed by the appropriate branch insn.
1777          On some m68k variants that is unfortunately slower than btst.
1778          On 68000 and higher, it should also work for all HImode operands.  */
1779       if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
1780 	{
1781 	  if (count == 3 && DATA_REG_P (operands[1])
1782 	      && next_insn_tests_no_inequality (insn))
1783 	    {
1784 	    cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1785 	    return "move%.w %1,%%ccr";
1786 	    }
1787 	  if (count == 2 && DATA_REG_P (operands[1])
1788 	      && next_insn_tests_no_inequality (insn))
1789 	    {
1790 	    cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1791 	    return "move%.w %1,%%ccr";
1792 	    }
1793 	  /* count == 1 followed by bvc/bvs and
1794 	     count == 0 followed by bcc/bcs are also possible, but need
1795 	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1796 	}
1797 
1798       cc_status.flags = CC_NOT_NEGATIVE;
1799     }
1800   return "btst %0,%1";
1801 }
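/* As an illustration of the special cases above: a test of bit 31 of
   a register that is only followed by an equality test reduces to
   "tst.l %d0", while the general fallback is "btst #n,%d0".  */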
1802 
1803 /* Return true if X is a legitimate base register.  STRICT_P says
1804    whether we need strict checking.  */
1805 
1806 bool
1807 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1808 {
1809   /* Allow SUBREG everywhere we allow REG.  This results in better code.  */
1810   if (!strict_p && GET_CODE (x) == SUBREG)
1811     x = SUBREG_REG (x);
1812 
1813   return (REG_P (x)
1814 	  && (strict_p
1815 	      ? REGNO_OK_FOR_BASE_P (REGNO (x))
1816 	      : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1817 }
1818 
1819 /* Return true if X is a legitimate index register.  STRICT_P says
1820    whether we need strict checking.  */
1821 
1822 bool
1823 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1824 {
1825   if (!strict_p && GET_CODE (x) == SUBREG)
1826     x = SUBREG_REG (x);
1827 
1828   return (REG_P (x)
1829 	  && (strict_p
1830 	      ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1831 	      : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1832 }
1833 
1834 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1835    (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
1836    ADDRESS if so.  STRICT_P says whether we need strict checking.  */
1837 
1838 static bool
1839 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1840 {
1841   int scale;
1842 
1843   /* Check for a scale factor.  */
1844   scale = 1;
1845   if ((TARGET_68020 || TARGET_COLDFIRE)
1846       && GET_CODE (x) == MULT
1847       && GET_CODE (XEXP (x, 1)) == CONST_INT
1848       && (INTVAL (XEXP (x, 1)) == 2
1849 	  || INTVAL (XEXP (x, 1)) == 4
1850 	  || (INTVAL (XEXP (x, 1)) == 8
1851 	      && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1852     {
1853       scale = INTVAL (XEXP (x, 1));
1854       x = XEXP (x, 0);
1855     }
1856 
1857   /* Check for a word extension.  */
1858   if (!TARGET_COLDFIRE
1859       && GET_CODE (x) == SIGN_EXTEND
1860       && GET_MODE (XEXP (x, 0)) == HImode)
1861     x = XEXP (x, 0);
1862 
1863   if (m68k_legitimate_index_reg_p (x, strict_p))
1864     {
1865       address->scale = scale;
1866       address->index = x;
1867       return true;
1868     }
1869 
1870   return false;
1871 }
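/* For example, (mult:SI (reg:SI %d0) (const_int 4)) decomposes into
   INDEX = %d0 with SCALE = 4 on 68020 and ColdFire targets; on other
   targets only SCALE = 1 is accepted.  */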
1872 
1873 /* Return true if X is an illegitimate symbolic constant.  */
1874 
1875 bool
1876 m68k_illegitimate_symbolic_constant_p (rtx x)
1877 {
1878   rtx base, offset;
1879 
1880   if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1881     {
1882       split_const (x, &base, &offset);
1883       if (GET_CODE (base) == SYMBOL_REF
1884 	  && !offset_within_block_p (base, INTVAL (offset)))
1885 	return true;
1886     }
1887   return m68k_tls_reference_p (x, false);
1888 }
1889 
1890 /* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
1891 
1892 static bool
1893 m68k_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1894 {
1895   return m68k_illegitimate_symbolic_constant_p (x);
1896 }
1897 
1898 /* Return true if X is a legitimate constant address that can reach
1899    bytes in the range [X, X + REACH).  STRICT_P says whether we need
1900    strict checking.  */
1901 
1902 static bool
1903 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1904 {
1905   rtx base, offset;
1906 
1907   if (!CONSTANT_ADDRESS_P (x))
1908     return false;
1909 
1910   if (flag_pic
1911       && !(strict_p && TARGET_PCREL)
1912       && symbolic_operand (x, VOIDmode))
1913     return false;
1914 
1915   if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1916     {
1917       split_const (x, &base, &offset);
1918       if (GET_CODE (base) == SYMBOL_REF
1919 	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1920 	return false;
1921     }
1922 
1923   return !m68k_tls_reference_p (x, false);
1924 }
1925 
1926 /* Return true if X is a LABEL_REF for a jump table.  Assume that unplaced
1927    labels will become jump tables.  */
1928 
1929 static bool
1930 m68k_jump_table_ref_p (rtx x)
1931 {
1932   if (GET_CODE (x) != LABEL_REF)
1933     return false;
1934 
1935   x = XEXP (x, 0);
1936   if (!NEXT_INSN (x) && !PREV_INSN (x))
1937     return true;
1938 
1939   x = next_nonnote_insn (x);
1940   return x && JUMP_TABLE_DATA_P (x);
1941 }
1942 
1943 /* Return true if X is a legitimate address for values of mode MODE.
1944    STRICT_P says whether strict checking is needed.  If the address
1945    is valid, describe its components in *ADDRESS.  */
1946 
1947 static bool
1948 m68k_decompose_address (enum machine_mode mode, rtx x,
1949 			bool strict_p, struct m68k_address *address)
1950 {
1951   unsigned int reach;
1952 
1953   memset (address, 0, sizeof (*address));
1954 
1955   if (mode == BLKmode)
1956     reach = 1;
1957   else
1958     reach = GET_MODE_SIZE (mode);
1959 
1960   /* Check for (An) (mode 2).  */
1961   if (m68k_legitimate_base_reg_p (x, strict_p))
1962     {
1963       address->base = x;
1964       return true;
1965     }
1966 
1967   /* Check for -(An) and (An)+ (modes 3 and 4).  */
1968   if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1969       && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1970     {
1971       address->code = GET_CODE (x);
1972       address->base = XEXP (x, 0);
1973       return true;
1974     }
1975 
1976   /* Check for (d16,An) (mode 5).  */
1977   if (GET_CODE (x) == PLUS
1978       && GET_CODE (XEXP (x, 1)) == CONST_INT
1979       && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1980       && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1981     {
1982       address->base = XEXP (x, 0);
1983       address->offset = XEXP (x, 1);
1984       return true;
1985     }
1986 
1987   /* Check for GOT loads.  These are (bd,An,Xn) addresses if
1988      TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1989      addresses.  */
1990   if (GET_CODE (x) == PLUS
1991       && XEXP (x, 0) == pic_offset_table_rtx)
1992     {
1993       /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1994 	 they are invalid in this context.  */
1995       if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
1996 	{
1997 	  address->base = XEXP (x, 0);
1998 	  address->offset = XEXP (x, 1);
1999 	  return true;
2000 	}
2001     }
2002 
2003   /* The ColdFire FPU only accepts addressing modes 2-5.  */
2004   if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2005     return false;
2006 
2007   /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
2008      check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2009      All these modes are variations of mode 7.  */
2010   if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2011     {
2012       address->offset = x;
2013       return true;
2014     }
2015 
2016   /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
2017      tablejumps.
2018 
2019      ??? do_tablejump creates these addresses before placing the target
2020      label, so we have to assume that unplaced labels are jump table
2021      references.  It seems unlikely that we would ever generate indexed
2022      accesses to unplaced labels in other cases.  */
2023   if (GET_CODE (x) == PLUS
2024       && m68k_jump_table_ref_p (XEXP (x, 1))
2025       && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2026     {
2027       address->offset = XEXP (x, 1);
2028       return true;
2029     }
2030 
2031   /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2032      (bd,An,Xn.SIZE*SCALE) addresses.  */
2033 
2034   if (TARGET_68020)
2035     {
2036       /* Check for a nonzero base displacement.  */
2037       if (GET_CODE (x) == PLUS
2038 	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2039 	{
2040 	  address->offset = XEXP (x, 1);
2041 	  x = XEXP (x, 0);
2042 	}
2043 
2044       /* Check for a suppressed index register.  */
2045       if (m68k_legitimate_base_reg_p (x, strict_p))
2046 	{
2047 	  address->base = x;
2048 	  return true;
2049 	}
2050 
2051       /* Check for a suppressed base register.  Do not allow this case
2052 	 for non-symbolic offsets as it effectively gives gcc freedom
2053 	 to treat data registers as base registers, which can generate
2054 	 worse code.  */
2055       if (address->offset
2056 	  && symbolic_operand (address->offset, VOIDmode)
2057 	  && m68k_decompose_index (x, strict_p, address))
2058 	return true;
2059     }
2060   else
2061     {
2062       /* Check for a nonzero base displacement.  */
2063       if (GET_CODE (x) == PLUS
2064 	  && GET_CODE (XEXP (x, 1)) == CONST_INT
2065 	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2066 	{
2067 	  address->offset = XEXP (x, 1);
2068 	  x = XEXP (x, 0);
2069 	}
2070     }
2071 
2072   /* We now expect the sum of a base and an index.  */
2073   if (GET_CODE (x) == PLUS)
2074     {
2075       if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2076 	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2077 	{
2078 	  address->base = XEXP (x, 0);
2079 	  return true;
2080 	}
2081 
2082       if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2083 	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2084 	{
2085 	  address->base = XEXP (x, 1);
2086 	  return true;
2087 	}
2088     }
2089   return false;
2090 }
2091 
2092 /* Return true if X is a legitimate address for values of mode MODE.
2093    STRICT_P says whether strict checking is needed.  */
2094 
2095 bool
2096 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2097 {
2098   struct m68k_address address;
2099 
2100   return m68k_decompose_address (mode, x, strict_p, &address);
2101 }
2102 
2103 /* Return true if X is a memory reference, describing its address in
2104    ADDRESS if so.  Apply strict checking if called during or after reload.  */
2105 
2106 static bool
2107 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2108 {
2109   return (MEM_P (x)
2110 	  && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2111 				     reload_in_progress || reload_completed,
2112 				     address));
2113 }
2114 
2115 /* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
2116 
2117 bool
2118 m68k_legitimate_constant_p (enum machine_mode mode, rtx x)
2119 {
2120   return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2121 }
2122 
2123 /* Return true if X matches the 'Q' constraint.  It must be a memory
2124    with a base address and no constant offset or index.  */
2125 
2126 bool
2127 m68k_matches_q_p (rtx x)
2128 {
2129   struct m68k_address address;
2130 
2131   return (m68k_legitimate_mem_p (x, &address)
2132 	  && address.code == UNKNOWN
2133 	  && address.base
2134 	  && !address.offset
2135 	  && !address.index);
2136 }
2137 
2138 /* Return true if X matches the 'U' constraint.  It must be a base address
2139    with a constant offset and no index.  */
2140 
2141 bool
2142 m68k_matches_u_p (rtx x)
2143 {
2144   struct m68k_address address;
2145 
2146   return (m68k_legitimate_mem_p (x, &address)
2147 	  && address.code == UNKNOWN
2148 	  && address.base
2149 	  && address.offset
2150 	  && !address.index);
2151 }
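/* As an illustration of the two constraints above:
   (mem:SI (reg:SI %a0)) matches 'Q', while
   (mem:SI (plus:SI (reg:SI %a0) (const_int 8))) matches 'U'.  */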
2152 
2153 /* Return GOT pointer.  */
2154 
2155 static rtx
2156 m68k_get_gp (void)
2157 {
2158   if (pic_offset_table_rtx == NULL_RTX)
2159     pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2160 
2161   crtl->uses_pic_offset_table = 1;
2162 
2163   return pic_offset_table_rtx;
2164 }
2165 
2166 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
2167    wrappers.  */
2168 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2169 		  RELOC_TLSIE, RELOC_TLSLE };
2170 
2171 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2172 
2173 /* Wrap symbol X into unspec representing relocation RELOC.
2174    BASE_REG - register that should be added to the result.
2175    TEMP_REG - if non-null, temporary register.  */
2176 
2177 static rtx
2178 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2179 {
2180   bool use_x_p;
2181 
2182   use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2183 
2184   if (TARGET_COLDFIRE && use_x_p)
2185     /* When compiling with the -mx{got,tls} switch, the code will look like this:
2186 
2187        move.l <X>@<RELOC>,<TEMP_REG>
2188        add.l <BASE_REG>,<TEMP_REG>  */
2189     {
2190       /* Wrap X in UNSPEC_??? to tell m68k_output_addr_const_extra
2191 	 to put @RELOC after the reference.  */
2192       x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2193 			  UNSPEC_RELOC32);
2194       x = gen_rtx_CONST (Pmode, x);
2195 
2196       if (temp_reg == NULL)
2197 	{
2198 	  gcc_assert (can_create_pseudo_p ());
2199 	  temp_reg = gen_reg_rtx (Pmode);
2200 	}
2201 
2202       emit_move_insn (temp_reg, x);
2203       emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2204       x = temp_reg;
2205     }
2206   else
2207     {
2208       x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2209 			  UNSPEC_RELOC16);
2210       x = gen_rtx_CONST (Pmode, x);
2211 
2212       x = gen_rtx_PLUS (Pmode, base_reg, x);
2213     }
2214 
2215   return x;
2216 }
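/* In the common (non -mxgot/-mxtls) case the value returned above
   therefore has the shape

     (plus:SI BASE_REG
	      (const:SI (unspec:SI [X RELOC] UNSPEC_RELOC16)))

   i.e. a 16-bit @RELOC displacement from BASE_REG.  */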
2217 
2218 /* Helper for m68k_unwrap_symbol.
2219    Also, if unwrapping was successful (that is, if ORIG != <return value>),
2220    sets *RELOC_PTR to relocation type for the symbol.  */
2221 
2222 static rtx
2223 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2224 		      enum m68k_reloc *reloc_ptr)
2225 {
2226   if (GET_CODE (orig) == CONST)
2227     {
2228       rtx x;
2229       enum m68k_reloc dummy;
2230 
2231       x = XEXP (orig, 0);
2232 
2233       if (reloc_ptr == NULL)
2234 	reloc_ptr = &dummy;
2235 
2236       /* Handle an addend.  */
2237       if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2238 	  && CONST_INT_P (XEXP (x, 1)))
2239 	x = XEXP (x, 0);
2240 
2241       if (GET_CODE (x) == UNSPEC)
2242 	{
2243 	  switch (XINT (x, 1))
2244 	    {
2245 	    case UNSPEC_RELOC16:
2246 	      orig = XVECEXP (x, 0, 0);
2247 	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2248 	      break;
2249 
2250 	    case UNSPEC_RELOC32:
2251 	      if (unwrap_reloc32_p)
2252 		{
2253 		  orig = XVECEXP (x, 0, 0);
2254 		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2255 		}
2256 	      break;
2257 
2258 	    default:
2259 	      break;
2260 	    }
2261 	}
2262     }
2263 
2264   return orig;
2265 }
2266 
2267 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2268    UNSPEC_RELOC32 wrappers.  */
2269 
2270 rtx
2271 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2272 {
2273   return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2274 }
2275 
2276 /* Helper for m68k_final_prescan_insn.  */
2277 
2278 static int
2279 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2280 {
2281   rtx x = *x_ptr;
2282 
2283   if (m68k_unwrap_symbol (x, true) != x)
2284     /* For rationale of the below, see comment in m68k_final_prescan_insn.  */
2285     {
2286       rtx plus;
2287 
2288       gcc_assert (GET_CODE (x) == CONST);
2289       plus = XEXP (x, 0);
2290 
2291       if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2292 	{
2293 	  rtx unspec;
2294 	  rtx addend;
2295 
2296 	  unspec = XEXP (plus, 0);
2297 	  gcc_assert (GET_CODE (unspec) == UNSPEC);
2298 	  addend = XEXP (plus, 1);
2299 	  gcc_assert (CONST_INT_P (addend));
2300 
2301 	  /* We now have all the pieces, rearrange them.  */
2302 
2303 	  /* Move symbol to plus.  */
2304 	  XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2305 
2306 	  /* Move plus inside unspec.  */
2307 	  XVECEXP (unspec, 0, 0) = plus;
2308 
2309 	  /* Move unspec to top level of const.  */
2310 	  XEXP (x, 0) = unspec;
2311 	}
2312 
2313       return -1;
2314     }
2315 
2316   return 0;
2317 }
2318 
2319 /* Prescan insn before outputting assembler for it.  */
2320 
2321 void
2322 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2323 			 rtx *operands, int n_operands)
2324 {
2325   int i;
2326 
2327   /* Combine and possibly other optimizations may do a good job of
2328      converting
2329        (const (unspec [(symbol)]))
2330      into
2331        (const (plus (unspec [(symbol)])
2332                     (const_int N))).
2333      The problem with this is emitting @TLS or @GOT decorations.
2334      The decoration is emitted when processing (unspec), so the
2335      result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2336 
2337      It seems that the easiest solution to this is to convert such
2338      operands to
2339        (const (unspec [(plus (symbol)
2340                              (const_int N))])).
2341      Note that the top level of the operand remains intact, so we don't have
2342      to patch up anything outside of the operand.  */
2343 
2344   for (i = 0; i < n_operands; ++i)
2345     {
2346       rtx op;
2347 
2348       op = operands[i];
2349 
2350       for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2351     }
2352 }
2353 
2354 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2355    If REG is non-null, use it; generate new pseudo otherwise.  */
2356 
2357 static rtx
2358 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2359 {
2360   rtx insn;
2361 
2362   if (reg == NULL_RTX)
2363     {
2364       gcc_assert (can_create_pseudo_p ());
2365       reg = gen_reg_rtx (Pmode);
2366     }
2367 
2368   insn = emit_move_insn (reg, x);
2369   /* Put a REG_EQUAL note on this insn, so that it can be optimized
2370      by the loop optimizer.  */
2371   set_unique_reg_note (insn, REG_EQUAL, orig);
2372 
2373   return reg;
2374 }
2375 
2376 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2377    the GOT slot.  */
2378 
2379 static rtx
2380 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2381 {
2382   x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2383 
2384   x = gen_rtx_MEM (Pmode, x);
2385   MEM_READONLY_P (x) = 1;
2386 
2387   return x;
2388 }
2389 
2390 /* Legitimize PIC addresses.  If the address is already
2391    position-independent, we return ORIG.  Newly generated
2392    position-independent addresses go to REG.  If we need more
2393    than one register, we lose.
2394 
2395    An address is legitimized by making an indirect reference
2396    through the Global Offset Table with the name of the symbol
2397    used as an offset.
2398 
2399    The assembler and linker are responsible for placing the
2400    address of the symbol in the GOT.  The function prologue
2401    is responsible for initializing a5 to the starting address
2402    of the GOT.
2403 
2404    The assembler is also responsible for translating a symbol name
2405    into a constant displacement from the start of the GOT.
2406 
2407    A quick example may make things a little clearer:
2408 
2409    When not generating PIC code, to store the value 12345 into _foo
2410    we would generate the following code:
2411 
2412 	movel #12345, _foo
2413 
2414    When generating PIC code, two transformations are made.  First, the compiler
2415    loads the address of foo into a register, so the first transformation makes:
2416 
2417 	lea	_foo, a0
2418 	movel   #12345, a0@
2419 
2420    The code in movsi will intercept the lea instruction and call this
2421    routine, which will transform the instructions into:
2422 
2423 	movel   a5@(_foo:w), a0
2424 	movel   #12345, a0@
2425 
2426 
2427    That (in a nutshell) is how *all* symbol and label references are
2428    handled.  */
2429 
2430 rtx
2431 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2432 		        rtx reg)
2433 {
2434   rtx pic_ref = orig;
2435 
2436   /* First handle a simple SYMBOL_REF or LABEL_REF */
2437   if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2438     {
2439       gcc_assert (reg);
2440 
2441       pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2442       pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2443     }
2444   else if (GET_CODE (orig) == CONST)
2445     {
2446       rtx base;
2447 
2448       /* Make sure this has not already been legitimized.  */
2449       if (m68k_unwrap_symbol (orig, true) != orig)
2450 	return orig;
2451 
2452       gcc_assert (reg);
2453 
2454       /* legitimize both operands of the PLUS */
2455       gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2456 
2457       base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2458       orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2459 				     base == reg ? 0 : reg);
2460 
2461       if (GET_CODE (orig) == CONST_INT)
2462 	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2463       else
2464 	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2465     }
2466 
2467   return pic_ref;
2468 }
2469 
2470 /* The __tls_get_addr symbol.  */
2471 static GTY(()) rtx m68k_tls_get_addr;
2472 
2473 /* Return SYMBOL_REF for __tls_get_addr.  */
2474 
2475 static rtx
2476 m68k_get_tls_get_addr (void)
2477 {
2478   if (m68k_tls_get_addr == NULL_RTX)
2479     m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2480 
2481   return m68k_tls_get_addr;
2482 }
2483 
2484 /* Return libcall result in A0 instead of usual D0.  */
2485 static bool m68k_libcall_value_in_a0_p = false;
2486 
2487 /* Emit instruction sequence that calls __tls_get_addr.  X is
2488    the TLS symbol we are referencing and RELOC is the symbol type to use
2489    (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
2490    emitted.  A pseudo register with the result of the __tls_get_addr call is
2491    returned.  */
2492 
2493 static rtx
2494 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2495 {
2496   rtx a0;
2497   rtx insns;
2498   rtx dest;
2499 
2500   /* Emit the call sequence.  */
2501   start_sequence ();
2502 
2503   /* FIXME: Unfortunately, emit_library_call_value does not
2504      consider (plus (%a5) (const (unspec))) to be a good enough
2505      operand for push, so it forces it into a register.  The bad
2506      thing about this is that the combiner, due to copy propagation and other
2507      optimizations, sometimes cannot fix this up later.  As a consequence,
2508      an additional register may be allocated, resulting in a spill.
2509      For reference, see args processing loops in
2510      calls.c:emit_library_call_value_1.
2511      For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
2512   x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2513 
2514   /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2515      is the simplest way of generating a call.  The difference between
2516      __tls_get_addr() and a libcall is that the result is returned in A0
2517      instead of the usual D0.  To work around this, we use m68k_libcall_value_in_a0_p
2518      which temporarily switches returning the result to A0.  */
2519 
2520   m68k_libcall_value_in_a0_p = true;
2521   a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2522 				Pmode, 1, x, Pmode);
2523   m68k_libcall_value_in_a0_p = false;
2524 
2525   insns = get_insns ();
2526   end_sequence ();
2527 
2528   gcc_assert (can_create_pseudo_p ());
2529   dest = gen_reg_rtx (Pmode);
2530   emit_libcall_block (insns, dest, a0, eqv);
2531 
2532   return dest;
2533 }
2534 
2535 /* The __m68k_read_tp symbol.  */
2536 static GTY(()) rtx m68k_read_tp;
2537 
2538 /* Return SYMBOL_REF for __m68k_read_tp.  */
2539 
2540 static rtx
2541 m68k_get_m68k_read_tp (void)
2542 {
2543   if (m68k_read_tp == NULL_RTX)
2544     m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2545 
2546   return m68k_read_tp;
2547 }
2548 
2549 /* Emit instruction sequence that calls __m68k_read_tp.
2550    A pseudo register with the result of the __m68k_read_tp call is returned.  */
2551 
2552 static rtx
2553 m68k_call_m68k_read_tp (void)
2554 {
2555   rtx a0;
2556   rtx eqv;
2557   rtx insns;
2558   rtx dest;
2559 
2560   start_sequence ();
2561 
2562   /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2563      is the simplest way of generating a call.  The difference between
2564      __m68k_read_tp() and a libcall is that the result is returned in A0
2565      instead of the usual D0.  To work around this, we use m68k_libcall_value_in_a0_p
2566      which temporarily switches returning the result to A0.  */
2567 
2568   /* Emit the call sequence.  */
2569   m68k_libcall_value_in_a0_p = true;
2570   a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2571 				Pmode, 0);
2572   m68k_libcall_value_in_a0_p = false;
2573   insns = get_insns ();
2574   end_sequence ();
2575 
2576   /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2577      share the m68k_read_tp result with other IE/LE model accesses.  */
2578   eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2579 
2580   gcc_assert (can_create_pseudo_p ());
2581   dest = gen_reg_rtx (Pmode);
2582   emit_libcall_block (insns, dest, a0, eqv);
2583 
2584   return dest;
2585 }
2586 
2587 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2588    For explanations of the instruction sequences, see the TLS/NPTL ABI for
2589    m68k and ColdFire.  */
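/* As a rough sketch (the exact sequence depends on the target and
   options), a global-dynamic access to symbol "x" is built from a
   @TLSGD-decorated operand passed to __tls_get_addr, along the
   lines of

	lea (x@TLSGD,%a5),%a0
	move.l %a0,-(%sp)
	jsr __tls_get_addr

   with the result coming back in %a0 rather than the usual %d0.  */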
2590 
2591 rtx
2592 m68k_legitimize_tls_address (rtx orig)
2593 {
2594   switch (SYMBOL_REF_TLS_MODEL (orig))
2595     {
2596     case TLS_MODEL_GLOBAL_DYNAMIC:
2597       orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2598       break;
2599 
2600     case TLS_MODEL_LOCAL_DYNAMIC:
2601       {
2602 	rtx eqv;
2603 	rtx a0;
2604 	rtx x;
2605 
2606 	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2607 	   share the LDM result with other LD model accesses.  */
2608 	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2609 			      UNSPEC_RELOC32);
2610 
2611 	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2612 
2613 	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2614 
2615 	if (can_create_pseudo_p ())
2616 	  x = m68k_move_to_reg (x, orig, NULL_RTX);
2617 
2618 	orig = x;
2619 	break;
2620       }
2621 
2622     case TLS_MODEL_INITIAL_EXEC:
2623       {
2624 	rtx a0;
2625 	rtx x;
2626 
2627 	a0 = m68k_call_m68k_read_tp ();
2628 
2629 	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2630 	x = gen_rtx_PLUS (Pmode, x, a0);
2631 
2632 	if (can_create_pseudo_p ())
2633 	  x = m68k_move_to_reg (x, orig, NULL_RTX);
2634 
2635 	orig = x;
2636 	break;
2637       }
2638 
2639     case TLS_MODEL_LOCAL_EXEC:
2640       {
2641 	rtx a0;
2642 	rtx x;
2643 
2644 	a0 = m68k_call_m68k_read_tp ();
2645 
2646 	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2647 
2648 	if (can_create_pseudo_p ())
2649 	  x = m68k_move_to_reg (x, orig, NULL_RTX);
2650 
2651 	orig = x;
2652 	break;
2653       }
2654 
2655     default:
2656       gcc_unreachable ();
2657     }
2658 
2659   return orig;
2660 }
2661 
2662 /* Return true if X is a TLS symbol.  */
2663 
2664 static bool
2665 m68k_tls_symbol_p (rtx x)
2666 {
2667   if (!TARGET_HAVE_TLS)
2668     return false;
2669 
2670   if (GET_CODE (x) != SYMBOL_REF)
2671     return false;
2672 
2673   return SYMBOL_REF_TLS_MODEL (x) != 0;
2674 }
2675 
2676 /* Helper for m68k_tls_reference_p.  */
2677 
2678 static int
2679 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2680 {
2681   /* Note: this is not the same as m68k_tls_symbol_p.  */
2682   if (GET_CODE (*x_ptr) == SYMBOL_REF)
2683     return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2684 
2685   /* Don't recurse into legitimate TLS references.  */
2686   if (m68k_tls_reference_p (*x_ptr, true))
2687     return -1;
2688 
2689   return 0;
2690 }
2691 
2692 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2693    though an illegitimate one.
2694    If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */
2695 
2696 bool
2697 m68k_tls_reference_p (rtx x, bool legitimate_p)
2698 {
2699   if (!TARGET_HAVE_TLS)
2700     return false;
2701 
2702   if (!legitimate_p)
2703     return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1;
2704   else
2705     {
2706       enum m68k_reloc reloc = RELOC_GOT;
2707 
2708       return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2709 	      && TLS_RELOC_P (reloc));
2710     }
2711 }
2712 
2713 
2714 
2715 #define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
2716 
2717 /* Return the type of move that should be used for integer I.  */
2718 
2719 M68K_CONST_METHOD
2720 m68k_const_method (HOST_WIDE_INT i)
2721 {
2722   unsigned u;
2723 
2724   if (USE_MOVQ (i))
2725     return MOVQ;
2726 
2727   /* The ColdFire doesn't have byte or word operations.  */
2728   /* FIXME: This may not be useful for the m68060 either.  */
2729   if (!TARGET_COLDFIRE)
2730     {
2731       /* If -256 < N < 256 but N is not in range for a moveq,
2732 	 N^0xff will be, so use moveq #N^0xff,dreg; not.b dreg.  */
2733       if (USE_MOVQ (i ^ 0xff))
2734 	return NOTB;
2735       /* Likewise, try with not.w */
2736       if (USE_MOVQ (i ^ 0xffff))
2737 	return NOTW;
2738       /* This is the only value where neg.w is useful */
2739       if (i == -65408)
2740 	return NEGW;
2741     }
2742 
2743   /* Try also with swap.  */
2744   u = i;
2745   if (USE_MOVQ ((u >> 16) | (u << 16)))
2746     return SWAP;
2747 
2748   if (TARGET_ISAB)
2749     {
2750       /* Try using MVZ/MVS with an immediate value to load constants.  */
2751       if (i >= 0 && i <= 65535)
2752 	return MVZ;
2753       if (i >= -32768 && i <= 32767)
2754 	return MVS;
2755     }
2756 
2757   /* Otherwise, use move.l */
2758   return MOVL;
2759 }
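/* A few illustrative classifications, assuming !TARGET_COLDFIRE and
   !TARGET_ISAB:

     100      -> MOVQ  (fits in a signed byte)
     255      -> NOTB  (255 ^ 0xff == 0:  moveq #0,dn; not.b dn)
     65535    -> NOTW  (65535 ^ 0xffff == 0:  moveq #0,dn; not.w dn)
     -65408   -> NEGW  (moveq #-128,dn; neg.w dn)
     0x10000  -> SWAP  (swapped halves give 1:  moveq #1,dn; swap dn)
     0x12345  -> MOVL  (nothing cheaper applies)  */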
2760 
2761 /* Return the cost of moving constant I into a data register.  */
2762 
2763 static int
2764 const_int_cost (HOST_WIDE_INT i)
2765 {
2766   switch (m68k_const_method (i))
2767     {
2768     case MOVQ:
2769       /* Constants between -128 and 127 are cheap due to moveq.  */
2770       return 0;
2771     case MVZ:
2772     case MVS:
2773     case NOTB:
2774     case NOTW:
2775     case NEGW:
2776     case SWAP:
2777       /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
2778       return 1;
2779     case MOVL:
2780       return 2;
2781     default:
2782       gcc_unreachable ();
2783     }
2784 }
2785 
2786 static bool
2787 m68k_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
2788 		int *total, bool speed ATTRIBUTE_UNUSED)
2789 {
2790   switch (code)
2791     {
2792     case CONST_INT:
2793       /* Constant zero is super cheap due to clr instruction.  */
2794       if (x == const0_rtx)
2795 	*total = 0;
2796       else
2797         *total = const_int_cost (INTVAL (x));
2798       return true;
2799 
2800     case CONST:
2801     case LABEL_REF:
2802     case SYMBOL_REF:
2803       *total = 3;
2804       return true;
2805 
2806     case CONST_DOUBLE:
2807       /* Make 0.0 cheaper than other floating constants to
2808          encourage creating tstsf and tstdf insns.  */
2809       if (outer_code == COMPARE
2810           && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2811 	*total = 4;
2812       else
2813 	*total = 5;
2814       return true;
2815 
2816     /* These are vaguely right for a 68020.  */
2817     /* The costs for long multiply have been adjusted to work properly
2818        in synth_mult on the 68020, relative to an average of the time
2819        for add and the time for shift, taking away a little more because
2820        sometimes move insns are needed.  */
2821     /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2822        terms.  */
2823 #define MULL_COST				\
2824   (TUNE_68060 ? 2				\
2825    : TUNE_68040 ? 5				\
2826    : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
2827    : (TUNE_CFV2 && TUNE_MAC) ? 4		\
2828    : TUNE_CFV2 ? 8				\
2829    : TARGET_COLDFIRE ? 3 : 13)
2830 
2831 #define MULW_COST				\
2832   (TUNE_68060 ? 2				\
2833    : TUNE_68040 ? 3				\
2834    : TUNE_68000_10 ? 5				\
2835    : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
2836    : (TUNE_CFV2 && TUNE_MAC) ? 2		\
2837    : TUNE_CFV2 ? 8				\
2838    : TARGET_COLDFIRE ? 2 : 8)
2839 
2840 #define DIVW_COST				\
2841   (TARGET_CF_HWDIV ? 11				\
2842    : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2843 
2844     case PLUS:
2845       /* An lea costs about three times as much as a simple add.  */
2846       if (GET_MODE (x) == SImode
2847 	  && GET_CODE (XEXP (x, 1)) == REG
2848 	  && GET_CODE (XEXP (x, 0)) == MULT
2849 	  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2850 	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2851 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2852 	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2853 	      || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2854 	{
2855 	    /* lea an@(dx:l:i),am */
2856 	    *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2857 	    return true;
2858 	}
2859       return false;
2860 
2861     case ASHIFT:
2862     case ASHIFTRT:
2863     case LSHIFTRT:
2864       if (TUNE_68060)
2865 	{
2866           *total = COSTS_N_INSNS (1);
2867 	  return true;
2868 	}
2869       if (TUNE_68000_10)
2870         {
2871 	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2872 	    {
2873 	      if (INTVAL (XEXP (x, 1)) < 16)
2874 	        *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2875 	      else
2876 	        /* We're using clrw + swap for these cases.  */
2877 	        *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2878 	    }
2879 	  else
2880 	    *total = COSTS_N_INSNS (10); /* Worst case.  */
2881 	  return true;
2882         }
2883       /* A shift by a big integer takes an extra instruction.  */
2884       if (GET_CODE (XEXP (x, 1)) == CONST_INT
2885 	  && (INTVAL (XEXP (x, 1)) == 16))
2886 	{
2887 	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
2888 	  return true;
2889 	}
2890       if (GET_CODE (XEXP (x, 1)) == CONST_INT
2891 	  && !(INTVAL (XEXP (x, 1)) > 0
2892 	       && INTVAL (XEXP (x, 1)) <= 8))
2893 	{
2894 	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
2895 	  return true;
2896 	}
2897       return false;
2898 
2899     case MULT:
2900       if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2901 	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2902 	  && GET_MODE (x) == SImode)
2903         *total = COSTS_N_INSNS (MULW_COST);
2904       else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2905         *total = COSTS_N_INSNS (MULW_COST);
2906       else
2907         *total = COSTS_N_INSNS (MULL_COST);
2908       return true;
2909 
2910     case DIV:
2911     case UDIV:
2912     case MOD:
2913     case UMOD:
2914       if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2915         *total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
2916       else if (TARGET_CF_HWDIV)
2917         *total = COSTS_N_INSNS (18);
2918       else
2919 	*total = COSTS_N_INSNS (43);		/* div.l */
2920       return true;
2921 
2922     case ZERO_EXTRACT:
2923       if (outer_code == COMPARE)
2924         *total = 0;
2925       return false;
2926 
2927     default:
2928       return false;
2929     }
2930 }
2931 
2932 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2933    OPERANDS[0].  */
2934 
2935 static const char *
2936 output_move_const_into_data_reg (rtx *operands)
2937 {
2938   HOST_WIDE_INT i;
2939 
2940   i = INTVAL (operands[1]);
2941   switch (m68k_const_method (i))
2942     {
2943     case MVZ:
2944       return "mvzw %1,%0";
2945     case MVS:
2946       return "mvsw %1,%0";
2947     case MOVQ:
2948       return "moveq %1,%0";
2949     case NOTB:
2950       CC_STATUS_INIT;
2951       operands[1] = GEN_INT (i ^ 0xff);
2952       return "moveq %1,%0\n\tnot%.b %0";
2953     case NOTW:
2954       CC_STATUS_INIT;
2955       operands[1] = GEN_INT (i ^ 0xffff);
2956       return "moveq %1,%0\n\tnot%.w %0";
2957     case NEGW:
2958       CC_STATUS_INIT;
2959       return "moveq #-128,%0\n\tneg%.w %0";
2960     case SWAP:
2961       {
2962 	unsigned u = i;
2963 
2964 	operands[1] = GEN_INT ((u << 16) | (u >> 16));
2965 	return "moveq %1,%0\n\tswap %0";
2966       }
2967     case MOVL:
2968       return "move%.l %1,%0";
2969     default:
2970       gcc_unreachable ();
2971     }
2972 }
2973 
2974 /* Return true if I can be handled by ISA B's mov3q instruction.  */
2975 
2976 bool
2977 valid_mov3q_const (HOST_WIDE_INT i)
2978 {
2979   return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
2980 }
2981 
2982 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].  */
2984 
2985 static const char *
2986 output_move_simode_const (rtx *operands)
2987 {
2988   rtx dest;
2989   HOST_WIDE_INT src;
2990 
2991   dest = operands[0];
2992   src = INTVAL (operands[1]);
2993   if (src == 0
2994       && (DATA_REG_P (dest) || MEM_P (dest))
2995       /* clr insns on 68000 read before writing.  */
2996       && ((TARGET_68010 || TARGET_COLDFIRE)
2997 	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2998     return "clr%.l %0";
2999   else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3000     return "mov3q%.l %1,%0";
3001   else if (src == 0 && ADDRESS_REG_P (dest))
3002     return "sub%.l %0,%0";
3003   else if (DATA_REG_P (dest))
3004     return output_move_const_into_data_reg (operands);
3005   else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3006     {
3007       if (valid_mov3q_const (src))
3008         return "mov3q%.l %1,%0";
3009       return "move%.w %1,%0";
3010     }
3011   else if (MEM_P (dest)
3012 	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3013 	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3014 	   && IN_RANGE (src, -0x8000, 0x7fff))
3015     {
3016       if (valid_mov3q_const (src))
3017         return "mov3q%.l %1,%-";
3018       return "pea %a1";
3019     }
3020   return "move%.l %1,%0";
3021 }
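/* For instance, storing zero into a data register or into memory
   emits "clr.l %0", while zeroing an address register uses
   "sub.l %0,%0", since clr cannot write to an address register.  */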
3022 
3023 const char *
3024 output_move_simode (rtx *operands)
3025 {
3026   if (GET_CODE (operands[1]) == CONST_INT)
3027     return output_move_simode_const (operands);
3028   else if ((GET_CODE (operands[1]) == SYMBOL_REF
3029 	    || GET_CODE (operands[1]) == CONST)
3030 	   && push_operand (operands[0], SImode))
3031     return "pea %a1";
3032   else if ((GET_CODE (operands[1]) == SYMBOL_REF
3033 	    || GET_CODE (operands[1]) == CONST)
3034 	   && ADDRESS_REG_P (operands[0]))
3035     return "lea %a1,%0";
3036   return "move%.l %1,%0";
3037 }
3038 
3039 const char *
3040 output_move_himode (rtx *operands)
3041 {
3042  if (GET_CODE (operands[1]) == CONST_INT)
3043     {
3044       if (operands[1] == const0_rtx
3045 	  && (DATA_REG_P (operands[0])
3046 	      || GET_CODE (operands[0]) == MEM)
3047 	  /* clr insns on 68000 read before writing.  */
3048 	  && ((TARGET_68010 || TARGET_COLDFIRE)
3049 	      || !(GET_CODE (operands[0]) == MEM
3050 		   && MEM_VOLATILE_P (operands[0]))))
3051 	return "clr%.w %0";
3052       else if (operands[1] == const0_rtx
3053 	       && ADDRESS_REG_P (operands[0]))
3054 	return "sub%.l %0,%0";
3055       else if (DATA_REG_P (operands[0])
3056 	       && INTVAL (operands[1]) < 128
3057 	       && INTVAL (operands[1]) >= -128)
3058 	return "moveq %1,%0";
3059       else if (INTVAL (operands[1]) < 0x8000
3060 	       && INTVAL (operands[1]) >= -0x8000)
3061 	return "move%.w %1,%0";
3062     }
3063   else if (CONSTANT_P (operands[1]))
3064     return "move%.l %1,%0";
3065   return "move%.w %1,%0";
3066 }
3067 
3068 const char *
3069 output_move_qimode (rtx *operands)
3070 {
3071   /* The 68k family always modifies the stack pointer by at least 2, even for
3072      byte pushes.  The 5200 (ColdFire) does not do this.  */
3073 
3074   /* This case is generated by the pushqi1 pattern now.  */
3075   gcc_assert (!(GET_CODE (operands[0]) == MEM
3076 		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3077 		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3078 		&& ! ADDRESS_REG_P (operands[1])
3079 		&& ! TARGET_COLDFIRE));
3080 
3081   /* clr and st insns on 68000 read before writing.  */
3082   if (!ADDRESS_REG_P (operands[0])
3083       && ((TARGET_68010 || TARGET_COLDFIRE)
3084 	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3085     {
3086       if (operands[1] == const0_rtx)
3087 	return "clr%.b %0";
3088       if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3089 	  && GET_CODE (operands[1]) == CONST_INT
3090 	  && (INTVAL (operands[1]) & 255) == 255)
3091 	{
3092 	  CC_STATUS_INIT;
3093 	  return "st %0";
3094 	}
3095     }
3096   if (GET_CODE (operands[1]) == CONST_INT
3097       && DATA_REG_P (operands[0])
3098       && INTVAL (operands[1]) < 128
3099       && INTVAL (operands[1]) >= -128)
3100     return "moveq %1,%0";
3101   if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3102     return "sub%.l %0,%0";
3103   if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3104     return "move%.l %1,%0";
3105   /* The 68k family (including the 5200 ColdFire) does not support byte
3106      moves to or from address registers.  */
3107   if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3108     return "move%.w %1,%0";
3109   return "move%.b %1,%0";
3110 }
3111 
3112 const char *
3113 output_move_stricthi (rtx *operands)
3114 {
3115   if (operands[1] == const0_rtx
3116       /* clr insns on 68000 read before writing.  */
3117       && ((TARGET_68010 || TARGET_COLDFIRE)
3118 	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3119     return "clr%.w %0";
3120   return "move%.w %1,%0";
3121 }
3122 
3123 const char *
3124 output_move_strictqi (rtx *operands)
3125 {
3126   if (operands[1] == const0_rtx
3127       /* clr insns on 68000 read before writing.  */
3128       && ((TARGET_68010 || TARGET_COLDFIRE)
3129           || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3130     return "clr%.b %0";
3131   return "move%.b %1,%0";
3132 }
3133 
3134 /* Return the best assembler insn template
3135    for moving operands[1] into operands[0] as a fullword.  */
3136 
3137 static const char *
3138 singlemove_string (rtx *operands)
3139 {
3140   if (GET_CODE (operands[1]) == CONST_INT)
3141     return output_move_simode_const (operands);
3142   return "move%.l %1,%0";
3143 }
3144 
3145 
3146 /* Output assembler or rtl code to perform a doubleword move insn
3147    with operands OPERANDS.
3148    Pointers to 3 helper functions should be specified:
3149    HANDLE_REG_ADJUST to adjust a register by a small value,
3150    HANDLE_COMPADR to compute an address and
3151    HANDLE_MOVSI to move 4 bytes.  */
3152 
3153 static void
3154 handle_move_double (rtx operands[2],
3155 		    void (*handle_reg_adjust) (rtx, int),
3156 		    void (*handle_compadr) (rtx [2]),
3157 		    void (*handle_movsi) (rtx [2]))
3158 {
3159   enum
3160     {
3161       REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3162     } optype0, optype1;
3163   rtx latehalf[2];
3164   rtx middlehalf[2];
3165   rtx xops[2];
3166   rtx addreg0 = 0, addreg1 = 0;
3167   int dest_overlapped_low = 0;
3168   int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3169 
3170   middlehalf[0] = 0;
3171   middlehalf[1] = 0;
3172 
3173   /* First classify both operands.  */
3174 
3175   if (REG_P (operands[0]))
3176     optype0 = REGOP;
3177   else if (offsettable_memref_p (operands[0]))
3178     optype0 = OFFSOP;
3179   else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3180     optype0 = POPOP;
3181   else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3182     optype0 = PUSHOP;
3183   else if (GET_CODE (operands[0]) == MEM)
3184     optype0 = MEMOP;
3185   else
3186     optype0 = RNDOP;
3187 
3188   if (REG_P (operands[1]))
3189     optype1 = REGOP;
3190   else if (CONSTANT_P (operands[1]))
3191     optype1 = CNSTOP;
3192   else if (offsettable_memref_p (operands[1]))
3193     optype1 = OFFSOP;
3194   else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3195     optype1 = POPOP;
3196   else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3197     optype1 = PUSHOP;
3198   else if (GET_CODE (operands[1]) == MEM)
3199     optype1 = MEMOP;
3200   else
3201     optype1 = RNDOP;
3202 
3203   /* Check for the cases that the operand constraints are not supposed
3204      to allow.  Generating code for these cases is
3205      painful.  */
3206   gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3207 
3208   /* If one operand is decrementing and one is incrementing,
3209      decrement the former register explicitly
3210      and change that operand into ordinary indexing.  */
3211 
3212   if (optype0 == PUSHOP && optype1 == POPOP)
3213     {
3214       operands[0] = XEXP (XEXP (operands[0], 0), 0);
3215 
3216       handle_reg_adjust (operands[0], -size);
3217 
3218       if (GET_MODE (operands[1]) == XFmode)
3219 	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3220       else if (GET_MODE (operands[0]) == DFmode)
3221 	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3222       else
3223 	operands[0] = gen_rtx_MEM (DImode, operands[0]);
3224       optype0 = OFFSOP;
3225     }
3226   if (optype0 == POPOP && optype1 == PUSHOP)
3227     {
3228       operands[1] = XEXP (XEXP (operands[1], 0), 0);
3229 
3230       handle_reg_adjust (operands[1], -size);
3231 
3232       if (GET_MODE (operands[1]) == XFmode)
3233 	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3234       else if (GET_MODE (operands[1]) == DFmode)
3235 	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3236       else
3237 	operands[1] = gen_rtx_MEM (DImode, operands[1]);
3238       optype1 = OFFSOP;
3239     }
3240 
3241   /* If an operand is an unoffsettable memory ref, find a register
3242      we can increment temporarily to make it refer to the second word.  */
3243 
3244   if (optype0 == MEMOP)
3245     addreg0 = find_addr_reg (XEXP (operands[0], 0));
3246 
3247   if (optype1 == MEMOP)
3248     addreg1 = find_addr_reg (XEXP (operands[1], 0));
3249 
3250   /* Ok, we can do one word at a time.
3251      Normally we do the low-numbered word first,
3252      but if either operand is autodecrementing then we
3253      do the high-numbered word first.
3254 
3255      In either case, set up in LATEHALF the operands to use
3256      for the high-numbered word and in some cases alter the
3257      operands in OPERANDS to be suitable for the low-numbered word.  */
3258 
3259   if (size == 12)
3260     {
3261       if (optype0 == REGOP)
3262 	{
3263 	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3264 	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3265 	}
3266       else if (optype0 == OFFSOP)
3267 	{
3268 	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
3269 	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3270 	}
3271       else
3272 	{
3273 	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
3274 	  latehalf[0] = adjust_address (operands[0], SImode, 0);
3275 	}
3276 
3277       if (optype1 == REGOP)
3278 	{
3279 	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3280 	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3281 	}
3282       else if (optype1 == OFFSOP)
3283 	{
3284 	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
3285 	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3286 	}
3287       else if (optype1 == CNSTOP)
3288 	{
3289 	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
3290 	    {
3291 	      REAL_VALUE_TYPE r;
3292 	      long l[3];
3293 
3294 	      REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3295 	      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3296 	      operands[1] = GEN_INT (l[0]);
3297 	      middlehalf[1] = GEN_INT (l[1]);
3298 	      latehalf[1] = GEN_INT (l[2]);
3299 	    }
3300 	  else
3301 	    {
3302 	      /* No non-CONST_DOUBLE constant should ever appear
3303 		 here.  */
3304 	      gcc_assert (!CONSTANT_P (operands[1]));
3305 	    }
3306 	}
3307       else
3308 	{
3309 	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
3310 	  latehalf[1] = adjust_address (operands[1], SImode, 0);
3311 	}
3312     }
3313   else
3314     /* size is not 12: */
3315     {
3316       if (optype0 == REGOP)
3317 	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3318       else if (optype0 == OFFSOP)
3319 	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3320       else
3321 	latehalf[0] = adjust_address (operands[0], SImode, 0);
3322 
3323       if (optype1 == REGOP)
3324 	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3325       else if (optype1 == OFFSOP)
3326 	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3327       else if (optype1 == CNSTOP)
3328 	split_double (operands[1], &operands[1], &latehalf[1]);
3329       else
3330 	latehalf[1] = adjust_address (operands[1], SImode, 0);
3331     }
3332 
3333   /* If insn is effectively movd N(REG),-(REG) then we will do the high
3334      word first.  We should use the adjusted operand 1 (which is N+4(REG))
3335      for the low word as well, to compensate for the first decrement of
3336      REG.  */
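  /* For example (illustrative), pushing the DImode value at (%sp):
     latehalf[1] is 4(%sp), and the first movl 4(%sp),-(%sp) lowers %sp
     by 4, after which the remaining word is also found at 4(%sp), so
     the same adjusted operand serves for both moves.  */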
3337   if (optype0 == PUSHOP
3338       && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
3339     operands[1] = middlehalf[1] = latehalf[1];
3340 
3341   /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3342      if the upper part of reg N does not appear in the MEM, arrange to
3343      emit the late-half move first.  Otherwise, compute the MEM address
3344      into the upper part of N and use that as a pointer to the memory
3345      operand.  */
3346   if (optype0 == REGOP
3347       && (optype1 == OFFSOP || optype1 == MEMOP))
3348     {
3349       rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3350 
3351       if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3352 	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3353 	{
3354 	  /* If both halves of dest are used in the src memory address,
3355 	     compute the address into latehalf of dest.
3356 	     Note that this can't happen if the dest is two data regs.  */
3357 	compadr:
3358 	  xops[0] = latehalf[0];
3359 	  xops[1] = XEXP (operands[1], 0);
3360 
3361 	  handle_compadr (xops);
3362 	  if (GET_MODE (operands[1]) == XFmode)
3363 	    {
3364 	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3365 	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3366 	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3367 	    }
3368 	  else
3369 	    {
3370 	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3371 	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3372 	    }
3373 	}
3374       else if (size == 12
3375 	       && reg_overlap_mentioned_p (middlehalf[0],
3376 					   XEXP (operands[1], 0)))
3377 	{
3378 	  /* Check for two regs used by both source and dest.
3379 	     Note that this can't happen if the dest is all data regs.
3380 	     It can happen if the dest is d6, d7, a0.
3381 	     But in that case, latehalf is an addr reg, so
3382 	     the code at compadr does ok.  */
3383 
3384 	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3385 	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3386 	    goto compadr;
3387 
3388 	  /* JRV says this can't happen: */
3389 	  gcc_assert (!addreg0 && !addreg1);
3390 
3391 	  /* Only the middle reg conflicts; simply put it last.  */
3392 	  handle_movsi (operands);
3393 	  handle_movsi (latehalf);
3394 	  handle_movsi (middlehalf);
3395 
3396 	  return;
3397 	}
3398       else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3399 	/* If the low half of dest is mentioned in the source memory
3400 	   address, then arrange to emit the late-half move first.  */
3401 	dest_overlapped_low = 1;
3402     }
3403 
3404   /* If one or both operands are autodecrementing,
3405      do the two words, high-numbered first.  */
3406 
3407   /* Likewise, if the first move would clobber the source of the second one,
3408      do them in the other order.  This happens only for registers;
3409      such overlap can't happen in memory unless the user explicitly
3410      sets it up, and that is an undefined circumstance.  */
3411 
3412   if (optype0 == PUSHOP || optype1 == PUSHOP
3413       || (optype0 == REGOP && optype1 == REGOP
3414 	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3415 	      || REGNO (operands[0]) == REGNO (latehalf[1])))
3416       || dest_overlapped_low)
3417     {
3418       /* Make any unoffsettable addresses point at high-numbered word.  */
3419       if (addreg0)
3420 	handle_reg_adjust (addreg0, size - 4);
3421       if (addreg1)
3422 	handle_reg_adjust (addreg1, size - 4);
3423 
3424       /* Do that word.  */
3425       handle_movsi (latehalf);
3426 
3427       /* Undo the adds we just did.  */
3428       if (addreg0)
3429 	handle_reg_adjust (addreg0, -4);
3430       if (addreg1)
3431 	handle_reg_adjust (addreg1, -4);
3432 
3433       if (size == 12)
3434 	{
3435 	  handle_movsi (middlehalf);
3436 
3437 	  if (addreg0)
3438 	    handle_reg_adjust (addreg0, -4);
3439 	  if (addreg1)
3440 	    handle_reg_adjust (addreg1, -4);
3441 	}
3442 
3443       /* Do low-numbered word.  */
3444 
3445       handle_movsi (operands);
3446       return;
3447     }
3448 
3449   /* Normal case: do the two words, low-numbered first.  */
3450 
3451   m68k_final_prescan_insn (NULL, operands, 2);
3452   handle_movsi (operands);
3453 
3454   /* Do the middle one of the three words for long double.  */
3455   if (size == 12)
3456     {
3457       if (addreg0)
3458 	handle_reg_adjust (addreg0, 4);
3459       if (addreg1)
3460 	handle_reg_adjust (addreg1, 4);
3461 
3462       m68k_final_prescan_insn (NULL, middlehalf, 2);
3463       handle_movsi (middlehalf);
3464     }
3465 
3466   /* Make any unoffsettable addresses point at high-numbered word.  */
3467   if (addreg0)
3468     handle_reg_adjust (addreg0, 4);
3469   if (addreg1)
3470     handle_reg_adjust (addreg1, 4);
3471 
3472   /* Do that word.  */
3473   m68k_final_prescan_insn (NULL, latehalf, 2);
3474   handle_movsi (latehalf);
3475 
3476   /* Undo the adds we just did.  */
3477   if (addreg0)
3478     handle_reg_adjust (addreg0, -(size - 4));
3479   if (addreg1)
3480     handle_reg_adjust (addreg1, -(size - 4));
3481 
3482   return;
3483 }
3484 
3485 /* Output assembler code to adjust REG by N.  */
3486 static void
3487 output_reg_adjust (rtx reg, int n)
3488 {
3489   const char *s;
3490 
3491   gcc_assert (GET_MODE (reg) == SImode
3492 	      && -12 <= n && n != 0 && n <= 12);
3493 
3494   switch (n)
3495     {
3496     case 12:
3497       s = "add%.l #12,%0";
3498       break;
3499 
3500     case 8:
3501       s = "addq%.l #8,%0";
3502       break;
3503 
3504     case 4:
3505       s = "addq%.l #4,%0";
3506       break;
3507 
3508     case -12:
3509       s = "sub%.l #12,%0";
3510       break;
3511 
3512     case -8:
3513       s = "subq%.l #8,%0";
3514       break;
3515 
3516     case -4:
3517       s = "subq%.l #4,%0";
3518       break;
3519 
3520     default:
3521       gcc_unreachable ();
3522       s = NULL;
3523     }
3524 
3525   output_asm_insn (s, &reg);
3526 }
3527 
3528 /* Emit rtl code to adjust REG by N.  */
3529 static void
3530 emit_reg_adjust (rtx reg1, int n)
3531 {
3532   rtx reg2;
3533 
3534   gcc_assert (GET_MODE (reg1) == SImode
3535 	      && -12 <= n && n != 0 && n <= 12);
3536 
3537   reg1 = copy_rtx (reg1);
3538   reg2 = copy_rtx (reg1);
3539 
3540   if (n < 0)
3541     emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3542   else if (n > 0)
3543     emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3544   else
3545     gcc_unreachable ();
3546 }
3547 
3548 /* Output assembler to load address OPERANDS[1] into register OPERANDS[0].  */
3549 static void
3550 output_compadr (rtx operands[2])
3551 {
3552   output_asm_insn ("lea %a1,%0", operands);
3553 }
3554 
3555 /* Output the best assembler insn for moving operands[1] into operands[0]
3556    as a fullword.  */
3557 static void
3558 output_movsi (rtx operands[2])
3559 {
3560   output_asm_insn (singlemove_string (operands), operands);
3561 }
3562 
3563 /* Copy OP and change its mode to MODE.  */
3564 static rtx
3565 copy_operand (rtx op, enum machine_mode mode)
3566 {
3567   /* ??? This looks really ugly.  There must be a better way
3568      to change a mode on the operand.  */
3569   if (GET_MODE (op) != VOIDmode)
3570     {
3571       if (REG_P (op))
3572 	op = gen_rtx_REG (mode, REGNO (op));
3573       else
3574 	{
3575 	  op = copy_rtx (op);
3576 	  PUT_MODE (op, mode);
3577 	}
3578     }
3579 
3580   return op;
3581 }
3582 
3583 /* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
3584 static void
3585 emit_movsi (rtx operands[2])
3586 {
3587   operands[0] = copy_operand (operands[0], SImode);
3588   operands[1] = copy_operand (operands[1], SImode);
3589 
3590   emit_insn (gen_movsi (operands[0], operands[1]));
3591 }
3592 
3593 /* Output assembler code to perform a doubleword move insn
3594    with operands OPERANDS.  */
3595 const char *
3596 output_move_double (rtx *operands)
3597 {
3598   handle_move_double (operands,
3599 		      output_reg_adjust, output_compadr, output_movsi);
3600 
3601   return "";
3602 }
3603 
3604 /* Output rtl code to perform a doubleword move insn
3605    with operands OPERANDS.  */
3606 void
3607 m68k_emit_move_double (rtx operands[2])
3608 {
3609   handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
3610 }
3611 
3612 /* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
3613    new rtx with the correct mode.  */
3614 
3615 static rtx
3616 force_mode (enum machine_mode mode, rtx orig)
3617 {
3618   if (mode == GET_MODE (orig))
3619     return orig;
3620 
3621   if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3622     abort ();
3623 
3624   return gen_rtx_REG (mode, REGNO (orig));
3625 }
3626 
3627 static int
3628 fp_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3629 {
3630   return reg_renumber && FP_REG_P (op);
3631 }
3632 
3633 /* Emit insns to move operands[1] into operands[0].
3634 
3635    Return 1 if we have written out everything that needs to be done to
3636    do the move.  Otherwise, return 0 and the caller will emit the move
3637    normally.
3638 
3639    Note SCRATCH_REG may not be in the proper mode depending on how it
3640    will be used.  This routine is responsible for creating a new copy
3641    of SCRATCH_REG in the proper mode.  */
3642 
3643 int
3644 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
3645 {
3646   register rtx operand0 = operands[0];
3647   register rtx operand1 = operands[1];
3648   register rtx tem;
3649 
3650   if (scratch_reg
3651       && reload_in_progress && GET_CODE (operand0) == REG
3652       && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
3653     operand0 = reg_equiv_mem (REGNO (operand0));
3654   else if (scratch_reg
3655 	   && reload_in_progress && GET_CODE (operand0) == SUBREG
3656 	   && GET_CODE (SUBREG_REG (operand0)) == REG
3657 	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
3658     {
3659      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3660 	the code which tracks sets/uses for delete_output_reload.  */
3661       rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
3662 				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
3663 				 SUBREG_BYTE (operand0));
3664       operand0 = alter_subreg (&temp, true);
3665     }
3666 
3667   if (scratch_reg
3668       && reload_in_progress && GET_CODE (operand1) == REG
3669       && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
3670     operand1 = reg_equiv_mem (REGNO (operand1));
3671   else if (scratch_reg
3672 	   && reload_in_progress && GET_CODE (operand1) == SUBREG
3673 	   && GET_CODE (SUBREG_REG (operand1)) == REG
3674 	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
3675     {
3676      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
3677 	the code which tracks sets/uses for delete_output_reload.  */
3678       rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
3679 				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
3680 				 SUBREG_BYTE (operand1));
3681       operand1 = alter_subreg (&temp, true);
3682     }
3683 
3684   if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
3685       && ((tem = find_replacement (&XEXP (operand0, 0)))
3686 	  != XEXP (operand0, 0)))
3687     operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
3688   if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
3689       && ((tem = find_replacement (&XEXP (operand1, 0)))
3690 	  != XEXP (operand1, 0)))
3691     operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);
3692 
3693   /* Handle secondary reloads for loads/stores of FP registers where
3694      the address is symbolic by using the scratch register.  */
3695   if (fp_reg_operand (operand0, mode)
3696       && ((GET_CODE (operand1) == MEM
3697 	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
3698 	  || ((GET_CODE (operand1) == SUBREG
3699 	       && GET_CODE (XEXP (operand1, 0)) == MEM
3700 	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
3701       && scratch_reg)
3702     {
3703       if (GET_CODE (operand1) == SUBREG)
3704 	operand1 = XEXP (operand1, 0);
3705 
3706       /* SCRATCH_REG will hold an address.  We want
3707 	 it in SImode regardless of what mode it was originally given
3708 	 to us.  */
3709       scratch_reg = force_mode (SImode, scratch_reg);
3710 
3711       /* The displacement might be too large for a legitimate address;
3712 	 in that case load it into the scratch reg and add the base to it.  */
3713       if (!memory_address_p (Pmode, XEXP (operand1, 0)))
3714 	{
3715 	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
3716 	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
3717 						       Pmode,
3718 						       XEXP (XEXP (operand1, 0), 0),
3719 						       scratch_reg));
3720 	}
3721       else
3722 	emit_move_insn (scratch_reg, XEXP (operand1, 0));
3723       emit_insn (gen_rtx_SET (VOIDmode, operand0,
3724 			      gen_rtx_MEM (mode, scratch_reg)));
3725       return 1;
3726     }
3727   else if (fp_reg_operand (operand1, mode)
3728 	   && ((GET_CODE (operand0) == MEM
3729 		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
3730 	       || ((GET_CODE (operand0) == SUBREG)
3731 		   && GET_CODE (XEXP (operand0, 0)) == MEM
3732 		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
3733 	   && scratch_reg)
3734     {
3735       if (GET_CODE (operand0) == SUBREG)
3736 	operand0 = XEXP (operand0, 0);
3737 
3738       /* SCRATCH_REG will hold an address and maybe the actual data.  We want
3739 	 it in SImode regardless of what mode it was originally given
3740 	 to us.  */
3741       scratch_reg = force_mode (SImode, scratch_reg);
3742 
3743       /* The displacement might be too large for a legitimate address;
3744 	 in that case load it into the scratch reg and add the base to it.  */
3745       if (!memory_address_p (Pmode, XEXP (operand0, 0)))
3746 	{
3747 	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
3748 	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
3749 								        0)),
3750 						       Pmode,
3751 						       XEXP (XEXP (operand0, 0),
3752 								   0),
3753 						       scratch_reg));
3754 	}
3755       else
3756 	emit_move_insn (scratch_reg, XEXP (operand0, 0));
3757       emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),
3758 			      operand1));
3759       return 1;
3760     }
3761   /* Handle secondary reloads for loads of FP registers from constant
3762      expressions by forcing the constant into memory.
3763 
3764      Use SCRATCH_REG to hold the address of the memory location.
3765 
3766      The proper fix is to change PREFERRED_RELOAD_CLASS to return
3767      NO_REGS when presented with a const_int and a register class
3768      containing only FP registers.  Doing so unfortunately creates
3769      more problems than it solves.  Fix this for 2.5.  */
3770   else if (fp_reg_operand (operand0, mode)
3771 	   && CONSTANT_P (operand1)
3772 	   && scratch_reg)
3773     {
3774       rtx xoperands[2];
3775 
3776       /* SCRATCH_REG will hold an address and maybe the actual data.  We want
3777 	 it in SImode regardless of what mode it was originally given
3778 	 to us.  */
3779       scratch_reg = force_mode (SImode, scratch_reg);
3780 
3781       /* Force the constant into memory and put the address of the
3782 	 memory location into scratch_reg.  */
3783       xoperands[0] = scratch_reg;
3784       xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
3785       emit_insn (gen_rtx_SET (mode, scratch_reg, xoperands[1]));
3786 
3787       /* Now load the destination register.  */
3788       emit_insn (gen_rtx_SET (mode, operand0,
3789 			      gen_rtx_MEM (mode, scratch_reg)));
3790       return 1;
3791     }
3792 
3793   /* Now have insn-emit do whatever it normally does.  */
3794   return 0;
3795 }
3796 
3797 /* Split one or more DImode RTL references into pairs of SImode
3798    references.  The RTL can be REG, offsettable MEM, integer constant, or
3799    CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
3800    split and "num" is its length.  lo_half and hi_half are output arrays
3801    that parallel "operands".  */
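
/* For example (illustrative), since the m68k is big-endian, a DImode
   MEM at (%a0) splits into hi_half (mem:SI (%a0)) and lo_half
   (mem:SI 4(%a0)), and a DImode value in d0/d1 splits into hi_half d0
   and lo_half d1.  */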
3802 
3803 void
3804 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
3805 {
3806   while (num--)
3807     {
3808       rtx op = operands[num];
3809 
3810       /* simplify_subreg refuses to split volatile memory addresses,
3811 	 but we still have to handle it.  */
3812       if (GET_CODE (op) == MEM)
3813 	{
3814 	  lo_half[num] = adjust_address (op, SImode, 4);
3815 	  hi_half[num] = adjust_address (op, SImode, 0);
3816 	}
3817       else
3818 	{
3819 	  lo_half[num] = simplify_gen_subreg (SImode, op,
3820 					      GET_MODE (op) == VOIDmode
3821 					      ? DImode : GET_MODE (op), 4);
3822 	  hi_half[num] = simplify_gen_subreg (SImode, op,
3823 					      GET_MODE (op) == VOIDmode
3824 					      ? DImode : GET_MODE (op), 0);
3825 	}
3826     }
3827 }
3828 
3829 /* Split X into a base and a constant offset, storing them in *BASE
3830    and *OFFSET respectively.  */
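
/* For example, (plus (reg %a0) (const_int 12)) yields *BASE = (reg %a0)
   and *OFFSET = 12, while a lone register yields an offset of 0.  */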
3831 
3832 static void
3833 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
3834 {
3835   *offset = 0;
3836   if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
3837     {
3838       *offset += INTVAL (XEXP (x, 1));
3839       x = XEXP (x, 0);
3840     }
3841   *base = x;
3842 }
3843 
3844 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
3845    instruction.  STORE_P says whether the move is a load or store.
3846 
3847    If the instruction uses post-increment or pre-decrement addressing,
3848    AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
3849    adjustment.  This adjustment will be made by the first element of
3850    PARALLEL, with the loads or stores starting at element 1.  If the
3851    instruction does not use post-increment or pre-decrement addressing,
3852    AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
3853    start at element 0.  */
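
/* For instance, an illustrative pre-decrement store of d2-d4 with
   AUTOMOD_BASE = sp and AUTOMOD_OFFSET = -12 would look like:

     (parallel [(set (reg sp) (plus (reg sp) (const_int -12)))
		(set (mem (plus (reg sp) (const_int -12))) (reg d2))
		(set (mem (plus (reg sp) (const_int -8))) (reg d3))
		(set (mem (plus (reg sp) (const_int -4))) (reg d4))])  */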
3854 
3855 bool
3856 m68k_movem_pattern_p (rtx pattern, rtx automod_base,
3857 		      HOST_WIDE_INT automod_offset, bool store_p)
3858 {
3859   rtx base, mem_base, set, mem, reg, last_reg;
3860   HOST_WIDE_INT offset, mem_offset;
3861   int i, first, len;
3862   enum reg_class rclass;
3863 
3864   len = XVECLEN (pattern, 0);
3865   first = (automod_base != NULL);
3866 
3867   if (automod_base)
3868     {
3869       /* Stores must be pre-decrement and loads must be post-increment.  */
3870       if (store_p != (automod_offset < 0))
3871 	return false;
3872 
3873       /* Work out the base and offset for the lowest memory location.  */
3874       base = automod_base;
3875       offset = (automod_offset < 0 ? automod_offset : 0);
3876     }
3877   else
3878     {
3879       /* Allow any valid base and offset in the first access.  */
3880       base = NULL;
3881       offset = 0;
3882     }
3883 
3884   last_reg = NULL;
3885   rclass = NO_REGS;
3886   for (i = first; i < len; i++)
3887     {
3888       /* We need a plain SET.  */
3889       set = XVECEXP (pattern, 0, i);
3890       if (GET_CODE (set) != SET)
3891 	return false;
3892 
3893       /* Check that we have a memory location...  */
3894       mem = XEXP (set, !store_p);
3895       if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
3896 	return false;
3897 
3898       /* ...with the right address.  */
3899       if (base == NULL)
3900 	{
3901 	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
3902 	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
3903 	     There are no mode restrictions for 680x0 besides the
3904 	     automodification rules enforced above.  */
3905 	  if (TARGET_COLDFIRE
3906 	      && !m68k_legitimate_base_reg_p (base, reload_completed))
3907 	    return false;
3908 	}
3909       else
3910 	{
3911 	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
3912 	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
3913 	    return false;
3914 	}
3915 
3916       /* Check that we have a register of the required mode and class.  */
3917       reg = XEXP (set, store_p);
3918       if (!REG_P (reg)
3919 	  || !HARD_REGISTER_P (reg)
3920 	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
3921 	return false;
3922 
3923       if (last_reg)
3924 	{
3925 	  /* The register must belong to RCLASS and have a higher number
3926 	     than the register in the previous SET.  */
3927 	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
3928 	      || REGNO (last_reg) >= REGNO (reg))
3929 	    return false;
3930 	}
3931       else
3932 	{
3933 	  /* Work out which register class we need.  */
3934 	  if (INT_REGNO_P (REGNO (reg)))
3935 	    rclass = GENERAL_REGS;
3936 	  else if (FP_REGNO_P (REGNO (reg)))
3937 	    rclass = FP_REGS;
3938 	  else
3939 	    return false;
3940 	}
3941 
3942       last_reg = reg;
3943       offset += GET_MODE_SIZE (GET_MODE (reg));
3944     }
3945 
3946   /* If we have an automodification, check whether the final offset is OK.  */
3947   if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
3948     return false;
3949 
3950   /* Reject unprofitable cases.  */
3951   if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
3952     return false;
3953 
3954   return true;
3955 }
3956 
3957 /* Return the assembly code template for a movem or fmovem instruction
3958    whose pattern is given by PATTERN.  Store the template's operands
3959    in OPERANDS.
3960 
3961    If the instruction uses post-increment or pre-decrement addressing,
3962    AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
3963    is true if this is a store instruction.  */
3964 
3965 const char *
3966 m68k_output_movem (rtx *operands, rtx pattern,
3967 		   HOST_WIDE_INT automod_offset, bool store_p)
3968 {
3969   unsigned int mask;
3970   int i, first;
3971 
3972   gcc_assert (GET_CODE (pattern) == PARALLEL);
3973   mask = 0;
3974   first = (automod_offset != 0);
3975   for (i = first; i < XVECLEN (pattern, 0); i++)
3976     {
3977       /* When using movem with pre-decrement addressing, register X + D0_REG
3978 	 is controlled by bit 15 - X.  For all other addressing modes,
3979 	 register X + D0_REG is controlled by bit X.  Confusingly, the
3980 	 register mask for fmovem is in the opposite order to that for
3981 	 movem.  */
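      /* For example (illustrative): storing d2-d4 with pre-decrement
	 sets bits 13, 12 and 11, giving a mask of 0x3800; with any
	 other addressing mode the same registers give 0x001c.  */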
3982       unsigned int regno;
3983 
3984       gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
3985       gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
3986       regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
3987       if (automod_offset < 0)
3988 	{
3989 	  if (FP_REGNO_P (regno))
3990 	    mask |= 1 << (regno - FP0_REG);
3991 	  else
3992 	    mask |= 1 << (15 - (regno - D0_REG));
3993 	}
3994       else
3995 	{
3996 	  if (FP_REGNO_P (regno))
3997 	    mask |= 1 << (7 - (regno - FP0_REG));
3998 	  else
3999 	    mask |= 1 << (regno - D0_REG);
4000 	}
4001     }
4002   CC_STATUS_INIT;
4003 
4004   if (automod_offset == 0)
4005     operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
4006   else if (automod_offset < 0)
4007     operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4008   else
4009     operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
4010   operands[1] = GEN_INT (mask);
4011   if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
4012     {
4013       if (store_p)
4014 	return "fmovem %1,%a0";
4015       else
4016 	return "fmovem %a0,%1";
4017     }
4018   else
4019     {
4020       if (store_p)
4021 	return "movem%.l %1,%a0";
4022       else
4023 	return "movem%.l %a0,%1";
4024     }
4025 }
4026 
4027 /* Return a REG that occurs in ADDR with coefficient 1.
4028    ADDR can be effectively incremented by incrementing REG.  */
4029 
4030 static rtx
4031 find_addr_reg (rtx addr)
4032 {
4033   while (GET_CODE (addr) == PLUS)
4034     {
4035       if (GET_CODE (XEXP (addr, 0)) == REG)
4036 	addr = XEXP (addr, 0);
4037       else if (GET_CODE (XEXP (addr, 1)) == REG)
4038 	addr = XEXP (addr, 1);
4039       else if (CONSTANT_P (XEXP (addr, 0)))
4040 	addr = XEXP (addr, 1);
4041       else if (CONSTANT_P (XEXP (addr, 1)))
4042 	addr = XEXP (addr, 0);
4043       else
4044 	gcc_unreachable ();
4045     }
4046   gcc_assert (GET_CODE (addr) == REG);
4047   return addr;
4048 }
4049 
4050 /* Output assembler code to perform a 32-bit 3-operand add.  */
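
/* For example (illustrative), for a0 = a1 + 16 this emits, in Motorola
   syntax, "lea (16,%a1),%a0" instead of a separate move and add.  */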
4051 
4052 const char *
4053 output_addsi3 (rtx *operands)
4054 {
4055   if (! operands_match_p (operands[0], operands[1]))
4056     {
4057       if (!ADDRESS_REG_P (operands[1]))
4058 	{
4059 	  rtx tmp = operands[1];
4060 
4061 	  operands[1] = operands[2];
4062 	  operands[2] = tmp;
4063 	}
4064 
4065       /* These insns can result from reloads to access
4066 	 stack slots over 64k from the frame pointer.  */
4067       if (GET_CODE (operands[2]) == CONST_INT
4068 	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
4069         return "move%.l %2,%0\n\tadd%.l %1,%0";
4070       if (GET_CODE (operands[2]) == REG)
4071 	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4072       return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4073     }
4074   if (GET_CODE (operands[2]) == CONST_INT)
4075     {
4076       if (INTVAL (operands[2]) > 0
4077 	  && INTVAL (operands[2]) <= 8)
4078 	return "addq%.l %2,%0";
4079       if (INTVAL (operands[2]) < 0
4080 	  && INTVAL (operands[2]) >= -8)
4081         {
4082 	  operands[2] = GEN_INT (- INTVAL (operands[2]));
4083 	  return "subq%.l %2,%0";
4084 	}
4085       /* On the CPU32 it is faster to use two addql instructions to
4086 	 add a small integer (8 < N <= 16) to a register.
4087 	 Likewise for subql.  */
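      /* E.g. (illustrative) adding 12 emits addq%.l #8,%0 followed by
	 addq%.l #4,%0 rather than one add%.l #12,%0.  */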
4088       if (TUNE_CPU32 && REG_P (operands[0]))
4089 	{
4090 	  if (INTVAL (operands[2]) > 8
4091 	      && INTVAL (operands[2]) <= 16)
4092 	    {
4093 	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
4094 	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
4095 	    }
4096 	  if (INTVAL (operands[2]) < -8
4097 	      && INTVAL (operands[2]) >= -16)
4098 	    {
4099 	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
4100 	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4101 	    }
4102 	}
4103       if (ADDRESS_REG_P (operands[0])
4104 	  && INTVAL (operands[2]) >= -0x8000
4105 	  && INTVAL (operands[2]) < 0x8000)
4106 	{
4107 	  if (TUNE_68040)
4108 	    return "add%.w %2,%0";
4109 	  else
4110 	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4111 	}
4112     }
4113   return "add%.l %2,%0";
4114 }
4115 
4116 /* Store in cc_status the expressions that the condition codes will
4117    describe after execution of an instruction whose pattern is EXP.
4118    Do not alter them if the instruction would not alter the cc's.  */
4119 
4120 /* On the 68000, all the insns to store in an address register fail to
4121    set the cc's.  However, in some cases these instructions can make it
4122    possibly invalid to use the saved cc's.  In those cases we clear out
4123    some or all of the saved cc's so they won't be used.  */
4124 
4125 void
4126 notice_update_cc (rtx exp, rtx insn)
4127 {
4128   if (GET_CODE (exp) == SET)
4129     {
4130       if (GET_CODE (SET_SRC (exp)) == CALL)
4131 	CC_STATUS_INIT;
4132       else if (ADDRESS_REG_P (SET_DEST (exp)))
4133 	{
4134 	  if (cc_status.value1 && modified_in_p (cc_status.value1, insn))
4135 	    cc_status.value1 = 0;
4136 	  if (cc_status.value2 && modified_in_p (cc_status.value2, insn))
4137 	    cc_status.value2 = 0;
4138 	}
4139       /* fmoves to memory or data registers do not set the condition
4140 	 codes.  Normal moves _do_ set the condition codes, but not in
4141 	 a way that is appropriate for comparison with 0, because -0.0
4142 	 would be treated as a negative nonzero number.  Note that it
4143 	 isn't appropriate to conditionalize this restriction on
4144 	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
4145 	 we care about the difference between -0.0 and +0.0.  */
4146       else if (!FP_REG_P (SET_DEST (exp))
4147 	       && SET_DEST (exp) != cc0_rtx
4148 	       && (FP_REG_P (SET_SRC (exp))
4149 		   || GET_CODE (SET_SRC (exp)) == FIX
4150 		   || FLOAT_MODE_P (GET_MODE (SET_DEST (exp)))))
4151 	CC_STATUS_INIT;
4152       /* A pair of move insns doesn't produce a useful overall cc.  */
4153       else if (!FP_REG_P (SET_DEST (exp))
4154 	       && !FP_REG_P (SET_SRC (exp))
4155 	       && GET_MODE_SIZE (GET_MODE (SET_SRC (exp))) > 4
4156 	       && (GET_CODE (SET_SRC (exp)) == REG
4157 		   || GET_CODE (SET_SRC (exp)) == MEM
4158 		   || GET_CODE (SET_SRC (exp)) == CONST_DOUBLE))
4159 	CC_STATUS_INIT;
4160       else if (SET_DEST (exp) != pc_rtx)
4161 	{
4162 	  cc_status.flags = 0;
4163 	  cc_status.value1 = SET_DEST (exp);
4164 	  cc_status.value2 = SET_SRC (exp);
4165 	}
4166     }
4167   else if (GET_CODE (exp) == PARALLEL
4168 	   && GET_CODE (XVECEXP (exp, 0, 0)) == SET)
4169     {
4170       rtx dest = SET_DEST (XVECEXP (exp, 0, 0));
4171       rtx src  = SET_SRC  (XVECEXP (exp, 0, 0));
4172 
4173       if (ADDRESS_REG_P (dest))
4174 	CC_STATUS_INIT;
4175       else if (dest != pc_rtx)
4176 	{
4177 	  cc_status.flags = 0;
4178 	  cc_status.value1 = dest;
4179 	  cc_status.value2 = src;
4180 	}
4181     }
4182   else
4183     CC_STATUS_INIT;
4184   if (cc_status.value2 != 0
4185       && ADDRESS_REG_P (cc_status.value2)
4186       && GET_MODE (cc_status.value2) == QImode)
4187     CC_STATUS_INIT;
4188   if (cc_status.value2 != 0)
4189     switch (GET_CODE (cc_status.value2))
4190       {
4191       case ASHIFT: case ASHIFTRT: case LSHIFTRT:
4192       case ROTATE: case ROTATERT:
4193 	/* These instructions always clear the overflow bit, and set
4194 	   the carry to the bit shifted out.  */
4195 	cc_status.flags |= CC_OVERFLOW_UNUSABLE | CC_NO_CARRY;
4196 	break;
4197 
4198       case PLUS: case MINUS: case MULT:
4199       case DIV: case UDIV: case MOD: case UMOD: case NEG:
4200 	if (GET_MODE (cc_status.value2) != VOIDmode)
4201 	  cc_status.flags |= CC_NO_OVERFLOW;
4202 	break;
4203       case ZERO_EXTEND:
4204 	/* (SET r1 (ZERO_EXTEND r2)) on this machine
4205 	   ends with a move insn moving r2 in r2's mode.
4206 	   Thus, the cc's are set for r2.
4207 	   This can set N bit spuriously.  */
4208 	cc_status.flags |= CC_NOT_NEGATIVE;
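	/* Fall through.  */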
4209 
4210       default:
4211 	break;
4212       }
4213   if (cc_status.value1 && GET_CODE (cc_status.value1) == REG
4214       && cc_status.value2
4215       && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2))
4216     cc_status.value2 = 0;
4217   /* Check for PRE_DEC in dest modifying a register used in src.  */
4218   if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM
4219       && GET_CODE (XEXP (cc_status.value1, 0)) == PRE_DEC
4220       && cc_status.value2
4221       && reg_overlap_mentioned_p (XEXP (XEXP (cc_status.value1, 0), 0),
4222 				  cc_status.value2))
4223     cc_status.value2 = 0;
4224   if (((cc_status.value1 && FP_REG_P (cc_status.value1))
4225        || (cc_status.value2 && FP_REG_P (cc_status.value2))))
4226     cc_status.flags = CC_IN_68881;
4227   if (cc_status.value2 && GET_CODE (cc_status.value2) == COMPARE
4228       && GET_MODE_CLASS (GET_MODE (XEXP (cc_status.value2, 0))) == MODE_FLOAT)
4229     {
4230       cc_status.flags = CC_IN_68881;
4231       if (!FP_REG_P (XEXP (cc_status.value2, 0))
4232 	  && FP_REG_P (XEXP (cc_status.value2, 1)))
4233 	cc_status.flags |= CC_REVERSED;
4234     }
4235 }
4236 
4237 const char *
4238 output_move_const_double (rtx *operands)
4239 {
4240   int code = standard_68881_constant_p (operands[1]);
4241 
4242   if (code != 0)
4243     {
4244       static char buf[40];
4245 
4246       sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4247       return buf;
4248     }
4249   return "fmove%.d %1,%0";
4250 }
4251 
4252 const char *
4253 output_move_const_single (rtx *operands)
4254 {
4255   int code = standard_68881_constant_p (operands[1]);
4256 
4257   if (code != 0)
4258     {
4259       static char buf[40];
4260 
4261       sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4262       return buf;
4263     }
4264   return "fmove%.s %f1,%0";
4265 }
4266 
4267 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4268    from the "fmovecr" instruction.
4269    The value, anded with 0xff, gives the code to use in fmovecr
4270    to get the desired constant.  */
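
/* For example, 1.0 has code 0x32 in the table below, so (illustratively)
   output_move_const_double emits "fmovecr #0x32,%0" for a 1.0 source.  */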
4271 
4272 /* This code has been fixed for cross-compilation.  */
4273 
4274 static int inited_68881_table = 0;
4275 
4276 static const char *const strings_68881[7] = {
4277   "0.0",
4278   "1.0",
4279   "10.0",
4280   "100.0",
4281   "10000.0",
4282   "1e8",
4283   "1e16"
4284 };
4285 
4286 static const int codes_68881[7] = {
4287   0x0f,
4288   0x32,
4289   0x33,
4290   0x34,
4291   0x35,
4292   0x36,
4293   0x37
4294 };
4295 
4296 REAL_VALUE_TYPE values_68881[7];
4297 
4298 /* Set up values_68881 array by converting the decimal values
4299    strings_68881 to binary.  */
4300 
4301 void
4302 init_68881_table (void)
4303 {
4304   int i;
4305   REAL_VALUE_TYPE r;
4306   enum machine_mode mode;
4307 
4308   mode = SFmode;
4309   for (i = 0; i < 7; i++)
4310     {
4311       if (i == 6)
4312         mode = DFmode;
4313       r = REAL_VALUE_ATOF (strings_68881[i], mode);
4314       values_68881[i] = r;
4315     }
4316   inited_68881_table = 1;
4317 }
4318 
4319 int
4320 standard_68881_constant_p (rtx x)
4321 {
4322   REAL_VALUE_TYPE r;
4323   int i;
4324 
4325   /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4326      used at all on those chips.  */
4327   if (TUNE_68040_60)
4328     return 0;
4329 
4330   if (! inited_68881_table)
4331     init_68881_table ();
4332 
4333   REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4334 
4335   /* Use REAL_VALUES_IDENTICAL instead of REAL_VALUES_EQUAL so that -0.0
4336      is rejected.  */
4337   for (i = 0; i < 6; i++)
4338     {
4339       if (REAL_VALUES_IDENTICAL (r, values_68881[i]))
4340         return (codes_68881[i]);
4341     }
4342 
4343   if (GET_MODE (x) == SFmode)
4344     return 0;
4345 
4346   if (REAL_VALUES_EQUAL (r, values_68881[6]))
4347     return (codes_68881[6]);
4348 
4349   /* Larger powers of ten in the constants RAM are not used
4350      because they are not equal to a `double' C constant.  */
4351   return 0;
4352 }
4353 
4354 /* If X is a floating-point constant, return the logarithm of X base 2,
4355    or 0 if X is not a power of 2.  */
4356 
4357 int
4358 floating_exact_log2 (rtx x)
4359 {
4360   REAL_VALUE_TYPE r, r1;
4361   int exp;
4362 
4363   REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4364 
4365   if (REAL_VALUES_LESS (r, dconst1))
4366     return 0;
4367 
4368   exp = real_exponent (&r);
4369   real_2expN (&r1, exp, DFmode);
4370   if (REAL_VALUES_EQUAL (r1, r))
4371     return exp;
4372 
4373   return 0;
4374 }
4375 
4376 /* A C compound statement to output to stdio stream STREAM the
4377    assembler syntax for an instruction operand X.  X is an RTL
4378    expression.
4379 
4380    CODE is a value that can be used to specify one of several ways
4381    of printing the operand.  It is used when identical operands
4382    must be printed differently depending on the context.  CODE
4383    comes from the `%' specification that was used to request
4384    printing of the operand.  If the specification was just `%DIGIT'
4385    then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4386    is the ASCII code for LTR.
4387 
4388    If X is a register, this macro should print the register's name.
4389    The names can be found in an array `reg_names' whose type is
4390    `char *[]'.  `reg_names' is initialized from `REGISTER_NAMES'.
4391 
4392    When the machine description has a specification `%PUNCT' (a `%'
4393    followed by a punctuation character), this macro is called with
4394    a null pointer for X and the punctuation character for CODE.
4395 
4396    The m68k specific codes are:
4397 
4398    '.' for dot needed in Motorola-style opcode names.
4399    '-' for an operand pushing on the stack:
4400        sp@-, -(sp) or -(%sp) depending on the style of syntax.
4401    '+' for an operand pushing on the stack:
4402        sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4403    '@' for a reference to the top word on the stack:
4404        sp@, (sp) or (%sp) depending on the style of syntax.
4405    '#' for an immediate operand prefix (# in MIT and Motorola syntax
4406        but & in SGS syntax).
4407    '!' for the cc register (used in an `and to cc' insn).
4408    '$' for the letter `s' in an op code, but only on the 68040.
4409    '&' for the letter `d' in an op code, but only on the 68040.
4410    '/' for register prefix needed by longlong.h.
4411    '?' for m68k_library_id_string
4412 
4413    'b' for byte insn (no effect on the Sun; this is for the ISI).
4414    'd' to force memory addressing to be absolute, not relative.
4415    'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4416    'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4417        or print pair of registers as rx:ry.
4418    'p' print an address with @PLTPC attached, but only if the operand
4419        is not locally-bound.  */
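
/* For example (illustrative), with Motorola syntax the template
   "move%.l %1,%-" prints as "move.l <operand 1>,-(%sp)".  */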
4420 
4421 void
4422 print_operand (FILE *file, rtx op, int letter)
4423 {
4424   if (letter == '.')
4425     {
4426       if (MOTOROLA)
4427 	fprintf (file, ".");
4428     }
4429   else if (letter == '#')
4430     asm_fprintf (file, "%I");
4431   else if (letter == '-')
4432     asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
4433   else if (letter == '+')
4434     asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
4435   else if (letter == '@')
4436     asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
4437   else if (letter == '!')
4438     asm_fprintf (file, "%Rfpcr");
4439   else if (letter == '$')
4440     {
4441       if (TARGET_68040)
4442 	fprintf (file, "s");
4443     }
4444   else if (letter == '&')
4445     {
4446       if (TARGET_68040)
4447 	fprintf (file, "d");
4448     }
4449   else if (letter == '/')
4450     asm_fprintf (file, "%R");
4451   else if (letter == '?')
4452     asm_fprintf (file, m68k_library_id_string);
4453   else if (letter == 'p')
4454     {
4455       output_addr_const (file, op);
4456       if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
4457 	fprintf (file, "@PLTPC");
4458     }
4459   else if (GET_CODE (op) == REG)
4460     {
4461       if (letter == 'R')
4462 	/* Print out the second register name of a register pair.
4463 	   I.e., R (6) => 7.  */
4464 	fputs (M68K_REGNAME (REGNO (op) + 1), file);
4465       else
4466 	fputs (M68K_REGNAME (REGNO (op)), file);
4467     }
4468   else if (GET_CODE (op) == MEM)
4469     {
4470       output_address (XEXP (op, 0));
4471       if (letter == 'd' && ! TARGET_68020
4472 	  && CONSTANT_ADDRESS_P (XEXP (op, 0))
4473 	  && !(GET_CODE (XEXP (op, 0)) == CONST_INT
4474 	       && INTVAL (XEXP (op, 0)) < 0x8000
4475 	       && INTVAL (XEXP (op, 0)) >= -0x8000))
4476 	fprintf (file, MOTOROLA ? ".l" : ":l");
4477     }
4478   else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
4479     {
4480       REAL_VALUE_TYPE r;
4481       long l;
4482       REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4483       REAL_VALUE_TO_TARGET_SINGLE (r, l);
4484       asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
4485     }
4486   else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
4487     {
4488       REAL_VALUE_TYPE r;
4489       long l[3];
4490       REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4491       REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
4492       asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
4493 		   l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
4494     }
4495   else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
4496     {
4497       REAL_VALUE_TYPE r;
4498       long l[2];
4499       REAL_VALUE_FROM_CONST_DOUBLE (r, op);
4500       REAL_VALUE_TO_TARGET_DOUBLE (r, l);
4501       asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
4502     }
4503   else
4504     {
4505       /* Use `print_operand_address' instead of `output_addr_const'
4506 	 to ensure that we print relevant PIC stuff.  */
4507       asm_fprintf (file, "%I");
4508       if (TARGET_PCREL
4509 	  && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
4510 	print_operand_address (file, op);
4511       else
4512 	output_addr_const (file, op);
4513     }
4514 }
4515 
4516 /* Return string for TLS relocation RELOC.  */
4517 
4518 static const char *
4519 m68k_get_reloc_decoration (enum m68k_reloc reloc)
4520 {
4521   /* To my knowledge, !MOTOROLA assemblers don't support TLS.  */
4522   gcc_assert (MOTOROLA || reloc == RELOC_GOT);
4523 
4524   switch (reloc)
4525     {
4526     case RELOC_GOT:
4527       if (MOTOROLA)
4528 	{
4529 	  if (flag_pic == 1 && TARGET_68020)
4530 	    return "@GOT.w";
4531 	  else
4532 	    return "@GOT";
4533 	}
4534       else
4535 	{
4536 	  if (TARGET_68020)
4537 	    {
4538 	      switch (flag_pic)
4539 		{
4540 		case 1:
4541 		  return ":w";
4542 		case 2:
4543 		  return ":l";
4544 		default:
4545 		  return "";
4546 		}
4547 	    }
4548 	}
4549 
4550     case RELOC_TLSGD:
4551       return "@TLSGD";
4552 
4553     case RELOC_TLSLDM:
4554       return "@TLSLDM";
4555 
4556     case RELOC_TLSLDO:
4557       return "@TLSLDO";
4558 
4559     case RELOC_TLSIE:
4560       return "@TLSIE";
4561 
4562     case RELOC_TLSLE:
4563       return "@TLSLE";
4564 
4565     default:
4566       gcc_unreachable ();
4567     }
4568 }
4569 
4570 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA.  */
4571 
4572 static bool
4573 m68k_output_addr_const_extra (FILE *file, rtx x)
4574 {
4575   if (GET_CODE (x) == UNSPEC)
4576     {
4577       switch (XINT (x, 1))
4578 	{
4579 	case UNSPEC_RELOC16:
4580 	case UNSPEC_RELOC32:
4581 	  output_addr_const (file, XVECEXP (x, 0, 0));
4582 	  fputs (m68k_get_reloc_decoration
4583 		 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
4584 	  return true;
4585 
4586 	default:
4587 	  break;
4588 	}
4589     }
4590 
4591   return false;
4592 }
4593 
4594 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
4595 
4596 static void
4597 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
4598 {
4599   gcc_assert (size == 4);
4600   fputs ("\t.long\t", file);
4601   output_addr_const (file, x);
4602   fputs ("@TLSLDO+0x8000", file);
4603 }
4604 
4605 /* In the name of slightly smaller debug output, and to cater to
4606    general assembler lossage, recognize various UNSPEC sequences
4607    and turn them back into a direct symbol reference.  */
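
/* For example (illustrative), a load such as
   (mem (plus (reg %a5) (const (unspec [(symbol_ref "x") ...]
   UNSPEC_RELOC16)))) is rewritten in terms of the bare
   (plus (reg %a5) (symbol_ref "x")).  */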
4608 
4609 static rtx
4610 m68k_delegitimize_address (rtx orig_x)
4611 {
4612   rtx x;
4613   struct m68k_address addr;
4614   rtx unspec;
4615 
4616   orig_x = delegitimize_mem_from_attrs (orig_x);
4617   x = orig_x;
4618   if (MEM_P (x))
4619     x = XEXP (x, 0);
4620 
4621   if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
4622     return orig_x;
4623 
4624   if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
4625       || addr.offset == NULL_RTX
4626       || GET_CODE (addr.offset) != CONST)
4627     return orig_x;
4628 
4629   unspec = XEXP (addr.offset, 0);
4630   if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
4631     unspec = XEXP (unspec, 0);
4632   if (GET_CODE (unspec) != UNSPEC
4633       || (XINT (unspec, 1) != UNSPEC_RELOC16
4634 	  && XINT (unspec, 1) != UNSPEC_RELOC32))
4635     return orig_x;
4636   x = XVECEXP (unspec, 0, 0);
4637   gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
4638   if (unspec != XEXP (addr.offset, 0))
4639     x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
4640   if (addr.index)
4641     {
4642       rtx idx = addr.index;
4643       if (addr.scale != 1)
4644 	idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
4645       x = gen_rtx_PLUS (Pmode, idx, x);
4646     }
4647   if (addr.base)
4648     x = gen_rtx_PLUS (Pmode, addr.base, x);
4649   if (MEM_P (orig_x))
4650     x = replace_equiv_address_nv (orig_x, x);
4651   return x;
4652 }
4653 
4654 
4655 /* A C compound statement to output to stdio stream STREAM the
4656    assembler syntax for an instruction operand that is a memory
4657    reference whose address is ADDR.  ADDR is an RTL expression.
4658 
4659    Note that this contains a kludge that knows that the only reason
4660    we have an address (plus (label_ref...) (reg...)) when not generating
4661    PIC code is in the insn before a tablejump, and we know that m68k.md
4662    generates a label LInnn: on such an insn.
4663 
4664    It is possible for PIC to generate a (plus (label_ref...) (reg...))
4665    and we handle that just like we would a (plus (symbol_ref...) (reg...)).
4666 
4667    This routine is responsible for distinguishing between -fpic and -fPIC
4668    style relocations in an address.  When generating -fpic code the
4669    offset is output in word mode (e.g. movel a5@(_foo:w), a0).  When generating
4670    -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0).  */
4671 
4672 void
4673 print_operand_address (FILE *file, rtx addr)
4674 {
4675   struct m68k_address address;
4676 
4677   if (!m68k_decompose_address (QImode, addr, true, &address))
4678     gcc_unreachable ();
4679 
4680   if (address.code == PRE_DEC)
4681     fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
4682 	     M68K_REGNAME (REGNO (address.base)));
4683   else if (address.code == POST_INC)
4684     fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
4685 	     M68K_REGNAME (REGNO (address.base)));
4686   else if (!address.base && !address.index)
4687     {
4688       /* A constant address.  */
4689       gcc_assert (address.offset == addr);
4690       if (GET_CODE (addr) == CONST_INT)
4691 	{
4692 	  /* (xxx).w or (xxx).l.  */
4693 	  if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
4694 	    fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
4695 	  else
4696 	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
4697 	}
4698       else if (TARGET_PCREL)
4699 	{
4700 	  /* (d16,PC) or (bd,PC,Xn) (with suppressed index register).  */
4701 	  fputc ('(', file);
4702 	  output_addr_const (file, addr);
4703 	  asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
4704 	}
4705       else
4706 	{
4707 	  /* (xxx).l.  We need a special case for SYMBOL_REF if the symbol
4708 	     name ends in `.<letter>', as the last 2 characters can be
4709 	     mistaken for a size suffix.  Put the name in parentheses.  */
4710 	  if (GET_CODE (addr) == SYMBOL_REF
4711 	      && strlen (XSTR (addr, 0)) > 2
4712 	      && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
4713 	    {
4714 	      putc ('(', file);
4715 	      output_addr_const (file, addr);
4716 	      putc (')', file);
4717 	    }
4718 	  else
4719 	    output_addr_const (file, addr);
4720 	}
4721     }
4722   else
4723     {
4724       int labelno;
4725 
4726       /* If ADDR is a (d8,pc,Xn) address, this is the number of the
4727 	 label being accessed, otherwise it is -1.  */
4728       labelno = (address.offset
4729 		 && !address.base
4730 		 && GET_CODE (address.offset) == LABEL_REF
4731 		 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
4732 		 : -1);
4733       if (MOTOROLA)
4734 	{
4735 	  /* Print the "offset(base" component.  */
4736 	  if (labelno >= 0)
4737 	    asm_fprintf (file, "%LL%d(%Rpc,", labelno);
4738 	  else
4739 	    {
4740 	      if (address.offset)
4741 		output_addr_const (file, address.offset);
4742 
4743 	      putc ('(', file);
4744 	      if (address.base)
4745 		fputs (M68K_REGNAME (REGNO (address.base)), file);
4746 	    }
4747 	  /* Print the ",index" component, if any.  */
4748 	  if (address.index)
4749 	    {
4750 	      if (address.base)
4751 		putc (',', file);
4752 	      fprintf (file, "%s.%c",
4753 		       M68K_REGNAME (REGNO (address.index)),
4754 		       GET_MODE (address.index) == HImode ? 'w' : 'l');
4755 	      if (address.scale != 1)
4756 		fprintf (file, "*%d", address.scale);
4757 	    }
4758 	  putc (')', file);
4759 	}
4760       else /* !MOTOROLA */
4761 	{
4762 	  if (!address.offset && !address.index)
4763 	    fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
4764 	  else
4765 	    {
4766 	      /* Print the "base@(offset" component.  */
4767 	      if (labelno >= 0)
4768 		asm_fprintf (file, "%Rpc@(%LL%d", labelno);
4769 	      else
4770 		{
4771 		  if (address.base)
4772 		    fputs (M68K_REGNAME (REGNO (address.base)), file);
4773 		  fprintf (file, "@(");
4774 		  if (address.offset)
4775 		    output_addr_const (file, address.offset);
4776 		}
4777 	      /* Print the ",index" component, if any.  */
4778 	      if (address.index)
4779 		{
4780 		  fprintf (file, ",%s:%c",
4781 			   M68K_REGNAME (REGNO (address.index)),
4782 			   GET_MODE (address.index) == HImode ? 'w' : 'l');
4783 		  if (address.scale != 1)
4784 		    fprintf (file, ":%d", address.scale);
4785 		}
4786 	      putc (')', file);
4787 	    }
4788 	}
4789     }
4790 }
4791 
4792 /* Check for cases where a clr insns can be omitted from code using
4793    strict_low_part sets.  For example, the second clrl here is not needed:
4794    clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+,d0; use d0; ...
4795 
4796    MODE is the mode of this STRICT_LOW_PART set.  FIRST_INSN is the clear
4797    insn we are checking for redundancy.  TARGET is the register set by the
4798    clear insn.  */
4799 
4800 bool
4801 strict_low_part_peephole_ok (enum machine_mode mode, rtx first_insn,
4802                              rtx target)
4803 {
4804   rtx p = first_insn;
4805 
4806   while ((p = PREV_INSN (p)))
4807     {
4808       if (NOTE_INSN_BASIC_BLOCK_P (p))
4809 	return false;
4810 
4811       if (NOTE_P (p))
4812 	continue;
4813 
4814       /* If it isn't an insn, then give up.  */
4815       if (!INSN_P (p))
4816 	return false;
4817 
4818       if (reg_set_p (target, p))
4819 	{
4820 	  rtx set = single_set (p);
4821 	  rtx dest;
4822 
4823 	  /* If it isn't an easy to recognize insn, then give up.  */
4824 	  if (! set)
4825 	    return false;
4826 
4827 	  dest = SET_DEST (set);
4828 
4829 	  /* If this sets the entire target register to zero, then our
4830 	     first_insn is redundant.  */
4831 	  if (rtx_equal_p (dest, target)
4832 	      && SET_SRC (set) == const0_rtx)
4833 	    return true;
4834 	  else if (GET_CODE (dest) == STRICT_LOW_PART
4835 		   && GET_CODE (XEXP (dest, 0)) == REG
4836 		   && REGNO (XEXP (dest, 0)) == REGNO (target)
4837 		   && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
4838 		       <= GET_MODE_SIZE (mode)))
4839 	    /* This is a strict low part set which modifies less than
4840 	       we are using, so it is safe.  */
4841 	    ;
4842 	  else
4843 	    return false;
4844 	}
4845     }
4846 
4847   return false;
4848 }
4849 
4850 /* Operand predicates for implementing asymmetric pc-relative addressing
4851    on m68k.  The m68k supports pc-relative addressing (mode 7, register 2)
4852    when used as a source operand, but not as a destination operand.
4853 
4854    We model this by restricting the meaning of the basic predicates
4855    (general_operand, memory_operand, etc) to forbid the use of this
4856    addressing mode, and then define the following predicates that permit
4857    this addressing mode.  These predicates can then be used for the
4858    source operands of the appropriate instructions.
4859 
4860    n.b.  While it is theoretically possible to change all machine patterns
4861    to use this addressing mode where permitted by the architecture,
4862    it has only been implemented for "common" cases: SImode, HImode, and
4863    QImode operands, and only for the principal operations that would
4864    require this addressing mode: data movement and simple integer operations.
4865 
4866    In parallel with these new predicates, two new constraint letters
4867    were defined: 'S' and 'T'.  'S' is the -mpcrel analog of 'm'.
4868    'T' replaces 's' in the non-pcrel case.  It is a no-op in the pcrel case.
4869    In the pcrel case 's' is only valid in combination with 'a' registers.
4870    See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
4871    of how these constraints are used.
4872 
4873    The use of these predicates is strictly optional, though patterns that
4874    don't use them will cause an extra reload register to be allocated where
4875    one was not necessary:
4876 
4877 	lea (abc:w,%pc),%a0	; need to reload address
4878 	moveq &1,%d1		; since write to pc-relative space
4879 	movel %d1,%a0@		; is not allowed
4880 	...
4881 	lea (abc:w,%pc),%a1	; no need to reload address here
4882 	movel %a1@,%d0		; since "movel (abc:w,%pc),%d0" is ok
4883 
4884    For more info, consult tiemann@cygnus.com.
4885 
4886 
4887    All of the ugliness with predicates and constraints is due to the
4888    simple fact that the m68k does not allow a pc-relative addressing
4889    mode as a destination.  gcc does not distinguish between source and
4890    destination addresses.  Hence, if we claim that pc-relative address
4891    modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
4892    end up with invalid code.  To get around this problem, we left
4893    pc-relative modes as invalid addresses, and then added special
4894    predicates and constraints to accept them.
4895 
4896    A cleaner way to handle this is to modify gcc to distinguish
4897    between source and destination addresses.  We can then say that
4898    pc-relative is a valid source address but not a valid destination
4899    address, and hopefully avoid a lot of the predicate and constraint
4900    hackery.  Unfortunately, this would be a pretty big change.  It would
4901    be a useful change for a number of ports, but there aren't any current
4902    plans to undertake this.
4903 
4904    ***************************************************************************/
4905 
4906 
4907 const char *
4908 output_andsi3 (rtx *operands)
4909 {
4910   int logval;
4911   if (GET_CODE (operands[2]) == CONST_INT
4912       && (INTVAL (operands[2]) | 0xffff) == -1
4913       && (DATA_REG_P (operands[0])
4914 	  || offsettable_memref_p (operands[0]))
4915       && !TARGET_COLDFIRE)
4916     {
4917       if (GET_CODE (operands[0]) != REG)
4918         operands[0] = adjust_address (operands[0], HImode, 2);
4919       operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
4920       /* Do not delete a following tstl %0 insn; that would be incorrect.  */
4921       CC_STATUS_INIT;
4922       if (operands[2] == const0_rtx)
4923         return "clr%.w %0";
4924       return "and%.w %2,%0";
4925     }
4926   if (GET_CODE (operands[2]) == CONST_INT
4927       && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
4928       && (DATA_REG_P (operands[0])
4929           || offsettable_memref_p (operands[0])))
4930     {
4931       if (DATA_REG_P (operands[0]))
4932 	operands[1] = GEN_INT (logval);
4933       else
4934         {
4935 	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
4936 	  operands[1] = GEN_INT (logval % 8);
4937         }
4938       /* This does not set condition codes in a standard way.  */
4939       CC_STATUS_INIT;
4940       return "bclr %1,%0";
4941     }
4942   return "and%.l %2,%0";
4943 }
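
/* An illustrative example (not taken from compiler output): when the AND
   mask clears exactly one bit, the second branch above emits a bclr
   instead of a 32-bit and.  Given a hypothetical offsettable memory
   destination MEM and

	and.l #0xfffeffff,MEM	; ~mask == 0x00010000, so logval == 16

   the operand is rewritten to the byte at offset 3 - 16/8 == 1 and bit
   16 % 8 == 0 within that byte, so the returned template assembles to
   roughly

	bclr #0,MEM+1

   on this big-endian target.  */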

const char *
output_iorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (GET_CODE (operands[0]) != REG)
        operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "mov%.w %2,%0";
      return "or%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
        {
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bset %1,%0";
    }
  return "or%.l %2,%0";
}

const char *
output_xorsi3 (rtx *operands)
{
  register int logval;
  if (GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) >> 16 == 0
      && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
      && !TARGET_COLDFIRE)
    {
      if (! DATA_REG_P (operands[0]))
	operands[0] = adjust_address (operands[0], HImode, 2);
      /* Do not delete a following tstl %0 insn; that would be incorrect.  */
      CC_STATUS_INIT;
      if (INTVAL (operands[2]) == 0xffff)
	return "not%.w %0";
      return "eor%.w %2,%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT
      && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
      && (DATA_REG_P (operands[0])
	  || offsettable_memref_p (operands[0])))
    {
      if (DATA_REG_P (operands[0]))
	operands[1] = GEN_INT (logval);
      else
        {
	  operands[0] = adjust_address (operands[0], SImode, 3 - (logval / 8));
	  operands[1] = GEN_INT (logval % 8);
	}
      CC_STATUS_INIT;
      return "bchg %1,%0";
    }
  return "eor%.l %2,%0";
}
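
/* Likewise for the single-bit rewrites above, as an illustration (not
   taken from compiler output), with a data-register destination:

	or.l #128,%d0	->	bset #7,%d0	; exact_log2 (0x80) == 7
	eor.l #4,%d0	->	bchg #2,%d0	; exact_log2 (0x04) == 2

   Both forms avoid a 32-bit immediate; since bset/bchg do not set the
   condition codes in the standard way, CC_STATUS_INIT is used.  */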

/* Return the instruction that should be used for a call to address X,
   which is known to be in operand 0.  */

const char *
output_call (rtx x)
{
  if (symbolic_operand (x, VOIDmode))
    return m68k_symbolic_call;
  else
    return "jsr %a0";
}

/* Likewise sibling calls.  */

const char *
output_sibcall (rtx x)
{
  if (symbolic_operand (x, VOIDmode))
    return m68k_symbolic_jump;
  else
    return "jmp %a0";
}

static void
m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this_slot, offset, addr, mem, insn, tmp;

  /* Avoid clobbering the struct value reg by using the
     static chain reg as a temporary.  */
  tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* The "this" pointer is stored at 4(%sp).  */
  this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
						 stack_pointer_rtx, 4));

  /* Add DELTA to THIS.  */
  if (delta != 0)
    {
      /* Make the offset a legitimate operand for memory addition.  */
      offset = GEN_INT (delta);
      if ((delta < -8 || delta > 8)
	  && (TARGET_COLDFIRE || USE_MOVQ (delta)))
	{
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
	  offset = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot), offset));
    }

  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
  if (vcall_offset != 0)
    {
      /* Set the static chain register to *THIS.  */
      emit_move_insn (tmp, this_slot);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
      addr = plus_constant (Pmode, tmp, vcall_offset);
      if (!m68k_legitimate_address_p (Pmode, addr, true))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, tmp, addr));
	  addr = tmp;
	}

      /* Load the offset into %d0 and add it to THIS.  */
      emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
		      gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (copy_rtx (this_slot),
				copy_rtx (this_slot),
				gen_rtx_REG (Pmode, D0_REG)));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  mem = DECL_RTL (function);
  if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
    {
      gcc_assert (flag_pic);

      if (!TARGET_SEP_DATA)
	{
	  /* Use the static chain register as a temporary (call-clobbered)
	     GOT pointer for this function.  We can use the static chain
	     register because it isn't live on entry to the thunk.  */
	  SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
	  emit_insn (gen_load_got (pic_offset_table_rtx));
	}
      legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
      mem = replace_equiv_address (mem, tmp);
    }
  insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation.  */
  insn = get_insns ();
  split_all_insns_noflow ();
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  */
  reload_completed = 0;

  /* Restore the original PIC register.  */
  if (flag_pic)
    SET_REGNO (pic_offset_table_rtx, PIC_REG);
}
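
/* For illustration (not taken from compiler output): with DELTA == 4 and
   VCALL_OFFSET == 0 the thunk generated above amounts to something like

	addq.l #4,4(%sp)	; adjust the "this" pointer in place
	jmp target		; sibcall to the target function

   where "target" stands for the thunked-to function; the exact mnemonics
   depend on the configured assembler syntax.  */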

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
}

/* Return nonzero if register OLD_REG can be renamed to register NEW_REG.  */
int
m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			   unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */

  if ((m68k_get_function_kind (current_function_decl)
       == m68k_fk_interrupt_handler)
      && !df_regs_ever_live_p (new_reg))
    return 0;

  return 1;
}

/* Value is true if hard register REGNO can hold a value of machine-mode
   MODE.  On the 68000, we let the CPU registers hold any mode, but
   restrict the 68881 registers to floating-point modes.  */

bool
m68k_regno_mode_ok (int regno, enum machine_mode mode)
{
  if (DATA_REGNO_P (regno))
    {
      /* Data registers can hold any mode, provided that a multi-word
	 value fits entirely within d0-d7.  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
	return true;
    }
  else if (ADDRESS_REGNO_P (regno))
    {
      /* Likewise, a multi-word value must fit within the address
	 registers (a0-a7).  */
      if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
	return true;
    }
  else if (FP_REGNO_P (regno))
    {
      /* FPU registers can hold floating-point or complex floating-point
	 values of long double size or smaller.  */
      if ((GET_MODE_CLASS (mode) == MODE_FLOAT
	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
	  && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
	return true;
    }
  return false;
}

/* Implement SECONDARY_RELOAD_CLASS.  */

enum reg_class
m68k_secondary_reload_class (enum reg_class rclass,
			     enum machine_mode mode, rtx x)
{
  int regno;

  regno = true_regnum (x);

  /* If one operand of a movqi is an address register, the other
     operand must be a general register or constant.  Other types
     of operand must be reloaded through a data register.  */
  if (GET_MODE_SIZE (mode) == 1
      && reg_classes_intersect_p (rclass, ADDR_REGS)
      && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
    return DATA_REGS;

  /* PC-relative addresses must be loaded into an address register first.  */
  if (TARGET_PCREL
      && !reg_class_subset_p (rclass, ADDR_REGS)
      && symbolic_operand (x, VOIDmode))
    return ADDR_REGS;

  return NO_REGS;
}

/* Implement PREFERRED_RELOAD_CLASS.  */

enum reg_class
m68k_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class secondary_class;

  /* If RCLASS might need a secondary reload, try restricting it to
     a class that doesn't.  */
  secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
  if (secondary_class != NO_REGS
      && reg_class_subset_p (secondary_class, rclass))
    return secondary_class;

  /* Prefer to use moveq for in-range constants.  */
  if (GET_CODE (x) == CONST_INT
      && reg_class_subset_p (DATA_REGS, rclass)
      && IN_RANGE (INTVAL (x), -0x80, 0x7f))
    return DATA_REGS;

  /* ??? Do we really need this now?  */
  if (GET_CODE (x) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
	return FP_REGS;

      return NO_REGS;
    }

  return rclass;
}

/* Return floating point values in a 68881 register.  This makes 68881 code
   a little bit faster.  It also makes -msoft-float code incompatible with
   hard-float code, so people have to be careful not to mix the two.
   For ColdFire it was decided the ABI incompatibility is undesirable.
   If there is need for a hard-float ABI it is probably worth doing it
   properly and also passing function arguments in FP registers.  */
rtx
m68k_libcall_value (enum machine_mode mode)
{
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
}

/* Location in which function value is returned.
   NOTE: Due to differences in ABIs, don't call this function directly,
   use FUNCTION_VALUE instead.  */
rtx
m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;

  mode = TYPE_MODE (valtype);
  switch (mode) {
  case SFmode:
  case DFmode:
  case XFmode:
    if (TARGET_68881)
      return gen_rtx_REG (mode, FP0_REG);
    break;
  default:
    break;
  }

  /* If the function returns a pointer, return it in %a0.  */
  if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
    /* For compatibility with the large body of existing code which
       does not always properly declare external functions returning
       pointer types, the m68k/SVR4 convention is to copy the value
       returned for pointer functions from a0 to d0 in the function
       epilogue, so that callers that have neglected to properly
       declare the callee can still find the correct return value in
       d0.  */
    return gen_rtx_PARALLEL
      (mode,
       gen_rtvec (2,
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, A0_REG),
				     const0_rtx),
		  gen_rtx_EXPR_LIST (VOIDmode,
				     gen_rtx_REG (mode, D0_REG),
				     const0_rtx)));
  else if (POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, A0_REG);
  else
    return gen_rtx_REG (mode, D0_REG);
}
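
/* For illustration (not taken from compiler output): for a pointer-valued
   function the PARALLEL built above looks roughly like

	(parallel [(expr_list (reg:SI %a0) (const_int 0))
		   (expr_list (reg:SI %d0) (const_int 0))])

   i.e. the middle end is told that the same return value lives in both
   %a0 and %d0.  */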

/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = TYPE_MODE (type);

  if (mode == BLKmode)
    return true;

  /* If TYPE's known alignment is less than the alignment of MODE that
     would contain the structure, then return in memory.  We need to
     do so to maintain the compatibility between code compiled with
     -mstrict-align and that compiled with -mno-strict-align.  */
  if (AGGREGATE_TYPE_P (type)
      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
    return true;

  return false;
}
#endif

/* CPU to schedule the program for.  */
enum attr_cpu m68k_sched_cpu;

/* MAC to schedule the program for.  */
enum attr_mac m68k_sched_mac;

/* Operand type.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };

/* Return the type of the memory location that ADDR_RTX refers to.  */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
  struct m68k_address address;

  if (symbolic_operand (addr_rtx, VOIDmode))
    return OP_TYPE_MEM7;

  if (!m68k_decompose_address (mode, addr_rtx,
			       reload_completed, &address))
    {
      gcc_assert (!reload_completed);
      /* Reload will likely fix the address to be in a register.  */
      return OP_TYPE_MEM234;
    }

  if (address.scale != 0)
    return OP_TYPE_MEM6;

  if (address.base != NULL_RTX)
    {
      if (address.offset == NULL_RTX)
	return OP_TYPE_MEM234;

      return OP_TYPE_MEM5;
    }

  gcc_assert (address.offset != NULL_RTX);

  return OP_TYPE_MEM7;
}
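
/* Illustrative mapping (not taken from compiler output) from addressing
   modes to the classes computed above:

	(%a0), (%a0)+, -(%a0)	-> OP_TYPE_MEM234	; EA modes 2, 3, 4
	(16,%a0)		-> OP_TYPE_MEM5		; base + offset
	(8,%a0,%d1.l*4)		-> OP_TYPE_MEM6		; scale != 0
	symbol			-> OP_TYPE_MEM7		; absolute address  */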

/* Return X or Y (depending on OPX_P) operand of INSN.  */
static rtx
sched_get_operand (rtx insn, bool opx_p)
{
  int i;

  if (recog_memoized (insn) < 0)
    gcc_unreachable ();

  extract_constrain_insn_cached (insn);

  if (opx_p)
    i = get_attr_opx (insn);
  else
    i = get_attr_opy (insn);

  if (i >= recog_data.n_operands)
    return NULL;

  return recog_data.operand[i];
}

/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
   If ADDRESS_P is true, return type of memory location operand refers to.  */
static enum attr_op_type
sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
  rtx op;

  op = sched_get_operand (insn, opx_p);

  if (op == NULL)
    {
      gcc_assert (!reload_completed);
      return OP_TYPE_RN;
    }

  if (address_p)
    return sched_address_type (QImode, op);

  if (memory_operand (op, VOIDmode))
    return sched_address_type (GET_MODE (op), XEXP (op, 0));

  if (register_operand (op, VOIDmode))
    {
      if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
	  || (reload_completed && FP_REG_P (op)))
	return OP_TYPE_FPN;

      return OP_TYPE_RN;
    }

  if (GET_CODE (op) == CONST_INT)
    {
      int ival;

      ival = INTVAL (op);

      /* Check for quick constants.  */
      switch (get_attr_type (insn))
	{
	case TYPE_ALUQ_L:
	  if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOVEQ_L:
	  if (USE_MOVQ (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	case TYPE_MOV3Q_L:
	  if (valid_mov3q_const (ival))
	    return OP_TYPE_IMM_Q;

	  gcc_assert (!reload_completed);
	  break;

	default:
	  break;
	}

      if (IN_RANGE (ival, -0x8000, 0x7fff))
	return OP_TYPE_IMM_W;

      return OP_TYPE_IMM_L;
    }

  if (GET_CODE (op) == CONST_DOUBLE)
    {
      switch (GET_MODE (op))
	{
	case SFmode:
	  return OP_TYPE_IMM_W;

	case VOIDmode:
	case DFmode:
	  return OP_TYPE_IMM_L;

	default:
	  gcc_unreachable ();
	}
    }

  if (GET_CODE (op) == CONST
      || symbolic_operand (op, VOIDmode)
      || LABEL_P (op))
    {
      switch (GET_MODE (op))
	{
	case QImode:
	  return OP_TYPE_IMM_Q;

	case HImode:
	  return OP_TYPE_IMM_W;

	case SImode:
	  return OP_TYPE_IMM_L;

	default:
	  if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
	    /* Just a guess.  */
	    return OP_TYPE_IMM_W;

	  return OP_TYPE_IMM_L;
	}
    }

  gcc_assert (!reload_completed);

  if (FLOAT_MODE_P (GET_MODE (op)))
    return OP_TYPE_FPN;

  return OP_TYPE_RN;
}

/* Implement opx_type attribute.
   Return type of INSN's operand X.
   If ADDRESS_P is true, return type of memory location operand refers to.  */
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, true, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPX_TYPE_RN;

    case OP_TYPE_FPN:
      return OPX_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPX_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPX_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPX_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPX_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPX_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPX_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPX_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPX_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}

/* Implement opy_type attribute.
   Return type of INSN's operand Y.
   If ADDRESS_P is true, return type of memory location operand refers to.  */
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
  switch (sched_attr_op_type (insn, false, address_p != 0))
    {
    case OP_TYPE_RN:
      return OPY_TYPE_RN;

    case OP_TYPE_FPN:
      return OPY_TYPE_FPN;

    case OP_TYPE_MEM1:
      return OPY_TYPE_MEM1;

    case OP_TYPE_MEM234:
      return OPY_TYPE_MEM234;

    case OP_TYPE_MEM5:
      return OPY_TYPE_MEM5;

    case OP_TYPE_MEM6:
      return OPY_TYPE_MEM6;

    case OP_TYPE_MEM7:
      return OPY_TYPE_MEM7;

    case OP_TYPE_IMM_Q:
      return OPY_TYPE_IMM_Q;

    case OP_TYPE_IMM_W:
      return OPY_TYPE_IMM_W;

    case OP_TYPE_IMM_L:
      return OPY_TYPE_IMM_L;

    default:
      gcc_unreachable ();
    }
}

/* Return size of INSN as int.  */
static int
sched_get_attr_size_int (rtx insn)
{
  int size;

  switch (get_attr_type (insn))
    {
    case TYPE_IGNORE:
      /* There should be no references to m68k_sched_attr_size for 'ignore'
	 instructions.  */
      gcc_unreachable ();
      return 0;

    case TYPE_MUL_L:
      size = 2;
      break;

    default:
      size = 1;
      break;
    }

  switch (get_attr_opx_type (insn))
    {
    case OPX_TYPE_NONE:
    case OPX_TYPE_RN:
    case OPX_TYPE_FPN:
    case OPX_TYPE_MEM1:
    case OPX_TYPE_MEM234:
    case OPX_TYPE_IMM_Q:
      break;

    case OPX_TYPE_MEM5:
    case OPX_TYPE_MEM6:
      /* Here we assume that most absolute references are short.  */
    case OPX_TYPE_MEM7:
    case OPX_TYPE_IMM_W:
      ++size;
      break;

    case OPX_TYPE_IMM_L:
      size += 2;
      break;

    default:
      gcc_unreachable ();
    }

  switch (get_attr_opy_type (insn))
    {
    case OPY_TYPE_NONE:
    case OPY_TYPE_RN:
    case OPY_TYPE_FPN:
    case OPY_TYPE_MEM1:
    case OPY_TYPE_MEM234:
    case OPY_TYPE_IMM_Q:
      break;

    case OPY_TYPE_MEM5:
    case OPY_TYPE_MEM6:
      /* Here we assume that most absolute references are short.  */
    case OPY_TYPE_MEM7:
    case OPY_TYPE_IMM_W:
      ++size;
      break;

    case OPY_TYPE_IMM_L:
      size += 2;
      break;

    default:
      gcc_unreachable ();
    }

  if (size > 3)
    {
      gcc_assert (!reload_completed);

      size = 3;
    }

  return size;
}
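
/* Worked example (not taken from compiler output): for

	move.l #123456,(%a0)

   the base size is 1 word, the destination (opx, OPX_TYPE_MEM234) adds
   nothing and the 32-bit immediate source (opy, OPY_TYPE_IMM_L) adds 2,
   matching the actual 3-word encoding: one opcode word plus two
   immediate extension words.  */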

/* Return size of INSN as attribute enum value.  */
enum attr_size
m68k_sched_attr_size (rtx insn)
{
  switch (sched_get_attr_size_int (insn))
    {
    case 1:
      return SIZE_1;

    case 2:
      return SIZE_2;

    case 3:
      return SIZE_3;

    default:
      gcc_unreachable ();
    }
}

/* Classify operand X or Y (depending on OPX_P) of INSN for the op_mem
   attribute: OP_TYPE_MEM6 for indexed memory references, OP_TYPE_MEM1
   for all other memory references, and OP_TYPE_RN otherwise.  */
static enum attr_op_type
sched_get_opxy_mem_type (rtx insn, bool opx_p)
{
  if (opx_p)
    {
      switch (get_attr_opx_type (insn))
	{
	case OPX_TYPE_NONE:
	case OPX_TYPE_RN:
	case OPX_TYPE_FPN:
	case OPX_TYPE_IMM_Q:
	case OPX_TYPE_IMM_W:
	case OPX_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPX_TYPE_MEM1:
	case OPX_TYPE_MEM234:
	case OPX_TYPE_MEM5:
	case OPX_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPX_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (get_attr_opy_type (insn))
	{
	case OPY_TYPE_NONE:
	case OPY_TYPE_RN:
	case OPY_TYPE_FPN:
	case OPY_TYPE_IMM_Q:
	case OPY_TYPE_IMM_W:
	case OPY_TYPE_IMM_L:
	  return OP_TYPE_RN;

	case OPY_TYPE_MEM1:
	case OPY_TYPE_MEM234:
	case OPY_TYPE_MEM5:
	case OPY_TYPE_MEM7:
	  return OP_TYPE_MEM1;

	case OPY_TYPE_MEM6:
	  return OP_TYPE_MEM6;

	default:
	  gcc_unreachable ();
	}
    }
}

/* Implement op_mem attribute.  */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
  enum attr_op_type opx;
  enum attr_op_type opy;

  opx = sched_get_opxy_mem_type (insn, true);
  opy = sched_get_opxy_mem_type (insn, false);

  if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
    return OP_MEM_00;

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_10;

	case OPX_ACCESS_W:
	  return OP_MEM_01;

	case OPX_ACCESS_RW:
	  return OP_MEM_11;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_R:
	  return OP_MEM_I0;

	case OPX_ACCESS_W:
	  return OP_MEM_0I;

	case OPX_ACCESS_RW:
	  return OP_MEM_I1;

	default:
	  gcc_unreachable ();
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
    return OP_MEM_10;

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_11;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_11;
	}
    }

  if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_1I;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_1I;
	}
    }

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
    return OP_MEM_I0;

  if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
    {
      switch (get_attr_opx_access (insn))
	{
	case OPX_ACCESS_W:
	  return OP_MEM_I1;

	default:
	  gcc_assert (!reload_completed);
	  return OP_MEM_I1;
	}
    }

  gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
  gcc_assert (!reload_completed);
  return OP_MEM_I1;
}
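
/* Illustrative examples (not taken from compiler output) of the
   classification above:

	add.l %d1,%d0			-> OP_MEM_00	; no memory operands
	add.l (%a0),%d0			-> OP_MEM_10	; memory read
	add.l %d1,(%a0)			-> OP_MEM_11	; read-modify-write
	move.l %d0,(8,%a1,%d1.l)	-> OP_MEM_0I	; indexed memory write  */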

/* Data for ColdFire V4 index bypass.
   The producer modifies a register that the consumer uses as an index
   with the specified scale.  */
static struct
{
  /* Producer instruction.  */
  rtx pro;

  /* Consumer instruction.  */
  rtx con;

  /* Scale of the indexed memory access within the consumer,
     or zero if the bypass should not be effective at the moment.  */
  int scale;
} sched_cfv4_bypass_data;

/* An empty state that is used in m68k_sched_adjust_cost.  */
static state_t sched_adjust_cost_state;

/* Implement adjust_cost scheduler hook.
   Return adjusted COST of dependency LINK between DEF_INSN and INSN.  */
static int
m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
			int cost)
{
  int delay;

  if (recog_memoized (def_insn) < 0
      || recog_memoized (insn) < 0)
    return cost;

  if (sched_cfv4_bypass_data.scale == 1)
    /* Handle ColdFire V4 bypass for indexed address with 1x scale.  */
    {
      /* haifa-sched.c: insn_cost () calls bypass_p () just before
	 targetm.sched.adjust_cost ().  Hence, we can be relatively sure
	 that the data in sched_cfv4_bypass_data is up to date.  */
      gcc_assert (sched_cfv4_bypass_data.pro == def_insn
		  && sched_cfv4_bypass_data.con == insn);

      if (cost < 3)
	cost = 3;

      sched_cfv4_bypass_data.pro = NULL;
      sched_cfv4_bypass_data.con = NULL;
      sched_cfv4_bypass_data.scale = 0;
    }
  else
    gcc_assert (sched_cfv4_bypass_data.pro == NULL
		&& sched_cfv4_bypass_data.con == NULL
		&& sched_cfv4_bypass_data.scale == 0);

  /* Don't try to issue INSN earlier than the DFA permits.
     This is especially useful for instructions that write to memory,
     as their true dependence (default) latency is better set to 0
     to work around alias analysis limitations.
     This is, in fact, a machine-independent tweak, so it should
     probably be moved to haifa-sched.c: insn_cost ().  */
  delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
  if (delay > cost)
    cost = delay;

  return cost;
}

/* Return maximal number of insns that can be scheduled on a single cycle.  */
static int
m68k_sched_issue_rate (void)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
    case CPU_CFV3:
      return 1;

    case CPU_CFV4:
      return 2;

    default:
      gcc_unreachable ();
      return 0;
    }
}

/* Maximal length of instruction for current CPU.
   E.g. it is 3 for any ColdFire core.  */
static int max_insn_size;

/* Data to model instruction buffer of CPU.  */
struct _sched_ib
{
  /* True if the instruction buffer is modeled for the current CPU.  */
  bool enabled_p;

  /* Size of the instruction buffer in words.  */
  int size;

  /* Number of filled words in the instruction buffer.  */
  int filled;

  /* Additional information about instruction buffer for CPUs that have
     a buffer of instruction records, rather than a plain buffer
     of instruction words.  */
  struct _sched_ib_records
  {
    /* Size of buffer in records.  */
    int n_insns;

    /* Array to hold data on adjustments made to the size of the buffer.  */
    int *adjust;

    /* Index into the above array.  */
    int adjust_index;
  } records;

  /* An insn that reserves (marks empty) one word in the instruction buffer.  */
  rtx insn;
};

static struct _sched_ib sched_ib;

/* ID of memory unit.  */
static int sched_mem_unit_code;

/* Implementation of the targetm.sched.variable_issue () hook.
   It is called after INSN was issued.  It returns the number of insns
   that can possibly get scheduled on the current cycle.
   It is used here to determine the effect of INSN on the instruction
   buffer.  */
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   rtx insn, int can_issue_more)
{
  int insn_size;

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
    {
      switch (m68k_sched_cpu)
	{
	case CPU_CFV1:
	case CPU_CFV2:
	  insn_size = sched_get_attr_size_int (insn);
	  break;

	case CPU_CFV3:
	  insn_size = sched_get_attr_size_int (insn);

	  /* ColdFire V3 and V4 cores have instruction buffers that can
	     accumulate up to 8 instructions regardless of instructions'
	     sizes.  So we should take care not to "prefetch" 24 one-word
	     or 12 two-word instructions.
	     To model this behavior we temporarily decrease the size of the
	     buffer by (max_insn_size - insn_size) for the next 7
	     instructions.  */
	  {
	    int adjust;

	    adjust = max_insn_size - insn_size;
	    sched_ib.size -= adjust;

	    if (sched_ib.filled > sched_ib.size)
	      sched_ib.filled = sched_ib.size;

	    sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
	  }

	  ++sched_ib.records.adjust_index;
	  if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
	    sched_ib.records.adjust_index = 0;

	  /* Undo the adjustment we made 7 instructions ago.  */
	  sched_ib.size
	    += sched_ib.records.adjust[sched_ib.records.adjust_index];

	  break;

	case CPU_CFV4:
	  gcc_assert (!sched_ib.enabled_p);
	  insn_size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (insn_size > sched_ib.filled)
	/* Scheduling for register pressure does not always take DFA into
	   account.  Work around the instruction buffer not being filled
	   enough.  */
	{
	  gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
	  insn_size = sched_ib.filled;
	}

      --can_issue_more;
    }
  else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	   || asm_noperands (PATTERN (insn)) >= 0)
    insn_size = sched_ib.filled;
  else
    insn_size = 0;

  sched_ib.filled -= insn_size;

  return can_issue_more;
}
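
/* Worked example (not taken from compiler output): on ColdFire V3 the
   buffer holds 8 records of up to max_insn_size == 3 words, so
   sched_ib.size starts at 24 words.  Issuing a 1-word instruction
   records adjust == 3 - 1 == 2 and shrinks the effective size to 22
   words until the adjustment is undone 7 instructions later, modelling
   the fact that the 8-record buffer can never hold 24 one-word
   instructions.  */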

/* Return how many instructions the scheduler should look ahead to choose
   the best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  return m68k_sched_issue_rate () - 1;
}

/* Implementation of targetm.sched.init_global () hook.
   It is invoked once per scheduling pass and is used here
   to initialize scheduler constants.  */
static void
m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
			   int sched_verbose ATTRIBUTE_UNUSED,
			   int n_insns ATTRIBUTE_UNUSED)
{
#ifdef ENABLE_CHECKING
  /* Check that all instructions have DFA reservations and
     that all instructions can be issued from a clean state.  */
  {
    rtx insn;
    state_t state;

    state = alloca (state_size ());

    for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && recog_memoized (insn) >= 0)
	  {
	    gcc_assert (insn_has_dfa_reservation_p (insn));

	    state_reset (state);
	    if (state_transition (state, insn) >= 0)
	      gcc_unreachable ();
	  }
      }
  }
#endif

  /* Set up the target CPU.  */

  /* ColdFire V4 has a set of features to keep its instruction buffer full
     (e.g., a separate memory bus for instructions) and, hence, we do not
     model the buffer for this CPU.  */
  sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);

  switch (m68k_sched_cpu)
    {
    case CPU_CFV4:
      sched_ib.filled = 0;

      /* FALLTHRU */

    case CPU_CFV1:
    case CPU_CFV2:
      max_insn_size = 3;
      sched_ib.records.n_insns = 0;
      sched_ib.records.adjust = NULL;
      break;

    case CPU_CFV3:
      max_insn_size = 3;
      sched_ib.records.n_insns = 8;
      sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
      break;

    default:
      gcc_unreachable ();
    }

  sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");

  sched_adjust_cost_state = xmalloc (state_size ());
  state_reset (sched_adjust_cost_state);

  start_sequence ();
  emit_insn (gen_ib ());
  sched_ib.insn = get_insns ();
  end_sequence ();
}

/* Scheduling pass is now finished.  Free/reset static variables.  */
static void
m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
			     int verbose ATTRIBUTE_UNUSED)
{
  sched_ib.insn = NULL;

  free (sched_adjust_cost_state);
  sched_adjust_cost_state = NULL;

  sched_mem_unit_code = 0;

  free (sched_ib.records.adjust);
  sched_ib.records.adjust = NULL;
  sched_ib.records.n_insns = 0;
  max_insn_size = 0;
}

/* Implementation of targetm.sched.init () hook.
   It is invoked each time the scheduler starts on a new block (basic block
   or extended basic block).  */
static void
m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
		    int sched_verbose ATTRIBUTE_UNUSED,
		    int n_insns ATTRIBUTE_UNUSED)
{
  switch (m68k_sched_cpu)
    {
    case CPU_CFV1:
    case CPU_CFV2:
      sched_ib.size = 6;
      break;

    case CPU_CFV3:
      sched_ib.size = sched_ib.records.n_insns * max_insn_size;

      memset (sched_ib.records.adjust, 0,
	      sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
      sched_ib.records.adjust_index = 0;
      break;

    case CPU_CFV4:
      gcc_assert (!sched_ib.enabled_p);
      sched_ib.size = 0;
      break;

    default:
      gcc_unreachable ();
    }

  if (sched_ib.enabled_p)
    /* haifa-sched.c: schedule_block () calls advance_cycle () just before
       the first cycle.  Work around that.  */
    sched_ib.filled = -2;
}

/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
   It is invoked just before the current cycle finishes and is used here
   to track whether the instruction buffer got its two words this cycle.  */
static void
m68k_sched_dfa_pre_advance_cycle (void)
{
  if (!sched_ib.enabled_p)
    return;

  if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
    {
      sched_ib.filled += 2;

      if (sched_ib.filled > sched_ib.size)
	sched_ib.filled = sched_ib.size;
    }
}

/* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
   It is invoked just after a new cycle begins and is used here
   to set up the number of filled words in the instruction buffer so that
   instructions which won't have all their words prefetched will be
   stalled for a cycle.  */
static void
m68k_sched_dfa_post_advance_cycle (void)
{
  int i;

  if (!sched_ib.enabled_p)
    return;

  /* Set up the number of prefetched instruction words in the instruction
     buffer.  */
  i = max_insn_size - sched_ib.filled;

  while (--i >= 0)
    {
      if (state_transition (curr_state, sched_ib.insn) >= 0)
	/* Pick up scheduler state.  */
	++sched_ib.filled;
    }
}

/* Return X or Y (depending on OPX_P) operand of INSN,
   if it is an integer register, or NULL otherwise.  */
static rtx
sched_get_reg_operand (rtx insn, bool opx_p)
{
  rtx op = NULL;

  if (opx_p)
    {
      if (get_attr_opx_type (insn) == OPX_TYPE_RN)
	{
	  op = sched_get_operand (insn, true);
	  gcc_assert (op != NULL);

	  if (!reload_completed && !REG_P (op))
	    return NULL;
	}
    }
  else
    {
      if (get_attr_opy_type (insn) == OPY_TYPE_RN)
	{
	  op = sched_get_operand (insn, false);
	  gcc_assert (op != NULL);

	  if (!reload_completed && !REG_P (op))
	    return NULL;
	}
    }

  return op;
}

/* Return true if the X or Y (depending on OPX_P) operand of INSN
   is a MEM.  */
static bool
sched_mem_operand_p (rtx insn, bool opx_p)
{
  switch (sched_get_opxy_mem_type (insn, opx_p))
    {
    case OP_TYPE_MEM1:
    case OP_TYPE_MEM6:
      return true;

    default:
      return false;
    }
}

/* Return a MEM operand of INSN: if MUST_WRITE_P, the written (X) operand;
   if MUST_READ_P only, a memory operand that is read, preferring Y
   over X.  */
static rtx
sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
{
  bool opx_p;
  bool opy_p;

  opx_p = false;
  opy_p = false;

  if (must_read_p)
    {
      opx_p = true;
      opy_p = true;
    }

  if (must_write_p)
    {
      opx_p = true;
      opy_p = false;
    }

  if (opy_p && sched_mem_operand_p (insn, false))
    return sched_get_operand (insn, false);

  if (opx_p && sched_mem_operand_p (insn, true))
    return sched_get_operand (insn, true);

  gcc_unreachable ();
  return NULL;
}

/* Return non-zero if PRO modifies a register used as part of
   an address in CON.  */
int
m68k_sched_address_bypass_p (rtx pro, rtx con)
{
  rtx pro_x;
  rtx con_mem_read;

  pro_x = sched_get_reg_operand (pro, true);
  if (pro_x == NULL)
    return 0;

  con_mem_read = sched_get_mem_operand (con, true, false);
  gcc_assert (con_mem_read != NULL);

  if (reg_mentioned_p (pro_x, con_mem_read))
    return 1;

  return 0;
}

/* Helper function for m68k_sched_indexed_address_bypass_p.
   If PRO modifies a register used as an index in CON,
   return the scale of the indexed memory access in CON.
   Return zero otherwise.  */
static int
sched_get_indexed_address_scale (rtx pro, rtx con)
{
  rtx reg;
  rtx mem;
  struct m68k_address address;

  reg = sched_get_reg_operand (pro, true);
  if (reg == NULL)
    return 0;

  mem = sched_get_mem_operand (con, true, false);
  gcc_assert (mem != NULL && MEM_P (mem));

  if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
			       &address))
    gcc_unreachable ();

  if (REGNO (reg) == REGNO (address.index))
    {
      gcc_assert (address.scale != 0);
      return address.scale;
    }

  return 0;
}

/* Return non-zero if PRO modifies a register used
   as an index with scale 2 or 4 in CON.  */
int
m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
{
  gcc_assert (sched_cfv4_bypass_data.pro == NULL
	      && sched_cfv4_bypass_data.con == NULL
	      && sched_cfv4_bypass_data.scale == 0);

  switch (sched_get_indexed_address_scale (pro, con))
    {
    case 1:
      /* We can't have a variable latency bypass, so
	 remember to adjust the insn cost in the adjust_cost hook.  */
      sched_cfv4_bypass_data.pro = pro;
      sched_cfv4_bypass_data.con = con;
      sched_cfv4_bypass_data.scale = 1;
      return 0;

    case 2:
    case 4:
      return 1;

    default:
      return 0;
    }
}

/* We generate a two-instruction program at M_TRAMP:
	movea.l &CHAIN_VALUE,%a0
	jmp FNADDR
   where %a0 can be modified by changing STATIC_CHAIN_REGNUM.  */

static void
m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx mem;

  gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));

  mem = adjust_address (m_tramp, HImode, 0);
  emit_move_insn (mem, GEN_INT (0x207C + ((STATIC_CHAIN_REGNUM - 8) << 9)));
  mem = adjust_address (m_tramp, SImode, 2);
  emit_move_insn (mem, chain_value);

  mem = adjust_address (m_tramp, HImode, 6);
  emit_move_insn (mem, GEN_INT (0x4EF9));
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, fnaddr);

  FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
}
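
/* For illustration (not taken from compiler output): with %a0 as the
   static chain register, the 12-byte trampoline laid out above is

	0x207c <chain_value.l>	; movea.l #CHAIN_VALUE,%a0
	0x4ef9 <fnaddr.l>	; jmp FNADDR (absolute long)

   The "(STATIC_CHAIN_REGNUM - 8) << 9" term patches the destination
   register field of the movea.l opcode if a different address register
   is configured as the static chain.  */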

/* On the 68000, the RTS insn cannot pop anything.
   On the 68010, the RTD insn may be used to pop them if the number
     of args is fixed, but if the number is variable then the caller
     must pop them all.  RTD can't be used for library calls now
     because the library is compiled with the Unix compiler.
   Use of RTD is a selectable option, since it is incompatible with
   standard Unix calling sequences.  If the option is not selected,
   the caller must always pop the args.  */

static int
m68k_return_pops_args (tree fundecl, tree funtype, int size)
{
  return ((TARGET_RTD
	   && (!fundecl
	       || TREE_CODE (fundecl) != IDENTIFIER_NODE)
	   && (!stdarg_p (funtype)))
	  ? size : 0);
}

/* Make sure everything's fine if we *don't* have a given processor.
   This assumes that putting a register in fixed_regs will keep the
   compiler's mitts completely off it.  We don't bother to zero it out
   of register classes.  */

static void
m68k_conditional_register_usage (void)
{
  int i;
  HARD_REG_SET x;
  if (!TARGET_HARD_FLOAT)
    {
      COPY_HARD_REG_SET (x, reg_class_contents[(int) FP_REGS]);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (x, i))
	  fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
}

/* Set up the out-of-line __sync_* libfuncs, for operand sizes up to
   UNITS_PER_WORD.  */
static void
m68k_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}

/* Implements EPILOGUE_USES.  All registers are live on exit from an
   interrupt routine.  */
bool
m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
{
  return (reload_completed
	  && (m68k_get_function_kind (current_function_decl)
	      == m68k_fk_interrupt_handler));
}

#include "gt-m68k.h"