1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
61
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
64
65 static const char * const alpha_cpu_name[] =
66 {
67 "ev4", "ev5", "ev6"
68 };
69
70 /* Specify how accurate floating-point traps need to be. */
71
72 enum alpha_trap_precision alpha_tp;
73
74 /* Specify the floating-point rounding mode. */
75
76 enum alpha_fp_rounding_mode alpha_fprm;
77
78 /* Specify which things cause traps. */
79
80 enum alpha_fp_trap_mode alpha_fptm;
81
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
84
85 struct alpha_compare alpha_compare;
86
87 /* Nonzero if inside of a function, because the Alpha assembler can't
88 handle .file directives inside of functions.  */
89
90 static int inside_function = FALSE;
91
92 /* The number of cycles of latency we should assume on memory reads. */
93
94 int alpha_memory_latency = 3;
95
96 /* Whether the function needs the GP. */
97
98 static int alpha_function_needs_gp;
99
100 /* The alias set for prologue/epilogue register save/restore. */
101
102 static GTY(()) int alpha_sr_alias_set;
103
104 /* The assembler name of the current function. */
105
106 static const char *alpha_fnname;
107
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
111
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
118
119 /* Costs of various operations on the different architectures. */
120
121 struct alpha_rtx_cost_data
122 {
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
132 };
133
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
135 {
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
146 },
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
157 },
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
168 },
169 };
170
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
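/* For illustration (assuming GCC's usual COSTS_N_INSNS (N) == (N) * 4),
   COSTS_N_INSNS (1) + 1 acts as a cost of 1.25 insns: a single shift
   (cost 4) still beats an SImode multiply (cost 5), but two shifts
   (cost 8) do not.  */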
175
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
177 {
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
187 };
188
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
195
196 #define REG_PV 27
197 #define REG_RA 26
198
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
202
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
206
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
211
212 /* Implement TARGET_HANDLE_OPTION. */
213
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
216 {
217 switch (code)
218 {
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
223
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
228
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
233 }
234
235 return true;
236 }
237
238 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
239 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
240
241 static const char *
242 alpha_mangle_fundamental_type (tree type)
243 {
244 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
245 && TARGET_LONG_DOUBLE_128)
246 return "g";
247
248 /* For all other types, use normal C++ mangling. */
249 return NULL;
250 }
251 #endif
252
253 /* Parse target option strings. */
254
255 void
256 override_options (void)
257 {
258 static const struct cpu_table {
259 const char *const name;
260 const enum processor_type processor;
261 const int flags;
262 } cpu_table[] = {
263 { "ev4", PROCESSOR_EV4, 0 },
264 { "ev45", PROCESSOR_EV4, 0 },
265 { "21064", PROCESSOR_EV4, 0 },
266 { "ev5", PROCESSOR_EV5, 0 },
267 { "21164", PROCESSOR_EV5, 0 },
268 { "ev56", PROCESSOR_EV5, MASK_BWX },
269 { "21164a", PROCESSOR_EV5, MASK_BWX },
270 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
276 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { 0, 0, 0 }
278 };
279
280 int i;
281
282 /* Unicos/Mk doesn't have shared libraries. */
283 if (TARGET_ABI_UNICOSMK && flag_pic)
284 {
285 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
286 (flag_pic > 1) ? "PIC" : "pic");
287 flag_pic = 0;
288 }
289
290 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
291 floating-point instructions. Make that the default for this target. */
292 if (TARGET_ABI_UNICOSMK)
293 alpha_fprm = ALPHA_FPRM_DYN;
294 else
295 alpha_fprm = ALPHA_FPRM_NORM;
296
297 alpha_tp = ALPHA_TP_PROG;
298 alpha_fptm = ALPHA_FPTM_N;
299
300 /* We cannot use su and sui qualifiers for conversion instructions on
301 Unicos/Mk. I'm not sure if this is due to assembler or hardware
302 limitations. Right now, we issue a warning if -mieee is specified
303 and then ignore it; eventually, we should either get it right or
304 disable the option altogether. */
305
306 if (TARGET_IEEE)
307 {
308 if (TARGET_ABI_UNICOSMK)
309 warning (0, "-mieee not supported on Unicos/Mk");
310 else
311 {
312 alpha_tp = ALPHA_TP_INSN;
313 alpha_fptm = ALPHA_FPTM_SU;
314 }
315 }
316
317 if (TARGET_IEEE_WITH_INEXACT)
318 {
319 if (TARGET_ABI_UNICOSMK)
320 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
321 else
322 {
323 alpha_tp = ALPHA_TP_INSN;
324 alpha_fptm = ALPHA_FPTM_SUI;
325 }
326 }
327
328 if (alpha_tp_string)
329 {
330 if (! strcmp (alpha_tp_string, "p"))
331 alpha_tp = ALPHA_TP_PROG;
332 else if (! strcmp (alpha_tp_string, "f"))
333 alpha_tp = ALPHA_TP_FUNC;
334 else if (! strcmp (alpha_tp_string, "i"))
335 alpha_tp = ALPHA_TP_INSN;
336 else
337 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
338 }
339
340 if (alpha_fprm_string)
341 {
342 if (! strcmp (alpha_fprm_string, "n"))
343 alpha_fprm = ALPHA_FPRM_NORM;
344 else if (! strcmp (alpha_fprm_string, "m"))
345 alpha_fprm = ALPHA_FPRM_MINF;
346 else if (! strcmp (alpha_fprm_string, "c"))
347 alpha_fprm = ALPHA_FPRM_CHOP;
348 else if (! strcmp (alpha_fprm_string,"d"))
349 alpha_fprm = ALPHA_FPRM_DYN;
350 else
351 error ("bad value %qs for -mfp-rounding-mode switch",
352 alpha_fprm_string);
353 }
354
355 if (alpha_fptm_string)
356 {
357 if (strcmp (alpha_fptm_string, "n") == 0)
358 alpha_fptm = ALPHA_FPTM_N;
359 else if (strcmp (alpha_fptm_string, "u") == 0)
360 alpha_fptm = ALPHA_FPTM_U;
361 else if (strcmp (alpha_fptm_string, "su") == 0)
362 alpha_fptm = ALPHA_FPTM_SU;
363 else if (strcmp (alpha_fptm_string, "sui") == 0)
364 alpha_fptm = ALPHA_FPTM_SUI;
365 else
366 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
367 }
368
369 if (alpha_cpu_string)
370 {
371 for (i = 0; cpu_table [i].name; i++)
372 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
373 {
374 alpha_tune = alpha_cpu = cpu_table [i].processor;
375 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
376 target_flags |= cpu_table [i].flags;
377 break;
378 }
379 if (! cpu_table [i].name)
380 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
381 }
382
383 if (alpha_tune_string)
384 {
385 for (i = 0; cpu_table [i].name; i++)
386 if (! strcmp (alpha_tune_string, cpu_table [i].name))
387 {
388 alpha_tune = cpu_table [i].processor;
389 break;
390 }
391 if (! cpu_table [i].name)
392 error ("bad value %qs for -mcpu switch", alpha_tune_string);
393 }
394
395 /* Do some sanity checks on the above options. */
396
397 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
398 {
399 warning (0, "trap mode not supported on Unicos/Mk");
400 alpha_fptm = ALPHA_FPTM_N;
401 }
402
403 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
404 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
405 {
406 warning (0, "fp software completion requires -mtrap-precision=i");
407 alpha_tp = ALPHA_TP_INSN;
408 }
409
410 if (alpha_cpu == PROCESSOR_EV6)
411 {
412 /* Except for EV6 pass 1 (not released), we always have precise
413 arithmetic traps, which means we can do software completion
414 without minding trap shadows. */
415 alpha_tp = ALPHA_TP_PROG;
416 }
417
418 if (TARGET_FLOAT_VAX)
419 {
420 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
421 {
422 warning (0, "rounding mode not supported for VAX floats");
423 alpha_fprm = ALPHA_FPRM_NORM;
424 }
425 if (alpha_fptm == ALPHA_FPTM_SUI)
426 {
427 warning (0, "trap mode not supported for VAX floats");
428 alpha_fptm = ALPHA_FPTM_SU;
429 }
430 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
431 warning (0, "128-bit long double not supported for VAX floats");
432 target_flags &= ~MASK_LONG_DOUBLE_128;
433 }
434
435 {
436 char *end;
437 int lat;
438
439 if (!alpha_mlat_string)
440 alpha_mlat_string = "L1";
441
442 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
443 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
444 ;
445 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
446 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
447 && alpha_mlat_string[2] == '\0')
448 {
449 static int const cache_latency[][4] =
450 {
451 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
452 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
453 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
454 };
455
456 lat = alpha_mlat_string[1] - '0';
457 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
458 {
459 warning (0, "L%d cache latency unknown for %s",
460 lat, alpha_cpu_name[alpha_tune]);
461 lat = 3;
462 }
463 else
464 lat = cache_latency[alpha_tune][lat-1];
465 }
466 else if (! strcmp (alpha_mlat_string, "main"))
467 {
468 /* Most current memories have about 370ns latency. This is
469 a reasonable guess for a fast cpu. */
470 lat = 150;
471 }
472 else
473 {
474 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
475 lat = 3;
476 }
477
478 alpha_memory_latency = lat;
479 }
480
481 /* Default the definition of "small data" to 8 bytes. */
482 if (!g_switch_set)
483 g_switch_value = 8;
484
485 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
486 if (flag_pic == 1)
487 target_flags |= MASK_SMALL_DATA;
488 else if (flag_pic == 2)
489 target_flags &= ~MASK_SMALL_DATA;
490
491 /* Align labels and loops for optimal branching. */
492 /* ??? Kludge these by not doing anything if we don't optimize and also if
493 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
494 if (optimize > 0 && write_symbols != SDB_DEBUG)
495 {
496 if (align_loops <= 0)
497 align_loops = 16;
498 if (align_jumps <= 0)
499 align_jumps = 16;
500 }
501 if (align_functions <= 0)
502 align_functions = 16;
503
504 /* Acquire a unique set number for our register saves and restores. */
505 alpha_sr_alias_set = new_alias_set ();
506
507 /* Register variables and functions with the garbage collector. */
508
509 /* Set up function hooks. */
510 init_machine_status = alpha_init_machine_status;
511
512 /* Tell the compiler when we're using VAX floating point. */
513 if (TARGET_FLOAT_VAX)
514 {
515 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
516 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
517 REAL_MODE_FORMAT (TFmode) = NULL;
518 }
519 }
520
521 /* Return 1 if VALUE is a mask in which every byte is either all zeros or all ones.  */
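/* For example, 0x00ff00ff qualifies (every byte is 0x00 or 0xff),
   while 0x0000ff80 does not (the byte 0x80 is only partially set).  */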
522
523 int
524 zap_mask (HOST_WIDE_INT value)
525 {
526 int i;
527
528 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
529 i++, value >>= 8)
530 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
531 return 0;
532
533 return 1;
534 }
535
536 /* Return true if OP is valid for a particular TLS relocation.
537 We are already guaranteed that OP is a CONST. */
538
539 int
540 tls_symbolic_operand_1 (rtx op, int size, int unspec)
541 {
542 op = XEXP (op, 0);
543
544 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
545 return 0;
546 op = XVECEXP (op, 0, 0);
547
548 if (GET_CODE (op) != SYMBOL_REF)
549 return 0;
550
551 switch (SYMBOL_REF_TLS_MODEL (op))
552 {
553 case TLS_MODEL_LOCAL_DYNAMIC:
554 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
555 case TLS_MODEL_INITIAL_EXEC:
556 return unspec == UNSPEC_TPREL && size == 64;
557 case TLS_MODEL_LOCAL_EXEC:
558 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
559 default:
560 gcc_unreachable ();
561 }
562 }
563
564 /* Used by aligned_memory_operand and unaligned_memory_operand to
565 resolve what reload is going to do with OP if it's a register. */
566
567 rtx
568 resolve_reload_operand (rtx op)
569 {
570 if (reload_in_progress)
571 {
572 rtx tmp = op;
573 if (GET_CODE (tmp) == SUBREG)
574 tmp = SUBREG_REG (tmp);
575 if (GET_CODE (tmp) == REG
576 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
577 {
578 op = reg_equiv_memory_loc[REGNO (tmp)];
579 if (op == 0)
580 return 0;
581 }
582 }
583 return op;
584 }
585
586 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
587 the range defined for C in [I-P]. */
588
589 bool
590 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
591 {
592 switch (c)
593 {
594 case 'I':
595 /* An unsigned 8 bit constant. */
596 return (unsigned HOST_WIDE_INT) value < 0x100;
597 case 'J':
598 /* The constant zero. */
599 return value == 0;
600 case 'K':
601 /* A signed 16 bit constant. */
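/* The unsigned compare below is a branch-free test for
   -0x8000 <= value && value <= 0x7fff.  */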
602 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
603 case 'L':
604 /* A shifted signed 16 bit constant appropriate for LDAH. */
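/* E.g. 0x7fff0000, which a single LDAH materializes as 32767 << 16.  */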
605 return ((value & 0xffff) == 0
606 && ((value) >> 31 == -1 || value >> 31 == 0));
607 case 'M':
608 /* A constant mask that can be AND'ed using a ZAP insn.  */
609 return zap_mask (value);
610 case 'N':
611 /* A complemented unsigned 8 bit constant. */
612 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
613 case 'O':
614 /* A negated unsigned 8 bit constant. */
615 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
616 case 'P':
617 /* The constant 1, 2 or 3. */
618 return value == 1 || value == 2 || value == 3;
619
620 default:
621 return false;
622 }
623 }
624
625 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
626 matches for C in [GH]. */
627
628 bool
629 alpha_const_double_ok_for_letter_p (rtx value, int c)
630 {
631 switch (c)
632 {
633 case 'G':
634 /* The floating point zero constant. */
635 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
636 && value == CONST0_RTX (GET_MODE (value)));
637
638 case 'H':
639 /* A valid operand of a ZAP insn. */
640 return (GET_MODE (value) == VOIDmode
641 && zap_mask (CONST_DOUBLE_LOW (value))
642 && zap_mask (CONST_DOUBLE_HIGH (value)));
643
644 default:
645 return false;
646 }
647 }
648
649 /* Implements EXTRA_CONSTRAINT.  Return true if VALUE
650 matches for C in [QRSTUW].  */
651
652 bool
653 alpha_extra_constraint (rtx value, int c)
654 {
655 switch (c)
656 {
657 case 'Q':
658 return normal_memory_operand (value, VOIDmode);
659 case 'R':
660 return direct_call_operand (value, Pmode);
661 case 'S':
662 return (GET_CODE (value) == CONST_INT
663 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
664 case 'T':
665 return GET_CODE (value) == HIGH;
666 case 'U':
667 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
668 case 'W':
669 return (GET_CODE (value) == CONST_VECTOR
670 && value == CONST0_RTX (GET_MODE (value)));
671 default:
672 return false;
673 }
674 }
675
676 /* The set of scalar modes we support differs from the default check-what-c-supports
677 version in that sometimes TFmode is available even when long double
678 indicates only DFmode.  On Unicos/Mk, we have the situation that HImode
679 doesn't map to any C type, but of course we still support that.  */
680
681 static bool
682 alpha_scalar_mode_supported_p (enum machine_mode mode)
683 {
684 switch (mode)
685 {
686 case QImode:
687 case HImode:
688 case SImode:
689 case DImode:
690 case TImode: /* via optabs.c */
691 return true;
692
693 case SFmode:
694 case DFmode:
695 return true;
696
697 case TFmode:
698 return TARGET_HAS_XFLOATING_LIBS;
699
700 default:
701 return false;
702 }
703 }
704
705 /* Alpha implements a couple of integer vector mode operations when
706 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
707 which allows the vectorizer to operate on e.g. move instructions,
708 and lets expand_vector_operations do something useful.  */
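/* All three modes fit a single 64-bit integer register: eight QImode,
   four HImode, or two SImode elements.  */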
709
710 static bool
711 alpha_vector_mode_supported_p (enum machine_mode mode)
712 {
713 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
714 }
715
716 /* Return 1 if this function can directly return via $26. */
717
718 int
719 direct_return (void)
720 {
721 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
722 && reload_completed
723 && alpha_sa_size () == 0
724 && get_frame_size () == 0
725 && current_function_outgoing_args_size == 0
726 && current_function_pretend_args_size == 0);
727 }
728
729 /* Return the ADDR_VEC associated with a tablejump insn. */
730
731 rtx
732 alpha_tablejump_addr_vec (rtx insn)
733 {
734 rtx tmp;
735
736 tmp = JUMP_LABEL (insn);
737 if (!tmp)
738 return NULL_RTX;
739 tmp = NEXT_INSN (tmp);
740 if (!tmp)
741 return NULL_RTX;
742 if (GET_CODE (tmp) == JUMP_INSN
743 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
744 return PATTERN (tmp);
745 return NULL_RTX;
746 }
747
748 /* Return the label of the predicted edge, or const0_rtx if we don't know.  */
749
750 rtx
751 alpha_tablejump_best_label (rtx insn)
752 {
753 rtx jump_table = alpha_tablejump_addr_vec (insn);
754 rtx best_label = NULL_RTX;
755
756 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
757 there for edge frequency counts from profile data. */
758
759 if (jump_table)
760 {
761 int n_labels = XVECLEN (jump_table, 1);
762 int best_count = -1;
763 int i, j;
764
765 for (i = 0; i < n_labels; i++)
766 {
767 int count = 1;
768
769 for (j = i + 1; j < n_labels; j++)
770 if (XEXP (XVECEXP (jump_table, 1, i), 0)
771 == XEXP (XVECEXP (jump_table, 1, j), 0))
772 count++;
773
774 if (count > best_count)
775 best_count = count, best_label = XVECEXP (jump_table, 1, i);
776 }
777 }
778
779 return best_label ? best_label : const0_rtx;
780 }
781
782 /* Return the TLS model to use for SYMBOL. */
783
784 static enum tls_model
785 tls_symbolic_operand_type (rtx symbol)
786 {
787 enum tls_model model;
788
789 if (GET_CODE (symbol) != SYMBOL_REF)
790 return 0;
791 model = SYMBOL_REF_TLS_MODEL (symbol);
792
793 /* Local-exec with a 64-bit size is the same code as initial-exec. */
794 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
795 model = TLS_MODEL_INITIAL_EXEC;
796
797 return model;
798 }
799
800 /* Return true if the function DECL will share the same GP as any
801 function in the current unit of translation. */
802
803 static bool
804 decl_has_samegp (tree decl)
805 {
806 /* Functions that are not local can be overridden, and thus may
807 not share the same gp. */
808 if (!(*targetm.binds_local_p) (decl))
809 return false;
810
811 /* If -msmall-data is in effect, assume that there is only one GP
812 for the module, and so any local symbol has this property. We
813 need explicit relocations to be able to enforce this for symbols
814 not defined in this unit of translation, however. */
815 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
816 return true;
817
818 /* Functions that are not external are defined in this UoT. */
819 /* ??? Irritatingly, static functions not yet emitted are still
820 marked "external". Apply this to non-static functions only. */
821 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
822 }
823
824 /* Return true if EXP should be placed in the small data section. */
825
826 static bool
827 alpha_in_small_data_p (tree exp)
828 {
829 /* We want to merge strings, so we never consider them small data. */
830 if (TREE_CODE (exp) == STRING_CST)
831 return false;
832
833 /* Functions are never in the small data area. Duh. */
834 if (TREE_CODE (exp) == FUNCTION_DECL)
835 return false;
836
837 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
838 {
839 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
840 if (strcmp (section, ".sdata") == 0
841 || strcmp (section, ".sbss") == 0)
842 return true;
843 }
844 else
845 {
846 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
847
848 /* If this is an incomplete type with size 0, then we can't put it
849 in sdata because it might be too big when completed. */
850 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
851 return true;
852 }
853
854 return false;
855 }
856
857 #if TARGET_ABI_OPEN_VMS
858 static bool
859 alpha_linkage_symbol_p (const char *symname)
860 {
861 int symlen = strlen (symname);
862
863 if (symlen > 4)
864 return strcmp (&symname [symlen - 4], "..lk") == 0;
865
866 return false;
867 }
868
869 #define LINKAGE_SYMBOL_REF_P(X) \
870 ((GET_CODE (X) == SYMBOL_REF \
871 && alpha_linkage_symbol_p (XSTR (X, 0))) \
872 || (GET_CODE (X) == CONST \
873 && GET_CODE (XEXP (X, 0)) == PLUS \
874 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
875 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
876 #endif
877
878 /* legitimate_address_p recognizes an RTL expression that is a valid
879 memory address for an instruction. The MODE argument is the
880 machine mode for the MEM expression that wants to use this address.
881
882 For Alpha, we have either a constant address or the sum of a
883 register and a constant address, or just a register. For DImode,
884 any of those forms can be surrounded with an AND that clears the
885 low-order three bits; this is an "unaligned" access. */
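/* For example, (and (plus (reg) (const_int N)) (const_int -8)) is the
   address form used by ldq_u for an unaligned access.  */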
886
887 bool
888 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
889 {
890 /* If this is an ldq_u type address, discard the outer AND. */
891 if (mode == DImode
892 && GET_CODE (x) == AND
893 && GET_CODE (XEXP (x, 1)) == CONST_INT
894 && INTVAL (XEXP (x, 1)) == -8)
895 x = XEXP (x, 0);
896
897 /* Discard non-paradoxical subregs. */
898 if (GET_CODE (x) == SUBREG
899 && (GET_MODE_SIZE (GET_MODE (x))
900 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
901 x = SUBREG_REG (x);
902
903 /* Unadorned general registers are valid. */
904 if (REG_P (x)
905 && (strict
906 ? STRICT_REG_OK_FOR_BASE_P (x)
907 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
908 return true;
909
910 /* Constant addresses (i.e. +/- 32k) are valid. */
911 if (CONSTANT_ADDRESS_P (x))
912 return true;
913
914 #if TARGET_ABI_OPEN_VMS
915 if (LINKAGE_SYMBOL_REF_P (x))
916 return true;
917 #endif
918
919 /* Register plus a small constant offset is valid. */
920 if (GET_CODE (x) == PLUS)
921 {
922 rtx ofs = XEXP (x, 1);
923 x = XEXP (x, 0);
924
925 /* Discard non-paradoxical subregs. */
926 if (GET_CODE (x) == SUBREG
927 && (GET_MODE_SIZE (GET_MODE (x))
928 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
929 x = SUBREG_REG (x);
930
931 if (REG_P (x))
932 {
933 if (! strict
934 && NONSTRICT_REG_OK_FP_BASE_P (x)
935 && GET_CODE (ofs) == CONST_INT)
936 return true;
937 if ((strict
938 ? STRICT_REG_OK_FOR_BASE_P (x)
939 : NONSTRICT_REG_OK_FOR_BASE_P (x))
940 && CONSTANT_ADDRESS_P (ofs))
941 return true;
942 }
943 }
944
945 /* If we're managing explicit relocations, LO_SUM is valid, as
946 are small data symbols. */
947 else if (TARGET_EXPLICIT_RELOCS)
948 {
949 if (small_symbolic_operand (x, Pmode))
950 return true;
951
952 if (GET_CODE (x) == LO_SUM)
953 {
954 rtx ofs = XEXP (x, 1);
955 x = XEXP (x, 0);
956
957 /* Discard non-paradoxical subregs. */
958 if (GET_CODE (x) == SUBREG
959 && (GET_MODE_SIZE (GET_MODE (x))
960 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
961 x = SUBREG_REG (x);
962
963 /* Must have a valid base register. */
964 if (! (REG_P (x)
965 && (strict
966 ? STRICT_REG_OK_FOR_BASE_P (x)
967 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
968 return false;
969
970 /* The symbol must be local. */
971 if (local_symbolic_operand (ofs, Pmode)
972 || dtp32_symbolic_operand (ofs, Pmode)
973 || tp32_symbolic_operand (ofs, Pmode))
974 return true;
975 }
976 }
977
978 return false;
979 }
980
981 /* Build the SYMBOL_REF for __tls_get_addr. */
982
983 static GTY(()) rtx tls_get_addr_libfunc;
984
985 static rtx
986 get_tls_get_addr (void)
987 {
988 if (!tls_get_addr_libfunc)
989 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
990 return tls_get_addr_libfunc;
991 }
992
993 /* Try machine-dependent ways of modifying an illegitimate address
994 to be legitimate. If we find one, return the new, valid address. */
995
996 rtx
997 alpha_legitimize_address (rtx x, rtx scratch,
998 enum machine_mode mode ATTRIBUTE_UNUSED)
999 {
1000 HOST_WIDE_INT addend;
1001
1002 /* If the address is (plus reg const_int) and the CONST_INT is not a
1003 valid offset, compute the high part of the constant and add it to
1004 the register. Then our address is (plus temp low-part-const). */
1005 if (GET_CODE (x) == PLUS
1006 && GET_CODE (XEXP (x, 0)) == REG
1007 && GET_CODE (XEXP (x, 1)) == CONST_INT
1008 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1009 {
1010 addend = INTVAL (XEXP (x, 1));
1011 x = XEXP (x, 0);
1012 goto split_addend;
1013 }
1014
1015 /* If the address is (const (plus FOO const_int)), find the low-order
1016 part of the CONST_INT. Then load FOO plus any high-order part of the
1017 CONST_INT into a register. Our address is (plus reg low-part-const).
1018 This is done to reduce the number of GOT entries. */
1019 if (!no_new_pseudos
1020 && GET_CODE (x) == CONST
1021 && GET_CODE (XEXP (x, 0)) == PLUS
1022 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1023 {
1024 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1025 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1026 goto split_addend;
1027 }
1028
1029 /* If we have a (plus reg const), emit the load as in the previous case, then add
1030 the two registers, and finally generate (plus reg low-part-const) as
1031 our address. */
1032 if (!no_new_pseudos
1033 && GET_CODE (x) == PLUS
1034 && GET_CODE (XEXP (x, 0)) == REG
1035 && GET_CODE (XEXP (x, 1)) == CONST
1036 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1037 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1038 {
1039 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1040 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1041 XEXP (XEXP (XEXP (x, 1), 0), 0),
1042 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1043 goto split_addend;
1044 }
1045
1046 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1047 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1048 {
1049 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1050
1051 switch (tls_symbolic_operand_type (x))
1052 {
1053 case TLS_MODEL_NONE:
1054 break;
1055
1056 case TLS_MODEL_GLOBAL_DYNAMIC:
1057 start_sequence ();
1058
1059 r0 = gen_rtx_REG (Pmode, 0);
1060 r16 = gen_rtx_REG (Pmode, 16);
1061 tga = get_tls_get_addr ();
1062 dest = gen_reg_rtx (Pmode);
1063 seq = GEN_INT (alpha_next_sequence_number++);
1064
1065 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1066 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1067 insn = emit_call_insn (insn);
1068 CONST_OR_PURE_CALL_P (insn) = 1;
1069 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1070
1071 insn = get_insns ();
1072 end_sequence ();
1073
1074 emit_libcall_block (insn, dest, r0, x);
1075 return dest;
1076
1077 case TLS_MODEL_LOCAL_DYNAMIC:
1078 start_sequence ();
1079
1080 r0 = gen_rtx_REG (Pmode, 0);
1081 r16 = gen_rtx_REG (Pmode, 16);
1082 tga = get_tls_get_addr ();
1083 scratch = gen_reg_rtx (Pmode);
1084 seq = GEN_INT (alpha_next_sequence_number++);
1085
1086 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1087 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1088 insn = emit_call_insn (insn);
1089 CONST_OR_PURE_CALL_P (insn) = 1;
1090 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1091
1092 insn = get_insns ();
1093 end_sequence ();
1094
1095 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1096 UNSPEC_TLSLDM_CALL);
1097 emit_libcall_block (insn, scratch, r0, eqv);
1098
1099 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1100 eqv = gen_rtx_CONST (Pmode, eqv);
1101
1102 if (alpha_tls_size == 64)
1103 {
1104 dest = gen_reg_rtx (Pmode);
1105 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1106 emit_insn (gen_adddi3 (dest, dest, scratch));
1107 return dest;
1108 }
1109 if (alpha_tls_size == 32)
1110 {
1111 insn = gen_rtx_HIGH (Pmode, eqv);
1112 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1113 scratch = gen_reg_rtx (Pmode);
1114 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1115 }
1116 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1117
1118 case TLS_MODEL_INITIAL_EXEC:
1119 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1120 eqv = gen_rtx_CONST (Pmode, eqv);
1121 tp = gen_reg_rtx (Pmode);
1122 scratch = gen_reg_rtx (Pmode);
1123 dest = gen_reg_rtx (Pmode);
1124
1125 emit_insn (gen_load_tp (tp));
1126 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1127 emit_insn (gen_adddi3 (dest, tp, scratch));
1128 return dest;
1129
1130 case TLS_MODEL_LOCAL_EXEC:
1131 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1132 eqv = gen_rtx_CONST (Pmode, eqv);
1133 tp = gen_reg_rtx (Pmode);
1134
1135 emit_insn (gen_load_tp (tp));
1136 if (alpha_tls_size == 32)
1137 {
1138 insn = gen_rtx_HIGH (Pmode, eqv);
1139 insn = gen_rtx_PLUS (Pmode, tp, insn);
1140 tp = gen_reg_rtx (Pmode);
1141 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1142 }
1143 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1144
1145 default:
1146 gcc_unreachable ();
1147 }
1148
1149 if (local_symbolic_operand (x, Pmode))
1150 {
1151 if (small_symbolic_operand (x, Pmode))
1152 return x;
1153 else
1154 {
1155 if (!no_new_pseudos)
1156 scratch = gen_reg_rtx (Pmode);
1157 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1158 gen_rtx_HIGH (Pmode, x)));
1159 return gen_rtx_LO_SUM (Pmode, scratch, x);
1160 }
1161 }
1162 }
1163
1164 return NULL;
1165
1166 split_addend:
1167 {
1168 HOST_WIDE_INT low, high;
1169
1170 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1171 addend -= low;
1172 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1173 addend -= high;
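/* E.g. an original addend of 0x12348000 splits into low == -0x8000
   and high == 0x12350000: x + 0x12350000 - 0x8000 == x + 0x12348000.  */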
1174
1175 if (addend)
1176 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1177 (no_new_pseudos ? scratch : NULL_RTX),
1178 1, OPTAB_LIB_WIDEN);
1179 if (high)
1180 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1181 (no_new_pseudos ? scratch : NULL_RTX),
1182 1, OPTAB_LIB_WIDEN);
1183
1184 return plus_constant (x, low);
1185 }
1186 }
1187
1188 /* Primarily this is required for TLS symbols, but given that our move
1189 patterns *ought* to be able to handle any symbol at any time, we
1190 should never be spilling symbolic operands to the constant pool, ever. */
1191
1192 static bool
1193 alpha_cannot_force_const_mem (rtx x)
1194 {
1195 enum rtx_code code = GET_CODE (x);
1196 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1197 }
1198
1199 /* We do not allow indirect calls to be optimized into sibling calls, nor
1200 can we allow a call to a function with a different GP to be optimized
1201 into a sibcall. */
1202
1203 static bool
1204 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1205 {
1206 /* Can't do indirect tail calls, since we don't know if the target
1207 uses the same GP. */
1208 if (!decl)
1209 return false;
1210
1211 /* Otherwise, we can make a tail call if the target function shares
1212 the same GP. */
1213 return decl_has_samegp (decl);
1214 }
1215
1216 int
1217 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1218 {
1219 rtx x = *px;
1220
1221 /* Don't re-split. */
1222 if (GET_CODE (x) == LO_SUM)
1223 return -1;
1224
1225 return small_symbolic_operand (x, Pmode) != 0;
1226 }
1227
1228 static int
1229 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1230 {
1231 rtx x = *px;
1232
1233 /* Don't re-split. */
1234 if (GET_CODE (x) == LO_SUM)
1235 return -1;
1236
1237 if (small_symbolic_operand (x, Pmode))
1238 {
1239 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1240 *px = x;
1241 return -1;
1242 }
1243
1244 return 0;
1245 }
1246
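/* Rewrite every small symbolic operand within X as an explicit
   GP-relative LO_SUM, e.g. sym becomes (lo_sum $29 sym).  Works on a
   copy, so the original rtl is left intact.  */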
1247 rtx
1248 split_small_symbolic_operand (rtx x)
1249 {
1250 x = copy_insn (x);
1251 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1252 return x;
1253 }
1254
1255 /* Indicate that INSN cannot be duplicated. This is true for any insn
1256 that we've marked with gpdisp relocs, since those have to stay in
1257 1-1 correspondence with one another.
1258
1259 Technically we could copy them if we could set up a mapping from one
1260 sequence number to another, across the set of insns to be duplicated.
1261 This seems overly complicated and error-prone since interblock motion
1262 from sched-ebb could move one of the pair of insns to a different block.
1263
1264 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1265 then they'll be in a different block from their ldgp. Which could lead
1266 the bb reorder code to think that it would be ok to copy just the block
1267 containing the call and branch to the block containing the ldgp. */
1268
1269 static bool
1270 alpha_cannot_copy_insn_p (rtx insn)
1271 {
1272 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1273 return false;
1274 if (recog_memoized (insn) >= 0)
1275 return get_attr_cannot_copy (insn);
1276 else
1277 return false;
1278 }
1279
1280
1281 /* Try a machine-dependent way of reloading an illegitimate address
1282 operand. If we find one, push the reload and return the new rtx. */
1283
1284 rtx
1285 alpha_legitimize_reload_address (rtx x,
1286 enum machine_mode mode ATTRIBUTE_UNUSED,
1287 int opnum, int type,
1288 int ind_levels ATTRIBUTE_UNUSED)
1289 {
1290 /* We must recognize output that we have already generated ourselves. */
1291 if (GET_CODE (x) == PLUS
1292 && GET_CODE (XEXP (x, 0)) == PLUS
1293 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1294 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1295 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1296 {
1297 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1298 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1299 opnum, type);
1300 return x;
1301 }
1302
1303 /* We wish to handle large displacements off a base register by
1304 splitting the addend across an ldah and the mem insn. This
1305 cuts the number of extra insns needed from 3 to 1.  */
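/* E.g. (plus (reg) (const_int 0x12348000)) is rewritten as
   (plus (plus (reg) (const_int 0x12350000)) (const_int -0x8000)),
   and the inner PLUS is reloaded into a base register with an ldah.  */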
1306 if (GET_CODE (x) == PLUS
1307 && GET_CODE (XEXP (x, 0)) == REG
1308 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1309 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1310 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1311 {
1312 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1313 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1314 HOST_WIDE_INT high
1315 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1316
1317 /* Check for 32-bit overflow. */
1318 if (high + low != val)
1319 return NULL_RTX;
1320
1321 /* Reload the high part into a base reg; leave the low part
1322 in the mem directly. */
1323 x = gen_rtx_PLUS (GET_MODE (x),
1324 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1325 GEN_INT (high)),
1326 GEN_INT (low));
1327
1328 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1329 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1330 opnum, type);
1331 return x;
1332 }
1333
1334 return NULL_RTX;
1335 }
1336
1337 /* Compute a (partial) cost for rtx X. Return true if the complete
1338 cost has been computed, and false if subexpressions should be
1339 scanned. In either case, *TOTAL contains the cost result. */
1340
1341 static bool
1342 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1343 {
1344 enum machine_mode mode = GET_MODE (x);
1345 bool float_mode_p = FLOAT_MODE_P (mode);
1346 const struct alpha_rtx_cost_data *cost_data;
1347
1348 if (optimize_size)
1349 cost_data = &alpha_rtx_cost_size;
1350 else
1351 cost_data = &alpha_rtx_cost_data[alpha_tune];
1352
1353 switch (code)
1354 {
1355 case CONST_INT:
1356 /* If this is an 8-bit constant, return zero since it can be used
1357 nearly anywhere with no cost. If it is a valid operand for an
1358 ADD or AND, likewise return 0 if we know it will be used in that
1359 context. Otherwise, return 2 since it might be used there later.
1360 All other constants take at least two insns. */
1361 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1362 {
1363 *total = 0;
1364 return true;
1365 }
1366 /* FALLTHRU */
1367
1368 case CONST_DOUBLE:
1369 if (x == CONST0_RTX (mode))
1370 *total = 0;
1371 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1372 || (outer_code == AND && and_operand (x, VOIDmode)))
1373 *total = 0;
1374 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1375 *total = 2;
1376 else
1377 *total = COSTS_N_INSNS (2);
1378 return true;
1379
1380 case CONST:
1381 case SYMBOL_REF:
1382 case LABEL_REF:
1383 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1384 *total = COSTS_N_INSNS (outer_code != MEM);
1385 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1386 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1387 else if (tls_symbolic_operand_type (x))
1388 /* Estimate of cost for call_pal rduniq. */
1389 /* ??? How many insns do we emit here? More than one... */
1390 *total = COSTS_N_INSNS (15);
1391 else
1392 /* Otherwise we do a load from the GOT. */
1393 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1394 return true;
1395
1396 case HIGH:
1397 /* This is effectively an add_operand. */
1398 *total = 2;
1399 return true;
1400
1401 case PLUS:
1402 case MINUS:
1403 if (float_mode_p)
1404 *total = cost_data->fp_add;
1405 else if (GET_CODE (XEXP (x, 0)) == MULT
1406 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1407 {
1408 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1409 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1410 return true;
1411 }
1412 return false;
1413
1414 case MULT:
1415 if (float_mode_p)
1416 *total = cost_data->fp_mult;
1417 else if (mode == DImode)
1418 *total = cost_data->int_mult_di;
1419 else
1420 *total = cost_data->int_mult_si;
1421 return false;
1422
1423 case ASHIFT:
1424 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1425 && INTVAL (XEXP (x, 1)) <= 3)
1426 {
1427 *total = COSTS_N_INSNS (1);
1428 return false;
1429 }
1430 /* FALLTHRU */
1431
1432 case ASHIFTRT:
1433 case LSHIFTRT:
1434 *total = cost_data->int_shift;
1435 return false;
1436
1437 case IF_THEN_ELSE:
1438 if (float_mode_p)
1439 *total = cost_data->fp_add;
1440 else
1441 *total = cost_data->int_cmov;
1442 return false;
1443
1444 case DIV:
1445 case UDIV:
1446 case MOD:
1447 case UMOD:
1448 if (!float_mode_p)
1449 *total = cost_data->int_div;
1450 else if (mode == SFmode)
1451 *total = cost_data->fp_div_sf;
1452 else
1453 *total = cost_data->fp_div_df;
1454 return false;
1455
1456 case MEM:
1457 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1458 return true;
1459
1460 case NEG:
1461 if (! float_mode_p)
1462 {
1463 *total = COSTS_N_INSNS (1);
1464 return false;
1465 }
1466 /* FALLTHRU */
1467
1468 case ABS:
1469 if (! float_mode_p)
1470 {
1471 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1472 return false;
1473 }
1474 /* FALLTHRU */
1475
1476 case FLOAT:
1477 case UNSIGNED_FLOAT:
1478 case FIX:
1479 case UNSIGNED_FIX:
1480 case FLOAT_TRUNCATE:
1481 *total = cost_data->fp_add;
1482 return false;
1483
1484 case FLOAT_EXTEND:
1485 if (GET_CODE (XEXP (x, 0)) == MEM)
1486 *total = 0;
1487 else
1488 *total = cost_data->fp_add;
1489 return false;
1490
1491 default:
1492 return false;
1493 }
1494 }
1495
1496 /* REF is an alignable memory location.  Place an aligned SImode
1497 reference into *PALIGNED_MEM and the number of bits to shift into
1498 *PBITNUM.  Out-of-range stack slot addresses from reload are
1499 resolved here via find_replacement.  */
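/* E.g. an HImode reference at byte offset 6 from an aligned base yields
   the SImode word at offset 4 and *PBITNUM == 16 on a little-endian
   target.  */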
1500
1501 void
1502 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1503 {
1504 rtx base;
1505 HOST_WIDE_INT disp, offset;
1506
1507 gcc_assert (GET_CODE (ref) == MEM);
1508
1509 if (reload_in_progress
1510 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1511 {
1512 base = find_replacement (&XEXP (ref, 0));
1513 gcc_assert (memory_address_p (GET_MODE (ref), base));
1514 }
1515 else
1516 base = XEXP (ref, 0);
1517
1518 if (GET_CODE (base) == PLUS)
1519 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1520 else
1521 disp = 0;
1522
1523 /* Find the byte offset within an aligned word. If the memory itself is
1524 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1525 will have examined the base register and determined it is aligned, and
1526 thus displacements from it are naturally alignable. */
1527 if (MEM_ALIGN (ref) >= 32)
1528 offset = 0;
1529 else
1530 offset = disp & 3;
1531
1532 /* Access the entire aligned word. */
1533 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1534
1535 /* Convert the byte offset within the word to a bit offset. */
1536 if (WORDS_BIG_ENDIAN)
1537 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1538 else
1539 offset *= 8;
1540 *pbitnum = GEN_INT (offset);
1541 }
1542
1543 /* Similar, but just get the address. Handle the two reload cases.
1544 Add EXTRA_OFFSET to the address we return. */
1545
1546 rtx
1547 get_unaligned_address (rtx ref, int extra_offset)
1548 {
1549 rtx base;
1550 HOST_WIDE_INT offset = 0;
1551
1552 gcc_assert (GET_CODE (ref) == MEM);
1553
1554 if (reload_in_progress
1555 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1556 {
1557 base = find_replacement (&XEXP (ref, 0));
1558
1559 gcc_assert (memory_address_p (GET_MODE (ref), base));
1560 }
1561 else
1562 base = XEXP (ref, 0);
1563
1564 if (GET_CODE (base) == PLUS)
1565 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1566
1567 return plus_constant (base, offset + extra_offset);
1568 }
1569
1570 /* On the Alpha, all (non-symbolic) constants except zero go into
1571 a floating-point register via memory. Note that we cannot
1572 return anything that is not a subset of CLASS, and that some
1573 symbolic constants cannot be dropped to memory. */
1574
1575 enum reg_class
1576 alpha_preferred_reload_class (rtx x, enum reg_class class)
1577 {
1578 /* Zero is present in any register class. */
1579 if (x == CONST0_RTX (GET_MODE (x)))
1580 return class;
1581
1582 /* These sorts of constants we can easily drop to memory. */
1583 if (GET_CODE (x) == CONST_INT
1584 || GET_CODE (x) == CONST_DOUBLE
1585 || GET_CODE (x) == CONST_VECTOR)
1586 {
1587 if (class == FLOAT_REGS)
1588 return NO_REGS;
1589 if (class == ALL_REGS)
1590 return GENERAL_REGS;
1591 return class;
1592 }
1593
1594 /* All other kinds of constants should not (and in the case of HIGH
1595 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1596 secondary reload. */
1597 if (CONSTANT_P (x))
1598 return (class == ALL_REGS ? GENERAL_REGS : class);
1599
1600 return class;
1601 }
1602
1603 /* Loading and storing HImode or QImode values to and from memory
1604 usually requires a scratch register. The exceptions are loading
1605 QImode and HImode from an aligned address to a general register
1606 unless byte instructions are permitted.
1607
1608 We also cannot load an unaligned address or a paradoxical SUBREG
1609 into an FP register.
1610
1611 We also cannot do integral arithmetic into FP regs, as might result
1612 from register elimination into a DImode fp register. */
1613
1614 enum reg_class
1615 secondary_reload_class (enum reg_class class, enum machine_mode mode,
1616 rtx x, int in)
1617 {
1618 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1619 {
1620 if (GET_CODE (x) == MEM
1621 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1622 || (GET_CODE (x) == SUBREG
1623 && (GET_CODE (SUBREG_REG (x)) == MEM
1624 || (GET_CODE (SUBREG_REG (x)) == REG
1625 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1626 {
1627 if (!in || !aligned_memory_operand(x, mode))
1628 return GENERAL_REGS;
1629 }
1630 }
1631
1632 if (class == FLOAT_REGS)
1633 {
1634 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1635 return GENERAL_REGS;
1636
1637 if (GET_CODE (x) == SUBREG
1638 && (GET_MODE_SIZE (GET_MODE (x))
1639 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1640 return GENERAL_REGS;
1641
1642 if (in && INTEGRAL_MODE_P (mode)
1643 && ! (memory_operand (x, mode) || x == const0_rtx))
1644 return GENERAL_REGS;
1645 }
1646
1647 return NO_REGS;
1648 }
1649
1650 /* Subfunction of the following function. Update the flags of any MEM
1651 found in any part of X.  */
1652
1653 static int
1654 alpha_set_memflags_1 (rtx *xp, void *data)
1655 {
1656 rtx x = *xp, orig = (rtx) data;
1657
1658 if (GET_CODE (x) != MEM)
1659 return 0;
1660
1661 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1662 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1663 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1664 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1665 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1666
1667 /* Sadly, we cannot use alias sets because the extra aliasing
1668 produced by the AND interferes. Given that two-byte quantities
1669 are the only thing we would be able to differentiate anyway,
1670 there does not seem to be any point in convoluting the early
1671 out of the alias check. */
1672
1673 return -1;
1674 }
1675
1676 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1677 generated to perform a memory operation, look for any MEMs in either
1678 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1679 notrap, and readonly flags from REF into each of the MEMs found.  If REF is not
1680 a MEM, don't do anything. */
1681
1682 void
1683 alpha_set_memflags (rtx insn, rtx ref)
1684 {
1685 rtx *base_ptr;
1686
1687 if (GET_CODE (ref) != MEM)
1688 return;
1689
1690 /* This is only called from alpha.md, after having had something
1691 generated from one of the insn patterns. So if everything is
1692 zero, the pattern is already up-to-date. */
1693 if (!MEM_VOLATILE_P (ref)
1694 && !MEM_IN_STRUCT_P (ref)
1695 && !MEM_SCALAR_P (ref)
1696 && !MEM_NOTRAP_P (ref)
1697 && !MEM_READONLY_P (ref))
1698 return;
1699
1700 if (INSN_P (insn))
1701 base_ptr = &PATTERN (insn);
1702 else
1703 base_ptr = &insn;
1704 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1705 }
1706
1707 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1708 int, bool);
1709
1710 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1711 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1712 and return pc_rtx if successful. */
1713
1714 static rtx
1715 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1716 HOST_WIDE_INT c, int n, bool no_output)
1717 {
1718 HOST_WIDE_INT new;
1719 int i, bits;
1720 /* Use a pseudo if highly optimizing and still generating RTL. */
1721 rtx subtarget
1722 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1723 rtx temp, insn;
1724
1725 /* If this is a sign-extended 32-bit constant, we can do this in at most
1726 three insns, so do it if we have enough insns left. We always have
1727 a sign-extended 32-bit constant when compiling on a narrow machine. */
1728
1729 if (HOST_BITS_PER_WIDE_INT != 64
1730 || c >> 31 == -1 || c >> 31 == 0)
1731 {
1732 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1733 HOST_WIDE_INT tmp1 = c - low;
1734 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1735 HOST_WIDE_INT extra = 0;
1736
1737 /* If HIGH will be interpreted as negative but the constant is
1738 positive, we must adjust it to do two ldah insns.  */
1739
1740 if ((high & 0x8000) != 0 && c >= 0)
1741 {
1742 extra = 0x4000;
1743 tmp1 -= 0x40000000;
1744 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1745 }
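/* E.g. c == 0x7fff8000 initially gives high == -0x8000; the adjustment
   yields extra == 0x4000 and high == 0x4000, so the constant is built
   as (0x4000 << 16) + (0x4000 << 16) + (-0x8000).  */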
1746
1747 if (c == low || (low == 0 && extra == 0))
1748 {
1749 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1750 but that meant that we can't handle INT_MIN on 32-bit machines
1751 (like NT/Alpha), because we recurse indefinitely through
1752 emit_move_insn to gen_movdi. So instead, since we know exactly
1753 what we want, create it explicitly. */
1754
1755 if (no_output)
1756 return pc_rtx;
1757 if (target == NULL)
1758 target = gen_reg_rtx (mode);
1759 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1760 return target;
1761 }
1762 else if (n >= 2 + (extra != 0))
1763 {
1764 if (no_output)
1765 return pc_rtx;
1766 if (no_new_pseudos)
1767 {
1768 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1769 temp = target;
1770 }
1771 else
1772 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1773 subtarget, mode);
1774
1775 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1776 This means that if we go through expand_binop, we'll try to
1777 generate extensions, etc, which will require new pseudos, which
1778 will fail during some split phases. The SImode add patterns
1779 still exist, but are not named. So build the insns by hand. */
1780
1781 if (extra != 0)
1782 {
1783 if (! subtarget)
1784 subtarget = gen_reg_rtx (mode);
1785 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1786 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1787 emit_insn (insn);
1788 temp = subtarget;
1789 }
1790
1791 if (target == NULL)
1792 target = gen_reg_rtx (mode);
1793 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1794 insn = gen_rtx_SET (VOIDmode, target, insn);
1795 emit_insn (insn);
1796 return target;
1797 }
1798 }
1799
1800 /* If we couldn't do it that way, try some other methods. But if we have
1801 no instructions left, don't bother. Likewise, if this is SImode and
1802 we can't make pseudos, we can't do anything since the expand_binop
1803 and expand_unop calls will widen and try to make pseudos. */
1804
1805 if (n == 1 || (mode == SImode && no_new_pseudos))
1806 return 0;
1807
1808 /* Next, see if we can load a related constant and then shift and possibly
1809 negate it to get the constant we want.  Try this with each increasing
1810 number of insns.
1811
1812 for (i = 1; i < n; i++)
1813 {
1814 /* First, see if we can subtract off some low bits and be left
1815 with an easy load of the high bits.  */
1816
1817 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1818 if (new != 0)
1819 {
1820 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1821 if (temp)
1822 {
1823 if (no_output)
1824 return temp;
1825 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1826 target, 0, OPTAB_WIDEN);
1827 }
1828 }
1829
1830 /* Next try complementing. */
1831 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1832 if (temp)
1833 {
1834 if (no_output)
1835 return temp;
1836 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1837 }
1838
1839 /* Next try to form a constant and do a left shift. We can do this
1840 if some low-order bits are zero; the exact_log2 call below tells
1841 us that information. The bits we are shifting out could be any
1842 value, but here we'll just try the 0- and sign-extended forms of
1843 the constant. To try to increase the chance of having the same
1844 constant in more than one insn, start at the highest number of
1845 bits to shift, but try all possibilities in case a ZAPNOT will
1846 be useful. */
1847
1848 bits = exact_log2 (c & -c);
1849 if (bits > 0)
1850 for (; bits > 0; bits--)
1851 {
1852 new = c >> bits;
1853 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1854 if (!temp && c < 0)
1855 {
1856 new = (unsigned HOST_WIDE_INT)c >> bits;
1857 temp = alpha_emit_set_const (subtarget, mode, new,
1858 i, no_output);
1859 }
1860 if (temp)
1861 {
1862 if (no_output)
1863 return temp;
1864 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1865 target, 0, OPTAB_WIDEN);
1866 }
1867 }
1868
1869 /* Now try high-order zero bits. Here we try the shifted-in bits as
1870 all zero and all ones. Be careful to avoid shifting outside the
1871 mode and to avoid shifting outside the host wide int size. */
1872 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1873 confuse the recursive call and set all of the high 32 bits. */
1874
1875 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1876 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1877 if (bits > 0)
1878 for (; bits > 0; bits--)
1879 {
1880 new = c << bits;
1881 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1882 if (!temp)
1883 {
1884 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1885 temp = alpha_emit_set_const (subtarget, mode, new,
1886 i, no_output);
1887 }
1888 if (temp)
1889 {
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1893 target, 1, OPTAB_WIDEN);
1894 }
1895 }
1896
1897 /* Now try high-order 1 bits. We get that with a sign-extension.
1898 But one bit isn't enough here. Be careful to avoid shifting outside
1899 the mode and to avoid shifting outside the host wide int size. */
1900
1901 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1902 - floor_log2 (~ c) - 2);
1903 if (bits > 0)
1904 for (; bits > 0; bits--)
1905 {
1906 new = c << bits;
1907 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1908 if (!temp)
1909 {
1910 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1911 temp = alpha_emit_set_const (subtarget, mode, new,
1912 i, no_output);
1913 }
1914 if (temp)
1915 {
1916 if (no_output)
1917 return temp;
1918 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1919 target, 0, OPTAB_WIDEN);
1920 }
1921 }
1922 }
1923
1924 #if HOST_BITS_PER_WIDE_INT == 64
1925 /* Finally, see if can load a value into the target that is the same as the
1926 constant except that all bytes that are 0 are changed to be 0xff. If we
1927 can, then we can do a ZAPNOT to obtain the desired constant. */
1928
1929 new = c;
1930 for (i = 0; i < 64; i += 8)
1931 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1932 new |= (HOST_WIDE_INT) 0xff << i;
1933
1934 /* We are only called for SImode and DImode. If this is SImode, ensure that
1935 we are sign extended to a full word. */
1936
1937 if (mode == SImode)
1938 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1939
1940 if (new != c)
1941 {
1942 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1943 if (temp)
1944 {
1945 if (no_output)
1946 return temp;
1947 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1948 target, 0, OPTAB_WIDEN);
1949 }
1950 }
1951 #endif
1952
1953 return 0;
1954 }
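/* Illustrative aside (not part of the original sources): the LDA/LDAH
   decomposition used above can be checked with plain host C.  A minimal
   sketch, assuming a 64-bit "long long" and a constant C representable
   as a sign-extended 32-bit value; sext16() names the recurring
   ((x & 0xffff) ^ 0x8000) - 0x8000 idiom.  Guarded by #if 0 so it is
   never compiled into the backend.  */
#if 0
#include <assert.h>

static long long
sext16 (long long x)
{
  /* Sign-extend the low 16 bits of X.  */
  return ((x & 0xffff) ^ 0x8000) - 0x8000;
}

static void
check_lda_ldah_split (long long c)
{
  long long low, tmp1, high, extra;

  low = sext16 (c);            /* LDA displacement */
  tmp1 = c - low;
  high = sext16 (tmp1 >> 16);  /* LDAH displacement */
  extra = 0;

  /* If HIGH would be read as negative while C is positive, peel
     0x4000 off into a second LDAH, exactly as the code above does.  */
  if ((high & 0x8000) != 0 && c >= 0)
    {
      extra = 0x4000;
      tmp1 -= 0x40000000;
      high = sext16 (tmp1 >> 16);
    }

  /* LDAH high; (LDAH extra;) LDA low reconstructs C exactly.  */
  assert ((high << 16) + (extra << 16) + low == c);
}
#endif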
1955
1956 /* Try to output insns to set TARGET equal to the constant C if it can be
1957 done in less than N insns. Do all computations in MODE. Returns the place
1958 where the output has been placed if it can be done and the insns have been
1959 emitted. If it would take more than N insns, zero is returned and no
1960 insns are emitted. */
1961
1962 static rtx
1963 alpha_emit_set_const (rtx target, enum machine_mode mode,
1964 HOST_WIDE_INT c, int n, bool no_output)
1965 {
1966 enum machine_mode orig_mode = mode;
1967 rtx orig_target = target;
1968 rtx result = 0;
1969 int i;
1970
1971 /* If we can't make any pseudos, TARGET is an SImode hard register, and
1972 we can't load this constant in one insn, do this in DImode. */
1973 if (no_new_pseudos && mode == SImode
1974 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1975 {
1976 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1977 if (result)
1978 return result;
1979
1980 target = no_output ? NULL : gen_lowpart (DImode, target);
1981 mode = DImode;
1982 }
1983 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1984 {
1985 target = no_output ? NULL : gen_lowpart (DImode, target);
1986 mode = DImode;
1987 }
1988
1989 /* Try 1 insn, then 2, then up to N. */
1990 for (i = 1; i <= n; i++)
1991 {
1992 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1993 if (result)
1994 {
1995 rtx insn, set;
1996
1997 if (no_output)
1998 return result;
1999
2000 insn = get_last_insn ();
2001 set = single_set (insn);
2002 if (! CONSTANT_P (SET_SRC (set)))
2003 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2004 break;
2005 }
2006 }
2007
2008 /* Allow for the case where we changed the mode of TARGET. */
2009 if (result)
2010 {
2011 if (result == target)
2012 result = orig_target;
2013 else if (mode != orig_mode)
2014 result = gen_lowpart (orig_mode, result);
2015 }
2016
2017 return result;
2018 }
2019
2020 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2021 fall back to a straightforward decomposition. We do this to avoid
2022 exponential run times encountered when looking for longer sequences
2023 with alpha_emit_set_const. */
2024
2025 static rtx
2026 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2027 {
2028 HOST_WIDE_INT d1, d2, d3, d4;
2029
2030 /* Decompose the entire word. */
2031 #if HOST_BITS_PER_WIDE_INT >= 64
2032 gcc_assert (c2 == -(c1 < 0));
2033 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2034 c1 -= d1;
2035 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2036 c1 = (c1 - d2) >> 32;
2037 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2038 c1 -= d3;
2039 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2040 gcc_assert (c1 == d4);
2041 #else
2042 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2043 c1 -= d1;
2044 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2045 gcc_assert (c1 == d2);
2046 c2 += (d2 < 0);
2047 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2048 c2 -= d3;
2049 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2050 gcc_assert (c2 == d4);
2051 #endif
2052
2053 /* Construct the high word. */
2054 if (d4)
2055 {
2056 emit_move_insn (target, GEN_INT (d4));
2057 if (d3)
2058 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2059 }
2060 else
2061 emit_move_insn (target, GEN_INT (d3));
2062
2063 /* Shift it into place. */
2064 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2065
2066 /* Add in the low bits. */
2067 if (d2)
2068 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2069 if (d1)
2070 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2071
2072 return target;
2073 }
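/* Illustrative aside (not part of the original sources): the d1..d4
   decomposition above, restated as plain host C for the 64-bit-host
   branch.  A minimal sketch; it assumes "long long" is 64 bits and that
   right shifts of negative values are arithmetic, as the code above also
   assumes, and ignores the usual two's-complement wraparound on the
   final rebuild.  Guarded by #if 0, never compiled.  */
#if 0
#include <assert.h>

static void
check_long_const_split (long long c)
{
  long long x, d1, d2, d3, d4;

  x = c;
  d1 = ((x & 0xffff) ^ 0x8000) - 0x8000;              /* low LDA */
  x -= d1;
  d2 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000;  /* low LDAH */
  x = (x - d2) >> 32;
  d3 = ((x & 0xffff) ^ 0x8000) - 0x8000;              /* high LDA */
  x -= d3;
  d4 = ((x & 0xffffffff) ^ 0x80000000) - 0x80000000;  /* high LDAH */
  assert (x == d4);

  /* Build the high word, shift it by 32, add in the low pieces.  */
  assert ((((d4 + d3) << 32) + d2 + d1) == c);
}
#endif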
2074
2075 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2076 the low 64 bits. */
2077
2078 static void
2079 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2080 {
2081 HOST_WIDE_INT i0, i1;
2082
2083 if (GET_CODE (x) == CONST_VECTOR)
2084 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2085
2087 if (GET_CODE (x) == CONST_INT)
2088 {
2089 i0 = INTVAL (x);
2090 i1 = -(i0 < 0);
2091 }
2092 else if (HOST_BITS_PER_WIDE_INT >= 64)
2093 {
2094 i0 = CONST_DOUBLE_LOW (x);
2095 i1 = -(i0 < 0);
2096 }
2097 else
2098 {
2099 i0 = CONST_DOUBLE_LOW (x);
2100 i1 = CONST_DOUBLE_HIGH (x);
2101 }
2102
2103 *p0 = i0;
2104 *p1 = i1;
2105 }
2106
2107 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2108 are willing to load the value into a register via a move pattern.
2109 Normally this is all symbolic constants, integral constants that
2110 take three or fewer instructions, and floating-point zero. */
2111
2112 bool
2113 alpha_legitimate_constant_p (rtx x)
2114 {
2115 enum machine_mode mode = GET_MODE (x);
2116 HOST_WIDE_INT i0, i1;
2117
2118 switch (GET_CODE (x))
2119 {
2120 case CONST:
2121 case LABEL_REF:
2122 case HIGH:
2123 return true;
2124
2125 case SYMBOL_REF:
2126 /* TLS symbols are never valid. */
2127 return SYMBOL_REF_TLS_MODEL (x) == 0;
2128
2129 case CONST_DOUBLE:
2130 if (x == CONST0_RTX (mode))
2131 return true;
2132 if (FLOAT_MODE_P (mode))
2133 return false;
2134 goto do_integer;
2135
2136 case CONST_VECTOR:
2137 if (x == CONST0_RTX (mode))
2138 return true;
2139 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2140 return false;
2141 if (GET_MODE_SIZE (mode) != 8)
2142 return false;
2143 goto do_integer;
2144
2145 case CONST_INT:
2146 do_integer:
2147 if (TARGET_BUILD_CONSTANTS)
2148 return true;
2149 alpha_extract_integer (x, &i0, &i1);
2150 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2151 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2152 return false;
2153
2154 default:
2155 return false;
2156 }
2157 }
2158
2159 /* Operand 1 is known to be a constant, and should require more than one
2160 instruction to load. Emit that multi-part load. */
2161
2162 bool
2163 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2164 {
2165 HOST_WIDE_INT i0, i1;
2166 rtx temp = NULL_RTX;
2167
2168 alpha_extract_integer (operands[1], &i0, &i1);
2169
2170 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2171 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2172
2173 if (!temp && TARGET_BUILD_CONSTANTS)
2174 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2175
2176 if (temp)
2177 {
2178 if (!rtx_equal_p (operands[0], temp))
2179 emit_move_insn (operands[0], temp);
2180 return true;
2181 }
2182
2183 return false;
2184 }
2185
2186 /* Expand a move instruction; return true if all work is done.
2187 We don't handle non-bwx subword loads here. */
2188
2189 bool
2190 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2191 {
2192 /* If the output is not a register, the input must be. */
2193 if (GET_CODE (operands[0]) == MEM
2194 && ! reg_or_0_operand (operands[1], mode))
2195 operands[1] = force_reg (mode, operands[1]);
2196
2197 /* Allow legitimize_address to perform some simplifications. */
2198 if (mode == Pmode && symbolic_operand (operands[1], mode))
2199 {
2200 rtx tmp;
2201
2202 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2203 if (tmp)
2204 {
2205 if (tmp == operands[0])
2206 return true;
2207 operands[1] = tmp;
2208 return false;
2209 }
2210 }
2211
2212 /* Early out for non-constants and valid constants. */
2213 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2214 return false;
2215
2216 /* Split large integers. */
2217 if (GET_CODE (operands[1]) == CONST_INT
2218 || GET_CODE (operands[1]) == CONST_DOUBLE
2219 || GET_CODE (operands[1]) == CONST_VECTOR)
2220 {
2221 if (alpha_split_const_mov (mode, operands))
2222 return true;
2223 }
2224
2225 /* Otherwise we've nothing left but to drop the thing to memory. */
2226 operands[1] = force_const_mem (mode, operands[1]);
2227 if (reload_in_progress)
2228 {
2229 emit_move_insn (operands[0], XEXP (operands[1], 0));
2230 operands[1] = copy_rtx (operands[1]);
2231 XEXP (operands[1], 0) = operands[0];
2232 }
2233 else
2234 operands[1] = validize_mem (operands[1]);
2235 return false;
2236 }
2237
2238 /* Expand a non-bwx QImode or HImode move instruction;
2239 return true if all work is done. */
2240
2241 bool
2242 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2243 {
2244 /* If the output is not a register, the input must be. */
2245 if (GET_CODE (operands[0]) == MEM)
2246 operands[1] = force_reg (mode, operands[1]);
2247
2248 /* Handle four memory cases, unaligned and aligned for either the input
2249 or the output. The only case where we can be called during reload is
2250 for aligned loads; all other cases require temporaries. */
2251
2252 if (GET_CODE (operands[1]) == MEM
2253 || (GET_CODE (operands[1]) == SUBREG
2254 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2255 || (reload_in_progress && GET_CODE (operands[1]) == REG
2256 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2257 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2258 && GET_CODE (SUBREG_REG (operands[1])) == REG
2259 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2260 {
2261 if (aligned_memory_operand (operands[1], mode))
2262 {
2263 if (reload_in_progress)
2264 {
2265 emit_insn ((mode == QImode
2266 ? gen_reload_inqi_help
2267 : gen_reload_inhi_help)
2268 (operands[0], operands[1],
2269 gen_rtx_REG (SImode, REGNO (operands[0]))));
2270 }
2271 else
2272 {
2273 rtx aligned_mem, bitnum;
2274 rtx scratch = gen_reg_rtx (SImode);
2275 rtx subtarget;
2276 bool copyout;
2277
2278 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2279
2280 subtarget = operands[0];
2281 if (GET_CODE (subtarget) == REG)
2282 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2283 else
2284 subtarget = gen_reg_rtx (DImode), copyout = true;
2285
2286 emit_insn ((mode == QImode
2287 ? gen_aligned_loadqi
2288 : gen_aligned_loadhi)
2289 (subtarget, aligned_mem, bitnum, scratch));
2290
2291 if (copyout)
2292 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2293 }
2294 }
2295 else
2296 {
2297 /* Don't pass these as parameters since that makes the generated
2298 code depend on parameter evaluation order, which will cause
2299 bootstrap failures. */
2300
2301 rtx temp1, temp2, seq, subtarget;
2302 bool copyout;
2303
2304 temp1 = gen_reg_rtx (DImode);
2305 temp2 = gen_reg_rtx (DImode);
2306
2307 subtarget = operands[0];
2308 if (GET_CODE (subtarget) == REG)
2309 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2310 else
2311 subtarget = gen_reg_rtx (DImode), copyout = true;
2312
2313 seq = ((mode == QImode
2314 ? gen_unaligned_loadqi
2315 : gen_unaligned_loadhi)
2316 (subtarget, get_unaligned_address (operands[1], 0),
2317 temp1, temp2));
2318 alpha_set_memflags (seq, operands[1]);
2319 emit_insn (seq);
2320
2321 if (copyout)
2322 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2323 }
2324 return true;
2325 }
2326
2327 if (GET_CODE (operands[0]) == MEM
2328 || (GET_CODE (operands[0]) == SUBREG
2329 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2330 || (reload_in_progress && GET_CODE (operands[0]) == REG
2331 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2332 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2333 && GET_CODE (SUBREG_REG (operands[0])) == REG
2334 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2335 {
2336 if (aligned_memory_operand (operands[0], mode))
2337 {
2338 rtx aligned_mem, bitnum;
2339 rtx temp1 = gen_reg_rtx (SImode);
2340 rtx temp2 = gen_reg_rtx (SImode);
2341
2342 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2343
2344 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2345 temp1, temp2));
2346 }
2347 else
2348 {
2349 rtx temp1 = gen_reg_rtx (DImode);
2350 rtx temp2 = gen_reg_rtx (DImode);
2351 rtx temp3 = gen_reg_rtx (DImode);
2352 rtx seq = ((mode == QImode
2353 ? gen_unaligned_storeqi
2354 : gen_unaligned_storehi)
2355 (get_unaligned_address (operands[0], 0),
2356 operands[1], temp1, temp2, temp3));
2357
2358 alpha_set_memflags (seq, operands[0]);
2359 emit_insn (seq);
2360 }
2361 return true;
2362 }
2363
2364 return false;
2365 }
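/* Illustrative aside (not part of the original sources): what the
   aligned non-BWX byte load above (gen_aligned_loadqi) amounts to, as
   plain host C with hypothetical names.  Without BWX there is no ldbu,
   so the enclosing aligned 32-bit word is fetched and the byte extracted
   (ldl + extbl); BYTE_OFS corresponds to the BITNUM operand above
   divided by 8, little-endian.  Guarded by #if 0, never compiled.  */
#if 0
#include <stdint.h>

static uint8_t
aligned_nobwx_byte_load (const uint32_t *word, unsigned byte_ofs)
{
  /* Shift the wanted byte down to bit 0 and truncate.  */
  return (uint8_t) (*word >> (byte_ofs * 8));
}
#endif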
2366
2367 /* Implement the movmisalign patterns. One of the operands is a memory
2368 that is not naturally aligned. Emit instructions to load it. */
2369
2370 void
2371 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2372 {
2373 /* Honor misaligned loads, for those we promised to do so. */
2374 if (MEM_P (operands[1]))
2375 {
2376 rtx tmp;
2377
2378 if (register_operand (operands[0], mode))
2379 tmp = operands[0];
2380 else
2381 tmp = gen_reg_rtx (mode);
2382
2383 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2384 if (tmp != operands[0])
2385 emit_move_insn (operands[0], tmp);
2386 }
2387 else if (MEM_P (operands[0]))
2388 {
2389 if (!reg_or_0_operand (operands[1], mode))
2390 operands[1] = force_reg (mode, operands[1]);
2391 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2392 }
2393 else
2394 gcc_unreachable ();
2395 }
2396
2397 /* Generate an unsigned DImode to FP conversion. This is the same code
2398 optabs would emit if we didn't have TFmode patterns.
2399
2400 For SFmode, this is the only construction I've found that can pass
2401 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2402 intermediates will work, because you'll get intermediate rounding
2403 that ruins the end result. Some of this could be fixed by turning
2404 on round-to-positive-infinity, but that requires diddling the fpsr,
2405 which kills performance. I tried turning this around and converting
2406 to a negative number, so that I could turn on /m, but either I did
2407 it wrong or there's something else, because I wound up with the exact
2408 same single-bit error. There is a branch-less form of this same code:
2409
2410 srl $16,1,$1
2411 and $16,1,$2
2412 cmplt $16,0,$3
2413 or $1,$2,$2
2414 cmovge $16,$16,$2
2415 itoft $3,$f10
2416 itoft $2,$f11
2417 cvtqs $f11,$f11
2418 adds $f11,$f11,$f0
2419 fcmoveq $f10,$f11,$f0
2420
2421 I'm not using it because it's the same number of instructions as
2422 this branch-full form, and it has more serialized long latency
2423 instructions on the critical path.
2424
2425 For DFmode, we can avoid rounding errors by breaking up the word
2426 into two pieces, converting them separately, and adding them back:
2427
2428 LC0: .long 0,0x5f800000
2429
2430 itoft $16,$f11
2431 lda $2,LC0
2432 cmplt $16,0,$1
2433 cpyse $f11,$f31,$f10
2434 cpyse $f31,$f11,$f11
2435 s4addq $1,$2,$1
2436 lds $f12,0($1)
2437 cvtqt $f10,$f10
2438 cvtqt $f11,$f11
2439 addt $f12,$f10,$f0
2440 addt $f0,$f11,$f0
2441
2442 This doesn't seem to be a clear-cut win over the optabs form.
2443 It probably all depends on the distribution of numbers being
2444 converted -- in the optabs form, all but high-bit-set has a
2445 much lower minimum execution time. */
2446
2447 void
2448 alpha_emit_floatuns (rtx operands[2])
2449 {
2450 rtx neglab, donelab, i0, i1, f0, in, out;
2451 enum machine_mode mode;
2452
2453 out = operands[0];
2454 in = force_reg (DImode, operands[1]);
2455 mode = GET_MODE (out);
2456 neglab = gen_label_rtx ();
2457 donelab = gen_label_rtx ();
2458 i0 = gen_reg_rtx (DImode);
2459 i1 = gen_reg_rtx (DImode);
2460 f0 = gen_reg_rtx (mode);
2461
2462 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2463
2464 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2465 emit_jump_insn (gen_jump (donelab));
2466 emit_barrier ();
2467
2468 emit_label (neglab);
2469
2470 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2471 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2472 emit_insn (gen_iordi3 (i0, i0, i1));
2473 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2474 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2475
2476 emit_label (donelab);
2477 }
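/* Illustrative aside (not part of the original sources): the branchy
   sequence emitted above, restated as plain host C.  A minimal sketch
   assuming IEEE single precision and 64-bit integers; the interesting
   case is a set high bit, where we halve the value, fold the shifted-out
   bit back in as a sticky rounding bit, convert, and double.  Guarded by
   #if 0, never compiled.  */
#if 0
#include <stdint.h>

static float
floatuns_sketch (uint64_t in)
{
  uint64_t i0;
  float f0;

  if ((int64_t) in >= 0)
    return (float) (int64_t) in;   /* plain signed CVTQS */

  i0 = (in >> 1) | (in & 1);       /* srl + and + or: keep a sticky bit */
  f0 = (float) (int64_t) i0;       /* signed CVTQS, operand now positive */
  return f0 + f0;                  /* ADDS doubles it back */
}
#endif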
2478
2479 /* Generate the comparison for a conditional branch. */
2480
2481 rtx
2482 alpha_emit_conditional_branch (enum rtx_code code)
2483 {
2484 enum rtx_code cmp_code, branch_code;
2485 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2486 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2487 rtx tem;
2488
2489 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2490 {
2491 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2492 op1 = const0_rtx;
2493 alpha_compare.fp_p = 0;
2494 }
2495
2496 /* The general case: fold the comparison code to the types of compares
2497 that we have, choosing the branch as necessary. */
2498 switch (code)
2499 {
2500 case EQ: case LE: case LT: case LEU: case LTU:
2501 case UNORDERED:
2502 /* We have these compares: */
2503 cmp_code = code, branch_code = NE;
2504 break;
2505
2506 case NE:
2507 case ORDERED:
2508 /* These must be reversed. */
2509 cmp_code = reverse_condition (code), branch_code = EQ;
2510 break;
2511
2512 case GE: case GT: case GEU: case GTU:
2513 /* For FP, we swap them, for INT, we reverse them. */
2514 if (alpha_compare.fp_p)
2515 {
2516 cmp_code = swap_condition (code);
2517 branch_code = NE;
2518 tem = op0, op0 = op1, op1 = tem;
2519 }
2520 else
2521 {
2522 cmp_code = reverse_condition (code);
2523 branch_code = EQ;
2524 }
2525 break;
2526
2527 default:
2528 gcc_unreachable ();
2529 }
2530
2531 if (alpha_compare.fp_p)
2532 {
2533 cmp_mode = DFmode;
2534 if (flag_unsafe_math_optimizations)
2535 {
2536 /* When we are not as concerned about non-finite values, and we
2537 are comparing against zero, we can branch directly. */
2538 if (op1 == CONST0_RTX (DFmode))
2539 cmp_code = UNKNOWN, branch_code = code;
2540 else if (op0 == CONST0_RTX (DFmode))
2541 {
2542 /* Undo the swap we probably did just above. */
2543 tem = op0, op0 = op1, op1 = tem;
2544 branch_code = swap_condition (cmp_code);
2545 cmp_code = UNKNOWN;
2546 }
2547 }
2548 else
2549 {
2550 /* ??? We mark the branch mode to be CCmode to prevent the
2551 compare and branch from being combined, since the compare
2552 insn follows IEEE rules that the branch does not. */
2553 branch_mode = CCmode;
2554 }
2555 }
2556 else
2557 {
2558 cmp_mode = DImode;
2559
2560 /* The following optimizations are only for signed compares. */
2561 if (code != LEU && code != LTU && code != GEU && code != GTU)
2562 {
2563 /* Whee. Compare and branch against 0 directly. */
2564 if (op1 == const0_rtx)
2565 cmp_code = UNKNOWN, branch_code = code;
2566
2567 /* If the constant doesn't fit into an immediate, but can
2568 be generated by lda/ldah, we adjust the argument and
2569 compare against zero, so we can use beq/bne directly. */
2570 /* ??? Don't do this when comparing against symbols, otherwise
2571 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2572 be declared false out of hand (at least for non-weak). */
2573 else if (GET_CODE (op1) == CONST_INT
2574 && (code == EQ || code == NE)
2575 && !(symbolic_operand (op0, VOIDmode)
2576 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2577 {
2578 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2579
2580 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2581 && (CONST_OK_FOR_LETTER_P (n, 'K')
2582 || CONST_OK_FOR_LETTER_P (n, 'L')))
2583 {
2584 cmp_code = PLUS, branch_code = code;
2585 op1 = GEN_INT (n);
2586 }
2587 }
2588 }
2589
2590 if (!reg_or_0_operand (op0, DImode))
2591 op0 = force_reg (DImode, op0);
2592 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2593 op1 = force_reg (DImode, op1);
2594 }
2595
2596 /* Emit an initial compare instruction, if necessary. */
2597 tem = op0;
2598 if (cmp_code != UNKNOWN)
2599 {
2600 tem = gen_reg_rtx (cmp_mode);
2601 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2602 }
2603
2604 /* Zero the operands. */
2605 memset (&alpha_compare, 0, sizeof (alpha_compare));
2606
2607 /* Return the branch comparison. */
2608 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2609 }
2610
2611 /* Certain simplifications can be done to make invalid setcc operations
2612 valid. Return the final comparison, or NULL if we can't work. */
2613
2614 rtx
2615 alpha_emit_setcc (enum rtx_code code)
2616 {
2617 enum rtx_code cmp_code;
2618 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2619 int fp_p = alpha_compare.fp_p;
2620 rtx tmp;
2621
2622 /* Zero the operands. */
2623 memset (&alpha_compare, 0, sizeof (alpha_compare));
2624
2625 if (fp_p && GET_MODE (op0) == TFmode)
2626 {
2627 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2628 op1 = const0_rtx;
2629 fp_p = 0;
2630 }
2631
2632 if (fp_p && !TARGET_FIX)
2633 return NULL_RTX;
2634
2635 /* The general case: fold the comparison code to the types of compares
2636 that we have, choosing the branch as necessary. */
2637
2638 cmp_code = UNKNOWN;
2639 switch (code)
2640 {
2641 case EQ: case LE: case LT: case LEU: case LTU:
2642 case UNORDERED:
2643 /* We have these compares. */
2644 if (fp_p)
2645 cmp_code = code, code = NE;
2646 break;
2647
2648 case NE:
2649 if (!fp_p && op1 == const0_rtx)
2650 break;
2651 /* FALLTHRU */
2652
2653 case ORDERED:
2654 cmp_code = reverse_condition (code);
2655 code = EQ;
2656 break;
2657
2658 case GE: case GT: case GEU: case GTU:
2659 /* These normally need swapping, but for integer zero we have
2660 special patterns that recognize swapped operands. */
2661 if (!fp_p && op1 == const0_rtx)
2662 break;
2663 code = swap_condition (code);
2664 if (fp_p)
2665 cmp_code = code, code = NE;
2666 tmp = op0, op0 = op1, op1 = tmp;
2667 break;
2668
2669 default:
2670 gcc_unreachable ();
2671 }
2672
2673 if (!fp_p)
2674 {
2675 if (!register_operand (op0, DImode))
2676 op0 = force_reg (DImode, op0);
2677 if (!reg_or_8bit_operand (op1, DImode))
2678 op1 = force_reg (DImode, op1);
2679 }
2680
2681 /* Emit an initial compare instruction, if necessary. */
2682 if (cmp_code != UNKNOWN)
2683 {
2684 enum machine_mode mode = fp_p ? DFmode : DImode;
2685
2686 tmp = gen_reg_rtx (mode);
2687 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2688 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2689
2690 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2691 op1 = const0_rtx;
2692 }
2693
2694 /* Return the setcc comparison. */
2695 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2696 }
2697
2698
2699 /* Rewrite a comparison against zero CMP of the form
2700 (CODE (cc0) (const_int 0)) so it can be written validly in
2701 a conditional move (if_then_else CMP ...).
2702 If both of the operands that set cc0 are nonzero we must emit
2703 an insn to perform the compare (it can't be done within
2704 the conditional move). */
2705
2706 rtx
2707 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2708 {
2709 enum rtx_code code = GET_CODE (cmp);
2710 enum rtx_code cmov_code = NE;
2711 rtx op0 = alpha_compare.op0;
2712 rtx op1 = alpha_compare.op1;
2713 int fp_p = alpha_compare.fp_p;
2714 enum machine_mode cmp_mode
2715 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2716 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2717 enum machine_mode cmov_mode = VOIDmode;
2718 int local_fast_math = flag_unsafe_math_optimizations;
2719 rtx tem;
2720
2721 /* Zero the operands. */
2722 memset (&alpha_compare, 0, sizeof (alpha_compare));
2723
2724 if (fp_p != FLOAT_MODE_P (mode))
2725 {
2726 enum rtx_code cmp_code;
2727
2728 if (! TARGET_FIX)
2729 return 0;
2730
2731 /* If we have fp<->int register move instructions, do a cmov by
2732 performing the comparison in fp registers, and move the
2733 zero/nonzero value to integer registers, where we can then
2734 use a normal cmov, or vice-versa. */
2735
2736 switch (code)
2737 {
2738 case EQ: case LE: case LT: case LEU: case LTU:
2739 /* We have these compares. */
2740 cmp_code = code, code = NE;
2741 break;
2742
2743 case NE:
2744 /* This must be reversed. */
2745 cmp_code = EQ, code = EQ;
2746 break;
2747
2748 case GE: case GT: case GEU: case GTU:
2749 /* These normally need swapping, but for integer zero we have
2750 special patterns that recognize swapped operands. */
2751 if (!fp_p && op1 == const0_rtx)
2752 cmp_code = code, code = NE;
2753 else
2754 {
2755 cmp_code = swap_condition (code);
2756 code = NE;
2757 tem = op0, op0 = op1, op1 = tem;
2758 }
2759 break;
2760
2761 default:
2762 gcc_unreachable ();
2763 }
2764
2765 tem = gen_reg_rtx (cmp_op_mode);
2766 emit_insn (gen_rtx_SET (VOIDmode, tem,
2767 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2768 op0, op1)));
2769
2770 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2771 op0 = gen_lowpart (cmp_op_mode, tem);
2772 op1 = CONST0_RTX (cmp_op_mode);
2773 fp_p = !fp_p;
2774 local_fast_math = 1;
2775 }
2776
2777 /* We may be able to use a conditional move directly.
2778 This avoids emitting spurious compares. */
2779 if (signed_comparison_operator (cmp, VOIDmode)
2780 && (!fp_p || local_fast_math)
2781 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2782 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2783
2784 /* We can't put the comparison inside the conditional move;
2785 emit a compare instruction and put that inside the
2786 conditional move. Make sure we emit only comparisons we have;
2787 swap or reverse as necessary. */
2788
2789 if (no_new_pseudos)
2790 return NULL_RTX;
2791
2792 switch (code)
2793 {
2794 case EQ: case LE: case LT: case LEU: case LTU:
2795 /* We have these compares: */
2796 break;
2797
2798 case NE:
2799 /* This must be reversed. */
2800 code = reverse_condition (code);
2801 cmov_code = EQ;
2802 break;
2803
2804 case GE: case GT: case GEU: case GTU:
2805 /* These must be swapped. */
2806 if (op1 != CONST0_RTX (cmp_mode))
2807 {
2808 code = swap_condition (code);
2809 tem = op0, op0 = op1, op1 = tem;
2810 }
2811 break;
2812
2813 default:
2814 gcc_unreachable ();
2815 }
2816
2817 if (!fp_p)
2818 {
2819 if (!reg_or_0_operand (op0, DImode))
2820 op0 = force_reg (DImode, op0);
2821 if (!reg_or_8bit_operand (op1, DImode))
2822 op1 = force_reg (DImode, op1);
2823 }
2824
2825 /* ??? We mark the branch mode to be CCmode to prevent the compare
2826 and cmov from being combined, since the compare insn follows IEEE
2827 rules that the cmov does not. */
2828 if (fp_p && !local_fast_math)
2829 cmov_mode = CCmode;
2830
2831 tem = gen_reg_rtx (cmp_op_mode);
2832 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2833 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2834 }
2835
2836 /* Simplify a conditional move of two constants into a setcc with
2837 arithmetic. This is done with a splitter since combine would
2838 just undo the work if done during code generation. It also catches
2839 cases we wouldn't have before cse. */
2840
2841 int
2842 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2843 rtx t_rtx, rtx f_rtx)
2844 {
2845 HOST_WIDE_INT t, f, diff;
2846 enum machine_mode mode;
2847 rtx target, subtarget, tmp;
2848
2849 mode = GET_MODE (dest);
2850 t = INTVAL (t_rtx);
2851 f = INTVAL (f_rtx);
2852 diff = t - f;
2853
2854 if (((code == NE || code == EQ) && diff < 0)
2855 || (code == GE || code == GT))
2856 {
2857 code = reverse_condition (code);
2858 diff = t, t = f, f = diff;
2859 diff = t - f;
2860 }
2861
2862 subtarget = target = dest;
2863 if (mode != DImode)
2864 {
2865 target = gen_lowpart (DImode, dest);
2866 if (! no_new_pseudos)
2867 subtarget = gen_reg_rtx (DImode);
2868 else
2869 subtarget = target;
2870 }
2871 /* Below, we must be careful to use copy_rtx on target and subtarget
2872 in intermediate insns, as they may be a subreg rtx, which may not
2873 be shared. */
2874
2875 if (f == 0 && exact_log2 (diff) > 0
2876 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2877 viable over a longer latency cmove. On EV5, the E0 slot is a
2878 scarce resource, and on EV4 shift has the same latency as a cmove. */
2879 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2880 {
2881 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2882 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2883
2884 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2885 GEN_INT (exact_log2 (t)));
2886 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2887 }
2888 else if (f == 0 && t == -1)
2889 {
2890 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2891 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2892
2893 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2894 }
2895 else if (diff == 1 || diff == 4 || diff == 8)
2896 {
2897 rtx add_op;
2898
2899 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2900 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2901
2902 if (diff == 1)
2903 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2904 else
2905 {
2906 add_op = GEN_INT (f);
2907 if (sext_add_operand (add_op, mode))
2908 {
2909 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2910 GEN_INT (diff));
2911 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2912 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2913 }
2914 else
2915 return 0;
2916 }
2917 }
2918 else
2919 return 0;
2920
2921 return 1;
2922 }
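/* Illustrative aside (not part of the original sources): the three
   splits above, for r = COND ? T : F with B = (COND != 0) as the setcc
   result.  A minimal sketch using hypothetical sample constants; the
   multiply in the last form maps onto addq/s4addq/s8addq.  Guarded by
   #if 0, never compiled.  */
#if 0
static long long
cmov_split_sketch (int cond, long long t, long long f)
{
  long long b = (cond != 0);

  if (f == 0 && t == 8)
    return b << 3;          /* F == 0, T a power of two: shift */
  if (f == 0 && t == -1)
    return -b;              /* F == 0, T == -1: negate */
  if (t - f == 4)
    return b * 4 + f;       /* diff of 1, 4 or 8: (s4)addq */
  return cond ? t : f;      /* otherwise keep the cmov */
}
#endif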
2923
2924 /* Look up the function X_floating library function name for the
2925 given operation. */
2926
2927 struct xfloating_op GTY(())
2928 {
2929 const enum rtx_code code;
2930 const char *const GTY((skip)) osf_func;
2931 const char *const GTY((skip)) vms_func;
2932 rtx libcall;
2933 };
2934
2935 static GTY(()) struct xfloating_op xfloating_ops[] =
2936 {
2937 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2938 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2939 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2940 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2941 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2942 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2943 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2944 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2945 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2946 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2947 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2948 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2949 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2950 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2951 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2952 };
2953
2954 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2955 {
2956 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2957 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2958 };
2959
2960 static rtx
2961 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2962 {
2963 struct xfloating_op *ops = xfloating_ops;
2964 long n = ARRAY_SIZE (xfloating_ops);
2965 long i;
2966
2967 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2968
2969 /* How irritating. Nothing to key off for the main table. */
2970 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2971 {
2972 ops = vax_cvt_ops;
2973 n = ARRAY_SIZE (vax_cvt_ops);
2974 }
2975
2976 for (i = 0; i < n; ++i, ++ops)
2977 if (ops->code == code)
2978 {
2979 rtx func = ops->libcall;
2980 if (!func)
2981 {
2982 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2983 ? ops->vms_func : ops->osf_func);
2984 ops->libcall = func;
2985 }
2986 return func;
2987 }
2988
2989 gcc_unreachable ();
2990 }
2991
2992 /* Most X_floating operations take the rounding mode as an argument.
2993 Compute that here. */
2994
2995 static int
2996 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2997 enum alpha_fp_rounding_mode round)
2998 {
2999 int mode;
3000
3001 switch (round)
3002 {
3003 case ALPHA_FPRM_NORM:
3004 mode = 2;
3005 break;
3006 case ALPHA_FPRM_MINF:
3007 mode = 1;
3008 break;
3009 case ALPHA_FPRM_CHOP:
3010 mode = 0;
3011 break;
3012 case ALPHA_FPRM_DYN:
3013 mode = 4;
3014 break;
3015 default:
3016 gcc_unreachable ();
3017
3018 /* XXX For reference, round to +inf is mode = 3. */
3019 }
3020
3021 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3022 mode |= 0x10000;
3023
3024 return mode;
3025 }
3026
3027 /* Emit an X_floating library function call.
3028
3029 Note that these functions do not follow normal calling conventions:
3030 TFmode arguments are passed in two integer registers (as opposed to
3031 indirect); TFmode return values appear in R16+R17.
3032
3033 FUNC is the function to call.
3034 TARGET is where the output belongs.
3035 OPERANDS are the inputs.
3036 NOPERANDS is the count of inputs.
3037 EQUIV is the expression equivalent for the function.
3038 */
3039
3040 static void
3041 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3042 int noperands, rtx equiv)
3043 {
3044 rtx usage = NULL_RTX, tmp, reg;
3045 int regno = 16, i;
3046
3047 start_sequence ();
3048
3049 for (i = 0; i < noperands; ++i)
3050 {
3051 switch (GET_MODE (operands[i]))
3052 {
3053 case TFmode:
3054 reg = gen_rtx_REG (TFmode, regno);
3055 regno += 2;
3056 break;
3057
3058 case DFmode:
3059 reg = gen_rtx_REG (DFmode, regno + 32);
3060 regno += 1;
3061 break;
3062
3063 case VOIDmode:
3064 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3065 /* FALLTHRU */
3066 case DImode:
3067 reg = gen_rtx_REG (DImode, regno);
3068 regno += 1;
3069 break;
3070
3071 default:
3072 gcc_unreachable ();
3073 }
3074
3075 emit_move_insn (reg, operands[i]);
3076 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3077 }
3078
3079 switch (GET_MODE (target))
3080 {
3081 case TFmode:
3082 reg = gen_rtx_REG (TFmode, 16);
3083 break;
3084 case DFmode:
3085 reg = gen_rtx_REG (DFmode, 32);
3086 break;
3087 case DImode:
3088 reg = gen_rtx_REG (DImode, 0);
3089 break;
3090 default:
3091 gcc_unreachable ();
3092 }
3093
3094 tmp = gen_rtx_MEM (QImode, func);
3095 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3096 const0_rtx, const0_rtx));
3097 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3098 CONST_OR_PURE_CALL_P (tmp) = 1;
3099
3100 tmp = get_insns ();
3101 end_sequence ();
3102
3103 emit_libcall_block (tmp, target, reg, equiv);
3104 }
3105
3106 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3107
3108 void
3109 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3110 {
3111 rtx func;
3112 int mode;
3113 rtx out_operands[3];
3114
3115 func = alpha_lookup_xfloating_lib_func (code);
3116 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3117
3118 out_operands[0] = operands[1];
3119 out_operands[1] = operands[2];
3120 out_operands[2] = GEN_INT (mode);
3121 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3122 gen_rtx_fmt_ee (code, TFmode, operands[1],
3123 operands[2]));
3124 }
3125
3126 /* Emit an X_floating library function call for a comparison. */
3127
3128 static rtx
3129 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3130 {
3131 enum rtx_code cmp_code, res_code;
3132 rtx func, out, operands[2];
3133
3134 /* X_floating library comparison functions return
3135 -1 unordered
3136 0 false
3137 1 true
3138 Convert the compare against the raw return value. */
3139
3140 cmp_code = *pcode;
3141 switch (cmp_code)
3142 {
3143 case UNORDERED:
3144 cmp_code = EQ;
3145 res_code = LT;
3146 break;
3147 case ORDERED:
3148 cmp_code = EQ;
3149 res_code = GE;
3150 break;
3151 case NE:
3152 res_code = NE;
3153 break;
3154 case EQ:
3155 case LT:
3156 case GT:
3157 case LE:
3158 case GE:
3159 res_code = GT;
3160 break;
3161 default:
3162 gcc_unreachable ();
3163 }
3164 *pcode = res_code;
3165
3166 func = alpha_lookup_xfloating_lib_func (cmp_code);
3167
3168 operands[0] = op0;
3169 operands[1] = op1;
3170 out = gen_reg_rtx (DImode);
3171
3172 /* ??? Strange mode for equiv because what's actually returned
3173 is -1,0,1, not a proper boolean value. */
3174 alpha_emit_xfloating_libcall (func, out, operands, 2,
3175 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3176
3177 return out;
3178 }
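/* Illustrative aside (not part of the original sources): how the raw
   -1/0/1 library result is tested, per the mapping above.  A minimal
   sketch; R stands for the DImode value returned in OUT.
     UNORDERED -> call _OtsEqlX, test R <  0
     ORDERED   -> call _OtsEqlX, test R >= 0
     NE        -> call _OtsNeqX, test R != 0
     EQ/LT/LE/GT/GE -> same-named routine, test R > 0.
   Guarded by #if 0, never compiled.  */
#if 0
static int
xfloat_unordered_sketch (long long r)
{
  /* After _OtsEqlX: only an unordered operand pair yields -1.  */
  return r < 0;
}
#endif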
3179
3180 /* Emit an X_floating library function call for a conversion. */
3181
3182 void
3183 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3184 {
3185 int noperands = 1, mode;
3186 rtx out_operands[2];
3187 rtx func;
3188 enum rtx_code code = orig_code;
3189
3190 if (code == UNSIGNED_FIX)
3191 code = FIX;
3192
3193 func = alpha_lookup_xfloating_lib_func (code);
3194
3195 out_operands[0] = operands[1];
3196
3197 switch (code)
3198 {
3199 case FIX:
3200 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3201 out_operands[1] = GEN_INT (mode);
3202 noperands = 2;
3203 break;
3204 case FLOAT_TRUNCATE:
3205 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3206 out_operands[1] = GEN_INT (mode);
3207 noperands = 2;
3208 break;
3209 default:
3210 break;
3211 }
3212
3213 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3214 gen_rtx_fmt_e (orig_code,
3215 GET_MODE (operands[0]),
3216 operands[1]));
3217 }
3218
3219 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3220 OP[0] into OP[0,1]. Naturally, output operand ordering is
3221 little-endian. */
3222
3223 void
3224 alpha_split_tfmode_pair (rtx operands[4])
3225 {
3226 switch (GET_CODE (operands[1]))
3227 {
3228 case REG:
3229 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3230 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3231 break;
3232
3233 case MEM:
3234 operands[3] = adjust_address (operands[1], DImode, 8);
3235 operands[2] = adjust_address (operands[1], DImode, 0);
3236 break;
3237
3238 case CONST_DOUBLE:
3239 gcc_assert (operands[1] == CONST0_RTX (TFmode));
3240 operands[2] = operands[3] = const0_rtx;
3241 break;
3242
3243 default:
3244 gcc_unreachable ();
3245 }
3246
3247 switch (GET_CODE (operands[0]))
3248 {
3249 case REG:
3250 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3251 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3252 break;
3253
3254 case MEM:
3255 operands[1] = adjust_address (operands[0], DImode, 8);
3256 operands[0] = adjust_address (operands[0], DImode, 0);
3257 break;
3258
3259 default:
3260 gcc_unreachable ();
3261 }
3262 }
3263
3264 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3265 op2 is a register containing the sign bit, operation is the
3266 logical operation to be performed. */
3267
3268 void
3269 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3270 {
3271 rtx high_bit = operands[2];
3272 rtx scratch;
3273 int move;
3274
3275 alpha_split_tfmode_pair (operands);
3276
3277 /* Detect three flavors of operand overlap. */
3278 move = 1;
3279 if (rtx_equal_p (operands[0], operands[2]))
3280 move = 0;
3281 else if (rtx_equal_p (operands[1], operands[2]))
3282 {
3283 if (rtx_equal_p (operands[0], high_bit))
3284 move = 2;
3285 else
3286 move = -1;
3287 }
3288
3289 if (move < 0)
3290 emit_move_insn (operands[0], operands[2]);
3291
3292 /* ??? If the destination overlaps both source tf and high_bit, then
3293 assume source tf is dead in its entirety and use the other half
3294 for a scratch register. Otherwise "scratch" is just the proper
3295 destination register. */
3296 scratch = operands[move < 2 ? 1 : 3];
3297
3298 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3299
3300 if (move > 0)
3301 {
3302 emit_move_insn (operands[0], operands[2]);
3303 if (move > 1)
3304 emit_move_insn (operands[1], scratch);
3305 }
3306 }
3307
3308 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3309 unaligned data:
3310
3311 unsigned: signed:
3312 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3313 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3314 lda r3,X(r11) lda r3,X+2(r11)
3315 extwl r1,r3,r1 extql r1,r3,r1
3316 extwh r2,r3,r2 extqh r2,r3,r2
3317 or r1,r2,r1 or r1,r2,r1
3318 sra r1,48,r1
3319
3320 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3321 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3322 lda r3,X(r11) lda r3,X(r11)
3323 extll r1,r3,r1 extll r1,r3,r1
3324 extlh r2,r3,r2 extlh r2,r3,r2
3325 or r1,r2,r1 addl r1,r2,r1
3326
3327 quad: ldq_u r1,X(r11)
3328 ldq_u r2,X+7(r11)
3329 lda r3,X(r11)
3330 extql r1,r3,r1
3331 extqh r2,r3,r2
3332 or r1,r2,r1
3333 */
3334
3335 void
3336 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3337 HOST_WIDE_INT ofs, int sign)
3338 {
3339 rtx meml, memh, addr, extl, exth, tmp, mema;
3340 enum machine_mode mode;
3341
3342 if (TARGET_BWX && size == 2)
3343 {
3344 meml = adjust_address (mem, QImode, ofs);
3345 memh = adjust_address (mem, QImode, ofs+1);
3346 if (BYTES_BIG_ENDIAN)
3347 tmp = meml, meml = memh, memh = tmp;
3348 extl = gen_reg_rtx (DImode);
3349 exth = gen_reg_rtx (DImode);
3350 emit_insn (gen_zero_extendqidi2 (extl, meml));
3351 emit_insn (gen_zero_extendqidi2 (exth, memh));
3352 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3353 NULL, 1, OPTAB_LIB_WIDEN);
3354 addr = expand_simple_binop (DImode, IOR, extl, exth,
3355 NULL, 1, OPTAB_LIB_WIDEN);
3356
3357 if (sign && GET_MODE (tgt) != HImode)
3358 {
3359 addr = gen_lowpart (HImode, addr);
3360 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3361 }
3362 else
3363 {
3364 if (GET_MODE (tgt) != DImode)
3365 addr = gen_lowpart (GET_MODE (tgt), addr);
3366 emit_move_insn (tgt, addr);
3367 }
3368 return;
3369 }
3370
3371 meml = gen_reg_rtx (DImode);
3372 memh = gen_reg_rtx (DImode);
3373 addr = gen_reg_rtx (DImode);
3374 extl = gen_reg_rtx (DImode);
3375 exth = gen_reg_rtx (DImode);
3376
3377 mema = XEXP (mem, 0);
3378 if (GET_CODE (mema) == LO_SUM)
3379 mema = force_reg (Pmode, mema);
3380
3381 /* AND addresses cannot be in any alias set, since they may implicitly
3382 alias surrounding code. Ideally we'd have some alias set that
3383 covered all types except those with alignment 8 or higher. */
3384
3385 tmp = change_address (mem, DImode,
3386 gen_rtx_AND (DImode,
3387 plus_constant (mema, ofs),
3388 GEN_INT (-8)));
3389 set_mem_alias_set (tmp, 0);
3390 emit_move_insn (meml, tmp);
3391
3392 tmp = change_address (mem, DImode,
3393 gen_rtx_AND (DImode,
3394 plus_constant (mema, ofs + size - 1),
3395 GEN_INT (-8)));
3396 set_mem_alias_set (tmp, 0);
3397 emit_move_insn (memh, tmp);
3398
3399 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3400 {
3401 emit_move_insn (addr, plus_constant (mema, -1));
3402
3403 emit_insn (gen_extqh_be (extl, meml, addr));
3404 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3405
3406 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3407 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3408 addr, 1, OPTAB_WIDEN);
3409 }
3410 else if (sign && size == 2)
3411 {
3412 emit_move_insn (addr, plus_constant (mema, ofs+2));
3413
3414 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3415 emit_insn (gen_extqh_le (exth, memh, addr));
3416
3417 /* We must use tgt here for the target. The alpha-vms port fails if we
3418 use addr for the target, because addr is marked as a pointer and combine
3419 knows that pointers are always sign-extended 32 bit values. */
3420 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3421 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3422 addr, 1, OPTAB_WIDEN);
3423 }
3424 else
3425 {
3426 if (WORDS_BIG_ENDIAN)
3427 {
3428 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3429 switch ((int) size)
3430 {
3431 case 2:
3432 emit_insn (gen_extwh_be (extl, meml, addr));
3433 mode = HImode;
3434 break;
3435
3436 case 4:
3437 emit_insn (gen_extlh_be (extl, meml, addr));
3438 mode = SImode;
3439 break;
3440
3441 case 8:
3442 emit_insn (gen_extqh_be (extl, meml, addr));
3443 mode = DImode;
3444 break;
3445
3446 default:
3447 gcc_unreachable ();
3448 }
3449 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3450 }
3451 else
3452 {
3453 emit_move_insn (addr, plus_constant (mema, ofs));
3454 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3455 switch ((int) size)
3456 {
3457 case 2:
3458 emit_insn (gen_extwh_le (exth, memh, addr));
3459 mode = HImode;
3460 break;
3461
3462 case 4:
3463 emit_insn (gen_extlh_le (exth, memh, addr));
3464 mode = SImode;
3465 break;
3466
3467 case 8:
3468 emit_insn (gen_extqh_le (exth, memh, addr));
3469 mode = DImode;
3470 break;
3471
3472 default:
3473 gcc_unreachable ();
3474 }
3475 }
3476
3477 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3478 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3479 sign, OPTAB_WIDEN);
3480 }
3481
3482 if (addr != tgt)
3483 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3484 }
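/* Illustrative aside (not part of the original sources): the
   little-endian quadword case above, modeled in plain host C.  A minimal
   sketch assuming little-endian byte order and that reading the two
   enclosing aligned quadwords is safe, as ldq_u guarantees on Alpha.
   The SHIFT == 0 guard mirrors the extqh-of-zero caveat noted later in
   this file.  Guarded by #if 0, never compiled.  */
#if 0
#include <stdint.h>

static uint64_t
unaligned_load_sketch (const uint8_t *ptr)
{
  uintptr_t a = (uintptr_t) ptr;
  const uint64_t *meml = (const uint64_t *) (a & ~(uintptr_t) 7);       /* ldq_u X */
  const uint64_t *memh = (const uint64_t *) ((a + 7) & ~(uintptr_t) 7); /* ldq_u X+7 */
  unsigned shift = (a & 7) * 8;

  if (shift == 0)
    return *meml;                      /* already aligned; both loads agree */
  return (*meml >> shift)              /* extql */
	 | (*memh << (64 - shift));    /* extqh, then or */
}
#endif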
3485
3486 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3487
3488 void
3489 alpha_expand_unaligned_store (rtx dst, rtx src,
3490 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3491 {
3492 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3493
3494 if (TARGET_BWX && size == 2)
3495 {
3496 if (src != const0_rtx)
3497 {
3498 dstl = gen_lowpart (QImode, src);
3499 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3500 NULL, 1, OPTAB_LIB_WIDEN);
3501 dsth = gen_lowpart (QImode, dsth);
3502 }
3503 else
3504 dstl = dsth = const0_rtx;
3505
3506 meml = adjust_address (dst, QImode, ofs);
3507 memh = adjust_address (dst, QImode, ofs+1);
3508 if (BYTES_BIG_ENDIAN)
3509 addr = meml, meml = memh, memh = addr;
3510
3511 emit_move_insn (meml, dstl);
3512 emit_move_insn (memh, dsth);
3513 return;
3514 }
3515
3516 dstl = gen_reg_rtx (DImode);
3517 dsth = gen_reg_rtx (DImode);
3518 insl = gen_reg_rtx (DImode);
3519 insh = gen_reg_rtx (DImode);
3520
3521 dsta = XEXP (dst, 0);
3522 if (GET_CODE (dsta) == LO_SUM)
3523 dsta = force_reg (Pmode, dsta);
3524
3525 /* AND addresses cannot be in any alias set, since they may implicitly
3526 alias surrounding code. Ideally we'd have some alias set that
3527 covered all types except those with alignment 8 or higher. */
3528
3529 meml = change_address (dst, DImode,
3530 gen_rtx_AND (DImode,
3531 plus_constant (dsta, ofs),
3532 GEN_INT (-8)));
3533 set_mem_alias_set (meml, 0);
3534
3535 memh = change_address (dst, DImode,
3536 gen_rtx_AND (DImode,
3537 plus_constant (dsta, ofs + size - 1),
3538 GEN_INT (-8)));
3539 set_mem_alias_set (memh, 0);
3540
3541 emit_move_insn (dsth, memh);
3542 emit_move_insn (dstl, meml);
3543 if (WORDS_BIG_ENDIAN)
3544 {
3545 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3546
3547 if (src != const0_rtx)
3548 {
3549 switch ((int) size)
3550 {
3551 case 2:
3552 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3553 break;
3554 case 4:
3555 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3556 break;
3557 case 8:
3558 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3559 break;
3560 }
3561 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3562 GEN_INT (size*8), addr));
3563 }
3564
3565 switch ((int) size)
3566 {
3567 case 2:
3568 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3569 break;
3570 case 4:
3571 {
3572 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3573 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3574 break;
3575 }
3576 case 8:
3577 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3578 break;
3579 }
3580
3581 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3582 }
3583 else
3584 {
3585 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3586
3587 if (src != CONST0_RTX (GET_MODE (src)))
3588 {
3589 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3590 GEN_INT (size*8), addr));
3591
3592 switch ((int) size)
3593 {
3594 case 2:
3595 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3596 break;
3597 case 4:
3598 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3599 break;
3600 case 8:
3601 emit_insn (gen_insql_le (insl, src, addr));
3602 break;
3603 }
3604 }
3605
3606 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3607
3608 switch ((int) size)
3609 {
3610 case 2:
3611 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3612 break;
3613 case 4:
3614 {
3615 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3616 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3617 break;
3618 }
3619 case 8:
3620 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3621 break;
3622 }
3623 }
3624
3625 if (src != CONST0_RTX (GET_MODE (src)))
3626 {
3627 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3628 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3629 }
3630
3631 if (WORDS_BIG_ENDIAN)
3632 {
3633 emit_move_insn (meml, dstl);
3634 emit_move_insn (memh, dsth);
3635 }
3636 else
3637 {
3638 /* Must store high before low for degenerate case of aligned. */
3639 emit_move_insn (memh, dsth);
3640 emit_move_insn (meml, dstl);
3641 }
3642 }
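/* Illustrative aside (not part of the original sources): the
   little-endian ins/msk sequence above for a 2-byte store, as plain
   host C.  A minimal sketch, little-endian only and not atomic: read
   both enclosing quadwords, clear the bytes the halfword occupies
   (mskwl/mskwh), OR in the shifted data (inswl/inswh), and write high
   before low.  Guarded by #if 0, never compiled.  */
#if 0
#include <stdint.h>

static void
unaligned_store_sketch (uint8_t *ptr, uint16_t val)
{
  uintptr_t a = (uintptr_t) ptr;
  uint64_t *meml = (uint64_t *) (a & ~(uintptr_t) 7);
  uint64_t *memh = (uint64_t *) ((a + 1) & ~(uintptr_t) 7);
  unsigned shift = (a & 7) * 8;
  uint64_t insl = (uint64_t) val << shift;      /* inswl */
  uint64_t mskl = (uint64_t) 0xffff << shift;   /* mskwl mask */
  uint64_t insh, mskh;

  if (meml == memh)
    *meml = (*meml & ~mskl) | insl;   /* halfword fits in one quadword */
  else
    {
      insh = (uint64_t) val >> (64 - shift);    /* inswh */
      mskh = (uint64_t) 0xffff >> (64 - shift); /* mskwh mask */
      *memh = (*memh & ~mskh) | insh;           /* high before low */
      *meml = (*meml & ~mskl) | insl;
    }
}
#endif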
3643
3644 /* The block move code tries to maximize speed by separating loads and
3645 stores at the expense of register pressure: we load all of the data
3646 before we store it back out. There are two secondary effects worth
3647 mentioning: it speeds copying to/from aligned and unaligned buffers,
3648 and it makes the code significantly easier to write. */
3649
3650 #define MAX_MOVE_WORDS 8
3651
3652 /* Load an integral number of consecutive unaligned quadwords. */
3653
3654 static void
3655 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3656 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3657 {
3658 rtx const im8 = GEN_INT (-8);
3659 rtx const i64 = GEN_INT (64);
3660 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3661 rtx sreg, areg, tmp, smema;
3662 HOST_WIDE_INT i;
3663
3664 smema = XEXP (smem, 0);
3665 if (GET_CODE (smema) == LO_SUM)
3666 smema = force_reg (Pmode, smema);
3667
3668 /* Generate all the tmp registers we need. */
3669 for (i = 0; i < words; ++i)
3670 {
3671 data_regs[i] = out_regs[i];
3672 ext_tmps[i] = gen_reg_rtx (DImode);
3673 }
3674 data_regs[words] = gen_reg_rtx (DImode);
3675
3676 if (ofs != 0)
3677 smem = adjust_address (smem, GET_MODE (smem), ofs);
3678
3679 /* Load up all of the source data. */
3680 for (i = 0; i < words; ++i)
3681 {
3682 tmp = change_address (smem, DImode,
3683 gen_rtx_AND (DImode,
3684 plus_constant (smema, 8*i),
3685 im8));
3686 set_mem_alias_set (tmp, 0);
3687 emit_move_insn (data_regs[i], tmp);
3688 }
3689
3690 tmp = change_address (smem, DImode,
3691 gen_rtx_AND (DImode,
3692 plus_constant (smema, 8*words - 1),
3693 im8));
3694 set_mem_alias_set (tmp, 0);
3695 emit_move_insn (data_regs[words], tmp);
3696
3697 /* Extract the half-word fragments. Unfortunately DEC decided to make
3698 extxh with offset zero a noop instead of zeroing the register, so
3699 we must take care of that edge condition ourselves with cmov. */
3700
3701 sreg = copy_addr_to_reg (smema);
3702 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3703 1, OPTAB_WIDEN);
3704 if (WORDS_BIG_ENDIAN)
3705 emit_move_insn (sreg, plus_constant (sreg, 7));
3706 for (i = 0; i < words; ++i)
3707 {
3708 if (WORDS_BIG_ENDIAN)
3709 {
3710 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3711 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3712 }
3713 else
3714 {
3715 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3716 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3717 }
3718 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3719 gen_rtx_IF_THEN_ELSE (DImode,
3720 gen_rtx_EQ (DImode, areg,
3721 const0_rtx),
3722 const0_rtx, ext_tmps[i])));
3723 }
3724
3725 /* Merge the half-words into whole words. */
3726 for (i = 0; i < words; ++i)
3727 {
3728 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3729 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3730 }
3731 }
3732
3733 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3734 may be NULL to store zeros. */
3735
3736 static void
3737 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3738 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3739 {
3740 rtx const im8 = GEN_INT (-8);
3741 rtx const i64 = GEN_INT (64);
3742 rtx ins_tmps[MAX_MOVE_WORDS];
3743 rtx st_tmp_1, st_tmp_2, dreg;
3744 rtx st_addr_1, st_addr_2, dmema;
3745 HOST_WIDE_INT i;
3746
3747 dmema = XEXP (dmem, 0);
3748 if (GET_CODE (dmema) == LO_SUM)
3749 dmema = force_reg (Pmode, dmema);
3750
3751 /* Generate all the tmp registers we need. */
3752 if (data_regs != NULL)
3753 for (i = 0; i < words; ++i)
3754 ins_tmps[i] = gen_reg_rtx(DImode);
3755 st_tmp_1 = gen_reg_rtx(DImode);
3756 st_tmp_2 = gen_reg_rtx(DImode);
3757
3758 if (ofs != 0)
3759 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3760
3761 st_addr_2 = change_address (dmem, DImode,
3762 gen_rtx_AND (DImode,
3763 plus_constant (dmema, words*8 - 1),
3764 im8));
3765 set_mem_alias_set (st_addr_2, 0);
3766
3767 st_addr_1 = change_address (dmem, DImode,
3768 gen_rtx_AND (DImode, dmema, im8));
3769 set_mem_alias_set (st_addr_1, 0);
3770
3771 /* Load up the destination end bits. */
3772 emit_move_insn (st_tmp_2, st_addr_2);
3773 emit_move_insn (st_tmp_1, st_addr_1);
3774
3775 /* Shift the input data into place. */
3776 dreg = copy_addr_to_reg (dmema);
3777 if (WORDS_BIG_ENDIAN)
3778 emit_move_insn (dreg, plus_constant (dreg, 7));
3779 if (data_regs != NULL)
3780 {
3781 for (i = words-1; i >= 0; --i)
3782 {
3783 if (WORDS_BIG_ENDIAN)
3784 {
3785 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3786 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3787 }
3788 else
3789 {
3790 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3791 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3792 }
3793 }
3794 for (i = words-1; i > 0; --i)
3795 {
3796 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3797 ins_tmps[i-1], ins_tmps[i-1], 1,
3798 OPTAB_WIDEN);
3799 }
3800 }
3801
3802 /* Split and merge the ends with the destination data. */
3803 if (WORDS_BIG_ENDIAN)
3804 {
3805 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3806 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3807 }
3808 else
3809 {
3810 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3811 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3812 }
3813
3814 if (data_regs != NULL)
3815 {
3816 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3817 st_tmp_2, 1, OPTAB_WIDEN);
3818 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3819 st_tmp_1, 1, OPTAB_WIDEN);
3820 }
3821
3822 /* Store it all. */
3823 if (WORDS_BIG_ENDIAN)
3824 emit_move_insn (st_addr_1, st_tmp_1);
3825 else
3826 emit_move_insn (st_addr_2, st_tmp_2);
3827 for (i = words-1; i > 0; --i)
3828 {
3829 rtx tmp = change_address (dmem, DImode,
3830 gen_rtx_AND (DImode,
3831 plus_constant(dmema,
3832 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3833 im8));
3834 set_mem_alias_set (tmp, 0);
3835 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3836 }
3837 if (WORDS_BIG_ENDIAN)
3838 emit_move_insn (st_addr_2, st_tmp_2);
3839 else
3840 emit_move_insn (st_addr_1, st_tmp_1);
3841 }
3842
3843
3844 /* Expand string/block move operations.
3845
3846 operands[0] is the pointer to the destination.
3847 operands[1] is the pointer to the source.
3848 operands[2] is the number of bytes to move.
3849 operands[3] is the alignment. */
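/* E.g. a 16-byte copy where neither pointer has known alignment is
   expanded via alpha_expand_unaligned_load_words (three ldq_u's
   feeding extql/extqh pairs) followed by
   alpha_expand_unaligned_store_words, with no byte, word or longword
   memory reference emitted at all.  */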
3850
3851 int
3852 alpha_expand_block_move (rtx operands[])
3853 {
3854 rtx bytes_rtx = operands[2];
3855 rtx align_rtx = operands[3];
3856 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3857 HOST_WIDE_INT bytes = orig_bytes;
3858 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3859 HOST_WIDE_INT dst_align = src_align;
3860 rtx orig_src = operands[1];
3861 rtx orig_dst = operands[0];
3862 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3863 rtx tmp;
3864 unsigned int i, words, ofs, nregs = 0;
3865
3866 if (orig_bytes <= 0)
3867 return 1;
3868 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3869 return 0;
3870
3871 /* Look for additional alignment information from recorded register info. */
3872
3873 tmp = XEXP (orig_src, 0);
3874 if (GET_CODE (tmp) == REG)
3875 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3876 else if (GET_CODE (tmp) == PLUS
3877 && GET_CODE (XEXP (tmp, 0)) == REG
3878 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3879 {
3880 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3881 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3882
3883 if (a > src_align)
3884 {
3885 if (a >= 64 && c % 8 == 0)
3886 src_align = 64;
3887 else if (a >= 32 && c % 4 == 0)
3888 src_align = 32;
3889 else if (a >= 16 && c % 2 == 0)
3890 src_align = 16;
3891 }
3892 }
3893
3894 tmp = XEXP (orig_dst, 0);
3895 if (GET_CODE (tmp) == REG)
3896 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3897 else if (GET_CODE (tmp) == PLUS
3898 && GET_CODE (XEXP (tmp, 0)) == REG
3899 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3900 {
3901 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3902 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3903
3904 if (a > dst_align)
3905 {
3906 if (a >= 64 && c % 8 == 0)
3907 dst_align = 64;
3908 else if (a >= 32 && c % 4 == 0)
3909 dst_align = 32;
3910 else if (a >= 16 && c % 2 == 0)
3911 dst_align = 16;
3912 }
3913 }
3914
3915 ofs = 0;
3916 if (src_align >= 64 && bytes >= 8)
3917 {
3918 words = bytes / 8;
3919
3920 for (i = 0; i < words; ++i)
3921 data_regs[nregs + i] = gen_reg_rtx (DImode);
3922
3923 for (i = 0; i < words; ++i)
3924 emit_move_insn (data_regs[nregs + i],
3925 adjust_address (orig_src, DImode, ofs + i * 8));
3926
3927 nregs += words;
3928 bytes -= words * 8;
3929 ofs += words * 8;
3930 }
3931
3932 if (src_align >= 32 && bytes >= 4)
3933 {
3934 words = bytes / 4;
3935
3936 for (i = 0; i < words; ++i)
3937 data_regs[nregs + i] = gen_reg_rtx (SImode);
3938
3939 for (i = 0; i < words; ++i)
3940 emit_move_insn (data_regs[nregs + i],
3941 adjust_address (orig_src, SImode, ofs + i * 4));
3942
3943 nregs += words;
3944 bytes -= words * 4;
3945 ofs += words * 4;
3946 }
3947
3948 if (bytes >= 8)
3949 {
3950 words = bytes / 8;
3951
3952 for (i = 0; i < words+1; ++i)
3953 data_regs[nregs + i] = gen_reg_rtx (DImode);
3954
3955 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3956 words, ofs);
3957
3958 nregs += words;
3959 bytes -= words * 8;
3960 ofs += words * 8;
3961 }
3962
3963 if (! TARGET_BWX && bytes >= 4)
3964 {
3965 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3966 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3967 bytes -= 4;
3968 ofs += 4;
3969 }
3970
3971 if (bytes >= 2)
3972 {
3973 if (src_align >= 16)
3974 {
3975 do {
3976 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3977 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3978 bytes -= 2;
3979 ofs += 2;
3980 } while (bytes >= 2);
3981 }
3982 else if (! TARGET_BWX)
3983 {
3984 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3985 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3986 bytes -= 2;
3987 ofs += 2;
3988 }
3989 }
3990
3991 while (bytes > 0)
3992 {
3993 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3994 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3995 bytes -= 1;
3996 ofs += 1;
3997 }
3998
3999 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
4000
4001 /* Now save it back out again. */
4002
4003 i = 0, ofs = 0;
4004
4005 /* Write out the data in whatever chunks reading the source allowed. */
4006 if (dst_align >= 64)
4007 {
4008 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4009 {
4010 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4011 data_regs[i]);
4012 ofs += 8;
4013 i++;
4014 }
4015 }
4016
4017 if (dst_align >= 32)
4018 {
4019 /* If the source has remaining DImode regs, write them out in
4020 two pieces. */
4021 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4022 {
4023 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4024 NULL_RTX, 1, OPTAB_WIDEN);
4025
4026 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4027 gen_lowpart (SImode, data_regs[i]));
4028 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4029 gen_lowpart (SImode, tmp));
4030 ofs += 8;
4031 i++;
4032 }
4033
4034 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4035 {
4036 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4037 data_regs[i]);
4038 ofs += 4;
4039 i++;
4040 }
4041 }
4042
4043 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4044 {
4045 /* Write out a remaining block of words using unaligned methods. */
4046
4047 for (words = 1; i + words < nregs; words++)
4048 if (GET_MODE (data_regs[i + words]) != DImode)
4049 break;
4050
4051 if (words == 1)
4052 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4053 else
4054 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4055 words, ofs);
4056
4057 i += words;
4058 ofs += words * 8;
4059 }
4060
4061 /* Due to the above, this won't be aligned. */
4062 /* ??? If we have more than one of these, consider constructing full
4063 words in registers and using alpha_expand_unaligned_store_words. */
4064 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4065 {
4066 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4067 ofs += 4;
4068 i++;
4069 }
4070
4071 if (dst_align >= 16)
4072 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4073 {
4074 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4075 i++;
4076 ofs += 2;
4077 }
4078 else
4079 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4080 {
4081 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4082 i++;
4083 ofs += 2;
4084 }
4085
4086 /* The remainder must be byte copies. */
4087 while (i < nregs)
4088 {
4089 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4090 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4091 i++;
4092 ofs += 1;
4093 }
4094
4095 return 1;
4096 }
4097
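/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */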
4098 int
4099 alpha_expand_block_clear (rtx operands[])
4100 {
4101 rtx bytes_rtx = operands[1];
4102 rtx align_rtx = operands[3];
4103 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4104 HOST_WIDE_INT bytes = orig_bytes;
4105 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4106 HOST_WIDE_INT alignofs = 0;
4107 rtx orig_dst = operands[0];
4108 rtx tmp;
4109 int i, words, ofs = 0;
4110
4111 if (orig_bytes <= 0)
4112 return 1;
4113 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4114 return 0;
4115
4116 /* Look for stricter alignment. */
4117 tmp = XEXP (orig_dst, 0);
4118 if (GET_CODE (tmp) == REG)
4119 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4120 else if (GET_CODE (tmp) == PLUS
4121 && GET_CODE (XEXP (tmp, 0)) == REG
4122 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4123 {
4124 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4125 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4126
4127 if (a > align)
4128 {
4129 if (a >= 64)
4130 align = a, alignofs = 8 - c % 8;
4131 else if (a >= 32)
4132 align = a, alignofs = 4 - c % 4;
4133 else if (a >= 16)
4134 align = a, alignofs = 2 - c % 2;
4135 }
4136 }
4137
4138 /* Handle an unaligned prefix first. */
4139
4140 if (alignofs > 0)
4141 {
4142 #if HOST_BITS_PER_WIDE_INT >= 64
4143 /* Given that alignofs is bounded by align, the only time BWX could
4144 generate three stores is for a 7 byte fill. Prefer two individual
4145 stores over a load/mask/store sequence. */
4146 if ((!TARGET_BWX || alignofs == 7)
4147 && align >= 32
4148 && !(alignofs == 4 && bytes >= 4))
4149 {
4150 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4151 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4152 rtx mem, tmp;
4153 HOST_WIDE_INT mask;
4154
4155 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4156 set_mem_alias_set (mem, 0);
4157
4158 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4159 if (bytes < alignofs)
4160 {
4161 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4162 ofs += bytes;
4163 bytes = 0;
4164 }
4165 else
4166 {
4167 bytes -= alignofs;
4168 ofs += alignofs;
4169 }
4170 alignofs = 0;
4171
4172 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4173 NULL_RTX, 1, OPTAB_WIDEN);
4174
4175 emit_move_insn (mem, tmp);
4176 }
4177 #endif
4178
4179 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4180 {
4181 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4182 bytes -= 1;
4183 ofs += 1;
4184 alignofs -= 1;
4185 }
4186 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4187 {
4188 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4189 bytes -= 2;
4190 ofs += 2;
4191 alignofs -= 2;
4192 }
4193 if (alignofs == 4 && bytes >= 4)
4194 {
4195 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4196 bytes -= 4;
4197 ofs += 4;
4198 alignofs = 0;
4199 }
4200
4201 /* If we've not used the extra lead alignment information by now,
4202 we won't be able to. Downgrade align to match what's left over. */
4203 if (alignofs > 0)
4204 {
4205 alignofs = alignofs & -alignofs;
4206 align = MIN (align, alignofs * BITS_PER_UNIT);
4207 }
4208 }
4209
4210 /* Handle a block of contiguous long-words. */
4211
4212 if (align >= 64 && bytes >= 8)
4213 {
4214 words = bytes / 8;
4215
4216 for (i = 0; i < words; ++i)
4217 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4218 const0_rtx);
4219
4220 bytes -= words * 8;
4221 ofs += words * 8;
4222 }
4223
4224 /* If the block is large and appropriately aligned, emit a single
4225 store followed by a sequence of stq_u insns. */
4226
4227 if (align >= 32 && bytes > 16)
4228 {
4229 rtx orig_dsta;
4230
4231 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4232 bytes -= 4;
4233 ofs += 4;
4234
4235 orig_dsta = XEXP (orig_dst, 0);
4236 if (GET_CODE (orig_dsta) == LO_SUM)
4237 orig_dsta = force_reg (Pmode, orig_dsta);
4238
4239 words = bytes / 8;
4240 for (i = 0; i < words; ++i)
4241 {
4242 rtx mem
4243 = change_address (orig_dst, DImode,
4244 gen_rtx_AND (DImode,
4245 plus_constant (orig_dsta, ofs + i*8),
4246 GEN_INT (-8)));
4247 set_mem_alias_set (mem, 0);
4248 emit_move_insn (mem, const0_rtx);
4249 }
4250
4251 /* Depending on the alignment, the first stq_u may have overlapped
4252 with the initial stl, which means that the last stq_u didn't
4253 write as much as it would appear. Leave those questionable bytes
4254 unaccounted for. */
4255 bytes -= words * 8 - 4;
4256 ofs += words * 8 - 4;
4257 }
4258
4259 /* Handle a smaller block of aligned words. */
4260
4261 if ((align >= 64 && bytes == 4)
4262 || (align == 32 && bytes >= 4))
4263 {
4264 words = bytes / 4;
4265
4266 for (i = 0; i < words; ++i)
4267 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4268 const0_rtx);
4269
4270 bytes -= words * 4;
4271 ofs += words * 4;
4272 }
4273
4274 /* An unaligned block uses stq_u stores for as many quadwords as possible. */
4275
4276 if (bytes >= 8)
4277 {
4278 words = bytes / 8;
4279
4280 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4281
4282 bytes -= words * 8;
4283 ofs += words * 8;
4284 }
4285
4286 /* Next clean up any trailing pieces. */
4287
4288 #if HOST_BITS_PER_WIDE_INT >= 64
4289 /* Count the number of bits in BYTES for which aligned stores could
4290 be emitted. */
4291 words = 0;
4292 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4293 if (bytes & i)
4294 words += 1;
4295
4296 /* If we have appropriate alignment (and it wouldn't take too many
4297 instructions otherwise), mask out the bytes we need. */
4298 if (TARGET_BWX ? words > 2 : bytes > 0)
4299 {
4300 if (align >= 64)
4301 {
4302 rtx mem, tmp;
4303 HOST_WIDE_INT mask;
4304
4305 mem = adjust_address (orig_dst, DImode, ofs);
4306 set_mem_alias_set (mem, 0);
4307
4308 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4309
4310 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4311 NULL_RTX, 1, OPTAB_WIDEN);
4312
4313 emit_move_insn (mem, tmp);
4314 return 1;
4315 }
4316 else if (align >= 32 && bytes < 4)
4317 {
4318 rtx mem, tmp;
4319 HOST_WIDE_INT mask;
4320
4321 mem = adjust_address (orig_dst, SImode, ofs);
4322 set_mem_alias_set (mem, 0);
4323
4324 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4325
4326 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4327 NULL_RTX, 1, OPTAB_WIDEN);
4328
4329 emit_move_insn (mem, tmp);
4330 return 1;
4331 }
4332 }
4333 #endif
4334
4335 if (!TARGET_BWX && bytes >= 4)
4336 {
4337 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4338 bytes -= 4;
4339 ofs += 4;
4340 }
4341
4342 if (bytes >= 2)
4343 {
4344 if (align >= 16)
4345 {
4346 do {
4347 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4348 const0_rtx);
4349 bytes -= 2;
4350 ofs += 2;
4351 } while (bytes >= 2);
4352 }
4353 else if (! TARGET_BWX)
4354 {
4355 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4356 bytes -= 2;
4357 ofs += 2;
4358 }
4359 }
4360
4361 while (bytes > 0)
4362 {
4363 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4364 bytes -= 1;
4365 ofs += 1;
4366 }
4367
4368 return 1;
4369 }
4370
4371 /* Returns a mask so that zap(x, value) == x & mask. */
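/* E.g. VALUE == 0x05 directs zap to clear bytes 0 and 2, so the mask
   built here is 0xffffffffff00ff00: 0xff in every byte whose bit in
   VALUE is clear.  */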
4372
4373 rtx
4374 alpha_expand_zap_mask (HOST_WIDE_INT value)
4375 {
4376 rtx result;
4377 int i;
4378
4379 if (HOST_BITS_PER_WIDE_INT >= 64)
4380 {
4381 HOST_WIDE_INT mask = 0;
4382
4383 for (i = 7; i >= 0; --i)
4384 {
4385 mask <<= 8;
4386 if (!((value >> i) & 1))
4387 mask |= 0xff;
4388 }
4389
4390 result = gen_int_mode (mask, DImode);
4391 }
4392 else
4393 {
4394 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4395
4396 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4397
4398 for (i = 7; i >= 4; --i)
4399 {
4400 mask_hi <<= 8;
4401 if (!((value >> i) & 1))
4402 mask_hi |= 0xff;
4403 }
4404
4405 for (i = 3; i >= 0; --i)
4406 {
4407 mask_lo <<= 8;
4408 if (!((value >> i) & 1))
4409 mask_lo |= 0xff;
4410 }
4411
4412 result = immed_double_const (mask_lo, mask_hi, DImode);
4413 }
4414
4415 return result;
4416 }
4417
4418 void
4419 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4420 enum machine_mode mode,
4421 rtx op0, rtx op1, rtx op2)
4422 {
4423 op0 = gen_lowpart (mode, op0);
4424
4425 if (op1 == const0_rtx)
4426 op1 = CONST0_RTX (mode);
4427 else
4428 op1 = gen_lowpart (mode, op1);
4429
4430 if (op2 == const0_rtx)
4431 op2 = CONST0_RTX (mode);
4432 else
4433 op2 = gen_lowpart (mode, op2);
4434
4435 emit_insn ((*gen) (op0, op1, op2));
4436 }
4437
4438 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4439 COND is true. Mark the jump as unlikely to be taken. */
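/* With REG_BR_PROB_BASE of 10000, the probability note below predicts
   the branch taken just under 1% of the time, so the fall-through
   (success) path of an ll/sc loop stays the hot path.  */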
4440
4441 static void
4442 emit_unlikely_jump (rtx cond, rtx label)
4443 {
4444 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4445 rtx x;
4446
4447 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4448 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4449 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4450 }
4451
4452 /* A subroutine of the atomic operation splitters. Emit a load-locked
4453 instruction in MODE. */
4454
4455 static void
4456 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4457 {
4458 rtx (*fn) (rtx, rtx) = NULL;
4459 if (mode == SImode)
4460 fn = gen_load_locked_si;
4461 else if (mode == DImode)
4462 fn = gen_load_locked_di;
4463 emit_insn (fn (reg, mem));
4464 }
4465
4466 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4467 instruction in MODE. */
4468
4469 static void
4470 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4471 {
4472 rtx (*fn) (rtx, rtx, rtx) = NULL;
4473 if (mode == SImode)
4474 fn = gen_store_conditional_si;
4475 else if (mode == DImode)
4476 fn = gen_store_conditional_di;
4477 emit_insn (fn (res, mem, val));
4478 }
4479
4480 /* A subroutine of the atomic operation splitters. Emit an insxl
4481 instruction in MODE. */
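/* E.g. for QImode this emits insbl, which places the low byte of OP1
   at byte offset OP2 & 7 of an otherwise zero quadword, ready to be
   or'd into a masked quadword.  */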
4482
4483 static rtx
4484 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4485 {
4486 rtx ret = gen_reg_rtx (DImode);
4487 rtx (*fn) (rtx, rtx, rtx);
4488
4489 if (WORDS_BIG_ENDIAN)
4490 {
4491 if (mode == QImode)
4492 fn = gen_insbl_be;
4493 else
4494 fn = gen_inswl_be;
4495 }
4496 else
4497 {
4498 if (mode == QImode)
4499 fn = gen_insbl_le;
4500 else
4501 fn = gen_inswl_le;
4502 }
4503 emit_insn (fn (ret, op1, op2));
4504
4505 return ret;
4506 }
4507
4508 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4509 to perform. MEM is the memory on which to operate. VAL is the second
4510 operand of the binary operator. BEFORE and AFTER are optional locations to
4511 return the value of MEM either before or after the operation. SCRATCH is
4512 a scratch register. */
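/* The emitted sequence is roughly, for a DImode PLUS (register names
   illustrative only):

	mb
     1:	ldq_l	t,0(mem)
	addq	t,val,s
	stq_c	s,0(mem)
	beq	s,1b		# predicted not taken
	mb
*/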
4513
4514 void
4515 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4516 rtx before, rtx after, rtx scratch)
4517 {
4518 enum machine_mode mode = GET_MODE (mem);
4519 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4520
4521 emit_insn (gen_memory_barrier ());
4522
4523 label = gen_label_rtx ();
4524 emit_label (label);
4525 label = gen_rtx_LABEL_REF (DImode, label);
4526
4527 if (before == NULL)
4528 before = scratch;
4529 emit_load_locked (mode, before, mem);
4530
4531 if (code == NOT)
4532 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4533 else
4534 x = gen_rtx_fmt_ee (code, mode, before, val);
4535 if (after)
4536 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4537 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4538
4539 emit_store_conditional (mode, cond, mem, scratch);
4540
4541 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4542 emit_unlikely_jump (x, label);
4543
4544 emit_insn (gen_memory_barrier ());
4545 }
4546
4547 /* Expand a compare and swap operation. */
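/* The emitted loop is roughly, for DImode (register names illustrative
   only):

	mb
     1:	ldq_l	ret,0(mem)
	cmpeq	ret,oldval,cond
	beq	cond,2f
	mov	newval,s
	stq_c	s,0(mem)
	beq	s,1b
	mb
     2:
*/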
4548
4549 void
4550 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4551 rtx scratch)
4552 {
4553 enum machine_mode mode = GET_MODE (mem);
4554 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4555
4556 emit_insn (gen_memory_barrier ());
4557
4558 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4559 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4560 emit_label (XEXP (label1, 0));
4561
4562 emit_load_locked (mode, retval, mem);
4563
4564 x = gen_lowpart (DImode, retval);
4565 if (oldval == const0_rtx)
4566 x = gen_rtx_NE (DImode, x, const0_rtx);
4567 else
4568 {
4569 x = gen_rtx_EQ (DImode, x, oldval);
4570 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4571 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4572 }
4573 emit_unlikely_jump (x, label2);
4574
4575 emit_move_insn (scratch, newval);
4576 emit_store_conditional (mode, cond, mem, scratch);
4577
4578 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4579 emit_unlikely_jump (x, label1);
4580
4581 emit_insn (gen_memory_barrier ());
4582 emit_label (XEXP (label2, 0));
4583 }
4584
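/* Expand a compare and swap of a one- or two-byte quantity, which the
   ldx_l/stx_c insns cannot handle directly, by operating on the
   aligned quadword that contains it.  */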
4585 void
4586 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4587 {
4588 enum machine_mode mode = GET_MODE (mem);
4589 rtx addr, align, wdst;
4590 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4591
4592 addr = force_reg (DImode, XEXP (mem, 0));
4593 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4594 NULL_RTX, 1, OPTAB_DIRECT);
4595
4596 oldval = convert_modes (DImode, mode, oldval, 1);
4597 newval = emit_insxl (mode, newval, addr);
4598
4599 wdst = gen_reg_rtx (DImode);
4600 if (mode == QImode)
4601 fn5 = gen_sync_compare_and_swapqi_1;
4602 else
4603 fn5 = gen_sync_compare_and_swaphi_1;
4604 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4605
4606 emit_move_insn (dst, gen_lowpart (mode, wdst));
4607 }
4608
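/* The ll/sc loop for the subword compare and swap: extract the old
   MODE-sized field into DEST and, if it matches OLDVAL, insert NEWVAL
   into the containing quadword and attempt the conditional store.  */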
4609 void
4610 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4611 rtx oldval, rtx newval, rtx align,
4612 rtx scratch, rtx cond)
4613 {
4614 rtx label1, label2, mem, width, mask, x;
4615
4616 mem = gen_rtx_MEM (DImode, align);
4617 MEM_VOLATILE_P (mem) = 1;
4618
4619 emit_insn (gen_memory_barrier ());
4620 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4621 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4622 emit_label (XEXP (label1, 0));
4623
4624 emit_load_locked (DImode, scratch, mem);
4625
4626 width = GEN_INT (GET_MODE_BITSIZE (mode));
4627 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4628 if (WORDS_BIG_ENDIAN)
4629 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4630 else
4631 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4632
4633 if (oldval == const0_rtx)
4634 x = gen_rtx_NE (DImode, dest, const0_rtx);
4635 else
4636 {
4637 x = gen_rtx_EQ (DImode, dest, oldval);
4638 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4639 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4640 }
4641 emit_unlikely_jump (x, label2);
4642
4643 if (WORDS_BIG_ENDIAN)
4644 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4645 else
4646 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4647 emit_insn (gen_iordi3 (scratch, scratch, newval));
4648
4649 emit_store_conditional (DImode, scratch, mem, scratch);
4650
4651 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4652 emit_unlikely_jump (x, label1);
4653
4654 emit_insn (gen_memory_barrier ());
4655 emit_label (XEXP (label2, 0));
4656 }
4657
4658 /* Expand an atomic exchange operation. */
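/* The emitted loop is roughly, for DImode (register names illustrative
   only):

	mb
     1:	ldq_l	ret,0(mem)
	mov	val,s
	stq_c	s,0(mem)
	beq	s,1b
*/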
4659
4660 void
4661 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4662 {
4663 enum machine_mode mode = GET_MODE (mem);
4664 rtx label, x, cond = gen_lowpart (DImode, scratch);
4665
4666 emit_insn (gen_memory_barrier ());
4667
4668 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4669 emit_label (XEXP (label, 0));
4670
4671 emit_load_locked (mode, retval, mem);
4672 emit_move_insn (scratch, val);
4673 emit_store_conditional (mode, cond, mem, scratch);
4674
4675 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4676 emit_unlikely_jump (x, label);
4677 }
4678
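/* Expand an atomic exchange of a one- or two-byte quantity, again by
   operating on the aligned quadword that contains it.  */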
4679 void
4680 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4681 {
4682 enum machine_mode mode = GET_MODE (mem);
4683 rtx addr, align, wdst;
4684 rtx (*fn4) (rtx, rtx, rtx, rtx);
4685
4686 /* Force the address into a register. */
4687 addr = force_reg (DImode, XEXP (mem, 0));
4688
4689 /* Align it to a multiple of 8. */
4690 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4691 NULL_RTX, 1, OPTAB_DIRECT);
4692
4693 /* Insert val into the correct byte location within the word. */
4694 val = emit_insxl (mode, val, addr);
4695
4696 wdst = gen_reg_rtx (DImode);
4697 if (mode == QImode)
4698 fn4 = gen_sync_lock_test_and_setqi_1;
4699 else
4700 fn4 = gen_sync_lock_test_and_sethi_1;
4701 emit_insn (fn4 (wdst, addr, val, align));
4702
4703 emit_move_insn (dst, gen_lowpart (mode, wdst));
4704 }
4705
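/* The ll/sc loop for the subword exchange: extract the old MODE-sized
   field into DEST, mask it out of the quadword, and or VAL in.  */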
4706 void
4707 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4708 rtx val, rtx align, rtx scratch)
4709 {
4710 rtx label, mem, width, mask, x;
4711
4712 mem = gen_rtx_MEM (DImode, align);
4713 MEM_VOLATILE_P (mem) = 1;
4714
4715 emit_insn (gen_memory_barrier ());
4716 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4717 emit_label (XEXP (label, 0));
4718
4719 emit_load_locked (DImode, scratch, mem);
4720
4721 width = GEN_INT (GET_MODE_BITSIZE (mode));
4722 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4723 if (WORDS_BIG_ENDIAN)
4724 {
4725 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4726 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4727 }
4728 else
4729 {
4730 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4731 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4732 }
4733 emit_insn (gen_iordi3 (scratch, scratch, val));
4734
4735 emit_store_conditional (DImode, scratch, mem, scratch);
4736
4737 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4738 emit_unlikely_jump (x, label);
4739 }
4740
4741 /* Adjust the cost of a scheduling dependency. Return the new cost of
4742 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
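/* E.g. with the default alpha_memory_latency of 3, the consumer of an
   integer load has two cycles added here beyond what the DFA
   description already models.  */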
4743
4744 static int
4745 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4746 {
4747 enum attr_type insn_type, dep_insn_type;
4748
4749 /* If the dependence is an anti-dependence, there is no cost. For an
4750 output dependence, there is sometimes a cost, but it doesn't seem
4751 worth handling those few cases. */
4752 if (REG_NOTE_KIND (link) != 0)
4753 return cost;
4754
4755 /* If we can't recognize the insns, we can't really do anything. */
4756 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4757 return cost;
4758
4759 insn_type = get_attr_type (insn);
4760 dep_insn_type = get_attr_type (dep_insn);
4761
4762 /* Bring in the user-defined memory latency. */
4763 if (dep_insn_type == TYPE_ILD
4764 || dep_insn_type == TYPE_FLD
4765 || dep_insn_type == TYPE_LDSYM)
4766 cost += alpha_memory_latency-1;
4767
4768 /* Everything else handled in DFA bypasses now. */
4769
4770 return cost;
4771 }
4772
4773 /* The number of instructions that can be issued per cycle. */
4774
4775 static int
4776 alpha_issue_rate (void)
4777 {
4778 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4779 }
4780
4781 /* How many alternative schedules to try. This should be as wide as the
4782 scheduling freedom in the DFA, but no wider. Making this value too
4783 large results in extra work for the scheduler.
4784
4785 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4786 alternative schedules. For EV5, we can choose between E0/E1 and
4787 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4788
4789 static int
4790 alpha_multipass_dfa_lookahead (void)
4791 {
4792 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4793 }
4794
4795 /* Machine-specific function data. */
4796
4797 struct machine_function GTY(())
4798 {
4799 /* For unicosmk. */
4800 /* List of call information words for calls from this function. */
4801 struct rtx_def *first_ciw;
4802 struct rtx_def *last_ciw;
4803 int ciw_count;
4804
4805 /* List of deferred case vectors. */
4806 struct rtx_def *addr_list;
4807
4808 /* For OSF. */
4809 const char *some_ld_name;
4810
4811 /* For TARGET_LD_BUGGY_LDGP. */
4812 struct rtx_def *gp_save_rtx;
4813 };
4814
4815 /* How to allocate a 'struct machine_function'. */
4816
4817 static struct machine_function *
4818 alpha_init_machine_status (void)
4819 {
4820 return ((struct machine_function *)
4821 ggc_alloc_cleared (sizeof (struct machine_function)));
4822 }
4823
4824 /* Functions to save and restore alpha_return_addr_rtx. */
4825
4826 /* Start the ball rolling with RETURN_ADDR_RTX. */
4827
4828 rtx
4829 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4830 {
4831 if (count != 0)
4832 return const0_rtx;
4833
4834 return get_hard_reg_initial_val (Pmode, REG_RA);
4835 }
4836
4837 /* Return or create a memory slot containing the gp value for the current
4838 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4839
4840 rtx
4841 alpha_gp_save_rtx (void)
4842 {
4843 rtx seq, m = cfun->machine->gp_save_rtx;
4844
4845 if (m == NULL)
4846 {
4847 start_sequence ();
4848
4849 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4850 m = validize_mem (m);
4851 emit_move_insn (m, pic_offset_table_rtx);
4852
4853 seq = get_insns ();
4854 end_sequence ();
4855 emit_insn_after (seq, entry_of_function ());
4856
4857 cfun->machine->gp_save_rtx = m;
4858 }
4859
4860 return m;
4861 }
4862
4863 static int
4864 alpha_ra_ever_killed (void)
4865 {
4866 rtx top;
4867
4868 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4869 return regs_ever_live[REG_RA];
4870
4871 push_topmost_sequence ();
4872 top = get_insns ();
4873 pop_topmost_sequence ();
4874
4875 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4876 }
4877
4878
4879 /* Return the trap mode suffix applicable to the current
4880 instruction, or NULL. */
4881
4882 static const char *
4883 get_trap_mode_suffix (void)
4884 {
4885 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4886
4887 switch (s)
4888 {
4889 case TRAP_SUFFIX_NONE:
4890 return NULL;
4891
4892 case TRAP_SUFFIX_SU:
4893 if (alpha_fptm >= ALPHA_FPTM_SU)
4894 return "su";
4895 return NULL;
4896
4897 case TRAP_SUFFIX_SUI:
4898 if (alpha_fptm >= ALPHA_FPTM_SUI)
4899 return "sui";
4900 return NULL;
4901
4902 case TRAP_SUFFIX_V_SV:
4903 switch (alpha_fptm)
4904 {
4905 case ALPHA_FPTM_N:
4906 return NULL;
4907 case ALPHA_FPTM_U:
4908 return "v";
4909 case ALPHA_FPTM_SU:
4910 case ALPHA_FPTM_SUI:
4911 return "sv";
4912 default:
4913 gcc_unreachable ();
4914 }
4915
4916 case TRAP_SUFFIX_V_SV_SVI:
4917 switch (alpha_fptm)
4918 {
4919 case ALPHA_FPTM_N:
4920 return NULL;
4921 case ALPHA_FPTM_U:
4922 return "v";
4923 case ALPHA_FPTM_SU:
4924 return "sv";
4925 case ALPHA_FPTM_SUI:
4926 return "svi";
4927 default:
4928 gcc_unreachable ();
4929 }
4930 break;
4931
4932 case TRAP_SUFFIX_U_SU_SUI:
4933 switch (alpha_fptm)
4934 {
4935 case ALPHA_FPTM_N:
4936 return NULL;
4937 case ALPHA_FPTM_U:
4938 return "u";
4939 case ALPHA_FPTM_SU:
4940 return "su";
4941 case ALPHA_FPTM_SUI:
4942 return "sui";
4943 default:
4944 gcc_unreachable ();
4945 }
4946 break;
4947
4948 default:
4949 gcc_unreachable ();
4950 }
4951 gcc_unreachable ();
4952 }
4953
4954 /* Return the rounding mode suffix applicable to the current
4955 instruction, or NULL. */
4956
4957 static const char *
4958 get_round_mode_suffix (void)
4959 {
4960 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4961
4962 switch (s)
4963 {
4964 case ROUND_SUFFIX_NONE:
4965 return NULL;
4966 case ROUND_SUFFIX_NORMAL:
4967 switch (alpha_fprm)
4968 {
4969 case ALPHA_FPRM_NORM:
4970 return NULL;
4971 case ALPHA_FPRM_MINF:
4972 return "m";
4973 case ALPHA_FPRM_CHOP:
4974 return "c";
4975 case ALPHA_FPRM_DYN:
4976 return "d";
4977 default:
4978 gcc_unreachable ();
4979 }
4980 break;
4981
4982 case ROUND_SUFFIX_C:
4983 return "c";
4984
4985 default:
4986 gcc_unreachable ();
4987 }
4988 gcc_unreachable ();
4989 }
4990
4991 /* Locate some local-dynamic symbol still in use by this function
4992 so that we can print its name in some movdi_er_tlsldm pattern. */
4993
4994 static int
4995 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4996 {
4997 rtx x = *px;
4998
4999 if (GET_CODE (x) == SYMBOL_REF
5000 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5001 {
5002 cfun->machine->some_ld_name = XSTR (x, 0);
5003 return 1;
5004 }
5005
5006 return 0;
5007 }
5008
5009 static const char *
5010 get_some_local_dynamic_name (void)
5011 {
5012 rtx insn;
5013
5014 if (cfun->machine->some_ld_name)
5015 return cfun->machine->some_ld_name;
5016
5017 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5018 if (INSN_P (insn)
5019 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5020 return cfun->machine->some_ld_name;
5021
5022 gcc_unreachable ();
5023 }
5024
5025 /* Print an operand. Recognize special options, documented below. */
5026
5027 void
5028 print_operand (FILE *file, rtx x, int code)
5029 {
5030 int i;
5031
5032 switch (code)
5033 {
5034 case '~':
5035 /* Print the assembler name of the current function. */
5036 assemble_name (file, alpha_fnname);
5037 break;
5038
5039 case '&':
5040 assemble_name (file, get_some_local_dynamic_name ());
5041 break;
5042
5043 case '/':
5044 {
5045 const char *trap = get_trap_mode_suffix ();
5046 const char *round = get_round_mode_suffix ();
5047
5048 if (trap || round)
5049 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5050 (trap ? trap : ""), (round ? round : ""));
5051 break;
5052 }
5053
5054 case ',':
5055 /* Generates single precision instruction suffix. */
5056 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5057 break;
5058
5059 case '-':
5060 /* Generates double precision instruction suffix. */
5061 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5062 break;
5063
5064 case '+':
5065 /* Generates a nop after a noreturn call at the very end of the
5066 function. */
5067 if (next_real_insn (current_output_insn) == 0)
5068 fprintf (file, "\n\tnop");
5069 break;
5070
5071 case '#':
5072 if (alpha_this_literal_sequence_number == 0)
5073 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5074 fprintf (file, "%d", alpha_this_literal_sequence_number);
5075 break;
5076
5077 case '*':
5078 if (alpha_this_gpdisp_sequence_number == 0)
5079 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5080 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5081 break;
5082
5083 case 'H':
5084 if (GET_CODE (x) == HIGH)
5085 output_addr_const (file, XEXP (x, 0));
5086 else
5087 output_operand_lossage ("invalid %%H value");
5088 break;
5089
5090 case 'J':
5091 {
5092 const char *lituse;
5093
5094 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5095 {
5096 x = XVECEXP (x, 0, 0);
5097 lituse = "lituse_tlsgd";
5098 }
5099 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5100 {
5101 x = XVECEXP (x, 0, 0);
5102 lituse = "lituse_tlsldm";
5103 }
5104 else if (GET_CODE (x) == CONST_INT)
5105 lituse = "lituse_jsr";
5106 else
5107 {
5108 output_operand_lossage ("invalid %%J value");
5109 break;
5110 }
5111
5112 if (x != const0_rtx)
5113 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5114 }
5115 break;
5116
5117 case 'j':
5118 {
5119 const char *lituse;
5120
5121 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5122 lituse = "lituse_jsrdirect";
5123 #else
5124 lituse = "lituse_jsr";
5125 #endif
5126
5127 gcc_assert (INTVAL (x) != 0);
5128 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5129 }
5130 break;
5131 case 'r':
5132 /* If this operand is the constant zero, write it as "$31". */
5133 if (GET_CODE (x) == REG)
5134 fprintf (file, "%s", reg_names[REGNO (x)]);
5135 else if (x == CONST0_RTX (GET_MODE (x)))
5136 fprintf (file, "$31");
5137 else
5138 output_operand_lossage ("invalid %%r value");
5139 break;
5140
5141 case 'R':
5142 /* Similar, but for floating-point. */
5143 if (GET_CODE (x) == REG)
5144 fprintf (file, "%s", reg_names[REGNO (x)]);
5145 else if (x == CONST0_RTX (GET_MODE (x)))
5146 fprintf (file, "$f31");
5147 else
5148 output_operand_lossage ("invalid %%R value");
5149 break;
5150
5151 case 'N':
5152 /* Write the 1's complement of a constant. */
5153 if (GET_CODE (x) != CONST_INT)
5154 output_operand_lossage ("invalid %%N value");
5155
5156 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5157 break;
5158
5159 case 'P':
5160 /* Write 1 << C, for a constant C. */
5161 if (GET_CODE (x) != CONST_INT)
5162 output_operand_lossage ("invalid %%P value");
5163
5164 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5165 break;
5166
5167 case 'h':
5168 /* Write the high-order 16 bits of a constant, sign-extended. */
5169 if (GET_CODE (x) != CONST_INT)
5170 output_operand_lossage ("invalid %%h value");
5171
5172 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5173 break;
5174
5175 case 'L':
5176 /* Write the low-order 16 bits of a constant, sign-extended. */
5177 if (GET_CODE (x) != CONST_INT)
5178 output_operand_lossage ("invalid %%L value");
5179
5180 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5181 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5182 break;
5183
5184 case 'm':
5185 /* Write mask for ZAP insn. */
5186 if (GET_CODE (x) == CONST_DOUBLE)
5187 {
5188 HOST_WIDE_INT mask = 0;
5189 HOST_WIDE_INT value;
5190
5191 value = CONST_DOUBLE_LOW (x);
5192 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5193 i++, value >>= 8)
5194 if (value & 0xff)
5195 mask |= (1 << i);
5196
5197 value = CONST_DOUBLE_HIGH (x);
5198 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5199 i++, value >>= 8)
5200 if (value & 0xff)
5201 mask |= (1 << (i + sizeof (int)));
5202
5203 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5204 }
5205
5206 else if (GET_CODE (x) == CONST_INT)
5207 {
5208 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5209
5210 for (i = 0; i < 8; i++, value >>= 8)
5211 if (value & 0xff)
5212 mask |= (1 << i);
5213
5214 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5215 }
5216 else
5217 output_operand_lossage ("invalid %%m value");
5218 break;
5219
5220 case 'M':
5221 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5222 if (GET_CODE (x) != CONST_INT
5223 || (INTVAL (x) != 8 && INTVAL (x) != 16
5224 && INTVAL (x) != 32 && INTVAL (x) != 64))
5225 output_operand_lossage ("invalid %%M value");
5226
5227 fprintf (file, "%s",
5228 (INTVAL (x) == 8 ? "b"
5229 : INTVAL (x) == 16 ? "w"
5230 : INTVAL (x) == 32 ? "l"
5231 : "q"));
5232 break;
5233
5234 case 'U':
5235 /* Similar, except do it from the mask. */
5236 if (GET_CODE (x) == CONST_INT)
5237 {
5238 HOST_WIDE_INT value = INTVAL (x);
5239
5240 if (value == 0xff)
5241 {
5242 fputc ('b', file);
5243 break;
5244 }
5245 if (value == 0xffff)
5246 {
5247 fputc ('w', file);
5248 break;
5249 }
5250 if (value == 0xffffffff)
5251 {
5252 fputc ('l', file);
5253 break;
5254 }
5255 if (value == -1)
5256 {
5257 fputc ('q', file);
5258 break;
5259 }
5260 }
5261 else if (HOST_BITS_PER_WIDE_INT == 32
5262 && GET_CODE (x) == CONST_DOUBLE
5263 && CONST_DOUBLE_LOW (x) == 0xffffffff
5264 && CONST_DOUBLE_HIGH (x) == 0)
5265 {
5266 fputc ('l', file);
5267 break;
5268 }
5269 output_operand_lossage ("invalid %%U value");
5270 break;
5271
5272 case 's':
5273 /* Write the constant value divided by 8 for little-endian mode or
5274 (56 - value) / 8 for big-endian mode. */
5275
5276 if (GET_CODE (x) != CONST_INT
5277 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5278 ? 56
5279 : 64)
5280 || (INTVAL (x) & 7) != 0)
5281 output_operand_lossage ("invalid %%s value");
5282
5283 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5284 WORDS_BIG_ENDIAN
5285 ? (56 - INTVAL (x)) / 8
5286 : INTVAL (x) / 8);
5287 break;
5288
5289 case 'S':
5290 /* Same, except compute (64 - c) / 8 */
5291
5292 if (GET_CODE (x) != CONST_INT
5293 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5294 || (INTVAL (x) & 7) != 0)
5295 output_operand_lossage ("invalid %%S value");
5296
5297 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5298 break;
5299
5300 case 't':
5301 {
5302 /* On Unicos/Mk systems: use a DEX expression if the symbol
5303 clashes with a register name. */
5304 int dex = unicosmk_need_dex (x);
5305 if (dex)
5306 fprintf (file, "DEX(%d)", dex);
5307 else
5308 output_addr_const (file, x);
5309 }
5310 break;
5311
5312 case 'C': case 'D': case 'c': case 'd':
5313 /* Write out comparison name. */
5314 {
5315 enum rtx_code c = GET_CODE (x);
5316
5317 if (!COMPARISON_P (x))
5318 output_operand_lossage ("invalid %%C value");
5319
5320 else if (code == 'D')
5321 c = reverse_condition (c);
5322 else if (code == 'c')
5323 c = swap_condition (c);
5324 else if (code == 'd')
5325 c = swap_condition (reverse_condition (c));
5326
5327 if (c == LEU)
5328 fprintf (file, "ule");
5329 else if (c == LTU)
5330 fprintf (file, "ult");
5331 else if (c == UNORDERED)
5332 fprintf (file, "un");
5333 else
5334 fprintf (file, "%s", GET_RTX_NAME (c));
5335 }
5336 break;
5337
5338 case 'E':
5339 /* Write the divide or modulus operator. */
5340 switch (GET_CODE (x))
5341 {
5342 case DIV:
5343 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5344 break;
5345 case UDIV:
5346 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5347 break;
5348 case MOD:
5349 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5350 break;
5351 case UMOD:
5352 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5353 break;
5354 default:
5355 output_operand_lossage ("invalid %%E value");
5356 break;
5357 }
5358 break;
5359
5360 case 'A':
5361 /* Write "_u" for unaligned access. */
5362 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5363 fprintf (file, "_u");
5364 break;
5365
5366 case 0:
5367 if (GET_CODE (x) == REG)
5368 fprintf (file, "%s", reg_names[REGNO (x)]);
5369 else if (GET_CODE (x) == MEM)
5370 output_address (XEXP (x, 0));
5371 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5372 {
5373 switch (XINT (XEXP (x, 0), 1))
5374 {
5375 case UNSPEC_DTPREL:
5376 case UNSPEC_TPREL:
5377 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5378 break;
5379 default:
5380 output_operand_lossage ("unknown relocation unspec");
5381 break;
5382 }
5383 }
5384 else
5385 output_addr_const (file, x);
5386 break;
5387
5388 default:
5389 output_operand_lossage ("invalid %%xn code");
5390 }
5391 }
5392
5393 void
5394 print_operand_address (FILE *file, rtx addr)
5395 {
5396 int basereg = 31;
5397 HOST_WIDE_INT offset = 0;
5398
5399 if (GET_CODE (addr) == AND)
5400 addr = XEXP (addr, 0);
5401
5402 if (GET_CODE (addr) == PLUS
5403 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5404 {
5405 offset = INTVAL (XEXP (addr, 1));
5406 addr = XEXP (addr, 0);
5407 }
5408
5409 if (GET_CODE (addr) == LO_SUM)
5410 {
5411 const char *reloc16, *reloclo;
5412 rtx op1 = XEXP (addr, 1);
5413
5414 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5415 {
5416 op1 = XEXP (op1, 0);
5417 switch (XINT (op1, 1))
5418 {
5419 case UNSPEC_DTPREL:
5420 reloc16 = NULL;
5421 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5422 break;
5423 case UNSPEC_TPREL:
5424 reloc16 = NULL;
5425 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5426 break;
5427 default:
5428 output_operand_lossage ("unknown relocation unspec");
5429 return;
5430 }
5431
5432 output_addr_const (file, XVECEXP (op1, 0, 0));
5433 }
5434 else
5435 {
5436 reloc16 = "gprel";
5437 reloclo = "gprellow";
5438 output_addr_const (file, op1);
5439 }
5440
5441 if (offset)
5442 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5443
5444 addr = XEXP (addr, 0);
5445 switch (GET_CODE (addr))
5446 {
5447 case REG:
5448 basereg = REGNO (addr);
5449 break;
5450
5451 case SUBREG:
5452 basereg = subreg_regno (addr);
5453 break;
5454
5455 default:
5456 gcc_unreachable ();
5457 }
5458
5459 fprintf (file, "($%d)\t\t!%s", basereg,
5460 (basereg == 29 ? reloc16 : reloclo));
5461 return;
5462 }
5463
5464 switch (GET_CODE (addr))
5465 {
5466 case REG:
5467 basereg = REGNO (addr);
5468 break;
5469
5470 case SUBREG:
5471 basereg = subreg_regno (addr);
5472 break;
5473
5474 case CONST_INT:
5475 offset = INTVAL (addr);
5476 break;
5477
5478 #if TARGET_ABI_OPEN_VMS
5479 case SYMBOL_REF:
5480 fprintf (file, "%s", XSTR (addr, 0));
5481 return;
5482
5483 case CONST:
5484 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5485 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5486 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5487 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5488 INTVAL (XEXP (XEXP (addr, 0), 1)));
5489 return;
5490
5491 #endif
5492 default:
5493 gcc_unreachable ();
5494 }
5495
5496 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5497 }
5498
5499 /* Emit RTL insns to initialize the variable parts of a trampoline at
5500 TRAMP. FNADDR is an RTX for the address of the function's pure
5501 code. CXT is an RTX for the static chain value for the function.
5502
5503 The three offset parameters are for the individual template's
5504 layout. A JMPOFS < 0 indicates that the trampoline does not
5505 contain instructions at all.
5506
5507 We assume here that a function will be called many more times than
5508 its address is taken (e.g., it might be passed to qsort), so we
5509 take the trouble to initialize the "hint" field in the JMP insn.
5510 Note that the hint field is PC (new) + 4 * bits 13:0. */
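/* Concretely (see the currently disabled code below), the hint would
   be ((fnaddr - (jmp_addr + 4)) >> 2) & 0x3fff, an instruction-word
   displacement from the updated PC.  */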
5511
5512 void
5513 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5514 int fnofs, int cxtofs, int jmpofs)
5515 {
5516 rtx temp, temp1, addr;
5517 /* VMS really uses DImode pointers in memory at this point. */
5518 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5519
5520 #ifdef POINTERS_EXTEND_UNSIGNED
5521 fnaddr = convert_memory_address (mode, fnaddr);
5522 cxt = convert_memory_address (mode, cxt);
5523 #endif
5524
5525 /* Store function address and CXT. */
5526 addr = memory_address (mode, plus_constant (tramp, fnofs));
5527 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5528 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5529 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5530
5531 /* This has been disabled since the hint only has a 32k range, and in
5532 no existing OS is the stack within 32k of the text segment. */
5533 if (0 && jmpofs >= 0)
5534 {
5535 /* Compute hint value. */
5536 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5537 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5538 OPTAB_WIDEN);
5539 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5540 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5541 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5542 GEN_INT (0x3fff), 0);
5543
5544 /* Merge in the hint. */
5545 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5546 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5547 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5548 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5549 OPTAB_WIDEN);
5550 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5551 }
5552
5553 #ifdef ENABLE_EXECUTE_STACK
5554 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5555 0, VOIDmode, 1, tramp, Pmode);
5556 #endif
5557
5558 if (jmpofs >= 0)
5559 emit_insn (gen_imb ());
5560 }
5561
5562 /* Determine where to put an argument to a function.
5563 Value is zero to push the argument on the stack,
5564 or a hard register in which to store the argument.
5565
5566 MODE is the argument's machine mode.
5567 TYPE is the data type of the argument (as a tree).
5568 This is null for libcalls where that information may
5569 not be available.
5570 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5571 the preceding args and about the function being called.
5572 NAMED is nonzero if this argument is a named parameter
5573 (otherwise it is an extra parameter matching an ellipsis).
5574
5575 On Alpha the first 6 words of args are normally in registers
5576 and the rest are pushed. */
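/* E.g. f (int a, double b, char *c) on OSF receives A in $16, B in
   $f17 and C in $18: each argument consumes one slot, drawn from the
   integer or FP register bank according to its mode.  */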
5577
5578 rtx
5579 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5580 int named ATTRIBUTE_UNUSED)
5581 {
5582 int basereg;
5583 int num_args;
5584
5585 /* Don't get confused and pass small structures in FP registers. */
5586 if (type && AGGREGATE_TYPE_P (type))
5587 basereg = 16;
5588 else
5589 {
5590 #ifdef ENABLE_CHECKING
5591 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5592 values here. */
5593 gcc_assert (!COMPLEX_MODE_P (mode));
5594 #endif
5595
5596 /* Set up defaults for FP operands passed in FP registers, and
5597 integral operands passed in integer registers. */
5598 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5599 basereg = 32 + 16;
5600 else
5601 basereg = 16;
5602 }
5603
5604 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5605 the three platforms, so we can't avoid conditional compilation. */
5606 #if TARGET_ABI_OPEN_VMS
5607 {
5608 if (mode == VOIDmode)
5609 return alpha_arg_info_reg_val (cum);
5610
5611 num_args = cum.num_args;
5612 if (num_args >= 6
5613 || targetm.calls.must_pass_in_stack (mode, type))
5614 return NULL_RTX;
5615 }
5616 #elif TARGET_ABI_UNICOSMK
5617 {
5618 int size;
5619
5620 /* If this is the last argument, generate the call info word (CIW). */
5621 /* ??? We don't include the caller's line number in the CIW because
5622 I don't know how to determine it if debug info is turned off. */
5623 if (mode == VOIDmode)
5624 {
5625 int i;
5626 HOST_WIDE_INT lo;
5627 HOST_WIDE_INT hi;
5628 rtx ciw;
5629
5630 lo = 0;
5631
5632 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5633 if (cum.reg_args_type[i])
5634 lo |= (1 << (7 - i));
5635
5636 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5637 lo |= 7;
5638 else
5639 lo |= cum.num_reg_words;
5640
5641 #if HOST_BITS_PER_WIDE_INT == 32
5642 hi = (cum.num_args << 20) | cum.num_arg_words;
5643 #else
5644 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5645 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5646 hi = 0;
5647 #endif
5648 ciw = immed_double_const (lo, hi, DImode);
5649
5650 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5651 UNSPEC_UMK_LOAD_CIW);
5652 }
5653
5654 size = ALPHA_ARG_SIZE (mode, type, named);
5655 num_args = cum.num_reg_words;
5656 if (cum.force_stack
5657 || cum.num_reg_words + size > 6
5658 || targetm.calls.must_pass_in_stack (mode, type))
5659 return NULL_RTX;
5660 else if (type && TYPE_MODE (type) == BLKmode)
5661 {
5662 rtx reg1, reg2;
5663
5664 reg1 = gen_rtx_REG (DImode, num_args + 16);
5665 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5666
5667 /* The argument fits in two registers. Note that we still need to
5668 reserve a register for empty structures. */
5669 if (size == 0)
5670 return NULL_RTX;
5671 else if (size == 1)
5672 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5673 else
5674 {
5675 reg2 = gen_rtx_REG (DImode, num_args + 17);
5676 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5677 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5678 }
5679 }
5680 }
5681 #elif TARGET_ABI_OSF
5682 {
5683 if (cum >= 6)
5684 return NULL_RTX;
5685 num_args = cum;
5686
5687 /* VOID is passed as a special flag for "last argument". */
5688 if (type == void_type_node)
5689 basereg = 16;
5690 else if (targetm.calls.must_pass_in_stack (mode, type))
5691 return NULL_RTX;
5692 }
5693 #else
5694 #error Unhandled ABI
5695 #endif
5696
5697 return gen_rtx_REG (mode, num_args + basereg);
5698 }
5699
5700 static int
5701 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5702 enum machine_mode mode ATTRIBUTE_UNUSED,
5703 tree type ATTRIBUTE_UNUSED,
5704 bool named ATTRIBUTE_UNUSED)
5705 {
5706 int words = 0;
5707
5708 #if TARGET_ABI_OPEN_VMS
5709 if (cum->num_args < 6
5710 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5711 words = 6 - cum->num_args;
5712 #elif TARGET_ABI_UNICOSMK
5713 /* Never any split arguments. */
5714 #elif TARGET_ABI_OSF
5715 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5716 words = 6 - *cum;
5717 #else
5718 #error Unhandled ABI
5719 #endif
5720
5721 return words * UNITS_PER_WORD;
5722 }
5723
5724
5725 /* Return true if TYPE must be returned in memory, instead of in registers. */
5726
5727 static bool
5728 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5729 {
5730 enum machine_mode mode = VOIDmode;
5731 int size;
5732
5733 if (type)
5734 {
5735 mode = TYPE_MODE (type);
5736
5737 /* All aggregates are returned in memory. */
5738 if (AGGREGATE_TYPE_P (type))
5739 return true;
5740 }
5741
5742 size = GET_MODE_SIZE (mode);
5743 switch (GET_MODE_CLASS (mode))
5744 {
5745 case MODE_VECTOR_FLOAT:
5746 /* Pass all float vectors in memory, like an aggregate. */
5747 return true;
5748
5749 case MODE_COMPLEX_FLOAT:
5750 /* We judge complex floats on the size of their element,
5751 not the size of the whole type. */
5752 size = GET_MODE_UNIT_SIZE (mode);
5753 break;
5754
5755 case MODE_INT:
5756 case MODE_FLOAT:
5757 case MODE_COMPLEX_INT:
5758 case MODE_VECTOR_INT:
5759 break;
5760
5761 default:
5762 /* ??? We get called on all sorts of random stuff from
5763 aggregate_value_p. We must return something, but it's not
5764 clear what's safe to return. Pretend it's a struct I
5765 guess. */
5766 return true;
5767 }
5768
5769 /* Otherwise types must fit in one register. */
5770 return size > UNITS_PER_WORD;
5771 }
5772
5773 /* Return true if TYPE should be passed by invisible reference. */
5774
5775 static bool
5776 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5777 enum machine_mode mode,
5778 tree type ATTRIBUTE_UNUSED,
5779 bool named ATTRIBUTE_UNUSED)
5780 {
5781 return mode == TFmode || mode == TCmode;
5782 }
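
/* On Alpha, TFmode is the 128-bit IEEE quad format (long double on
   systems that enable it, e.g. GNU/Linux) and TCmode its complex
   counterpart, so e.g. "void f (long double);" receives the address of
   a caller-made temporary instead of the value itself (a sketch; the
   caller-side copy is emitted by target-independent code).  */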
5783
5784 /* Define how to find the value returned by a function. VALTYPE is the
5785 data type of the value (as a tree). If the precise function being
5786 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5787 MODE is set instead of VALTYPE for libcalls.
5788
5789 On Alpha the value is found in $0 for integer functions and
5790 $f0 for floating-point functions. */
5791
5792 rtx
5793 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5794 enum machine_mode mode)
5795 {
5796 unsigned int regnum, dummy;
5797 enum mode_class class;
5798
5799 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5800
5801 if (valtype)
5802 mode = TYPE_MODE (valtype);
5803
5804 class = GET_MODE_CLASS (mode);
5805 switch (class)
5806 {
5807 case MODE_INT:
5808 PROMOTE_MODE (mode, dummy, valtype);
5809 /* FALLTHRU */
5810
5811 case MODE_COMPLEX_INT:
5812 case MODE_VECTOR_INT:
5813 regnum = 0;
5814 break;
5815
5816 case MODE_FLOAT:
5817 regnum = 32;
5818 break;
5819
5820 case MODE_COMPLEX_FLOAT:
5821 {
5822 enum machine_mode cmode = GET_MODE_INNER (mode);
5823
5824 return gen_rtx_PARALLEL
5825 (VOIDmode,
5826 gen_rtvec (2,
5827 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5828 const0_rtx),
5829 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5830 GEN_INT (GET_MODE_SIZE (cmode)))));
5831 }
5832
5833 default:
5834 gcc_unreachable ();
5835 }
5836
5837 return gen_rtx_REG (mode, regnum);
5838 }
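
/* For example (sketch): an int result is widened via PROMOTE_MODE to
   DImode and returned in $0; a double is returned in $f0; and a
   __complex__ float is returned as the pair ($f0, $f1), the imaginary
   part at byte offset 4 of the conceptual result.  */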
5839
5840 /* TCmode complex values are passed by invisible reference. We
5841 should not split these values. */
5842
5843 static bool
5844 alpha_split_complex_arg (tree type)
5845 {
5846 return TYPE_MODE (type) != TCmode;
5847 }
5848
5849 static tree
5850 alpha_build_builtin_va_list (void)
5851 {
5852 tree base, ofs, space, record, type_decl;
5853
5854 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5855 return ptr_type_node;
5856
5857 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5858 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5859 TREE_CHAIN (record) = type_decl;
5860 TYPE_NAME (record) = type_decl;
5861
5862 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5863
5864 /* Dummy field to prevent alignment warnings. */
5865 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5866 DECL_FIELD_CONTEXT (space) = record;
5867 DECL_ARTIFICIAL (space) = 1;
5868 DECL_IGNORED_P (space) = 1;
5869
5870 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5871 integer_type_node);
5872 DECL_FIELD_CONTEXT (ofs) = record;
5873 TREE_CHAIN (ofs) = space;
5874
5875 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5876 ptr_type_node);
5877 DECL_FIELD_CONTEXT (base) = record;
5878 TREE_CHAIN (base) = ofs;
5879
5880 TYPE_FIELDS (record) = base;
5881 layout_type (record);
5882
5883 va_list_gpr_counter_field = ofs;
5884 return record;
5885 }
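
/* The record built above is roughly equivalent to the C declaration
   below (a sketch; the last field is unnamed in the real record and
   exists only to avoid alignment warnings):

     struct __va_list_tag
     {
       void *__base;		   start of the register save area
       int __offset;		   byte offset of the next argument
       int __space_dummy;	   invented name for the padding field
     };  */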
5886
5887 #if TARGET_ABI_OSF
5888 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5889 and constant additions. */
5890
5891 static tree
5892 va_list_skip_additions (tree lhs)
5893 {
5894 tree rhs, stmt;
5895
5896 if (TREE_CODE (lhs) != SSA_NAME)
5897 return lhs;
5898
5899 for (;;)
5900 {
5901 stmt = SSA_NAME_DEF_STMT (lhs);
5902
5903 if (TREE_CODE (stmt) == PHI_NODE)
5904 return stmt;
5905
5906 if (TREE_CODE (stmt) != MODIFY_EXPR
5907 || TREE_OPERAND (stmt, 0) != lhs)
5908 return lhs;
5909
5910 rhs = TREE_OPERAND (stmt, 1);
5911 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5912 rhs = TREE_OPERAND (rhs, 0);
5913
5914 if ((TREE_CODE (rhs) != NOP_EXPR
5915 && TREE_CODE (rhs) != CONVERT_EXPR
5916 && (TREE_CODE (rhs) != PLUS_EXPR
5917 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5918 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5919 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5920 return rhs;
5921
5922 lhs = TREE_OPERAND (rhs, 0);
5923 }
5924 }
5925
5926 /* Check if LHS = RHS statement is
5927 LHS = *(ap.__base + ap.__offset + cst)
5928 or
5929 LHS = *(ap.__base
5930 + ((ap.__offset + cst <= 47)
5931 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5932 If the former, indicate that GPR registers are needed,
5933 if the latter, indicate that FPR registers are needed.
5934    On alpha, cfun->va_list_gpr_size is used as the size of the needed
5935    regs and cfun->va_list_fpr_size is a bitmask: bit 0 is set if GPR
5936    registers are needed and bit 1 is set if FPR registers are needed.
5937 Return true if va_list references should not be scanned for the current
5938 statement. */
5939
5940 static bool
5941 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5942 {
5943 tree base, offset, arg1, arg2;
5944 int offset_arg = 1;
5945
5946 if (TREE_CODE (rhs) != INDIRECT_REF
5947 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5948 return false;
5949
5950 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5951 if (lhs == NULL_TREE
5952 || TREE_CODE (lhs) != PLUS_EXPR)
5953 return false;
5954
5955 base = TREE_OPERAND (lhs, 0);
5956 if (TREE_CODE (base) == SSA_NAME)
5957 base = va_list_skip_additions (base);
5958
5959 if (TREE_CODE (base) != COMPONENT_REF
5960 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5961 {
5962 base = TREE_OPERAND (lhs, 0);
5963 if (TREE_CODE (base) == SSA_NAME)
5964 base = va_list_skip_additions (base);
5965
5966 if (TREE_CODE (base) != COMPONENT_REF
5967 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5968 return false;
5969
5970 offset_arg = 0;
5971 }
5972
5973 base = get_base_address (base);
5974 if (TREE_CODE (base) != VAR_DECL
5975 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5976 return false;
5977
5978 offset = TREE_OPERAND (lhs, offset_arg);
5979 if (TREE_CODE (offset) == SSA_NAME)
5980 offset = va_list_skip_additions (offset);
5981
5982 if (TREE_CODE (offset) == PHI_NODE)
5983 {
5984 HOST_WIDE_INT sub;
5985
5986 if (PHI_NUM_ARGS (offset) != 2)
5987 goto escapes;
5988
5989 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5990 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5991 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5992 {
5993 tree tem = arg1;
5994 arg1 = arg2;
5995 arg2 = tem;
5996
5997 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5998 goto escapes;
5999 }
6000 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
6001 goto escapes;
6002
6003 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
6004 if (TREE_CODE (arg2) == MINUS_EXPR)
6005 sub = -sub;
6006 if (sub < -48 || sub > -32)
6007 goto escapes;
6008
6009 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
6010 if (arg1 != arg2)
6011 goto escapes;
6012
6013 if (TREE_CODE (arg1) == SSA_NAME)
6014 arg1 = va_list_skip_additions (arg1);
6015
6016 if (TREE_CODE (arg1) != COMPONENT_REF
6017 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6018 || get_base_address (arg1) != base)
6019 goto escapes;
6020
6021 /* Need floating point regs. */
6022 cfun->va_list_fpr_size |= 2;
6023 }
6024 else if (TREE_CODE (offset) != COMPONENT_REF
6025 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6026 || get_base_address (offset) != base)
6027 goto escapes;
6028 else
6029 /* Need general regs. */
6030 cfun->va_list_fpr_size |= 1;
6031 return false;
6032
6033 escapes:
6034 si->va_list_escapes = true;
6035 return false;
6036 }
6037 #endif
6038
6039 /* Perform any actions needed for a function that is receiving a
6040    variable number of arguments.  */
6041
6042 static void
6043 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6044 tree type, int *pretend_size, int no_rtl)
6045 {
6046 CUMULATIVE_ARGS cum = *pcum;
6047
6048 /* Skip the current argument. */
6049 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6050
6051 #if TARGET_ABI_UNICOSMK
6052 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6053 arguments on the stack. Unfortunately, it doesn't always store the first
6054 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6055 with stdargs as we always have at least one named argument there. */
6056 if (cum.num_reg_words < 6)
6057 {
6058 if (!no_rtl)
6059 {
6060 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6061 emit_insn (gen_arg_home_umk ());
6062 }
6063 *pretend_size = 0;
6064 }
6065 #elif TARGET_ABI_OPEN_VMS
6066 /* For VMS, we allocate space for all 6 arg registers plus a count.
6067
6068 However, if NO registers need to be saved, don't allocate any space.
6069 This is not only because we won't need the space, but because AP
6070 includes the current_pretend_args_size and we don't want to mess up
6071 any ap-relative addresses already made. */
6072 if (cum.num_args < 6)
6073 {
6074 if (!no_rtl)
6075 {
6076 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6077 emit_insn (gen_arg_home ());
6078 }
6079 *pretend_size = 7 * UNITS_PER_WORD;
6080 }
6081 #else
6082 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6083    only push those that remain.  However, if NO registers need to
6084 be saved, don't allocate any space. This is not only because we won't
6085 need the space, but because AP includes the current_pretend_args_size
6086 and we don't want to mess up any ap-relative addresses already made.
6087
6088 If we are not to use the floating-point registers, save the integer
6089 registers where we would put the floating-point registers. This is
6090 not the most efficient way to implement varargs with just one register
6091 class, but it isn't worth doing anything more efficient in this rare
6092 case. */
6093 if (cum >= 6)
6094 return;
6095
6096 if (!no_rtl)
6097 {
6098 int count, set = get_varargs_alias_set ();
6099 rtx tmp;
6100
6101 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6102 if (count > 6 - cum)
6103 count = 6 - cum;
6104
6105 /* Detect whether integer registers or floating-point registers
6106 are needed by the detected va_arg statements. See above for
6107 how these values are computed. Note that the "escape" value
6108 	 is VA_LIST_MAX_FPR_SIZE, which is 255 and so has both of
6109 these bits set. */
6110 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6111
6112 if (cfun->va_list_fpr_size & 1)
6113 {
6114 tmp = gen_rtx_MEM (BLKmode,
6115 plus_constant (virtual_incoming_args_rtx,
6116 (cum + 6) * UNITS_PER_WORD));
6117 MEM_NOTRAP_P (tmp) = 1;
6118 set_mem_alias_set (tmp, set);
6119 move_block_from_reg (16 + cum, tmp, count);
6120 }
6121
6122 if (cfun->va_list_fpr_size & 2)
6123 {
6124 tmp = gen_rtx_MEM (BLKmode,
6125 plus_constant (virtual_incoming_args_rtx,
6126 cum * UNITS_PER_WORD));
6127 MEM_NOTRAP_P (tmp) = 1;
6128 set_mem_alias_set (tmp, set);
6129 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6130 }
6131 }
6132 *pretend_size = 12 * UNITS_PER_WORD;
6133 #endif
6134 }
6135
6136 void
6137 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6138 {
6139 HOST_WIDE_INT offset;
6140 tree t, offset_field, base_field;
6141
6142 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6143 return;
6144
6145 if (TARGET_ABI_UNICOSMK)
6146 std_expand_builtin_va_start (valist, nextarg);
6147
6148 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6149 up by 48, storing fp arg registers in the first 48 bytes, and the
6150 integer arg registers in the next 48 bytes. This is only done,
6151 however, if any integer registers need to be stored.
6152
6153 If no integer registers need be stored, then we must subtract 48
6154 in order to account for the integer arg registers which are counted
6155 in argsize above, but which are not actually stored on the stack.
6156 Must further be careful here about structures straddling the last
6157 integer argument register; that futzes with pretend_args_size,
6158 which changes the meaning of AP. */
6159
6160 if (NUM_ARGS < 6)
6161 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6162 else
6163 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6164
6165 if (TARGET_ABI_OPEN_VMS)
6166 {
6167 nextarg = plus_constant (nextarg, offset);
6168 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6169 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6170 make_tree (ptr_type_node, nextarg));
6171 TREE_SIDE_EFFECTS (t) = 1;
6172
6173 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6174 }
6175 else
6176 {
6177 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6178 offset_field = TREE_CHAIN (base_field);
6179
6180 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6181 valist, base_field, NULL_TREE);
6182 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6183 valist, offset_field, NULL_TREE);
6184
6185 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6186 t = build (PLUS_EXPR, ptr_type_node, t,
6187 build_int_cst (NULL_TREE, offset));
6188 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6189 TREE_SIDE_EFFECTS (t) = 1;
6190 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6191
6192 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6193 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6194 TREE_SIDE_EFFECTS (t) = 1;
6195 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6196 }
6197 }
6198
6199 static tree
6200 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6201 {
6202 tree type_size, ptr_type, addend, t, addr, internal_post;
6203
6204 /* If the type could not be passed in registers, skip the block
6205 reserved for the registers. */
6206 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6207 {
6208 t = build_int_cst (TREE_TYPE (offset), 6*8);
6209 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
6210 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
6211 gimplify_and_add (t, pre_p);
6212 }
6213
6214 addend = offset;
6215 ptr_type = build_pointer_type (type);
6216
6217 if (TREE_CODE (type) == COMPLEX_TYPE)
6218 {
6219 tree real_part, imag_part, real_temp;
6220
6221 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6222 offset, pre_p);
6223
6224 /* Copy the value into a new temporary, lest the formal temporary
6225 be reused out from under us. */
6226 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6227
6228 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6229 offset, pre_p);
6230
6231 return build (COMPLEX_EXPR, type, real_temp, imag_part);
6232 }
6233 else if (TREE_CODE (type) == REAL_TYPE)
6234 {
6235 tree fpaddend, cond, fourtyeight;
6236
6237 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6238 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
6239 addend, fourtyeight));
6240 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
6241 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6242 fpaddend, addend));
6243 }
6244
6245 /* Build the final address and force that value into a temporary. */
6246 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6247 fold_convert (ptr_type, addend));
6248 internal_post = NULL;
6249 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6250 append_to_statement_list (internal_post, pre_p);
6251
6252 /* Update the offset field. */
6253 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6254 if (type_size == NULL || TREE_OVERFLOW (type_size))
6255 t = size_zero_node;
6256 else
6257 {
6258 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6259 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6260 t = size_binop (MULT_EXPR, t, size_int (8));
6261 }
6262 t = fold_convert (TREE_TYPE (offset), t);
6263 t = build (MODIFY_EXPR, void_type_node, offset,
6264 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6265 gimplify_and_add (t, pre_p);
6266
6267 return build_va_arg_indirect_ref (addr);
6268 }
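
/* The net effect, in C-like pseudo-code (an illustrative sketch;
   offsets are in bytes and every slot is 8 bytes):

     va_arg (ap, int):
       addr = ap.__base + ap.__offset;
       ap.__offset += 8;
       result = *(int *) addr;

     va_arg (ap, double):
       t = ap.__offset < 48 ? ap.__offset - 48 : ap.__offset;
       addr = ap.__base + t;
       ap.__offset += 8;
       result = *(double *) addr;

   i.e. FP arguments still in the register save area are fetched from
   the FP block that sits 48 bytes below __base.  */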
6269
6270 static tree
6271 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6272 {
6273 tree offset_field, base_field, offset, base, t, r;
6274 bool indirect;
6275
6276 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6277 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6278
6279 base_field = TYPE_FIELDS (va_list_type_node);
6280 offset_field = TREE_CHAIN (base_field);
6281 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6282 valist, base_field, NULL_TREE);
6283 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6284 valist, offset_field, NULL_TREE);
6285
6286 /* Pull the fields of the structure out into temporaries. Since we never
6287 modify the base field, we can use a formal temporary. Sign-extend the
6288 offset field so that it's the proper width for pointer arithmetic. */
6289 base = get_formal_tmp_var (base_field, pre_p);
6290
6291 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6292 offset = get_initialized_tmp_var (t, pre_p, NULL);
6293
6294 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6295 if (indirect)
6296 type = build_pointer_type (type);
6297
6298 /* Find the value. Note that this will be a stable indirection, or
6299 a composite of stable indirections in the case of complex. */
6300 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6301
6302 /* Stuff the offset temporary back into its field. */
6303 t = build (MODIFY_EXPR, void_type_node, offset_field,
6304 fold_convert (TREE_TYPE (offset_field), offset));
6305 gimplify_and_add (t, pre_p);
6306
6307 if (indirect)
6308 r = build_va_arg_indirect_ref (r);
6309
6310 return r;
6311 }
6312
6313 /* Builtins. */
6314
6315 enum alpha_builtin
6316 {
6317 ALPHA_BUILTIN_CMPBGE,
6318 ALPHA_BUILTIN_EXTBL,
6319 ALPHA_BUILTIN_EXTWL,
6320 ALPHA_BUILTIN_EXTLL,
6321 ALPHA_BUILTIN_EXTQL,
6322 ALPHA_BUILTIN_EXTWH,
6323 ALPHA_BUILTIN_EXTLH,
6324 ALPHA_BUILTIN_EXTQH,
6325 ALPHA_BUILTIN_INSBL,
6326 ALPHA_BUILTIN_INSWL,
6327 ALPHA_BUILTIN_INSLL,
6328 ALPHA_BUILTIN_INSQL,
6329 ALPHA_BUILTIN_INSWH,
6330 ALPHA_BUILTIN_INSLH,
6331 ALPHA_BUILTIN_INSQH,
6332 ALPHA_BUILTIN_MSKBL,
6333 ALPHA_BUILTIN_MSKWL,
6334 ALPHA_BUILTIN_MSKLL,
6335 ALPHA_BUILTIN_MSKQL,
6336 ALPHA_BUILTIN_MSKWH,
6337 ALPHA_BUILTIN_MSKLH,
6338 ALPHA_BUILTIN_MSKQH,
6339 ALPHA_BUILTIN_UMULH,
6340 ALPHA_BUILTIN_ZAP,
6341 ALPHA_BUILTIN_ZAPNOT,
6342 ALPHA_BUILTIN_AMASK,
6343 ALPHA_BUILTIN_IMPLVER,
6344 ALPHA_BUILTIN_RPCC,
6345 ALPHA_BUILTIN_THREAD_POINTER,
6346 ALPHA_BUILTIN_SET_THREAD_POINTER,
6347
6348 /* TARGET_MAX */
6349 ALPHA_BUILTIN_MINUB8,
6350 ALPHA_BUILTIN_MINSB8,
6351 ALPHA_BUILTIN_MINUW4,
6352 ALPHA_BUILTIN_MINSW4,
6353 ALPHA_BUILTIN_MAXUB8,
6354 ALPHA_BUILTIN_MAXSB8,
6355 ALPHA_BUILTIN_MAXUW4,
6356 ALPHA_BUILTIN_MAXSW4,
6357 ALPHA_BUILTIN_PERR,
6358 ALPHA_BUILTIN_PKLB,
6359 ALPHA_BUILTIN_PKWB,
6360 ALPHA_BUILTIN_UNPKBL,
6361 ALPHA_BUILTIN_UNPKBW,
6362
6363 /* TARGET_CIX */
6364 ALPHA_BUILTIN_CTTZ,
6365 ALPHA_BUILTIN_CTLZ,
6366 ALPHA_BUILTIN_CTPOP,
6367
6368 ALPHA_BUILTIN_max
6369 };
6370
6371 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6372 CODE_FOR_builtin_cmpbge,
6373 CODE_FOR_builtin_extbl,
6374 CODE_FOR_builtin_extwl,
6375 CODE_FOR_builtin_extll,
6376 CODE_FOR_builtin_extql,
6377 CODE_FOR_builtin_extwh,
6378 CODE_FOR_builtin_extlh,
6379 CODE_FOR_builtin_extqh,
6380 CODE_FOR_builtin_insbl,
6381 CODE_FOR_builtin_inswl,
6382 CODE_FOR_builtin_insll,
6383 CODE_FOR_builtin_insql,
6384 CODE_FOR_builtin_inswh,
6385 CODE_FOR_builtin_inslh,
6386 CODE_FOR_builtin_insqh,
6387 CODE_FOR_builtin_mskbl,
6388 CODE_FOR_builtin_mskwl,
6389 CODE_FOR_builtin_mskll,
6390 CODE_FOR_builtin_mskql,
6391 CODE_FOR_builtin_mskwh,
6392 CODE_FOR_builtin_msklh,
6393 CODE_FOR_builtin_mskqh,
6394 CODE_FOR_umuldi3_highpart,
6395 CODE_FOR_builtin_zap,
6396 CODE_FOR_builtin_zapnot,
6397 CODE_FOR_builtin_amask,
6398 CODE_FOR_builtin_implver,
6399 CODE_FOR_builtin_rpcc,
6400 CODE_FOR_load_tp,
6401 CODE_FOR_set_tp,
6402
6403 /* TARGET_MAX */
6404 CODE_FOR_builtin_minub8,
6405 CODE_FOR_builtin_minsb8,
6406 CODE_FOR_builtin_minuw4,
6407 CODE_FOR_builtin_minsw4,
6408 CODE_FOR_builtin_maxub8,
6409 CODE_FOR_builtin_maxsb8,
6410 CODE_FOR_builtin_maxuw4,
6411 CODE_FOR_builtin_maxsw4,
6412 CODE_FOR_builtin_perr,
6413 CODE_FOR_builtin_pklb,
6414 CODE_FOR_builtin_pkwb,
6415 CODE_FOR_builtin_unpkbl,
6416 CODE_FOR_builtin_unpkbw,
6417
6418 /* TARGET_CIX */
6419 CODE_FOR_ctzdi2,
6420 CODE_FOR_clzdi2,
6421 CODE_FOR_popcountdi2
6422 };
6423
6424 struct alpha_builtin_def
6425 {
6426 const char *name;
6427 enum alpha_builtin code;
6428 unsigned int target_mask;
6429 bool is_const;
6430 };
6431
6432 static struct alpha_builtin_def const zero_arg_builtins[] = {
6433 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6434 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6435 };
6436
6437 static struct alpha_builtin_def const one_arg_builtins[] = {
6438 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6439 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6440 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6441 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6442 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6443 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6444 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6445 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6446 };
6447
6448 static struct alpha_builtin_def const two_arg_builtins[] = {
6449 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6450 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6451 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6452 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6453 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6454 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6455 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6456 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6457 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6458 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6459 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6460 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6461 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6462 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6463 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6464 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6465 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6466 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6467 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6468 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6469 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6470 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6471 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6472 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6473 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6474 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6475 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6476 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6477 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6478 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6479 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6480 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6481 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6482 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6483 };
6484
6485 static GTY(()) tree alpha_v8qi_u;
6486 static GTY(()) tree alpha_v8qi_s;
6487 static GTY(()) tree alpha_v4hi_u;
6488 static GTY(()) tree alpha_v4hi_s;
6489
6490 static void
6491 alpha_init_builtins (void)
6492 {
6493 const struct alpha_builtin_def *p;
6494 tree dimode_integer_type_node;
6495 tree ftype, attrs[2];
6496 size_t i;
6497
6498 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6499
6500 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6501 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6502
6503 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6504
6505 p = zero_arg_builtins;
6506 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6507 if ((target_flags & p->target_mask) == p->target_mask)
6508 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6509 NULL, attrs[p->is_const]);
6510
6511 ftype = build_function_type_list (dimode_integer_type_node,
6512 dimode_integer_type_node, NULL_TREE);
6513
6514 p = one_arg_builtins;
6515 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6516 if ((target_flags & p->target_mask) == p->target_mask)
6517 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6518 NULL, attrs[p->is_const]);
6519
6520 ftype = build_function_type_list (dimode_integer_type_node,
6521 dimode_integer_type_node,
6522 dimode_integer_type_node, NULL_TREE);
6523
6524 p = two_arg_builtins;
6525 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6526 if ((target_flags & p->target_mask) == p->target_mask)
6527 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6528 NULL, attrs[p->is_const]);
6529
6530 ftype = build_function_type (ptr_type_node, void_list_node);
6531 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6532 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6533 NULL, attrs[0]);
6534
6535 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6536 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6537 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6538 NULL, attrs[0]);
6539
6540 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6541 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6542 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6543 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6544 }
6545
6546 /* Expand an expression EXP that calls a built-in function,
6547 with result going to TARGET if that's convenient
6548 (and in mode MODE if that's convenient).
6549 SUBTARGET may be used as the target for computing one of EXP's operands.
6550 IGNORE is nonzero if the value is to be ignored. */
6551
6552 static rtx
6553 alpha_expand_builtin (tree exp, rtx target,
6554 rtx subtarget ATTRIBUTE_UNUSED,
6555 enum machine_mode mode ATTRIBUTE_UNUSED,
6556 int ignore ATTRIBUTE_UNUSED)
6557 {
6558 #define MAX_ARGS 2
6559
6560 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6561 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6562 tree arglist = TREE_OPERAND (exp, 1);
6563 enum insn_code icode;
6564 rtx op[MAX_ARGS], pat;
6565 int arity;
6566 bool nonvoid;
6567
6568 if (fcode >= ALPHA_BUILTIN_max)
6569 internal_error ("bad builtin fcode");
6570 icode = code_for_builtin[fcode];
6571 if (icode == 0)
6572 internal_error ("bad builtin fcode");
6573
6574 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6575
6576 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6577 arglist;
6578 arglist = TREE_CHAIN (arglist), arity++)
6579 {
6580 const struct insn_operand_data *insn_op;
6581
6582 tree arg = TREE_VALUE (arglist);
6583 if (arg == error_mark_node)
6584 return NULL_RTX;
6585       if (arity >= MAX_ARGS)
6586 return NULL_RTX;
6587
6588 insn_op = &insn_data[icode].operand[arity + nonvoid];
6589
6590 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6591
6592 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6593 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6594 }
6595
6596 if (nonvoid)
6597 {
6598 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6599 if (!target
6600 || GET_MODE (target) != tmode
6601 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6602 target = gen_reg_rtx (tmode);
6603 }
6604
6605 switch (arity)
6606 {
6607 case 0:
6608 pat = GEN_FCN (icode) (target);
6609 break;
6610 case 1:
6611 if (nonvoid)
6612 pat = GEN_FCN (icode) (target, op[0]);
6613 else
6614 pat = GEN_FCN (icode) (op[0]);
6615 break;
6616 case 2:
6617 pat = GEN_FCN (icode) (target, op[0], op[1]);
6618 break;
6619 default:
6620 gcc_unreachable ();
6621 }
6622 if (!pat)
6623 return NULL_RTX;
6624 emit_insn (pat);
6625
6626 if (nonvoid)
6627 return target;
6628 else
6629 return const0_rtx;
6630 }
6631
6632
6633 /* Several bits below assume HWI >= 64 bits. This should be enforced
6634 by config.gcc. */
6635 #if HOST_BITS_PER_WIDE_INT < 64
6636 # error "HOST_WIDE_INT too small"
6637 #endif
6638
6639 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6640 with an 8 bit output vector. OPINT contains the integer operands; bit N
6641 of OP_CONST is set if OPINT[N] is valid. */
6642
6643 static tree
6644 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6645 {
6646 if (op_const == 3)
6647 {
6648 int i, val;
6649 for (i = 0, val = 0; i < 8; ++i)
6650 {
6651 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6652 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6653 if (c0 >= c1)
6654 val |= 1 << i;
6655 }
6656 return build_int_cst (long_integer_type_node, val);
6657 }
6658 else if (op_const == 2 && opint[1] == 0)
6659 return build_int_cst (long_integer_type_node, 0xff);
6660 return NULL;
6661 }
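
/* For example (sketch), folding with both operands constant:

     __builtin_alpha_cmpbge (0x11, 0x22)   ==>  0xfe

   byte 0 fails (0x11 < 0x22) while bytes 1..7 compare 0 >= 0, so every
   result bit but bit 0 is set.  Similarly cmpbge (x, 0) is always 0xff,
   which is the op_const == 2 case above.  */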
6662
6663 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6664 specialized form of an AND operation. Other byte manipulation instructions
6665 are defined in terms of this instruction, so this is also used as a
6666 subroutine for other builtins.
6667
6668 OP contains the tree operands; OPINT contains the extracted integer values.
6669    Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6670 OPINT may be considered. */
6671
6672 static tree
6673 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6674 long op_const)
6675 {
6676 if (op_const & 2)
6677 {
6678 unsigned HOST_WIDE_INT mask = 0;
6679 int i;
6680
6681 for (i = 0; i < 8; ++i)
6682 if ((opint[1] >> i) & 1)
6683 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6684
6685 if (op_const & 1)
6686 return build_int_cst (long_integer_type_node, opint[0] & mask);
6687
6688 if (op)
6689 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6690 build_int_cst (long_integer_type_node, mask)));
6691 }
6692 else if ((op_const & 1) && opint[0] == 0)
6693 return build_int_cst (long_integer_type_node, 0);
6694 return NULL;
6695 }
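
/* For example (sketch): a constant selector folds to a plain mask, so
   __builtin_alpha_zapnot (x, 0x0f) becomes x & 0xffffffff (bytes 0..3
   kept), and when x is constant too the AND is evaluated outright.
   A constant-zero first operand folds to 0 for any selector.  */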
6696
6697 /* Fold the builtins for the EXT family of instructions. */
6698
6699 static tree
6700 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6701 long op_const, unsigned HOST_WIDE_INT bytemask,
6702 bool is_high)
6703 {
6704 long zap_const = 2;
6705 tree *zap_op = NULL;
6706
6707 if (op_const & 2)
6708 {
6709 unsigned HOST_WIDE_INT loc;
6710
6711 loc = opint[1] & 7;
6712 if (BYTES_BIG_ENDIAN)
6713 loc ^= 7;
6714 loc *= 8;
6715
6716 if (loc != 0)
6717 {
6718 if (op_const & 1)
6719 {
6720 unsigned HOST_WIDE_INT temp = opint[0];
6721 if (is_high)
6722 temp <<= loc;
6723 else
6724 temp >>= loc;
6725 opint[0] = temp;
6726 zap_const = 3;
6727 }
6728 }
6729 else
6730 zap_op = op;
6731 }
6732
6733 opint[1] = bytemask;
6734 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6735 }
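
/* For example (sketch), with both operands constant:

     __builtin_alpha_extwl (0x1122334455667788, 2)   ==>  0x5566

   the value is shifted right by 2*8 bits and then passed through zapnot
   with bytemask 0x03, extracting the 16-bit word at byte offset 2.  */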
6736
6737 /* Fold the builtins for the INS family of instructions. */
6738
6739 static tree
6740 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6741 long op_const, unsigned HOST_WIDE_INT bytemask,
6742 bool is_high)
6743 {
6744 if ((op_const & 1) && opint[0] == 0)
6745 return build_int_cst (long_integer_type_node, 0);
6746
6747 if (op_const & 2)
6748 {
6749 unsigned HOST_WIDE_INT temp, loc, byteloc;
6750 tree *zap_op = NULL;
6751
6752 loc = opint[1] & 7;
6753 if (BYTES_BIG_ENDIAN)
6754 loc ^= 7;
6755 bytemask <<= loc;
6756
6757 temp = opint[0];
6758 if (is_high)
6759 {
6760 byteloc = (64 - (loc * 8)) & 0x3f;
6761 if (byteloc == 0)
6762 zap_op = op;
6763 else
6764 temp >>= byteloc;
6765 bytemask >>= 8;
6766 }
6767 else
6768 {
6769 byteloc = loc * 8;
6770 if (byteloc == 0)
6771 zap_op = op;
6772 else
6773 temp <<= byteloc;
6774 }
6775
6776 opint[0] = temp;
6777 opint[1] = bytemask;
6778 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6779 }
6780
6781 return NULL;
6782 }
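
/* For example (sketch), with both operands constant:

     __builtin_alpha_insbl (0xab, 3)   ==>  0xab000000

   the byte is shifted to byte position 3 and zapnot with the shifted
   bytemask 0x08 clears every other byte.  */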
6783
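/* Fold the builtins for the MSK family of instructions, which clear
   the bytes that the matching INS instruction would write.  */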
6784 static tree
6785 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6786 long op_const, unsigned HOST_WIDE_INT bytemask,
6787 bool is_high)
6788 {
6789 if (op_const & 2)
6790 {
6791 unsigned HOST_WIDE_INT loc;
6792
6793 loc = opint[1] & 7;
6794 if (BYTES_BIG_ENDIAN)
6795 loc ^= 7;
6796 bytemask <<= loc;
6797
6798 if (is_high)
6799 bytemask >>= 8;
6800
6801 opint[1] = bytemask ^ 0xff;
6802 }
6803
6804 return alpha_fold_builtin_zapnot (op, opint, op_const);
6805 }
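
/* For example (sketch): __builtin_alpha_mskbl (x, 3) shifts the
   bytemask 0x01 up to 0x08, complements it, and folds through zapnot
   to x & ~0xff000000, clearing byte 3 of the 64-bit value.  */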
6806
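/* Fold the builtin for the UMULH instruction, which computes the high
   64 bits of the full 128-bit product of its operands.  */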
6807 static tree
6808 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6809 {
6810 switch (op_const)
6811 {
6812 case 3:
6813 {
6814 unsigned HOST_WIDE_INT l;
6815 HOST_WIDE_INT h;
6816
6817 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6818
6819 #if HOST_BITS_PER_WIDE_INT > 64
6820 # error fixme
6821 #endif
6822
6823 return build_int_cst (long_integer_type_node, h);
6824 }
6825
6826 case 1:
6827 opint[1] = opint[0];
6828 /* FALLTHRU */
6829 case 2:
6830 /* Note that (X*1) >> 64 == 0. */
6831 if (opint[1] == 0 || opint[1] == 1)
6832 return build_int_cst (long_integer_type_node, 0);
6833 break;
6834 }
6835 return NULL;
6836 }
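
/* For example (sketch):

     __builtin_alpha_umulh (1UL << 32, 1UL << 32)   ==>  1

   since 2^32 * 2^32 = 2^64, whose high half is 1.  The X*0 and X*1
   cases fold to 0 without knowing the other operand.  */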
6837
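/* Fold the builtins for the vector MIN/MAX instructions of the MAX
   extension by rewriting them as lane-wise MIN_EXPR/MAX_EXPR on the
   appropriate V8QI or V4HI vector type.  */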
6838 static tree
6839 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6840 {
6841 tree op0 = fold_convert (vtype, op[0]);
6842 tree op1 = fold_convert (vtype, op[1]);
6843 tree val = fold (build2 (code, vtype, op0, op1));
6844 return fold_convert (long_integer_type_node, val);
6845 }
6846
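/* Fold the builtin for the PERR instruction, which sums the absolute
   differences of the eight byte pairs of its operands.  */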
6847 static tree
6848 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6849 {
6850 unsigned HOST_WIDE_INT temp = 0;
6851 int i;
6852
6853 if (op_const != 3)
6854 return NULL;
6855
6856 for (i = 0; i < 8; ++i)
6857 {
6858 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6859 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6860 if (a >= b)
6861 temp += a - b;
6862 else
6863 temp += b - a;
6864 }
6865
6866 return build_int_cst (long_integer_type_node, temp);
6867 }
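
/* For example (sketch):

     __builtin_alpha_perr (0x0205, 0x0304)   ==>  2

   byte 0 contributes |0x05 - 0x04| = 1 and byte 1 contributes
   |0x02 - 0x03| = 1; the other six byte pairs are equal.  */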
6868
6869 static tree
6870 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6871 {
6872 unsigned HOST_WIDE_INT temp;
6873
6874 if (op_const == 0)
6875 return NULL;
6876
6877 temp = opint[0] & 0xff;
6878 temp |= (opint[0] >> 24) & 0xff00;
6879
6880 return build_int_cst (long_integer_type_node, temp);
6881 }
6882
6883 static tree
6884 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6885 {
6886 unsigned HOST_WIDE_INT temp;
6887
6888 if (op_const == 0)
6889 return NULL;
6890
6891 temp = opint[0] & 0xff;
6892 temp |= (opint[0] >> 8) & 0xff00;
6893 temp |= (opint[0] >> 16) & 0xff0000;
6894 temp |= (opint[0] >> 24) & 0xff000000;
6895
6896 return build_int_cst (long_integer_type_node, temp);
6897 }
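
/* For example (sketch): PKWB packs the low byte of each 16-bit lane,

     __builtin_alpha_pkwb (0x0044003300220011)   ==>  0x44332211

   and PKLB above does the same for the two 32-bit lanes; the UNPK
   folders below perform the inverse expansions.  */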
6898
6899 static tree
6900 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6901 {
6902 unsigned HOST_WIDE_INT temp;
6903
6904 if (op_const == 0)
6905 return NULL;
6906
6907 temp = opint[0] & 0xff;
6908 temp |= (opint[0] & 0xff00) << 24;
6909
6910 return build_int_cst (long_integer_type_node, temp);
6911 }
6912
6913 static tree
6914 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6915 {
6916 unsigned HOST_WIDE_INT temp;
6917
6918 if (op_const == 0)
6919 return NULL;
6920
6921 temp = opint[0] & 0xff;
6922 temp |= (opint[0] & 0x0000ff00) << 8;
6923 temp |= (opint[0] & 0x00ff0000) << 16;
6924 temp |= (opint[0] & 0xff000000) << 24;
6925
6926 return build_int_cst (long_integer_type_node, temp);
6927 }
6928
6929 static tree
6930 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6931 {
6932 unsigned HOST_WIDE_INT temp;
6933
6934 if (op_const == 0)
6935 return NULL;
6936
6937 if (opint[0] == 0)
6938 temp = 64;
6939 else
6940 temp = exact_log2 (opint[0] & -opint[0]);
6941
6942 return build_int_cst (long_integer_type_node, temp);
6943 }
6944
6945 static tree
6946 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6947 {
6948 unsigned HOST_WIDE_INT temp;
6949
6950 if (op_const == 0)
6951 return NULL;
6952
6953 if (opint[0] == 0)
6954 temp = 64;
6955 else
6956 temp = 64 - floor_log2 (opint[0]) - 1;
6957
6958 return build_int_cst (long_integer_type_node, temp);
6959 }
6960
6961 static tree
6962 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6963 {
6964 unsigned HOST_WIDE_INT temp, op;
6965
6966 if (op_const == 0)
6967 return NULL;
6968
6969 op = opint[0];
6970 temp = 0;
6971 while (op)
6972 temp++, op &= op - 1;
6973
6974 return build_int_cst (long_integer_type_node, temp);
6975 }
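
/* For example (sketch), for the constant 0x1010:

     __builtin_alpha_cttz  (0x1010)   ==>  4
     __builtin_alpha_ctlz  (0x1010)   ==>  51
     __builtin_alpha_ctpop (0x1010)   ==>  2

   and both CTTZ and CTLZ fold an input of 0 to 64 rather than leaving
   the result undefined.  */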
6976
6977 /* Fold one of our builtin functions. */
6978
6979 static tree
6980 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6981 {
6982 tree op[MAX_ARGS], t;
6983 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6984 long op_const = 0, arity = 0;
6985
6986 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6987 {
6988 tree arg = TREE_VALUE (t);
6989 if (arg == error_mark_node)
6990 return NULL;
6991 if (arity >= MAX_ARGS)
6992 return NULL;
6993
6994 op[arity] = arg;
6995 opint[arity] = 0;
6996 if (TREE_CODE (arg) == INTEGER_CST)
6997 {
6998 op_const |= 1L << arity;
6999 opint[arity] = int_cst_value (arg);
7000 }
7001 }
7002
7003 switch (DECL_FUNCTION_CODE (fndecl))
7004 {
7005 case ALPHA_BUILTIN_CMPBGE:
7006 return alpha_fold_builtin_cmpbge (opint, op_const);
7007
7008 case ALPHA_BUILTIN_EXTBL:
7009 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7010 case ALPHA_BUILTIN_EXTWL:
7011 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7012 case ALPHA_BUILTIN_EXTLL:
7013 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7014 case ALPHA_BUILTIN_EXTQL:
7015 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7016 case ALPHA_BUILTIN_EXTWH:
7017 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7018 case ALPHA_BUILTIN_EXTLH:
7019 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7020 case ALPHA_BUILTIN_EXTQH:
7021 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7022
7023 case ALPHA_BUILTIN_INSBL:
7024 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7025 case ALPHA_BUILTIN_INSWL:
7026 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7027 case ALPHA_BUILTIN_INSLL:
7028 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7029 case ALPHA_BUILTIN_INSQL:
7030 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7031 case ALPHA_BUILTIN_INSWH:
7032 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7033 case ALPHA_BUILTIN_INSLH:
7034 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7035 case ALPHA_BUILTIN_INSQH:
7036 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7037
7038 case ALPHA_BUILTIN_MSKBL:
7039 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7040 case ALPHA_BUILTIN_MSKWL:
7041 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7042 case ALPHA_BUILTIN_MSKLL:
7043 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7044 case ALPHA_BUILTIN_MSKQL:
7045 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7046 case ALPHA_BUILTIN_MSKWH:
7047 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7048 case ALPHA_BUILTIN_MSKLH:
7049 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7050 case ALPHA_BUILTIN_MSKQH:
7051 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7052
7053 case ALPHA_BUILTIN_UMULH:
7054 return alpha_fold_builtin_umulh (opint, op_const);
7055
7056 case ALPHA_BUILTIN_ZAP:
7057 opint[1] ^= 0xff;
7058 /* FALLTHRU */
7059 case ALPHA_BUILTIN_ZAPNOT:
7060 return alpha_fold_builtin_zapnot (op, opint, op_const);
7061
7062 case ALPHA_BUILTIN_MINUB8:
7063 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7064 case ALPHA_BUILTIN_MINSB8:
7065 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7066 case ALPHA_BUILTIN_MINUW4:
7067 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7068 case ALPHA_BUILTIN_MINSW4:
7069 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7070 case ALPHA_BUILTIN_MAXUB8:
7071 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7072 case ALPHA_BUILTIN_MAXSB8:
7073 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7074 case ALPHA_BUILTIN_MAXUW4:
7075 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7076 case ALPHA_BUILTIN_MAXSW4:
7077 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7078
7079 case ALPHA_BUILTIN_PERR:
7080 return alpha_fold_builtin_perr (opint, op_const);
7081 case ALPHA_BUILTIN_PKLB:
7082 return alpha_fold_builtin_pklb (opint, op_const);
7083 case ALPHA_BUILTIN_PKWB:
7084 return alpha_fold_builtin_pkwb (opint, op_const);
7085 case ALPHA_BUILTIN_UNPKBL:
7086 return alpha_fold_builtin_unpkbl (opint, op_const);
7087 case ALPHA_BUILTIN_UNPKBW:
7088 return alpha_fold_builtin_unpkbw (opint, op_const);
7089
7090 case ALPHA_BUILTIN_CTTZ:
7091 return alpha_fold_builtin_cttz (opint, op_const);
7092 case ALPHA_BUILTIN_CTLZ:
7093 return alpha_fold_builtin_ctlz (opint, op_const);
7094 case ALPHA_BUILTIN_CTPOP:
7095 return alpha_fold_builtin_ctpop (opint, op_const);
7096
7097 case ALPHA_BUILTIN_AMASK:
7098 case ALPHA_BUILTIN_IMPLVER:
7099 case ALPHA_BUILTIN_RPCC:
7100 case ALPHA_BUILTIN_THREAD_POINTER:
7101 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7102 /* None of these are foldable at compile-time. */
7103 default:
7104 return NULL;
7105 }
7106 }
7107
7108 /* This page contains routines that are used to determine what the function
7109 prologue and epilogue code will do and write them out. */
7110
7111 /* Compute the size of the save area in the stack. */
7112
7113 /* These variables are used for communication between the following functions.
7114 They indicate various things about the current function being compiled
7115 that are used to tell what kind of prologue, epilogue and procedure
7116 descriptor to generate. */
7117
7118 /* Nonzero if we need a stack procedure. */
7119 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7120 static enum alpha_procedure_types alpha_procedure_type;
7121
7122 /* Register number (either FP or SP) that is used to unwind the frame. */
7123 static int vms_unwind_regno;
7124
7125 /* Register number used to save FP. We need not have one for RA since
7126 we don't modify it for register procedures. This is only defined
7127 for register frame procedures. */
7128 static int vms_save_fp_regno;
7129
7130 /* Register number used to reference objects off our PV. */
7131 static int vms_base_regno;
7132
7133 /* Compute register masks for saved registers. */
7134
7135 static void
7136 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7137 {
7138 unsigned long imask = 0;
7139 unsigned long fmask = 0;
7140 unsigned int i;
7141
7142 /* When outputting a thunk, we don't have valid register life info,
7143 but assemble_start_function wants to output .frame and .mask
7144 directives. */
7145 if (current_function_is_thunk)
7146 {
7147 *imaskP = 0;
7148 *fmaskP = 0;
7149 return;
7150 }
7151
7152 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7153 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7154
7155 /* One for every register we have to save. */
7156 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7157 if (! fixed_regs[i] && ! call_used_regs[i]
7158 && regs_ever_live[i] && i != REG_RA
7159 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7160 {
7161 if (i < 32)
7162 imask |= (1UL << i);
7163 else
7164 fmask |= (1UL << (i - 32));
7165 }
7166
7167 /* We need to restore these for the handler. */
7168 if (current_function_calls_eh_return)
7169 {
7170 for (i = 0; ; ++i)
7171 {
7172 unsigned regno = EH_RETURN_DATA_REGNO (i);
7173 if (regno == INVALID_REGNUM)
7174 break;
7175 imask |= 1UL << regno;
7176 }
7177 }
7178
7179 /* If any register spilled, then spill the return address also. */
7180 /* ??? This is required by the Digital stack unwind specification
7181 and isn't needed if we're doing Dwarf2 unwinding. */
7182 if (imask || fmask || alpha_ra_ever_killed ())
7183 imask |= (1UL << REG_RA);
7184
7185 *imaskP = imask;
7186 *fmaskP = fmask;
7187 }
7188
7189 int
7190 alpha_sa_size (void)
7191 {
7192 unsigned long mask[2];
7193 int sa_size = 0;
7194 int i, j;
7195
7196 alpha_sa_mask (&mask[0], &mask[1]);
7197
7198 if (TARGET_ABI_UNICOSMK)
7199 {
7200 if (mask[0] || mask[1])
7201 sa_size = 14;
7202 }
7203 else
7204 {
7205 for (j = 0; j < 2; ++j)
7206 for (i = 0; i < 32; ++i)
7207 if ((mask[j] >> i) & 1)
7208 sa_size++;
7209 }
7210
7211 if (TARGET_ABI_UNICOSMK)
7212 {
7213 /* We might not need to generate a frame if we don't make any calls
7214 (including calls to __T3E_MISMATCH if this is a vararg function),
7215 don't have any local variables which require stack slots, don't
7216 use alloca and have not determined that we need a frame for other
7217 reasons. */
7218
7219 alpha_procedure_type
7220 = (sa_size || get_frame_size() != 0
7221 || current_function_outgoing_args_size
7222 || current_function_stdarg || current_function_calls_alloca
7223 || frame_pointer_needed)
7224 ? PT_STACK : PT_REGISTER;
7225
7226 /* Always reserve space for saving callee-saved registers if we
7227 need a frame as required by the calling convention. */
7228 if (alpha_procedure_type == PT_STACK)
7229 sa_size = 14;
7230 }
7231 else if (TARGET_ABI_OPEN_VMS)
7232 {
7233       /* Start by assuming we can use a register procedure if we neither
7234 	 make any calls (REG_RA not used) nor need to save any registers,
7235 	 and a stack procedure if we do.  */
7236 if ((mask[0] >> REG_RA) & 1)
7237 alpha_procedure_type = PT_STACK;
7238 else if (get_frame_size() != 0)
7239 alpha_procedure_type = PT_REGISTER;
7240 else
7241 alpha_procedure_type = PT_NULL;
7242
7243 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7244 made the final decision on stack procedure vs register procedure. */
7245 if (alpha_procedure_type == PT_STACK)
7246 sa_size -= 2;
7247
7248 /* Decide whether to refer to objects off our PV via FP or PV.
7249 If we need FP for something else or if we receive a nonlocal
7250 goto (which expects PV to contain the value), we must use PV.
7251 Otherwise, start by assuming we can use FP. */
7252
7253 vms_base_regno
7254 = (frame_pointer_needed
7255 || current_function_has_nonlocal_label
7256 || alpha_procedure_type == PT_STACK
7257 || current_function_outgoing_args_size)
7258 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7259
7260 /* If we want to copy PV into FP, we need to find some register
7261 in which to save FP. */
7262
7263 vms_save_fp_regno = -1;
7264 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7265 for (i = 0; i < 32; i++)
7266 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7267 vms_save_fp_regno = i;
7268
7269 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7270 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7271 else if (alpha_procedure_type == PT_NULL)
7272 vms_base_regno = REG_PV;
7273
7274 /* Stack unwinding should be done via FP unless we use it for PV. */
7275 vms_unwind_regno = (vms_base_regno == REG_PV
7276 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7277
7278 /* If this is a stack procedure, allow space for saving FP and RA. */
7279 if (alpha_procedure_type == PT_STACK)
7280 sa_size += 2;
7281 }
7282 else
7283 {
7284 /* Our size must be even (multiple of 16 bytes). */
7285 if (sa_size & 1)
7286 sa_size++;
7287 }
7288
7289 return sa_size * 8;
7290 }
7291
7292 /* Define the offset between two registers, one to be eliminated,
7293 and the other its replacement, at the start of a routine. */
7294
7295 HOST_WIDE_INT
7296 alpha_initial_elimination_offset (unsigned int from,
7297 unsigned int to ATTRIBUTE_UNUSED)
7298 {
7299 HOST_WIDE_INT ret;
7300
7301 ret = alpha_sa_size ();
7302 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7303
7304 switch (from)
7305 {
7306 case FRAME_POINTER_REGNUM:
7307 break;
7308
7309 case ARG_POINTER_REGNUM:
7310 ret += (ALPHA_ROUND (get_frame_size ()
7311 + current_function_pretend_args_size)
7312 - current_function_pretend_args_size);
7313 break;
7314
7315 default:
7316 gcc_unreachable ();
7317 }
7318
7319 return ret;
7320 }
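
/* For example (an illustrative sketch): with a 32-byte register save
   area, 16 bytes of outgoing arguments, 32 bytes of frame and no
   pretend args, FRAME_POINTER_REGNUM is eliminated to SP + 48 and
   ARG_POINTER_REGNUM to SP + 80; ALPHA_ROUND keeps each block 16-byte
   aligned.  */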
7321
7322 int
7323 alpha_pv_save_size (void)
7324 {
7325 alpha_sa_size ();
7326 return alpha_procedure_type == PT_STACK ? 8 : 0;
7327 }
7328
7329 int
7330 alpha_using_fp (void)
7331 {
7332 alpha_sa_size ();
7333 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7334 }
7335
7336 #if TARGET_ABI_OPEN_VMS
7337
7338 const struct attribute_spec vms_attribute_table[] =
7339 {
7340 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7341 { "overlaid", 0, 0, true, false, false, NULL },
7342 { "global", 0, 0, true, false, false, NULL },
7343 { "initialize", 0, 0, true, false, false, NULL },
7344 { NULL, 0, 0, false, false, false, NULL }
7345 };
7346
7347 #endif
7348
7349 static int
7350 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7351 {
7352 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7353 }
7354
7355 int
7356 alpha_find_lo_sum_using_gp (rtx insn)
7357 {
7358 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7359 }
7360
7361 static int
7362 alpha_does_function_need_gp (void)
7363 {
7364 rtx insn;
7365
7366   /* The GP being variable is an OSF ABI thing.  */
7367 if (! TARGET_ABI_OSF)
7368 return 0;
7369
7370 /* We need the gp to load the address of __mcount. */
7371 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7372 return 1;
7373
7374 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7375 if (current_function_is_thunk)
7376 return 1;
7377
7378 /* The nonlocal receiver pattern assumes that the gp is valid for
7379 the nested function. Reasonable because it's almost always set
7380 correctly already. For the cases where that's wrong, make sure
7381 the nested function loads its gp on entry. */
7382 if (current_function_has_nonlocal_goto)
7383 return 1;
7384
7385 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7386 Even if we are a static function, we still need to do this in case
7387 our address is taken and passed to something like qsort. */
7388
7389 push_topmost_sequence ();
7390 insn = get_insns ();
7391 pop_topmost_sequence ();
7392
7393 for (; insn; insn = NEXT_INSN (insn))
7394 if (INSN_P (insn)
7395 && GET_CODE (PATTERN (insn)) != USE
7396 && GET_CODE (PATTERN (insn)) != CLOBBER
7397 && get_attr_usegp (insn))
7398 return 1;
7399
7400 return 0;
7401 }
7402
7403
7404 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7405 sequences. */
7406
7407 static rtx
7408 set_frame_related_p (void)
7409 {
7410 rtx seq = get_insns ();
7411 rtx insn;
7412
7413 end_sequence ();
7414
7415 if (!seq)
7416 return NULL_RTX;
7417
7418 if (INSN_P (seq))
7419 {
7420 insn = seq;
7421 while (insn != NULL_RTX)
7422 {
7423 RTX_FRAME_RELATED_P (insn) = 1;
7424 insn = NEXT_INSN (insn);
7425 }
7426 seq = emit_insn (seq);
7427 }
7428 else
7429 {
7430 seq = emit_insn (seq);
7431 RTX_FRAME_RELATED_P (seq) = 1;
7432 }
7433 return seq;
7434 }
7435
7436 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
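
/* For example, the prologue's stack adjustment below is written as
   FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, ...))), which emits
   the addition inside a fresh sequence and marks every insn of it
   frame-related for unwind info.  */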
7437
7438 /* Generates a store with the proper unwind info attached. VALUE is
7439 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7440 contains SP+FRAME_BIAS, and that is the unwind info that should be
7441 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7442 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7443
7444 static void
7445 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7446 HOST_WIDE_INT base_ofs, rtx frame_reg)
7447 {
7448 rtx addr, mem, insn;
7449
7450 addr = plus_constant (base_reg, base_ofs);
7451 mem = gen_rtx_MEM (DImode, addr);
7452 set_mem_alias_set (mem, alpha_sr_alias_set);
7453
7454 insn = emit_move_insn (mem, value);
7455 RTX_FRAME_RELATED_P (insn) = 1;
7456
7457 if (frame_bias || value != frame_reg)
7458 {
7459 if (frame_bias)
7460 {
7461 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7462 mem = gen_rtx_MEM (DImode, addr);
7463 }
7464
7465 REG_NOTES (insn)
7466 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7467 gen_rtx_SET (VOIDmode, mem, frame_reg),
7468 REG_NOTES (insn));
7469 }
7470 }
7471
7472 static void
7473 emit_frame_store (unsigned int regno, rtx base_reg,
7474 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7475 {
7476 rtx reg = gen_rtx_REG (DImode, regno);
7477 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7478 }
7479
7480 /* Write function prologue. */
7481
7482 /* On vms we have two kinds of functions:
7483
7484 - stack frame (PROC_STACK)
7485        these are 'normal' functions with local variables that
7486        call other functions
7487 - register frame (PROC_REGISTER)
7488 keeps all data in registers, needs no stack
7489
7490    We must pass this to the assembler so it can generate the
7491    proper pdsc (procedure descriptor).
7492    This is done with the '.pdesc' directive.
7493
7494    On non-VMS targets, we don't really differentiate between the two, as we
7495    can simply allocate stack without saving registers.  */
7496
7497 void
7498 alpha_expand_prologue (void)
7499 {
7500 /* Registers to save. */
7501 unsigned long imask = 0;
7502 unsigned long fmask = 0;
7503 /* Stack space needed for pushing registers clobbered by us. */
7504 HOST_WIDE_INT sa_size;
7505 /* Complete stack size needed. */
7506 HOST_WIDE_INT frame_size;
7507 /* Offset from base reg to register save area. */
7508 HOST_WIDE_INT reg_offset;
7509 rtx sa_reg;
7510 int i;
7511
7512 sa_size = alpha_sa_size ();
7513
7514 frame_size = get_frame_size ();
7515 if (TARGET_ABI_OPEN_VMS)
7516 frame_size = ALPHA_ROUND (sa_size
7517 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7518 + frame_size
7519 + current_function_pretend_args_size);
7520 else if (TARGET_ABI_UNICOSMK)
7521 /* We have to allocate space for the DSIB if we generate a frame. */
7522 frame_size = ALPHA_ROUND (sa_size
7523 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7524 + ALPHA_ROUND (frame_size
7525 + current_function_outgoing_args_size);
7526 else
7527 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7528 + sa_size
7529 + ALPHA_ROUND (frame_size
7530 + current_function_pretend_args_size));
7531
7532 if (TARGET_ABI_OPEN_VMS)
7533 reg_offset = 8;
7534 else
7535 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
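
  /* Worked example (illustration only): on OSF with 40 bytes of outgoing
     args, sa_size == 16 and 100 bytes of locals, ALPHA_ROUND -- which
     rounds up to a 16-byte multiple -- gives frame_size == 48 + 16 + 112
     == 176, with the register save area at sp+48.  */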

  alpha_sa_mask (&imask, &fmask);

  /* Emit an insn to reload GP, if needed.  */
  if (TARGET_ABI_OSF)
    {
      alpha_function_needs_gp = alpha_does_function_need_gp ();
      if (alpha_function_needs_gp)
	emit_insn (gen_prologue_ldgp ());
    }

  /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
     the call to mcount ourselves, rather than having the linker do it
     magically in response to -pg.  Since _mcount has special linkage,
     don't represent the call as a call.  */
  if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
    emit_insn (gen_prologue_mcount ());

  if (TARGET_ABI_UNICOSMK)
    unicosmk_gen_dsib (&imask);

  /* Adjust the stack by the frame size.  If the frame size is > 4096
     bytes, we need to be sure we probe somewhere in the first and last
     4096 bytes (we can probably get away without the latter test) and
     every 8192 bytes in between.  If the frame size is > 32768, we
     do this in a loop.  Otherwise, we generate the explicit probe
     instructions.

     Note that we are only allowed to adjust sp once in the prologue.  */
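
  /* Worked example (illustration only): for frame_size == 20000 the code
     below probes sp-4096 and sp-12288 and leaves probed == 20480; since
     20000 > 20480 - 4096, a function that saves no registers also gets a
     final probe at sp-20000 before the single sp adjustment.  */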

  if (frame_size <= 32768)
    {
      if (frame_size > 4096)
	{
	  int probed;

	  for (probed = 4096; probed < frame_size; probed += 8192)
	    emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
						 ? -probed + 64
						 : -probed)));

	  /* We only have to do this probe if we aren't saving registers.  */
	  if (sa_size == 0 && frame_size > probed - 4096)
	    emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
	}

      if (frame_size != 0)
	FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
				    GEN_INT (TARGET_ABI_UNICOSMK
					     ? -frame_size + 64
					     : -frame_size))));
    }
  else
    {
      /* Here we generate code to set R22 to SP + 4096 and set R23 to the
	 number of 8192 byte blocks to probe.  We then probe each block
	 in the loop and then set SP to the proper location.  If the
	 amount remaining is > 4096, we have to do one more probe if we
	 are not saving any registers.  */

      HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
      HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
      rtx ptr = gen_rtx_REG (DImode, 22);
      rtx count = gen_rtx_REG (DImode, 23);
      rtx seq;

      emit_move_insn (count, GEN_INT (blocks));
      emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
			     GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));

      /* Because of the difficulty in emitting a new basic block this
	 late in the compilation, generate the loop as a single insn.  */
      emit_insn (gen_prologue_stack_probe_loop (count, ptr));

      if (leftover > 4096 && sa_size == 0)
	{
	  rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
	  MEM_VOLATILE_P (last) = 1;
	  emit_move_insn (last, const0_rtx);
	}

      if (TARGET_ABI_WINDOWS_NT)
	{
	  /* For NT stack unwind (done by 'reverse execution'), it's
	     not OK to take the result of a loop, even though the value
	     is already in ptr, so we reload it via a single operation
	     and subtract it from sp.

	     Yes, that's correct -- we have to reload the whole constant
	     into a temporary via ldah+lda then subtract from sp.  */

	  HOST_WIDE_INT lo, hi;
	  lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
	  hi = frame_size - lo;
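
	  /* Illustration: the xor/subtract pair sign-extends the low 16
	     bits exactly as lda will; e.g. frame_size == 0x18000 splits
	     into lo == -0x8000 and hi == 0x20000, both reachable by the
	     ldah+lda pair emitted below.  */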

	  emit_move_insn (ptr, GEN_INT (hi));
	  emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
	  seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
				       ptr));
	}
      else
	{
	  seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
				       GEN_INT (-leftover)));
	}

      /* This alternative is special, because the DWARF code cannot
	 possibly intuit through the loop above.  So we invent this
	 note for it to look at instead.  */
      RTX_FRAME_RELATED_P (seq) = 1;
      REG_NOTES (seq)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				gen_rtx_PLUS (Pmode, stack_pointer_rtx,
					      GEN_INT (TARGET_ABI_UNICOSMK
						       ? -frame_size + 64
						       : -frame_size))),
			     REG_NOTES (seq));
    }

  if (!TARGET_ABI_UNICOSMK)
    {
      HOST_WIDE_INT sa_bias = 0;

      /* Cope with very large offsets to the register save area.  */
      sa_reg = stack_pointer_rtx;
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  rtx sa_bias_rtx;

	  if (low + sa_size <= 0x8000)
	    sa_bias = reg_offset - low, reg_offset = low;
	  else
	    sa_bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 24);
	  sa_bias_rtx = GEN_INT (sa_bias);

	  if (add_operand (sa_bias_rtx, DImode))
	    emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
	  else
	    {
	      emit_move_insn (sa_reg, sa_bias_rtx);
	      emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
	    }
	}

      /* Save regs in stack order, beginning with VMS PV.  */
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
	emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);

      /* Save register RA next.  */
      if (imask & (1UL << REG_RA))
	{
	  emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
	  imask &= ~(1UL << REG_RA);
	  reg_offset += 8;
	}

      /* Now save any other registers required to be saved.  */
      for (i = 0; i < 31; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* The standard frame on the T3E includes space for saving registers.
	 We just have to use it.  We don't have to save the return address and
	 the old frame pointer here - they are saved in the DSIB.  */

      reg_offset = -56;
      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
	    reg_offset -= 8;
	  }
    }

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_REGISTER)
	/* Register frame procedures save the fp.
	   ??? Ought to have a dwarf2 save for this.  */
	emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
			hard_frame_pointer_rtx);

      if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
	emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
				    gen_rtx_REG (DImode, REG_PV)));

      if (alpha_procedure_type != PT_NULL
	  && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

      /* If we have to allocate space for outgoing args, do it now.  */
      if (current_function_outgoing_args_size != 0)
	{
	  rtx seq
	    = emit_move_insn (stack_pointer_rtx,
			      plus_constant
			      (hard_frame_pointer_rtx,
			       - (ALPHA_ROUND
				  (current_function_outgoing_args_size))));

	  /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
	     if ! frame_pointer_needed.  Setting the bit will change the CFA
	     computation rule to use sp again, which would be wrong if we had
	     frame_pointer_needed, as this means sp might move unpredictably
	     later on.

	     Also, note that
	       frame_pointer_needed
	       => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
	     and
	       current_function_outgoing_args_size != 0
	       => alpha_procedure_type != PT_NULL,

	     so when we are not setting the bit here, we are guaranteed to
	     have emitted an FRP frame pointer update just before.  */
	  RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
	}
    }
  else if (!TARGET_ABI_UNICOSMK)
    {
      /* If we need a frame pointer, set it from the stack pointer.  */
      if (frame_pointer_needed)
	{
	  if (TARGET_CAN_FAULT_IN_PROLOGUE)
	    FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
	  else
	    /* This must always be the last instruction in the
	       prologue, thus we emit a special move + clobber.  */
	    FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
					 stack_pointer_rtx, sa_reg)));
	}
    }

  /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
     the prologue, for exception handling reasons, we cannot do this for
     any insn that might fault.  We could prevent this for mems with a
     (clobber:BLK (scratch)), but this doesn't work for fp insns.  So we
     have to prevent all such scheduling with a blockage.

     Linux, on the other hand, never bothered to implement OSF/1's
     exception handling, and so doesn't care about such things.  Anyone
     planning to use dwarf2 frame-unwind info can also omit the blockage.  */

  if (! TARGET_CAN_FAULT_IN_PROLOGUE)
    emit_insn (gen_blockage ());
}

/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Output the textual info surrounding the prologue.  */

void
alpha_start_function (FILE *file, const char *fnname,
		      tree decl ATTRIBUTE_UNUSED)
{
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  unsigned HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  char *entry_label = (char *) alloca (strlen (fnname) + 6);
  int i;

  /* Don't emit an extern directive for functions defined in the same file.  */
  if (TARGET_ABI_UNICOSMK)
    {
      tree name_tree;
      name_tree = get_identifier (fnname);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }

  alpha_fnname = fnname;
  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    reg_offset = 8;
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  /* Ecoff can handle multiple .file directives, so put out file and lineno.
     We have to do that before the .ent directive as we cannot switch
     files within procedures with native ecoff because line numbers are
     linked to procedure descriptors.
     Outputting the lineno helps debugging of one line functions as they
     would otherwise get no line number at all.  Please note that we would
     like to put out last_linenum from final.c, but it is not accessible.  */

  if (write_symbols == SDB_DEBUG)
    {
#ifdef ASM_OUTPUT_SOURCE_FILENAME
      ASM_OUTPUT_SOURCE_FILENAME (file,
				  DECL_SOURCE_FILE (current_function_decl));
#endif
#ifdef SDB_OUTPUT_SOURCE_LINE
      if (debug_info_level != DINFO_LEVEL_TERSE)
	SDB_OUTPUT_SOURCE_LINE (file,
				DECL_SOURCE_LINE (current_function_decl));
#endif
    }

  /* Issue function start and label.  */
  if (TARGET_ABI_OPEN_VMS
      || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
    {
      fputs ("\t.ent ", file);
      assemble_name (file, fnname);
      putc ('\n', file);

      /* If the function needs GP, we'll write the "..ng" label there.
	 Otherwise, do it here.  */
      if (TARGET_ABI_OSF
	  && ! alpha_function_needs_gp
	  && ! current_function_is_thunk)
	{
	  putc ('$', file);
	  assemble_name (file, fnname);
	  fputs ("..ng:\n", file);
	}
    }

  strcpy (entry_label, fnname);
  if (TARGET_ABI_OPEN_VMS)
    strcat (entry_label, "..en");

  /* For public functions, the label must be globalized by appending an
     additional colon.  */
  if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
    strcat (entry_label, ":");

  ASM_OUTPUT_LABEL (file, entry_label);
  inside_function = TRUE;

  if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.base $%d\n", vms_base_regno);

  if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
      && !flag_inhibit_size_directive)
    {
      /* Set flags in procedure descriptor to request IEEE-conformant
	 math-library routines.  The value we set it to is PDSC_EXC_IEEE
	 (/usr/include/pdsc.h).  */
      fputs ("\t.eflag 48\n", file);
    }

  /* Set up offsets to alpha virtual arg/local debugging pointer.  */
  alpha_auto_offset = -frame_size + current_function_pretend_args_size;
  alpha_arg_offset = -frame_size + 48;

  /* Describe our frame.  If the frame size is larger than an integer,
     print it as zero to avoid an assembler error.  We won't be
     properly describing such a frame, but that's the best we can do.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
	     HOST_WIDE_INT_PRINT_DEC "\n",
	     vms_unwind_regno,
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     reg_offset);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
	     (frame_pointer_needed
	      ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
	     frame_size >= (1UL << 31) ? 0 : frame_size,
	     current_function_pretend_args_size);

  /* Describe which registers were spilled.  */
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    {
      if (imask)
	/* ??? Does VMS care if mask contains ra?  The old code didn't
	   set it, so I don't here.  */
	fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
      if (fmask)
	fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
      if (alpha_procedure_type == PT_REGISTER)
	fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
    }
  else if (!flag_inhibit_size_directive)
    {
      if (imask)
	{
	  fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
		   frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);

	  for (i = 0; i < 32; ++i)
	    if (imask & (1UL << i))
	      reg_offset += 8;
	}

      if (fmask)
	fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
		 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
    }

#if TARGET_ABI_OPEN_VMS
  /* Ifdef'ed because link sections are only available then.  */
  readonly_data_section ();
  fprintf (file, "\t.align 3\n");
  assemble_name (file, fnname); fputs ("..na:\n", file);
  fputs ("\t.ascii \"", file);
  assemble_name (file, fnname);
  fputs ("\\0\"\n", file);
  alpha_need_linkage (fnname, 1);
  text_section ();
#endif
}

/* Emit the .prologue note at the scheduled end of the prologue.  */

static void
alpha_output_function_end_prologue (FILE *file)
{
  if (TARGET_ABI_UNICOSMK)
    ;
  else if (TARGET_ABI_OPEN_VMS)
    fputs ("\t.prologue\n", file);
  else if (TARGET_ABI_WINDOWS_NT)
    fputs ("\t.prologue 0\n", file);
  else if (!flag_inhibit_size_directive)
    fprintf (file, "\t.prologue %d\n",
	     alpha_function_needs_gp || current_function_is_thunk);
}

/* Write function epilogue.  */

/* ??? At some point we will want to support full unwind, and so will
   need to mark the epilogue as well.  At the moment, we just confuse
   dwarf2out.  */
#undef FRP
#define FRP(exp) exp

void
alpha_expand_epilogue (void)
{
  /* Registers to save.  */
  unsigned long imask = 0;
  unsigned long fmask = 0;
  /* Stack space needed for pushing registers clobbered by us.  */
  HOST_WIDE_INT sa_size;
  /* Complete stack size needed.  */
  HOST_WIDE_INT frame_size;
  /* Offset from base reg to register save area.  */
  HOST_WIDE_INT reg_offset;
  int fp_is_frame_pointer, fp_offset;
  rtx sa_reg, sa_reg_exp = NULL;
  rtx sp_adj1, sp_adj2, mem;
  rtx eh_ofs;
  int i;

  sa_size = alpha_sa_size ();

  frame_size = get_frame_size ();
  if (TARGET_ABI_OPEN_VMS)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 8 : 0)
			      + frame_size
			      + current_function_pretend_args_size);
  else if (TARGET_ABI_UNICOSMK)
    frame_size = ALPHA_ROUND (sa_size
			      + (alpha_procedure_type == PT_STACK ? 48 : 0))
		 + ALPHA_ROUND (frame_size
				+ current_function_outgoing_args_size);
  else
    frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
		  + sa_size
		  + ALPHA_ROUND (frame_size
				 + current_function_pretend_args_size));

  if (TARGET_ABI_OPEN_VMS)
    {
      if (alpha_procedure_type == PT_STACK)
	reg_offset = 8;
      else
	reg_offset = 0;
    }
  else
    reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);

  alpha_sa_mask (&imask, &fmask);

  fp_is_frame_pointer
    = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
       || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
  fp_offset = 0;
  sa_reg = stack_pointer_rtx;

  if (current_function_calls_eh_return)
    eh_ofs = EH_RETURN_STACKADJ_RTX;
  else
    eh_ofs = NULL_RTX;

  if (!TARGET_ABI_UNICOSMK && sa_size)
    {
      /* If we have a frame pointer, restore SP from it.  */
      if ((TARGET_ABI_OPEN_VMS
	   && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
	  || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
	FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));

      /* Cope with very large offsets to the register save area.  */
      if (reg_offset + sa_size > 0x8000)
	{
	  int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
	  HOST_WIDE_INT bias;

	  if (low + sa_size <= 0x8000)
	    bias = reg_offset - low, reg_offset = low;
	  else
	    bias = reg_offset, reg_offset = 0;

	  sa_reg = gen_rtx_REG (DImode, 22);
	  sa_reg_exp = plus_constant (stack_pointer_rtx, bias);

	  FRP (emit_move_insn (sa_reg, sa_reg_exp));
	}

      /* Restore registers in order, excepting a true frame pointer.  */

      mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
      if (! eh_ofs)
	set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));

      reg_offset += 8;
      imask &= ~(1UL << REG_RA);

      for (i = 0; i < 31; ++i)
	if (imask & (1UL << i))
	  {
	    if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
	      fp_offset = reg_offset;
	    else
	      {
		mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
		set_mem_alias_set (mem, alpha_sr_alias_set);
		FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	      }
	    reg_offset += 8;
	  }

      for (i = 0; i < 31; ++i)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset += 8;
	  }
    }
  else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
    {
      /* Restore callee-saved general-purpose registers.  */

      reg_offset = -56;

      for (i = 9; i < 15; i++)
	if (imask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx,
						      reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
	    reg_offset -= 8;
	  }

      for (i = 2; i < 10; i++)
	if (fmask & (1UL << i))
	  {
	    mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
						      reg_offset));
	    set_mem_alias_set (mem, alpha_sr_alias_set);
	    FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
	    reg_offset -= 8;
	  }

      /* Restore the return address from the DSIB.  */

      mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
      set_mem_alias_set (mem, alpha_sr_alias_set);
      FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
    }

  if (frame_size || eh_ofs)
    {
      sp_adj1 = stack_pointer_rtx;

      if (eh_ofs)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  emit_move_insn (sp_adj1,
			  gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
	}

      /* If the stack size is large, begin computation into a temporary
	 register so as not to interfere with a potential fp restore,
	 which must be consecutive with an SP restore.  */
      if (frame_size < 32768
	  && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
	sp_adj2 = GEN_INT (frame_size);
      else if (TARGET_ABI_UNICOSMK)
	{
	  sp_adj1 = gen_rtx_REG (DImode, 23);
	  FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
	  sp_adj2 = const0_rtx;
	}
      else if (frame_size < 0x40007fffL)
	{
	  int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;

	  sp_adj2 = plus_constant (sp_adj1, frame_size - low);
	  if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
	    sp_adj1 = sa_reg;
	  else
	    {
	      sp_adj1 = gen_rtx_REG (DImode, 23);
	      FRP (emit_move_insn (sp_adj1, sp_adj2));
	    }
	  sp_adj2 = GEN_INT (low);
	}
      else
	{
	  rtx tmp = gen_rtx_REG (DImode, 23);
	  FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
					       3, false));
	  if (!sp_adj2)
	    {
	      /* We can't drop new things to memory this late, afaik,
		 so build it up by pieces.  */
	      FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
							-(frame_size < 0)));
	      gcc_assert (sp_adj2);
	    }
	}

      /* From now on, things must be in order.  So emit blockages.  */

      /* Restore the frame pointer.  */
      if (TARGET_ABI_UNICOSMK)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode,
			     plus_constant (hard_frame_pointer_rtx, -16));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (fp_is_frame_pointer)
	{
	  emit_insn (gen_blockage ());
	  mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
	  set_mem_alias_set (mem, alpha_sr_alias_set);
	  FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
	}
      else if (TARGET_ABI_OPEN_VMS)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}

      /* Restore the stack pointer.  */
      emit_insn (gen_blockage ());
      if (sp_adj2 == const0_rtx)
	FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
      else
	FRP (emit_move_insn (stack_pointer_rtx,
			     gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
    }
  else
    {
      if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
	{
	  emit_insn (gen_blockage ());
	  FRP (emit_move_insn (hard_frame_pointer_rtx,
			       gen_rtx_REG (DImode, vms_save_fp_regno)));
	}
      else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
	{
	  /* Decrement the frame pointer if the function does not have a
	     frame.  */

	  emit_insn (gen_blockage ());
	  FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
				      hard_frame_pointer_rtx, constm1_rtx)));
	}
    }
}

/* Output the rest of the textual info surrounding the epilogue.  */

void
alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
{
#if TARGET_ABI_OPEN_VMS
  alpha_write_linkage (file, fnname, decl);
#endif

  /* End the function.  */
  if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
    {
      fputs ("\t.end ", file);
      assemble_name (file, fnname);
      putc ('\n', file);
    }
  inside_function = FALSE;

  /* Output jump tables and the static subroutine information block.  */
  if (TARGET_ABI_UNICOSMK)
    {
      unicosmk_output_ssib (file, fnname);
      unicosmk_output_deferred_case_vectors (file);
    }
}

#if TARGET_ABI_OSF
/* Emit a tail call to FUNCTION after adjusting THIS by DELTA.

   In order to avoid the hordes of differences between generated code
   with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
   lots of code loading up large constants, generate rtl and emit it
   instead of going straight to text.

   Not sure why this idea hasn't been explored before...  */

static void
alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			   HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			   tree function)
{
  HOST_WIDE_INT hi, lo;
  rtx this, insn, funexp;

  reset_block_changes ();

  /* We always require a valid GP.  */
  emit_insn (gen_prologue_ldgp ());
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in $16.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, 17);
  else
    this = gen_rtx_REG (Pmode, 16);

  /* Add DELTA.  When possible we use ldah+lda.  Otherwise load the
     entire constant for the add.  */
  lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
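
  /* Illustration: lo is DELTA's low 16 bits sign-extended and hi the
     rest reduced to signed 32 bits, so hi + lo == delta exactly when
     delta is reachable by an ldah+lda pair; e.g. delta == 0x12345
     splits into hi == 0x10000 and lo == 0x2345.  */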
  if (hi + lo == delta)
    {
      if (hi)
	emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
      if (lo)
	emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
    }
  else
    {
      rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
					   delta, -(delta < 0));
      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Add a delta stored in the vtable at VCALL_OFFSET.  */
  if (vcall_offset)
    {
      rtx tmp, tmp2;

      tmp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
      hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (hi + lo == vcall_offset)
	{
	  if (hi)
	    emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
	}
      else
	{
	  tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
					    vcall_offset, -(vcall_offset < 0));
	  emit_insn (gen_adddi3 (tmp, tmp, tmp2));
	  lo = 0;
	}
      if (lo)
	tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
      else
	tmp2 = tmp;
      emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  SIBLING_CALL_P (insn) = 1;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
}
#endif /* TARGET_ABI_OSF */

/* Debugging support.  */

#include "gstab.h"

/* Count the number of sdb-related labels that are generated (to find
   block start and end boundaries).  */

int sdb_label_count = 0;

/* Name of the file containing the current function.  */

static const char *current_function_file = "";

/* Offsets to alpha virtual arg/local debugging pointers.  */

long alpha_arg_offset;
long alpha_auto_offset;

/* Emit a new filename to a stream.  */

void
alpha_output_filename (FILE *stream, const char *name)
{
  static int first_time = TRUE;

  if (first_time)
    {
      first_time = FALSE;
      ++num_source_filenames;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      fprintf (stream, "\n");
      if (!TARGET_GAS && write_symbols == DBX_DEBUG)
	fprintf (stream, "\t#@stabs\n");
    }

  else if (write_symbols == DBX_DEBUG)
    /* dbxout.c will emit an appropriate .stabs directive.  */
    return;

  else if (name != current_function_file
	   && strcmp (name, current_function_file) != 0)
    {
      if (inside_function && ! TARGET_GAS)
	fprintf (stream, "\t#.file\t%d ", num_source_filenames);
      else
	{
	  ++num_source_filenames;
	  current_function_file = name;
	  fprintf (stream, "\t.file\t%d ", num_source_filenames);
	}

      output_quoted_string (stream, name);
      fprintf (stream, "\n");
    }
}

/* Structure to show the current status of registers and memory.  */

struct shadow_summary
{
  struct {
    unsigned int i     : 31;	/* Mask of int regs */
    unsigned int fp    : 31;	/* Mask of fp regs */
    unsigned int mem   : 1;	/* mem == imem | fpmem */
  } used, defd;
};
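
/* Illustration: each 31-bit mask holds one bit per register, so a use of
   $9 sets bit 9 of used.i and a write to $f10 sets bit 10 of defd.fp,
   while any memory reference sets the single mem bit.  Registers $31 and
   $f31 read as zero and are deliberately never recorded.  */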

/* Summarize the effects of expression X on the machine.  Update SUM, a
   pointer to the summary structure.  SET is nonzero if the insn is setting
   the object, otherwise zero.  */

static void
summarize_insn (rtx x, struct shadow_summary *sum, int set)
{
  const char *format_ptr;
  int i, j;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
      /* ??? Note that this case would be incorrect if the Alpha had a
	 ZERO_EXTRACT in SET_DEST.  */
    case SET:
      summarize_insn (SET_SRC (x), sum, 0);
      summarize_insn (SET_DEST (x), sum, 1);
      break;

    case CLOBBER:
      summarize_insn (XEXP (x, 0), sum, 1);
      break;

    case USE:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
      break;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	summarize_insn (XVECEXP (x, 0, i), sum, 0);
      break;

    case SUBREG:
      summarize_insn (SUBREG_REG (x), sum, 0);
      break;

    case REG:
      {
	int regno = REGNO (x);
	unsigned long mask = ((unsigned long) 1) << (regno % 32);

	if (regno == 31 || regno == 63)
	  break;

	if (set)
	  {
	    if (regno < 32)
	      sum->defd.i |= mask;
	    else
	      sum->defd.fp |= mask;
	  }
	else
	  {
	    if (regno < 32)
	      sum->used.i |= mask;
	    else
	      sum->used.fp |= mask;
	  }
      }
      break;

    case MEM:
      if (set)
	sum->defd.mem = 1;
      else
	sum->used.mem = 1;

      /* Find the regs used in memory address computation: */
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    case CONST_INT:   case CONST_DOUBLE:
    case SYMBOL_REF:  case LABEL_REF:     case CONST:
    case SCRATCH:     case ASM_INPUT:
      break;

      /* Handle common unary and binary ops for efficiency.  */
    case COMPARE:  case PLUS:    case MINUS:   case MULT:      case DIV:
    case MOD:      case UDIV:    case UMOD:    case AND:       case IOR:
    case XOR:      case ASHIFT:  case ROTATE:  case ASHIFTRT:  case LSHIFTRT:
    case ROTATERT: case SMIN:    case SMAX:    case UMIN:      case UMAX:
    case NE:       case EQ:      case GE:      case GT:        case LE:
    case LT:       case GEU:     case GTU:     case LEU:       case LTU:
      summarize_insn (XEXP (x, 0), sum, 0);
      summarize_insn (XEXP (x, 1), sum, 0);
      break;

    case NEG:  case NOT:  case SIGN_EXTEND:  case ZERO_EXTEND:
    case TRUNCATE:  case FLOAT_EXTEND:  case FLOAT_TRUNCATE:  case FLOAT:
    case FIX:  case UNSIGNED_FLOAT:  case UNSIGNED_FIX:  case ABS:
    case SQRT:  case FFS:
      summarize_insn (XEXP (x, 0), sum, 0);
      break;

    default:
      format_ptr = GET_RTX_FORMAT (GET_CODE (x));
      for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
	switch (format_ptr[i])
	  {
	  case 'e':
	    summarize_insn (XEXP (x, i), sum, 0);
	    break;

	  case 'E':
	    for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	      summarize_insn (XVECEXP (x, i, j), sum, 0);
	    break;

	  case 'i':
	    break;

	  default:
	    gcc_unreachable ();
	  }
    }
}

/* Ensure a sufficient number of `trapb' insns are in the code when
   the user requests code with a trap precision of functions or
   instructions.

   In naive mode, when the user requests a trap-precision of
   "instruction", a trapb is needed after every instruction that may
   generate a trap.  This ensures that the code is resumption safe but
   it is also slow.

   When optimizations are turned on, we delay issuing a trapb as long
   as possible.  In this context, a trap shadow is the sequence of
   instructions that starts with a (potentially) trap generating
   instruction and extends to the next trapb or call_pal instruction
   (but GCC never generates call_pal by itself).  We can delay (and
   therefore sometimes omit) a trapb subject to the following
   conditions:

   (a) On entry to the trap shadow, if any Alpha register or memory
   location contains a value that is used as an operand value by some
   instruction in the trap shadow (live on entry), then no instruction
   in the trap shadow may modify the register or memory location.

   (b) Within the trap shadow, the computation of the base register
   for a memory load or store instruction may not involve using the
   result of an instruction that might generate an UNPREDICTABLE
   result.

   (c) Within the trap shadow, no register may be used more than once
   as a destination register.  (This is to make life easier for the
   trap-handler.)

   (d) The trap shadow may not include any branch instructions.  */
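
/* Example (illustration only): in a shadow opened by a potentially
   trapping "addt $f1,$f2,$f10", a later "mult $f3,$f4,$f10" would reuse
   $f10 as a destination and violate (c), so the pass below closes the
   shadow by emitting a trapb in front of it.  */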

static void
alpha_handle_trap_shadows (void)
{
  struct shadow_summary shadow;
  int trap_pending, exception_nesting;
  rtx i, n;

  trap_pending = 0;
  exception_nesting = 0;
  shadow.used.i = 0;
  shadow.used.fp = 0;
  shadow.used.mem = 0;
  shadow.defd = shadow.used;

  for (i = get_insns (); i ; i = NEXT_INSN (i))
    {
      if (GET_CODE (i) == NOTE)
	{
	  switch (NOTE_LINE_NUMBER (i))
	    {
	    case NOTE_INSN_EH_REGION_BEG:
	      exception_nesting++;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EH_REGION_END:
	      exception_nesting--;
	      if (trap_pending)
		goto close_shadow;
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	      if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
		goto close_shadow;
	      break;
	    }
	}
      else if (trap_pending)
	{
	  if (alpha_tp == ALPHA_TP_FUNC)
	    {
	      if (GET_CODE (i) == JUMP_INSN
		  && GET_CODE (PATTERN (i)) == RETURN)
		goto close_shadow;
	    }
	  else if (alpha_tp == ALPHA_TP_INSN)
	    {
	      if (optimize > 0)
		{
		  struct shadow_summary sum;

		  sum.used.i = 0;
		  sum.used.fp = 0;
		  sum.used.mem = 0;
		  sum.defd = sum.used;

		  switch (GET_CODE (i))
		    {
		    case INSN:
		      /* Annoyingly, get_attr_trap will die on these.  */
		      if (GET_CODE (PATTERN (i)) == USE
			  || GET_CODE (PATTERN (i)) == CLOBBER)
			break;

		      summarize_insn (PATTERN (i), &sum, 0);

		      if ((sum.defd.i & shadow.defd.i)
			  || (sum.defd.fp & shadow.defd.fp))
			{
			  /* (c) would be violated */
			  goto close_shadow;
			}

		      /* Combine shadow with summary of current insn: */
		      shadow.used.i |= sum.used.i;
		      shadow.used.fp |= sum.used.fp;
		      shadow.used.mem |= sum.used.mem;
		      shadow.defd.i |= sum.defd.i;
		      shadow.defd.fp |= sum.defd.fp;
		      shadow.defd.mem |= sum.defd.mem;

		      if ((sum.defd.i & shadow.used.i)
			  || (sum.defd.fp & shadow.used.fp)
			  || (sum.defd.mem & shadow.used.mem))
			{
			  /* (a) would be violated (also takes care of (b))  */
			  gcc_assert (get_attr_trap (i) != TRAP_YES
				      || (!(sum.defd.i & sum.used.i)
					  && !(sum.defd.fp & sum.used.fp)));

			  goto close_shadow;
			}
		      break;

		    case JUMP_INSN:
		    case CALL_INSN:
		    case CODE_LABEL:
		      goto close_shadow;

		    default:
		      gcc_unreachable ();
		    }
		}
	      else
		{
		close_shadow:
		  n = emit_insn_before (gen_trapb (), i);
		  PUT_MODE (n, TImode);
		  PUT_MODE (i, TImode);
		  trap_pending = 0;
		  shadow.used.i = 0;
		  shadow.used.fp = 0;
		  shadow.used.mem = 0;
		  shadow.defd = shadow.used;
		}
	    }
	}

      if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
	  && GET_CODE (i) == INSN
	  && GET_CODE (PATTERN (i)) != USE
	  && GET_CODE (PATTERN (i)) != CLOBBER
	  && get_attr_trap (i) == TRAP_YES)
	{
	  if (optimize && !trap_pending)
	    summarize_insn (PATTERN (i), &shadow, 0);
	  trap_pending = 1;
	}
    }
}

/* Alpha can only issue instruction groups simultaneously if they are
   suitably aligned.  This is very processor-specific.  */
/* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
   that are marked "fake".  These instructions do not exist on that target,
   but it is possible to see these insns with deranged combinations of
   command-line options, such as "-mtune=ev4 -mmax".  Instead of aborting,
   choose a result at random.  */

enum alphaev4_pipe {
  EV4_STOP = 0,
  EV4_IB0 = 1,
  EV4_IB1 = 2,
  EV4_IBX = 4
};

enum alphaev5_pipe {
  EV5_STOP = 0,
  EV5_NONE = 1,
  EV5_E01 = 2,
  EV5_E0 = 4,
  EV5_E1 = 8,
  EV5_FAM = 16,
  EV5_FA = 32,
  EV5_FM = 64
};

static enum alphaev4_pipe
alphaev4_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV4_STOP;
  if (get_attr_length (insn) != 4)
    return EV4_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_LDSYM:
    case TYPE_FLD:
    case TYPE_LD_L:
      return EV4_IBX;

    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_FBR:
    case TYPE_MVI:		/* fake */
      return EV4_IB0;

    case TYPE_IST:
    case TYPE_MISC:
    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
    case TYPE_FCPYS:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FMUL:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FSQRT:		/* fake */
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV4_IB1;

    default:
      gcc_unreachable ();
    }
}

static enum alphaev5_pipe
alphaev5_insn_pipe (rtx insn)
{
  if (recog_memoized (insn) < 0)
    return EV5_STOP;
  if (get_attr_length (insn) != 4)
    return EV5_STOP;

  switch (get_attr_type (insn))
    {
    case TYPE_ILD:
    case TYPE_FLD:
    case TYPE_LDSYM:
    case TYPE_IADD:
    case TYPE_ILOG:
    case TYPE_ICMOV:
    case TYPE_ICMP:
      return EV5_E01;

    case TYPE_IST:
    case TYPE_FST:
    case TYPE_SHIFT:
    case TYPE_IMUL:
    case TYPE_MISC:
    case TYPE_MVI:
    case TYPE_LD_L:
    case TYPE_ST_C:
    case TYPE_MB:
    case TYPE_FTOI:		/* fake */
    case TYPE_ITOF:		/* fake */
      return EV5_E0;

    case TYPE_IBR:
    case TYPE_JSR:
    case TYPE_CALLPAL:
      return EV5_E1;

    case TYPE_FCPYS:
      return EV5_FAM;

    case TYPE_FBR:
    case TYPE_FCMOV:
    case TYPE_FADD:
    case TYPE_FDIV:
    case TYPE_FSQRT:		/* fake */
      return EV5_FA;

    case TYPE_FMUL:
      return EV5_FM;

    default:
      gcc_unreachable ();
    }
}

/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev4_pipe above.  If EV4_IBX is set, then
   the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev4_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev4_pipe pipe;

      pipe = alphaev4_insn_pipe (insn);
      switch (pipe)
	{
	case EV4_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	case EV4_IBX:
	  if (in_use & EV4_IB0)
	    {
	      if (in_use & EV4_IB1)
		goto done;
	      in_use |= EV4_IB1;
	    }
	  else
	    in_use |= EV4_IB0 | EV4_IBX;
	  break;

	case EV4_IB0:
	  if (in_use & EV4_IB0)
	    {
	      if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
		goto done;
	      in_use |= EV4_IB1;
	    }
	  in_use |= EV4_IB0;
	  break;

	case EV4_IB1:
	  if (in_use & EV4_IB1)
	    goto done;
	  in_use |= EV4_IB1;
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}

/* IN_USE is a mask of the slots currently filled within the insn group.
   The mask bits come from alphaev5_pipe above.  If EV5_E01 is set, then
   the insn in EV5_E0 can be swapped by the hardware into EV5_E1.

   LEN is, of course, the length of the group in bytes.  */

static rtx
alphaev5_next_group (rtx insn, int *pin_use, int *plen)
{
  int len, in_use;

  len = in_use = 0;

  if (! INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE)
    goto next_and_done;

  while (1)
    {
      enum alphaev5_pipe pipe;

      pipe = alphaev5_insn_pipe (insn);
      switch (pipe)
	{
	case EV5_STOP:
	  /* Force complex instructions to start new groups.  */
	  if (in_use)
	    goto done;

	  /* If this is a completely unrecognized insn, it's an asm.
	     We don't know how long it is, so record length as -1 to
	     signal a needed realignment.  */
	  if (recog_memoized (insn) < 0)
	    len = -1;
	  else
	    len = get_attr_length (insn);
	  goto next_and_done;

	  /* ??? Most of the places below, we would like to assert never
	     happen, as it would indicate an error either in Haifa, or
	     in the scheduling description.  Unfortunately, Haifa never
	     schedules the last instruction of the BB, so we don't have
	     an accurate TI bit to go off.  */
	case EV5_E01:
	  if (in_use & EV5_E0)
	    {
	      if (in_use & EV5_E1)
		goto done;
	      in_use |= EV5_E1;
	    }
	  else
	    in_use |= EV5_E0 | EV5_E01;
	  break;

	case EV5_E0:
	  if (in_use & EV5_E0)
	    {
	      if (!(in_use & EV5_E01) || (in_use & EV5_E1))
		goto done;
	      in_use |= EV5_E1;
	    }
	  in_use |= EV5_E0;
	  break;

	case EV5_E1:
	  if (in_use & EV5_E1)
	    goto done;
	  in_use |= EV5_E1;
	  break;

	case EV5_FAM:
	  if (in_use & EV5_FA)
	    {
	      if (in_use & EV5_FM)
		goto done;
	      in_use |= EV5_FM;
	    }
	  else
	    in_use |= EV5_FA | EV5_FAM;
	  break;

	case EV5_FA:
	  if (in_use & EV5_FA)
	    goto done;
	  in_use |= EV5_FA;
	  break;

	case EV5_FM:
	  if (in_use & EV5_FM)
	    goto done;
	  in_use |= EV5_FM;
	  break;

	case EV5_NONE:
	  break;

	default:
	  gcc_unreachable ();
	}
      len += 4;

      /* Haifa doesn't do well scheduling branches.  */
      /* ??? If this is predicted not-taken, slotting continues, except
	 that no more IBR, FBR, or JSR insns may be slotted.  */
      if (GET_CODE (insn) == JUMP_INSN)
	goto next_and_done;

    next:
      insn = next_nonnote_insn (insn);

      if (!insn || ! INSN_P (insn))
	goto done;

      /* Let Haifa tell us where it thinks insn group boundaries are.  */
      if (GET_MODE (insn) == TImode)
	goto done;

      if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
	goto next;
    }

 next_and_done:
  insn = next_nonnote_insn (insn);

 done:
  *plen = len;
  *pin_use = in_use;
  return insn;
}

static rtx
alphaev4_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV4_IB0))
    {
      in_use |= EV4_IB0;
      nop = gen_nop ();
    }
  else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
    {
      in_use |= EV4_IB1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV4_IB1))
    {
      in_use |= EV4_IB1;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

static rtx
alphaev5_next_nop (int *pin_use)
{
  int in_use = *pin_use;
  rtx nop;

  if (!(in_use & EV5_E1))
    {
      in_use |= EV5_E1;
      nop = gen_nop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FA))
    {
      in_use |= EV5_FA;
      nop = gen_fnop ();
    }
  else if (TARGET_FP && !(in_use & EV5_FM))
    {
      in_use |= EV5_FM;
      nop = gen_fnop ();
    }
  else
    nop = gen_unop ();

  *pin_use = in_use;
  return nop;
}

/* The instruction group alignment main loop.  */

static void
alpha_align_insns (unsigned int max_align,
		   rtx (*next_group) (rtx, int *, int *),
		   rtx (*next_nop) (int *))
{
  /* ALIGN is the known alignment for the insn group.  */
  unsigned int align;
  /* OFS is the offset of the current insn in the insn group.  */
  int ofs;
  int prev_in_use, in_use, len, ldgp;
  rtx i, next;

  /* Let shorten_branches take care of assigning alignments to code
     labels.  */
  shorten_branches (get_insns ());

  if (align_functions < 4)
    align = 4;
  else if ((unsigned int) align_functions < max_align)
    align = align_functions;
  else
    align = max_align;

  ofs = prev_in_use = 0;
  i = get_insns ();
  if (GET_CODE (i) == NOTE)
    i = next_nonnote_insn (i);

  ldgp = alpha_function_needs_gp ? 8 : 0;
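
  /* Illustration: 8 here is presumably the byte length of the two-insn
     ldah/lda gp reload emitted by prologue_ldgp; the loop below refuses
     to insert padding until that many bytes of insns have gone by,
     keeping the pair contiguous.  */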

  while (i)
    {
      next = (*next_group) (i, &in_use, &len);

      /* When we see a label, resync alignment etc.  */
      if (GET_CODE (i) == CODE_LABEL)
	{
	  unsigned int new_align = 1 << label_to_alignment (i);

	  if (new_align >= align)
	    {
	      align = new_align < max_align ? new_align : max_align;
	      ofs = 0;
	    }

	  else if (ofs & (new_align-1))
	    ofs = (ofs | (new_align-1)) + 1;
	  gcc_assert (!len);
	}

      /* Handle complex instructions specially.  */
      else if (in_use == 0)
	{
	  /* Asms will have length < 0.  This is a signal that we have
	     lost alignment knowledge.  Assume, however, that the asm
	     will not mis-align instructions.  */
	  if (len < 0)
	    {
	      ofs = 0;
	      align = 4;
	      len = 0;
	    }
	}

      /* If the known alignment is smaller than the recognized insn group,
	 realign the output.  */
      else if ((int) align < len)
	{
	  unsigned int new_log_align = len > 8 ? 4 : 3;
	  rtx prev, where;

	  where = prev = prev_nonnote_insn (i);
	  if (!where || GET_CODE (where) != CODE_LABEL)
	    where = i;

	  /* Can't realign between a call and its gp reload.  */
	  if (! (TARGET_EXPLICIT_RELOCS
		 && prev && GET_CODE (prev) == CALL_INSN))
	    {
	      emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
	      align = 1 << new_log_align;
	      ofs = 0;
	    }
	}

      /* We may not insert padding inside the initial ldgp sequence.  */
      else if (ldgp > 0)
	ldgp -= len;

      /* If the group won't fit in the same INT16 as the previous,
	 we need to add padding to keep the group together.  Rather
	 than simply leaving the insn filling to the assembler, we
	 can make use of the knowledge of what sorts of instructions
	 were issued in the previous group to make sure that all of
	 the added nops are really free.  */
      else if (ofs + len > (int) align)
	{
	  int nop_count = (align - ofs) / 4;
	  rtx where;

	  /* Insert nops before labels, branches, and calls to truly merge
	     the execution of the nops with the previous instruction group.  */
	  where = prev_nonnote_insn (i);
	  if (where)
	    {
	      if (GET_CODE (where) == CODE_LABEL)
		{
		  rtx where2 = prev_nonnote_insn (where);
		  if (where2 && GET_CODE (where2) == JUMP_INSN)
		    where = where2;
		}
	      else if (GET_CODE (where) == INSN)
		where = i;
	    }
	  else
	    where = i;

	  do
	    emit_insn_before ((*next_nop)(&prev_in_use), where);
	  while (--nop_count);
	  ofs = 0;
	}

      ofs = (ofs + len) & (align - 1);
      prev_in_use = in_use;
      i = next;
    }
}

/* Machine dependent reorg pass.  */

static void
alpha_reorg (void)
{
  if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
    alpha_handle_trap_shadows ();

  /* Due to the number of extra trapb insns, don't bother fixing up
     alignment when trap precision is instruction.  Moreover, we can
     only do our job when sched2 is run.  */
  if (optimize && !optimize_size
      && alpha_tp != ALPHA_TP_INSN
      && flag_schedule_insns_after_reload)
    {
      if (alpha_tune == PROCESSOR_EV4)
	alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
      else if (alpha_tune == PROCESSOR_EV5)
	alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
    }
}

#if !TARGET_ABI_UNICOSMK

#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif

static void
alpha_file_start (void)
{
#ifdef OBJECT_FORMAT_ELF
  /* If emitting dwarf2 debug information, we cannot generate a .file
     directive to start the file, as it will conflict with dwarf2out
     file numbers.  So it's only useful when emitting mdebug output.  */
  targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
#endif

  default_file_start ();
#ifdef MS_STAMP
  fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
#endif

  fputs ("\t.set noreorder\n", asm_out_file);
  fputs ("\t.set volatile\n", asm_out_file);
  if (!TARGET_ABI_OPEN_VMS)
    fputs ("\t.set noat\n", asm_out_file);
  if (TARGET_EXPLICIT_RELOCS)
    fputs ("\t.set nomacro\n", asm_out_file);
  if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
    {
      const char *arch;

      if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
	arch = "ev6";
      else if (TARGET_MAX)
	arch = "pca56";
      else if (TARGET_BWX)
	arch = "ev56";
      else if (alpha_cpu == PROCESSOR_EV5)
	arch = "ev5";
      else
	arch = "ev4";

      fprintf (asm_out_file, "\t.arch %s\n", arch);
    }
}
#endif

#ifdef OBJECT_FORMAT_ELF

/* Switch to the section to which we should output X.  The only thing
   special we do here is to honor small data.  */

static void
alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
			      unsigned HOST_WIDE_INT align)
{
  if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
    /* ??? Consider using mergeable sdata sections.  */
    sdata_section ();
  else
    default_elf_select_rtx_section (mode, x, align);
}

#endif /* OBJECT_FORMAT_ELF */

/* Structure to collect function names for final output in link section.  */
/* Note that items marked with GTY can't be ifdef'ed out.  */

enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};

struct alpha_links GTY(())
{
  int num;
  rtx linkage;
  enum links_kind lkind;
  enum reloc_kind rkind;
};

struct alpha_funcs GTY(())
{
  int num;
  splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
    links;
};

static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  splay_tree alpha_links_tree;
static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
  splay_tree alpha_funcs_tree;

static GTY(()) int alpha_funcs_num;

#if TARGET_ABI_OPEN_VMS

/* Return the VMS argument type corresponding to MODE.  */

enum avms_arg_type
alpha_arg_type (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_FLOAT_VAX ? FF : FS;
    case DFmode:
      return TARGET_FLOAT_VAX ? FD : FT;
    default:
      return I64;
    }
}

/* Return an rtx for an integer representing the VMS Argument Information
   register value.  */

rtx
alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
{
  unsigned HOST_WIDE_INT regval = cum.num_args;
  int i;

  for (i = 0; i < 6; i++)
    regval |= ((int) cum.atypes[i]) << (i * 3 + 8);

  return GEN_INT (regval);
}
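
/* Illustration: the low bits hold the argument count and each of the six
   possible register arguments gets a 3-bit type field starting at bit 8,
   so two arguments of types FS and I64 yield
   2 | (FS << 8) | (I64 << 11).  */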
9452
9453 /* Make (or fake) .linkage entry for function call.
9454
9455 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9456
9457 Return an SYMBOL_REF rtx for the linkage. */
9458
9459 rtx
alpha_need_linkage(const char * name,int is_local)9460 alpha_need_linkage (const char *name, int is_local)
9461 {
9462 splay_tree_node node;
9463 struct alpha_links *al;
9464
9465 if (name[0] == '*')
9466 name++;
9467
9468 if (is_local)
9469 {
9470 struct alpha_funcs *cfaf;
9471
9472 if (!alpha_funcs_tree)
9473 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9474 splay_tree_compare_pointers);
9475
9476 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9477
9478 cfaf->links = 0;
9479 cfaf->num = ++alpha_funcs_num;
9480
9481 splay_tree_insert (alpha_funcs_tree,
9482 (splay_tree_key) current_function_decl,
9483 (splay_tree_value) cfaf);
9484 }
9485
9486 if (alpha_links_tree)
9487 {
9488 /* Is this name already defined? */
9489
9490 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9491 if (node)
9492 {
9493 al = (struct alpha_links *) node->value;
9494 if (is_local)
9495 {
9496 /* Defined here, but previously assumed external. */
9497 if (al->lkind == KIND_EXTERN)
9498 al->lkind = KIND_LOCAL;
9499 }
9500 else
9501 {
9502 /* Used here, but previously assumed unused. */
9503 if (al->lkind == KIND_UNUSED)
9504 al->lkind = KIND_LOCAL;
9505 }
9506 return al->linkage;
9507 }
9508 }
9509 else
9510 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9511
9512 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9513 name = ggc_strdup (name);
9514
9515 /* Assume external if no definition. */
9516 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9517
9518 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9519 get_identifier (name);
9520
9521 /* Construct a SYMBOL_REF for us to call. */
9522 {
9523 size_t name_len = strlen (name);
9524 char *linksym = alloca (name_len + 6);
9525 linksym[0] = '$';
9526 memcpy (linksym + 1, name, name_len);
9527 memcpy (linksym + 1 + name_len, "..lk", 5);
9528 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9529 ggc_alloc_string (linksym, name_len + 5));
9530 }
9531
9532 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9533 (splay_tree_value) al);
9534
9535 return al->linkage;
9536 }
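/* As an example, alpha_need_linkage ("foo", 0) records foo with
   KIND_EXTERN and returns a SYMBOL_REF for "$foo..lk"; a later call with
   IS_LOCAL set finds the existing node, promotes it to KIND_LOCAL, and
   returns the same rtx.  */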
9537
9538 rtx
9539 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9540 {
9541 splay_tree_node cfunnode;
9542 struct alpha_funcs *cfaf;
9543 struct alpha_links *al;
9544 const char *name = XSTR (linkage, 0);
9545
9546 cfaf = (struct alpha_funcs *) 0;
9547 al = (struct alpha_links *) 0;
9548
9549 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9550 cfaf = (struct alpha_funcs *) cfunnode->value;
9551
9552 if (cfaf->links)
9553 {
9554 splay_tree_node lnode;
9555
9556 /* Is this name already defined? */
9557
9558 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9559 if (lnode)
9560 al = (struct alpha_links *) lnode->value;
9561 }
9562 else
9563 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9564
9565 if (!al)
9566 {
9567 size_t name_len;
9568 size_t buflen;
9569 char buf [512];
9570 char *linksym;
9571 splay_tree_node node = 0;
9572 struct alpha_links *anl;
9573
9574 if (name[0] == '*')
9575 name++;
9576
9577 name_len = strlen (name);
9578
9579 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9580 al->num = cfaf->num;
9581
9582 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9583 if (node)
9584 {
9585 anl = (struct alpha_links *) node->value;
9586 al->lkind = anl->lkind;
9587 }
9588
9589 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9590 buflen = strlen (buf);
9591 linksym = alloca (buflen + 1);
9592 memcpy (linksym, buf, buflen + 1);
9593
9594 al->linkage = gen_rtx_SYMBOL_REF
9595 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9596
9597 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9598 (splay_tree_value) al);
9599 }
9600
9601 if (rflag)
9602 al->rkind = KIND_CODEADDR;
9603 else
9604 al->rkind = KIND_LINKAGE;
9605
9606 if (lflag)
9607 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9608 else
9609 return al->linkage;
9610 }
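/* The LFLAG case relies on the pair layout written out below: a local
   linkage is two quadwords, the entry point (foo..en) followed by the
   procedure symbol itself, so linkage+8 addresses the second quadword.  */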
9611
9612 static int
9613 alpha_write_one_linkage (splay_tree_node node, void *data)
9614 {
9615 const char *const name = (const char *) node->key;
9616 struct alpha_links *link = (struct alpha_links *) node->value;
9617 FILE *stream = (FILE *) data;
9618
9619 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9620 if (link->rkind == KIND_CODEADDR)
9621 {
9622 if (link->lkind == KIND_LOCAL)
9623 {
9624 /* Local and used */
9625 fprintf (stream, "\t.quad %s..en\n", name);
9626 }
9627 else
9628 {
9629 /* External and used, request code address. */
9630 fprintf (stream, "\t.code_address %s\n", name);
9631 }
9632 }
9633 else
9634 {
9635 if (link->lkind == KIND_LOCAL)
9636 {
9637 /* Local and used, build linkage pair. */
9638 fprintf (stream, "\t.quad %s..en\n", name);
9639 fprintf (stream, "\t.quad %s\n", name);
9640 }
9641 else
9642 {
9643 /* External and used, request linkage pair. */
9644 fprintf (stream, "\t.linkage %s\n", name);
9645 }
9646 }
9647
9648 return 0;
9649 }
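/* For example, a linkage for callee foo referenced from function number 1
   is emitted as:

       $1..foo..lk:
           .quad foo..en
           .quad foo

   when foo is defined locally, or as a single ".linkage foo" request when
   it is external.  */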
9650
9651 static void
9652 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9653 {
9654 splay_tree_node node;
9655 struct alpha_funcs *func;
9656
9657 link_section ();
9658 fprintf (stream, "\t.align 3\n");
9659 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9660 func = (struct alpha_funcs *) node->value;
9661
9662 fputs ("\t.name ", stream);
9663 assemble_name (stream, funname);
9664 fputs ("..na\n", stream);
9665 ASM_OUTPUT_LABEL (stream, funname);
9666 fprintf (stream, "\t.pdesc ");
9667 assemble_name (stream, funname);
9668 fprintf (stream, "..en,%s\n",
9669 alpha_procedure_type == PT_STACK ? "stack"
9670 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9671
9672 if (func->links)
9673 {
9674 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9675 /* splay_tree_delete (func->links); */
9676 }
9677 }
9678
9679 /* Given a decl, a section name, and whether the decl initializer
9680 has relocs, choose attributes for the section. */
9681
9682 #define SECTION_VMS_OVERLAY SECTION_FORGET
9683 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9684 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9685
9686 static unsigned int
9687 vms_section_type_flags (tree decl, const char *name, int reloc)
9688 {
9689 unsigned int flags = default_section_type_flags (decl, name, reloc);
9690
9691 if (decl && DECL_ATTRIBUTES (decl)
9692 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9693 flags |= SECTION_VMS_OVERLAY;
9694 if (decl && DECL_ATTRIBUTES (decl)
9695 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9696 flags |= SECTION_VMS_GLOBAL;
9697 if (decl && DECL_ATTRIBUTES (decl)
9698 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9699 flags |= SECTION_VMS_INITIALIZE;
9700
9701 return flags;
9702 }
9703
9704 /* Switch to an arbitrary section NAME with attributes as specified
9705 by FLAGS. ALIGN specifies any known alignment requirements for
9706 the section; 0 if the default should be used. */
9707
9708 static void
9709 vms_asm_named_section (const char *name, unsigned int flags,
9710 tree decl ATTRIBUTE_UNUSED)
9711 {
9712 fputc ('\n', asm_out_file);
9713 fprintf (asm_out_file, ".section\t%s", name);
9714
9715 if (flags & SECTION_VMS_OVERLAY)
9716 fprintf (asm_out_file, ",OVR");
9717 if (flags & SECTION_VMS_GLOBAL)
9718 fprintf (asm_out_file, ",GBL");
9719 if (flags & SECTION_VMS_INITIALIZE)
9720 fprintf (asm_out_file, ",NOMOD");
9721 if (flags & SECTION_DEBUG)
9722 fprintf (asm_out_file, ",NOWRT");
9723
9724 fputc ('\n', asm_out_file);
9725 }
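/* By way of example, a variable carrying __attribute__ ((global)) whose
   section is named FOO comes out as ".section FOO,GBL", combining
   vms_section_type_flags above with this routine.  */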
9726
9727 /* Record an element in the table of global constructors. SYMBOL is
9728 a SYMBOL_REF of the function to be called; PRIORITY is a number
9729 between 0 and MAX_INIT_PRIORITY.
9730
9731 Differs from default_ctors_section_asm_out_constructor in that the
9732 width of the .ctors entry is always 64 bits, rather than the 32 bits
9733 used by a normal pointer. */
9734
9735 static void
9736 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9737 {
9738 ctors_section ();
9739 assemble_align (BITS_PER_WORD);
9740 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9741 }
9742
9743 static void
9744 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9745 {
9746 dtors_section ();
9747 assemble_align (BITS_PER_WORD);
9748 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9749 }
9750 #else
9751
9752 rtx
9753 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9754 int is_local ATTRIBUTE_UNUSED)
9755 {
9756 return NULL_RTX;
9757 }
9758
9759 rtx
9760 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9761 tree cfundecl ATTRIBUTE_UNUSED,
9762 int lflag ATTRIBUTE_UNUSED,
9763 int rflag ATTRIBUTE_UNUSED)
9764 {
9765 return NULL_RTX;
9766 }
9767
9768 #endif /* TARGET_ABI_OPEN_VMS */
9769
9770 #if TARGET_ABI_UNICOSMK
9771
9772 /* This evaluates to true if we do not know how to pass TYPE solely in
9773 registers. This is the case for all arguments that do not fit in two
9774 registers. */
9775
9776 static bool
9777 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9778 {
9779 if (type == NULL)
9780 return false;
9781
9782 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9783 return true;
9784 if (TREE_ADDRESSABLE (type))
9785 return true;
9786
9787 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9788 }
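/* E.g. a 24-byte struct spans three argument words by ALPHA_ARG_SIZE and
   is forced onto the stack, while a 16-byte struct (neither variable-sized
   nor addressable) still travels in two registers.  */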
9789
9790 /* Define the offset between two registers, one to be eliminated, and the
9791 other its replacement, at the start of a routine. */
9792
9793 int
9794 unicosmk_initial_elimination_offset (int from, int to)
9795 {
9796 int fixed_size;
9797
9798 fixed_size = alpha_sa_size ();
9799 if (fixed_size != 0)
9800 fixed_size += 48;
9801
9802 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9803 return -fixed_size;
9804 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9805 return 0;
9806 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9807 return (ALPHA_ROUND (current_function_outgoing_args_size)
9808 + ALPHA_ROUND (get_frame_size ()));
9809 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9810 return (ALPHA_ROUND (fixed_size)
9811 + ALPHA_ROUND (get_frame_size ()
9812 + current_function_outgoing_args_size));
9813 else
9814 gcc_unreachable ();
9815 }
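/* A sketch of the arithmetic, assuming ALPHA_ROUND rounds up to 16-byte
   multiples: with 24 bytes of frame and 16 bytes of outgoing arguments,
   eliminating the frame pointer into the stack pointer yields
   ALPHA_ROUND (16) + ALPHA_ROUND (24) == 16 + 32 == 48.  */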
9816
9817 /* Output the module name for .ident and .end directives. We have to strip
9818 directories and make sure that the module name starts with a letter
9819 or '$'. */
9820
9821 static void
9822 unicosmk_output_module_name (FILE *file)
9823 {
9824 const char *name = lbasename (main_input_filename);
9825 unsigned len = strlen (name);
9826 char *clean_name = alloca (len + 2);
9827 char *ptr = clean_name;
9828
9829 /* CAM only accepts module names that start with a letter or '$'. We
9830 prefix the module name with a '$' if necessary. */
9831
9832 if (!ISALPHA (*name))
9833 *ptr++ = '$';
9834 memcpy (ptr, name, len + 1);
9835 clean_symbol_name (clean_name);
9836 fputs (clean_name, file);
9837 }
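/* For example, compiling /src/9foo.c gives the module name "$9foo_c":
   lbasename strips the directories, the '$' is prefixed because '9' is
   not a letter, and the '.' is rewritten (assuming clean_symbol_name
   maps characters invalid in labels to '_').  */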
9838
9839 /* Output the definition of a common variable. */
9840
9841 void
9842 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9843 {
9844 tree name_tree;
9845 printf ("T3E__: common %s\n", name);
9846
9847 common_section ();
9848 fputs("\t.endp\n\n\t.psect ", file);
9849 assemble_name(file, name);
9850 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9851 fprintf(file, "\t.byte\t0:%d\n", size);
9852
9853 /* Mark the symbol as defined in this module. */
9854 name_tree = get_identifier (name);
9855 TREE_ASM_WRITTEN (name_tree) = 1;
9856 }
9857
9858 #define SECTION_PUBLIC SECTION_MACH_DEP
9859 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9860 static int current_section_align;
9861
9862 static unsigned int
9863 unicosmk_section_type_flags (tree decl, const char *name,
9864 int reloc ATTRIBUTE_UNUSED)
9865 {
9866 unsigned int flags = default_section_type_flags (decl, name, reloc);
9867
9868 if (!decl)
9869 return flags;
9870
9871 if (TREE_CODE (decl) == FUNCTION_DECL)
9872 {
9873 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9874 if (align_functions_log > current_section_align)
9875 current_section_align = align_functions_log;
9876
9877 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9878 flags |= SECTION_MAIN;
9879 }
9880 else
9881 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9882
9883 if (TREE_PUBLIC (decl))
9884 flags |= SECTION_PUBLIC;
9885
9886 return flags;
9887 }
9888
9889 /* Generate a section name for decl and associate it with the
9890 declaration. */
9891
9892 static void
9893 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9894 {
9895 const char *name;
9896 int len;
9897
9898 gcc_assert (decl);
9899
9900 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9901 name = default_strip_name_encoding (name);
9902 len = strlen (name);
9903
9904 if (TREE_CODE (decl) == FUNCTION_DECL)
9905 {
9906 char *string;
9907
9908 /* It is essential that we prefix the section name here because
9909 otherwise the section names generated for constructors and
9910 destructors confuse collect2. */
9911
9912 string = alloca (len + 6);
9913 sprintf (string, "code@%s", name);
9914 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9915 }
9916 else if (TREE_PUBLIC (decl))
9917 DECL_SECTION_NAME (decl) = build_string (len, name);
9918 else
9919 {
9920 char *string;
9921
9922 string = alloca (len + 6);
9923 sprintf (string, "data@%s", name);
9924 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9925 }
9926 }
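/* The result: a function foo lands in section "code@foo", file-local data
   bar in "data@bar", and a public variable keeps its bare name as the
   section name.  */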
9927
9928 /* Switch to an arbitrary section NAME with attributes as specified
9929 by FLAGS. ALIGN specifies any known alignment requirements for
9930 the section; 0 if the default should be used. */
9931
9932 static void
9933 unicosmk_asm_named_section (const char *name, unsigned int flags,
9934 tree decl ATTRIBUTE_UNUSED)
9935 {
9936 const char *kind;
9937
9938 /* Close the previous section. */
9939
9940 fputs ("\t.endp\n\n", asm_out_file);
9941
9942 /* Find out what kind of section we are opening. */
9943
9944 if (flags & SECTION_MAIN)
9945 fputs ("\t.start\tmain\n", asm_out_file);
9946
9947 if (flags & SECTION_CODE)
9948 kind = "code";
9949 else if (flags & SECTION_PUBLIC)
9950 kind = "common";
9951 else
9952 kind = "data";
9953
9954 if (current_section_align != 0)
9955 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9956 current_section_align, kind);
9957 else
9958 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9959 }
9960
9961 static void
9962 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9963 {
9964 if (DECL_P (decl)
9965 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9966 unicosmk_unique_section (decl, 0);
9967 }
9968
9969 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9970 in code sections because .align fills unused space with zeroes. */
9971
9972 void
9973 unicosmk_output_align (FILE *file, int align)
9974 {
9975 if (inside_function)
9976 fprintf (file, "\tgcc@code@align\t%d\n", align);
9977 else
9978 fprintf (file, "\t.align\t%d\n", align);
9979 }
9980
9981 /* Add a case vector to the current function's list of deferred case
9982 vectors. Case vectors have to be put into a separate section because CAM
9983 does not allow data definitions in code sections. */
9984
9985 void
9986 unicosmk_defer_case_vector (rtx lab, rtx vec)
9987 {
9988 struct machine_function *machine = cfun->machine;
9989
9990 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9991 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9992 machine->addr_list);
9993 }
9994
9995 /* Output a case vector. */
9996
9997 static void
9998 unicosmk_output_addr_vec (FILE *file, rtx vec)
9999 {
10000 rtx lab = XEXP (vec, 0);
10001 rtx body = XEXP (vec, 1);
10002 int vlen = XVECLEN (body, 0);
10003 int idx;
10004
10005 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10006
10007 for (idx = 0; idx < vlen; idx++)
10008 {
10009 ASM_OUTPUT_ADDR_VEC_ELT
10010 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10011 }
10012 }
10013
10014 /* Output current function's deferred case vectors. */
10015
10016 static void
10017 unicosmk_output_deferred_case_vectors (FILE *file)
10018 {
10019 struct machine_function *machine = cfun->machine;
10020 rtx t;
10021
10022 if (machine->addr_list == NULL_RTX)
10023 return;
10024
10025 data_section ();
10026 for (t = machine->addr_list; t; t = XEXP (t, 1))
10027 unicosmk_output_addr_vec (file, XEXP (t, 0));
10028 }
10029
10030 /* Generate the name of the SSIB section for the current function. */
10031
10032 #define SSIB_PREFIX "__SSIB_"
10033 #define SSIB_PREFIX_LEN 7
10034
10035 static const char *
10036 unicosmk_ssib_name (void)
10037 {
10038 /* This is ok since CAM won't be able to deal with names longer than that
10039 anyway. */
10040
10041 static char name[256];
10042
10043 rtx x;
10044 const char *fnname;
10045 int len;
10046
10047 x = DECL_RTL (cfun->decl);
10048 gcc_assert (GET_CODE (x) == MEM);
10049 x = XEXP (x, 0);
10050 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10051 fnname = XSTR (x, 0);
10052
10053 len = strlen (fnname);
10054 if (len + SSIB_PREFIX_LEN > 255)
10055 len = 255 - SSIB_PREFIX_LEN;
10056
10057 strcpy (name, SSIB_PREFIX);
10058 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10059 name[len + SSIB_PREFIX_LEN] = 0;
10060
10061 return name;
10062 }
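/* So a function named foo yields the SSIB name "__SSIB_foo"; if prefix
   plus function name would exceed 255 characters, the function-name part
   is truncated to fit.  */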
10063
10064 /* Set up the dynamic subprogram information block (DSIB) and update the
10065 frame pointer register ($15) for subroutines which have a frame. If the
10066 subroutine doesn't have a frame, simply increment $15. */
10067
10068 static void
10069 unicosmk_gen_dsib (unsigned long *imaskP)
10070 {
10071 if (alpha_procedure_type == PT_STACK)
10072 {
10073 const char *ssib_name;
10074 rtx mem;
10075
10076 /* Allocate 64 bytes for the DSIB. */
10077
10078 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10079 GEN_INT (-64))));
10080 emit_insn (gen_blockage ());
10081
10082 /* Save the return address. */
10083
10084 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10085 set_mem_alias_set (mem, alpha_sr_alias_set);
10086 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10087 (*imaskP) &= ~(1UL << REG_RA);
10088
10089 /* Save the old frame pointer. */
10090
10091 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10092 set_mem_alias_set (mem, alpha_sr_alias_set);
10093 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10094 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10095
10096 emit_insn (gen_blockage ());
10097
10098 /* Store the SSIB pointer. */
10099
10100 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10101 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10102 set_mem_alias_set (mem, alpha_sr_alias_set);
10103
10104 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10105 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10106 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10107
10108 /* Save the CIW index. */
10109
10110 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10111 set_mem_alias_set (mem, alpha_sr_alias_set);
10112 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10113
10114 emit_insn (gen_blockage ());
10115
10116 /* Set the new frame pointer. */
10117
10118 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10119 stack_pointer_rtx, GEN_INT (64))));
10120
10121 }
10122 else
10123 {
10124 /* Increment the frame pointer register to indicate that we do not
10125 have a frame. */
10126
10127 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10128 hard_frame_pointer_rtx, const1_rtx)));
10129 }
10130 }
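/* Recapping the 64-byte DSIB built above, as offsets from the new $sp:
   +56 holds the return address, +48 the old frame pointer, +32 the SSIB
   pointer, and +24 the CIW index from $25; $15 is then set to $sp + 64,
   just past the block.  */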
10131
10132 /* Output the static subroutine information block for the current
10133 function. */
10134
10135 static void
10136 unicosmk_output_ssib (FILE *file, const char *fnname)
10137 {
10138 int len;
10139 int i;
10140 rtx x;
10141 rtx ciw;
10142 struct machine_function *machine = cfun->machine;
10143
10144 ssib_section ();
10145 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10146 unicosmk_ssib_name ());
10147
10148 /* Some required header fields and the function name length. */
10149
10150 len = strlen (fnname);
10151 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10152
10153 /* Saved registers
10154 ??? We don't do that yet. */
10155
10156 fputs ("\t.quad\t0\n", file);
10157
10158 /* Function address. */
10159
10160 fputs ("\t.quad\t", file);
10161 assemble_name (file, fnname);
10162 putc ('\n', file);
10163
10164 fputs ("\t.quad\t0\n", file);
10165 fputs ("\t.quad\t0\n", file);
10166
10167 /* Function name.
10168 ??? We do it the same way Cray CC does it but this could be
10169 simplified. */
10170
10171 for (i = 0; i < len; i++)
10172 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10173 if ((len % 8) == 0)
10174 fputs ("\t.quad\t0\n", file);
10175 else
10176 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
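/* (For len == 5, for example, the .bits directive above pads with
   (8 - 5) * 8 == 24 zero bits to reach a quadword boundary, while a
   name whose length is a multiple of 8 gets a full zero quadword as
   its terminator.)  */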
10177
10178 /* All call information words used in the function. */
10179
10180 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10181 {
10182 ciw = XEXP (x, 0);
10183 #if HOST_BITS_PER_WIDE_INT == 32
10184 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10185 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10186 #else
10187 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10188 #endif
10189 }
10190 }
10191
10192 /* Add a call information word (CIW) to the list of the current function's
10193 CIWs and return its index.
10194
10195 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10196
10197 rtx
10198 unicosmk_add_call_info_word (rtx x)
10199 {
10200 rtx node;
10201 struct machine_function *machine = cfun->machine;
10202
10203 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10204 if (machine->first_ciw == NULL_RTX)
10205 machine->first_ciw = node;
10206 else
10207 XEXP (machine->last_ciw, 1) = node;
10208
10209 machine->last_ciw = node;
10210 ++machine->ciw_count;
10211
10212 return GEN_INT (machine->ciw_count
10213 + strlen (current_function_name ())/8 + 5);
10214 }
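/* Checking the index arithmetic against the SSIB layout emitted by
   unicosmk_output_ssib above: the block starts with five fixed quadwords
   (header, saved registers, function address, two zeros) followed by
   strlen (name) / 8 + 1 quadwords of name bytes, so the first CIW of
   "main" gets index 1 + 4/8 + 5 == 6.  */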
10215
10216 static char unicosmk_section_buf[100];
10217
10218 char *
10219 unicosmk_text_section (void)
10220 {
10221 static int count = 0;
10222 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10223 count++);
10224 return unicosmk_section_buf;
10225 }
10226
10227 char *
10228 unicosmk_data_section (void)
10229 {
10230 static int count = 1;
10231 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10232 count++);
10233 return unicosmk_section_buf;
10234 }
10235
10236 /* The Cray assembler doesn't accept extern declarations for symbols which
10237 are defined in the same file. We have to keep track of all global
10238 symbols which are referenced and/or defined in a source file and output
10239 extern declarations for those which are referenced but not defined at
10240 the end of the file. */
10241
10242 /* List of identifiers for which an extern declaration might have to be
10243 emitted. */
10244 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10245
10246 struct unicosmk_extern_list
10247 {
10248 struct unicosmk_extern_list *next;
10249 const char *name;
10250 };
10251
10252 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10253
10254 /* Output extern declarations which are required for every asm file. */
10255
10256 static void
10257 unicosmk_output_default_externs (FILE *file)
10258 {
10259 static const char *const externs[] =
10260 { "__T3E_MISMATCH" };
10261
10262 int i;
10263 int n;
10264
10265 n = ARRAY_SIZE (externs);
10266
10267 for (i = 0; i < n; i++)
10268 fprintf (file, "\t.extern\t%s\n", externs[i]);
10269 }
10270
10271 /* Output extern declarations for global symbols which have been
10272 referenced but not defined. */
10273
10274 static void
10275 unicosmk_output_externs (FILE *file)
10276 {
10277 struct unicosmk_extern_list *p;
10278 const char *real_name;
10279 int len;
10280 tree name_tree;
10281
10282 len = strlen (user_label_prefix);
10283 for (p = unicosmk_extern_head; p != 0; p = p->next)
10284 {
10285 /* We have to strip the encoding and possibly remove user_label_prefix
10286 from the identifier in order to handle -fleading-underscore and
10287 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10288 real_name = default_strip_name_encoding (p->name);
10289 if (len && p->name[0] == '*'
10290 && !memcmp (real_name, user_label_prefix, len))
10291 real_name += len;
10292
10293 name_tree = get_identifier (real_name);
10294 if (! TREE_ASM_WRITTEN (name_tree))
10295 {
10296 TREE_ASM_WRITTEN (name_tree) = 1;
10297 fputs ("\t.extern\t", file);
10298 assemble_name (file, p->name);
10299 putc ('\n', file);
10300 }
10301 }
10302 }
10303
10304 /* Record an extern. */
10305
10306 void
10307 unicosmk_add_extern (const char *name)
10308 {
10309 struct unicosmk_extern_list *p;
10310
10311 p = (struct unicosmk_extern_list *)
10312 xmalloc (sizeof (struct unicosmk_extern_list));
10313 p->next = unicosmk_extern_head;
10314 p->name = name;
10315 unicosmk_extern_head = p;
10316 }
10317
10318 /* The Cray assembler generates incorrect code if identifiers which
10319 conflict with register names are used as instruction operands. We have
10320 to replace such identifiers with DEX expressions. */
10321
10322 /* Structure to collect identifiers which have been replaced by DEX
10323 expressions. */
10324 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10325
10326 struct unicosmk_dex {
10327 struct unicosmk_dex *next;
10328 const char *name;
10329 };
10330
10331 /* List of identifiers which have been replaced by DEX expressions. The DEX
10332 number is determined by the position in the list. */
10333
10334 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10335
10336 /* The number of elements in the DEX list. */
10337
10338 static int unicosmk_dex_count = 0;
10339
10340 /* Check if NAME must be replaced by a DEX expression. */
10341
10342 static int
10343 unicosmk_special_name (const char *name)
10344 {
10345 if (name[0] == '*')
10346 ++name;
10347
10348 if (name[0] == '$')
10349 ++name;
10350
10351 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10352 return 0;
10353
10354 switch (name[1])
10355 {
10356 case '1': case '2':
10357 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10358
10359 case '3':
10360 return (name[2] == '\0'
10361 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10362
10363 default:
10364 return (ISDIGIT (name[1]) && name[2] == '\0');
10365 }
10366 }
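/* Examples: "r31", "$f2" and "R10" all name CAM registers (0-31) and so
   must be replaced, while "r32" and "f99" are out of range and return 0.  */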
10367
10368 /* Return the DEX number if X must be replaced by a DEX expression and 0
10369 otherwise. */
10370
10371 static int
10372 unicosmk_need_dex (rtx x)
10373 {
10374 struct unicosmk_dex *dex;
10375 const char *name;
10376 int i;
10377
10378 if (GET_CODE (x) != SYMBOL_REF)
10379 return 0;
10380
10381 name = XSTR (x, 0);
10382 if (! unicosmk_special_name (name))
10383 return 0;
10384
10385 i = unicosmk_dex_count;
10386 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10387 {
10388 if (! strcmp (name, dex->name))
10389 return i;
10390 --i;
10391 }
10392
10393 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10394 dex->name = name;
10395 dex->next = unicosmk_dex_list;
10396 unicosmk_dex_list = dex;
10397
10398 ++unicosmk_dex_count;
10399 return unicosmk_dex_count;
10400 }
10401
10402 /* Output the DEX definitions for this file. */
10403
10404 static void
10405 unicosmk_output_dex (FILE *file)
10406 {
10407 struct unicosmk_dex *dex;
10408 int i;
10409
10410 if (unicosmk_dex_list == NULL)
10411 return;
10412
10413 fprintf (file, "\t.dexstart\n");
10414
10415 i = unicosmk_dex_count;
10416 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10417 {
10418 fprintf (file, "\tDEX (%d) = ", i);
10419 assemble_name (file, dex->name);
10420 putc ('\n', file);
10421 --i;
10422 }
10423
10424 fprintf (file, "\t.dexend\n");
10425 }
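/* If "f5" was recorded first and "r31" second, the list (newest first)
   prints as:

       .dexstart
       DEX (2) = r31
       DEX (1) = f5
       .dexend
   */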
10426
10427 /* Output text to appear at the beginning of an assembler file. */
10428
10429 static void
10430 unicosmk_file_start (void)
10431 {
10432 int i;
10433
10434 fputs ("\t.ident\t", asm_out_file);
10435 unicosmk_output_module_name (asm_out_file);
10436 fputs ("\n\n", asm_out_file);
10437
10438 /* CAM, the Unicos/Mk assembler, uses different register names: rN for the
10439 integer register N and fN for the floating-point register N. Instead of
10440 trying to use these in alpha.md, we emit micro definitions of the symbols
10441 $N and $fN that refer to the appropriate registers. */
10445
10446 for (i = 0; i < 32; ++i)
10447 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10448
10449 for (i = 0; i < 32; ++i)
10450 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10451
10452 putc ('\n', asm_out_file);
10453
10454 /* The .align directive fills unused space with zeroes, which does not work
10455 in code sections. We define the macro 'gcc@code@align' which uses nops
10456 instead. Note that it assumes that code sections always have the
10457 biggest possible alignment since . refers to the current offset from
10458 the beginning of the section. */
10459
10460 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10461 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10462 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10463 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10464 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10465 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10466 fputs ("\t.endr\n", asm_out_file);
10467 fputs ("\t.endif\n", asm_out_file);
10468 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
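/* Working the macro through for "gcc@code@align 3" at an offset of 4
   bytes into the section: gcc@n@bytes is 8, gcc@here is 4, and the
   .repeat emits (8 - 4) / 4 == 1 nop (bis r31,r31,r31) to reach the
   8-byte boundary.  */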
10469
10470 /* Output extern declarations which should always be visible. */
10471 unicosmk_output_default_externs (asm_out_file);
10472
10473 /* Open a dummy section. We always need to be inside a section for the
10474 section-switching code to work correctly.
10475 ??? This should be a module id or something like that. I still have to
10476 figure out what the rules for those are. */
10477 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10478 }
10479
10480 /* Output text to appear at the end of an assembler file. This includes all
10481 pending extern declarations and DEX expressions. */
10482
10483 static void
10484 unicosmk_file_end (void)
10485 {
10486 fputs ("\t.endp\n\n", asm_out_file);
10487
10488 /* Output all pending externs. */
10489
10490 unicosmk_output_externs (asm_out_file);
10491
10492 /* Output dex definitions used for functions whose names conflict with
10493 register names. */
10494
10495 unicosmk_output_dex (asm_out_file);
10496
10497 fputs ("\t.end\t", asm_out_file);
10498 unicosmk_output_module_name (asm_out_file);
10499 putc ('\n', asm_out_file);
10500 }
10501
10502 #else
10503
10504 static void
10505 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10506 {}
10507
10508 static void
10509 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10510 {}
10511
10512 static void
10513 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10514 const char * fnname ATTRIBUTE_UNUSED)
10515 {}
10516
10517 rtx
10518 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10519 {
10520 return NULL_RTX;
10521 }
10522
10523 static int
10524 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10525 {
10526 return 0;
10527 }
10528
10529 #endif /* TARGET_ABI_UNICOSMK */
10530
10531 static void
10532 alpha_init_libfuncs (void)
10533 {
10534 if (TARGET_ABI_UNICOSMK)
10535 {
10536 /* Prevent gcc from generating calls to __divsi3. */
10537 set_optab_libfunc (sdiv_optab, SImode, 0);
10538 set_optab_libfunc (udiv_optab, SImode, 0);
10539
10540 /* Use the functions provided by the system library
10541 for DImode integer division. */
10542 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10543 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10544 }
10545 else if (TARGET_ABI_OPEN_VMS)
10546 {
10547 /* Use the VMS runtime library functions for division and
10548 remainder. */
10549 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10550 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10551 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10552 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10553 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10554 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10555 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10556 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10557 }
10558 }
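/* The effect on VMS, for instance, is that a 32-bit signed division the
   compiler does not open-code becomes a call to OTS$DIV_I in the VMS
   run-time library, per the table above.  */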
10559
10560
10561 /* Initialize the GCC target structure. */
10562 #if TARGET_ABI_OPEN_VMS
10563 # undef TARGET_ATTRIBUTE_TABLE
10564 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10565 # undef TARGET_SECTION_TYPE_FLAGS
10566 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10567 #endif
10568
10569 #undef TARGET_IN_SMALL_DATA_P
10570 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10571
10572 #if TARGET_ABI_UNICOSMK
10573 # undef TARGET_INSERT_ATTRIBUTES
10574 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10575 # undef TARGET_SECTION_TYPE_FLAGS
10576 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10577 # undef TARGET_ASM_UNIQUE_SECTION
10578 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10579 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10580 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10581 # undef TARGET_ASM_GLOBALIZE_LABEL
10582 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10583 # undef TARGET_MUST_PASS_IN_STACK
10584 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10585 #endif
10586
10587 #undef TARGET_ASM_ALIGNED_HI_OP
10588 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10589 #undef TARGET_ASM_ALIGNED_DI_OP
10590 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10591
10592 /* Default unaligned ops are provided for ELF systems. To get unaligned
10593 data for non-ELF systems, we have to turn off auto alignment. */
10594 #ifndef OBJECT_FORMAT_ELF
10595 #undef TARGET_ASM_UNALIGNED_HI_OP
10596 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10597 #undef TARGET_ASM_UNALIGNED_SI_OP
10598 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10599 #undef TARGET_ASM_UNALIGNED_DI_OP
10600 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10601 #endif
10602
10603 #ifdef OBJECT_FORMAT_ELF
10604 #undef TARGET_ASM_SELECT_RTX_SECTION
10605 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10606 #endif
10607
10608 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10609 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10610
10611 #undef TARGET_INIT_LIBFUNCS
10612 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10613
10614 #if TARGET_ABI_UNICOSMK
10615 #undef TARGET_ASM_FILE_START
10616 #define TARGET_ASM_FILE_START unicosmk_file_start
10617 #undef TARGET_ASM_FILE_END
10618 #define TARGET_ASM_FILE_END unicosmk_file_end
10619 #else
10620 #undef TARGET_ASM_FILE_START
10621 #define TARGET_ASM_FILE_START alpha_file_start
10622 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10623 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10624 #endif
10625
10626 #undef TARGET_SCHED_ADJUST_COST
10627 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10628 #undef TARGET_SCHED_ISSUE_RATE
10629 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10630 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10631 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10632 alpha_multipass_dfa_lookahead
10633
10634 #undef TARGET_HAVE_TLS
10635 #define TARGET_HAVE_TLS HAVE_AS_TLS
10636
10637 #undef TARGET_INIT_BUILTINS
10638 #define TARGET_INIT_BUILTINS alpha_init_builtins
10639 #undef TARGET_EXPAND_BUILTIN
10640 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10641 #undef TARGET_FOLD_BUILTIN
10642 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10643
10644 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10645 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10646 #undef TARGET_CANNOT_COPY_INSN_P
10647 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10648 #undef TARGET_CANNOT_FORCE_CONST_MEM
10649 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10650
10651 #if TARGET_ABI_OSF
10652 #undef TARGET_ASM_OUTPUT_MI_THUNK
10653 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10654 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10655 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10656 #undef TARGET_STDARG_OPTIMIZE_HOOK
10657 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10658 #endif
10659
10660 #undef TARGET_RTX_COSTS
10661 #define TARGET_RTX_COSTS alpha_rtx_costs
10662 #undef TARGET_ADDRESS_COST
10663 #define TARGET_ADDRESS_COST hook_int_rtx_0
10664
10665 #undef TARGET_MACHINE_DEPENDENT_REORG
10666 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10667
10668 #undef TARGET_PROMOTE_FUNCTION_ARGS
10669 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10670 #undef TARGET_PROMOTE_FUNCTION_RETURN
10671 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10672 #undef TARGET_PROMOTE_PROTOTYPES
10673 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10674 #undef TARGET_RETURN_IN_MEMORY
10675 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10676 #undef TARGET_PASS_BY_REFERENCE
10677 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10678 #undef TARGET_SETUP_INCOMING_VARARGS
10679 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10680 #undef TARGET_STRICT_ARGUMENT_NAMING
10681 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10682 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10683 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10684 #undef TARGET_SPLIT_COMPLEX_ARG
10685 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10686 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10687 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10688 #undef TARGET_ARG_PARTIAL_BYTES
10689 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10690
10691 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10692 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10693 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10694 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10695
10696 #undef TARGET_BUILD_BUILTIN_VA_LIST
10697 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10698
10699 /* The Alpha architecture does not require sequential consistency. See
10700 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10701 for an example of how it can be violated in practice. */
10702 #undef TARGET_RELAXED_ORDERING
10703 #define TARGET_RELAXED_ORDERING true
10704
10705 #undef TARGET_DEFAULT_TARGET_FLAGS
10706 #define TARGET_DEFAULT_TARGET_FLAGS \
10707 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10708 #undef TARGET_HANDLE_OPTION
10709 #define TARGET_HANDLE_OPTION alpha_handle_option
10710
10711 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10712 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
10713 #define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
10714 #endif
10715
10716 struct gcc_target targetm = TARGET_INITIALIZER;
10717
10718
10719 #include "gt-alpha.h"
10720