1 /* BFD back-end for Renesas Super-H COFF binaries.
2    Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3    2003, 2004, 2005 Free Software Foundation, Inc.
4    Contributed by Cygnus Support.
5    Written by Steve Chamberlain, <sac@cygnus.com>.
6    Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
7 
8    This file is part of BFD, the Binary File Descriptor library.
9 
10    This program is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2 of the License, or
13    (at your option) any later version.
14 
15    This program is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with this program; if not, write to the Free Software
22    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
23 
24 #include "bfd.h"
25 #include "sysdep.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31 
32 #ifdef COFF_WITH_PE
33 #include "coff/pe.h"
34 
35 #ifndef COFF_IMAGE_WITH_PE
36 static bfd_boolean sh_align_load_span
37   PARAMS ((bfd *, asection *, bfd_byte *,
38 	   bfd_boolean (*) (bfd *, asection *, PTR, bfd_byte *, bfd_vma),
39 	   PTR, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *));
40 
41 #define _bfd_sh_align_load_span sh_align_load_span
42 #endif
43 #endif
44 
45 #include "libcoff.h"
46 
47 /* Internal functions.  */
48 static bfd_reloc_status_type sh_reloc
49   PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
50 static long get_symbol_value PARAMS ((asymbol *));
51 static bfd_boolean sh_relax_section
52   PARAMS ((bfd *, asection *, struct bfd_link_info *, bfd_boolean *));
53 static bfd_boolean sh_relax_delete_bytes
54   PARAMS ((bfd *, asection *, bfd_vma, int));
55 #ifndef COFF_IMAGE_WITH_PE
56 static const struct sh_opcode *sh_insn_info PARAMS ((unsigned int));
57 #endif
58 static bfd_boolean sh_align_loads
59   PARAMS ((bfd *, asection *, struct internal_reloc *, bfd_byte *,
60 	   bfd_boolean *));
61 static bfd_boolean sh_swap_insns
62   PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
63 static bfd_boolean sh_relocate_section
64   PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
65 	   struct internal_reloc *, struct internal_syment *, asection **));
66 static bfd_byte *sh_coff_get_relocated_section_contents
67   PARAMS ((bfd *, struct bfd_link_info *, struct bfd_link_order *,
68 	   bfd_byte *, bfd_boolean, asymbol **));
69 static reloc_howto_type * sh_coff_reloc_type_lookup PARAMS ((bfd *, bfd_reloc_code_real_type));
70 
71 #ifdef COFF_WITH_PE
72 /* Can't build import tables with 2**4 alignment.  */
73 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
74 #else
75 /* Default section alignment to 2**4.  */
76 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
77 #endif
78 
79 #ifdef COFF_IMAGE_WITH_PE
80 /* Align PE executables.  */
81 #define COFF_PAGE_SIZE 0x1000
82 #endif
83 
84 /* Generate long file names.  */
85 #define COFF_LONG_FILENAMES
86 
87 #ifdef COFF_WITH_PE
88 static bfd_boolean in_reloc_p PARAMS ((bfd *, reloc_howto_type *));
89 /* Return TRUE if this relocation should
90    appear in the output .reloc section.  */

static bfd_boolean
in_reloc_p (abfd, howto)
92      bfd * abfd ATTRIBUTE_UNUSED;
93      reloc_howto_type * howto;
94 {
95   return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
96 }
97 #endif
98 
99 /* The supported relocations.  There are a lot of relocations defined
100    in coff/internal.h which we do not expect to ever see.  */
101 static reloc_howto_type sh_coff_howtos[] =
102 {
103   EMPTY_HOWTO (0),
104   EMPTY_HOWTO (1),
105 #ifdef COFF_WITH_PE
106   /* Windows CE */
107   HOWTO (R_SH_IMM32CE,		/* type */
108 	 0,			/* rightshift */
109 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
110 	 32,			/* bitsize */
111 	 FALSE,			/* pc_relative */
112 	 0,			/* bitpos */
113 	 complain_overflow_bitfield, /* complain_on_overflow */
114 	 sh_reloc,		/* special_function */
115 	 "r_imm32ce",		/* name */
116 	 TRUE,			/* partial_inplace */
117 	 0xffffffff,		/* src_mask */
118 	 0xffffffff,		/* dst_mask */
119 	 FALSE),		/* pcrel_offset */
120 #else
121   EMPTY_HOWTO (2),
122 #endif
123   EMPTY_HOWTO (3), /* R_SH_PCREL8 */
124   EMPTY_HOWTO (4), /* R_SH_PCREL16 */
125   EMPTY_HOWTO (5), /* R_SH_HIGH8 */
126   EMPTY_HOWTO (6), /* R_SH_IMM24 */
127   EMPTY_HOWTO (7), /* R_SH_LOW16 */
128   EMPTY_HOWTO (8),
129   EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
130 
131   HOWTO (R_SH_PCDISP8BY2,	/* type */
132 	 1,			/* rightshift */
133 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
134 	 8,			/* bitsize */
135 	 TRUE,			/* pc_relative */
136 	 0,			/* bitpos */
137 	 complain_overflow_signed, /* complain_on_overflow */
138 	 sh_reloc,		/* special_function */
139 	 "r_pcdisp8by2",	/* name */
140 	 TRUE,			/* partial_inplace */
141 	 0xff,			/* src_mask */
142 	 0xff,			/* dst_mask */
143 	 TRUE),			/* pcrel_offset */
144 
145   EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
146 
147   HOWTO (R_SH_PCDISP,		/* type */
148 	 1,			/* rightshift */
149 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
150 	 12,			/* bitsize */
151 	 TRUE,			/* pc_relative */
152 	 0,			/* bitpos */
153 	 complain_overflow_signed, /* complain_on_overflow */
154 	 sh_reloc,		/* special_function */
155 	 "r_pcdisp12by2",	/* name */
156 	 TRUE,			/* partial_inplace */
157 	 0xfff,			/* src_mask */
158 	 0xfff,			/* dst_mask */
159 	 TRUE),			/* pcrel_offset */
160 
161   EMPTY_HOWTO (13),
162 
163   HOWTO (R_SH_IMM32,		/* type */
164 	 0,			/* rightshift */
165 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
166 	 32,			/* bitsize */
167 	 FALSE,			/* pc_relative */
168 	 0,			/* bitpos */
169 	 complain_overflow_bitfield, /* complain_on_overflow */
170 	 sh_reloc,		/* special_function */
171 	 "r_imm32",		/* name */
172 	 TRUE,			/* partial_inplace */
173 	 0xffffffff,		/* src_mask */
174 	 0xffffffff,		/* dst_mask */
175 	 FALSE),		/* pcrel_offset */
176 
177   EMPTY_HOWTO (15),
178 #ifdef COFF_WITH_PE
179   HOWTO (R_SH_IMAGEBASE,        /* type */
180 	 0,	                /* rightshift */
181 	 2,	                /* size (0 = byte, 1 = short, 2 = long) */
182 	 32,	                /* bitsize */
183 	 FALSE,	                /* pc_relative */
184 	 0,	                /* bitpos */
185 	 complain_overflow_bitfield, /* complain_on_overflow */
186 	 sh_reloc,       	/* special_function */
187 	 "rva32",	        /* name */
188 	 TRUE,	                /* partial_inplace */
189 	 0xffffffff,            /* src_mask */
190 	 0xffffffff,            /* dst_mask */
191 	 FALSE),                /* pcrel_offset */
192 #else
193   EMPTY_HOWTO (16), /* R_SH_IMM8 */
194 #endif
195   EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
196   EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
197   EMPTY_HOWTO (19), /* R_SH_IMM4 */
198   EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
199   EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
200 
201   HOWTO (R_SH_PCRELIMM8BY2,	/* type */
202 	 1,			/* rightshift */
203 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
204 	 8,			/* bitsize */
205 	 TRUE,			/* pc_relative */
206 	 0,			/* bitpos */
207 	 complain_overflow_unsigned, /* complain_on_overflow */
208 	 sh_reloc,		/* special_function */
209 	 "r_pcrelimm8by2",	/* name */
210 	 TRUE,			/* partial_inplace */
211 	 0xff,			/* src_mask */
212 	 0xff,			/* dst_mask */
213 	 TRUE),			/* pcrel_offset */
214 
215   HOWTO (R_SH_PCRELIMM8BY4,	/* type */
216 	 2,			/* rightshift */
217 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
218 	 8,			/* bitsize */
219 	 TRUE,			/* pc_relative */
220 	 0,			/* bitpos */
221 	 complain_overflow_unsigned, /* complain_on_overflow */
222 	 sh_reloc,		/* special_function */
223 	 "r_pcrelimm8by4",	/* name */
224 	 TRUE,			/* partial_inplace */
225 	 0xff,			/* src_mask */
226 	 0xff,			/* dst_mask */
227 	 TRUE),			/* pcrel_offset */
228 
229   HOWTO (R_SH_IMM16,		/* type */
230 	 0,			/* rightshift */
231 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
232 	 16,			/* bitsize */
233 	 FALSE,			/* pc_relative */
234 	 0,			/* bitpos */
235 	 complain_overflow_bitfield, /* complain_on_overflow */
236 	 sh_reloc,		/* special_function */
237 	 "r_imm16",		/* name */
238 	 TRUE,			/* partial_inplace */
239 	 0xffff,		/* src_mask */
240 	 0xffff,		/* dst_mask */
241 	 FALSE),		/* pcrel_offset */
242 
243   HOWTO (R_SH_SWITCH16,		/* type */
244 	 0,			/* rightshift */
245 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
246 	 16,			/* bitsize */
247 	 FALSE,			/* pc_relative */
248 	 0,			/* bitpos */
249 	 complain_overflow_bitfield, /* complain_on_overflow */
250 	 sh_reloc,		/* special_function */
251 	 "r_switch16",		/* name */
252 	 TRUE,			/* partial_inplace */
253 	 0xffff,		/* src_mask */
254 	 0xffff,		/* dst_mask */
255 	 FALSE),		/* pcrel_offset */
256 
257   HOWTO (R_SH_SWITCH32,		/* type */
258 	 0,			/* rightshift */
259 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
260 	 32,			/* bitsize */
261 	 FALSE,			/* pc_relative */
262 	 0,			/* bitpos */
263 	 complain_overflow_bitfield, /* complain_on_overflow */
264 	 sh_reloc,		/* special_function */
265 	 "r_switch32",		/* name */
266 	 TRUE,			/* partial_inplace */
267 	 0xffffffff,		/* src_mask */
268 	 0xffffffff,		/* dst_mask */
269 	 FALSE),		/* pcrel_offset */
270 
271   HOWTO (R_SH_USES,		/* type */
272 	 0,			/* rightshift */
273 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
274 	 16,			/* bitsize */
275 	 FALSE,			/* pc_relative */
276 	 0,			/* bitpos */
277 	 complain_overflow_bitfield, /* complain_on_overflow */
278 	 sh_reloc,		/* special_function */
279 	 "r_uses",		/* name */
280 	 TRUE,			/* partial_inplace */
281 	 0xffff,		/* src_mask */
282 	 0xffff,		/* dst_mask */
283 	 FALSE),		/* pcrel_offset */
284 
285   HOWTO (R_SH_COUNT,		/* type */
286 	 0,			/* rightshift */
287 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
288 	 32,			/* bitsize */
289 	 FALSE,			/* pc_relative */
290 	 0,			/* bitpos */
291 	 complain_overflow_bitfield, /* complain_on_overflow */
292 	 sh_reloc,		/* special_function */
293 	 "r_count",		/* name */
294 	 TRUE,			/* partial_inplace */
295 	 0xffffffff,		/* src_mask */
296 	 0xffffffff,		/* dst_mask */
297 	 FALSE),		/* pcrel_offset */
298 
299   HOWTO (R_SH_ALIGN,		/* type */
300 	 0,			/* rightshift */
301 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
302 	 32,			/* bitsize */
303 	 FALSE,			/* pc_relative */
304 	 0,			/* bitpos */
305 	 complain_overflow_bitfield, /* complain_on_overflow */
306 	 sh_reloc,		/* special_function */
307 	 "r_align",		/* name */
308 	 TRUE,			/* partial_inplace */
309 	 0xffffffff,		/* src_mask */
310 	 0xffffffff,		/* dst_mask */
311 	 FALSE),		/* pcrel_offset */
312 
313   HOWTO (R_SH_CODE,		/* type */
314 	 0,			/* rightshift */
315 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
316 	 32,			/* bitsize */
317 	 FALSE,			/* pc_relative */
318 	 0,			/* bitpos */
319 	 complain_overflow_bitfield, /* complain_on_overflow */
320 	 sh_reloc,		/* special_function */
321 	 "r_code",		/* name */
322 	 TRUE,			/* partial_inplace */
323 	 0xffffffff,		/* src_mask */
324 	 0xffffffff,		/* dst_mask */
325 	 FALSE),		/* pcrel_offset */
326 
327   HOWTO (R_SH_DATA,		/* type */
328 	 0,			/* rightshift */
329 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
330 	 32,			/* bitsize */
331 	 FALSE,			/* pc_relative */
332 	 0,			/* bitpos */
333 	 complain_overflow_bitfield, /* complain_on_overflow */
334 	 sh_reloc,		/* special_function */
335 	 "r_data",		/* name */
336 	 TRUE,			/* partial_inplace */
337 	 0xffffffff,		/* src_mask */
338 	 0xffffffff,		/* dst_mask */
339 	 FALSE),		/* pcrel_offset */
340 
341   HOWTO (R_SH_LABEL,		/* type */
342 	 0,			/* rightshift */
343 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
344 	 32,			/* bitsize */
345 	 FALSE,			/* pc_relative */
346 	 0,			/* bitpos */
347 	 complain_overflow_bitfield, /* complain_on_overflow */
348 	 sh_reloc,		/* special_function */
349 	 "r_label",		/* name */
350 	 TRUE,			/* partial_inplace */
351 	 0xffffffff,		/* src_mask */
352 	 0xffffffff,		/* dst_mask */
353 	 FALSE),		/* pcrel_offset */
354 
355   HOWTO (R_SH_SWITCH8,		/* type */
356 	 0,			/* rightshift */
357 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
358 	 8,			/* bitsize */
359 	 FALSE,			/* pc_relative */
360 	 0,			/* bitpos */
361 	 complain_overflow_bitfield, /* complain_on_overflow */
362 	 sh_reloc,		/* special_function */
363 	 "r_switch8",		/* name */
364 	 TRUE,			/* partial_inplace */
365 	 0xff,			/* src_mask */
366 	 0xff,			/* dst_mask */
367 	 FALSE)			/* pcrel_offset */
368 };
369 
370 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
371 
372 /* Check for a bad magic number.  */
373 #define BADMAG(x) SHBADMAG(x)
374 
375 /* Customize coffcode.h (this is not currently used).  */
376 #define SH 1
377 
378 /* FIXME: This should not be set here.  */
379 #define __A_MAGIC_SET__
380 
381 #ifndef COFF_WITH_PE
382 /* Swap the r_offset field in and out.  */
383 #define SWAP_IN_RELOC_OFFSET  H_GET_32
384 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
385 
386 /* Swap out extra information in the reloc structure.  */
387 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
388   do						\
389     {						\
390       dst->r_stuff[0] = 'S';			\
391       dst->r_stuff[1] = 'C';			\
392     }						\
393   while (0)
394 #endif
395 
396 /* Get the value of a symbol, when performing a relocation.  */
397 
398 static long
get_symbol_value (symbol)
400      asymbol *symbol;
401 {
402   bfd_vma relocation;
403 
404   if (bfd_is_com_section (symbol->section))
405     relocation = 0;
406   else
407     relocation = (symbol->value +
408 		  symbol->section->output_section->vma +
409 		  symbol->section->output_offset);
410 
411   return relocation;
412 }
413 
414 #ifdef COFF_WITH_PE
415 /* Convert an rtype to howto for the COFF backend linker.
416    Copied from coff-i386.  */
417 #define coff_rtype_to_howto coff_sh_rtype_to_howto
418 static reloc_howto_type * coff_sh_rtype_to_howto PARAMS ((bfd *, asection *, struct internal_reloc *, struct coff_link_hash_entry *, struct internal_syment *, bfd_vma *));
419 
420 static reloc_howto_type *
coff_sh_rtype_to_howto (abfd, sec, rel, h, sym, addendp)
422      bfd * abfd ATTRIBUTE_UNUSED;
423      asection * sec;
424      struct internal_reloc * rel;
425      struct coff_link_hash_entry * h;
426      struct internal_syment * sym;
427      bfd_vma * addendp;
428 {
429   reloc_howto_type * howto;
430 
431   howto = sh_coff_howtos + rel->r_type;
432 
433   *addendp = 0;
434 
435   if (howto->pc_relative)
436     *addendp += sec->vma;
437 
438   if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
439     {
440       /* This is a common symbol.  The section contents include the
441 	 size (sym->n_value) as an addend.  The relocate_section
442 	 function will be adding in the final value of the symbol.  We
443 	 need to subtract out the current size in order to get the
444 	 correct result.  */
445       BFD_ASSERT (h != NULL);
446     }
447 
448   if (howto->pc_relative)
449     {
450       *addendp -= 4;
451 
452       /* If the symbol is defined, then the generic code is going to
453          add back the symbol value in order to cancel out an
454          adjustment it made to the addend.  However, we set the addend
455          to 0 at the start of this function.  We need to adjust here,
456          to avoid the adjustment the generic code will make.  FIXME:
457          This is getting a bit hackish.  */
458       if (sym != NULL && sym->n_scnum != 0)
459 	*addendp -= sym->n_value;
460     }
461 
462   if (rel->r_type == R_SH_IMAGEBASE)
463     *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
464 
465   return howto;
466 }
467 
468 #endif /* COFF_WITH_PE */
469 
470 /* This structure is used to map BFD reloc codes to SH PE relocs.  */
471 struct shcoff_reloc_map
472 {
473   bfd_reloc_code_real_type bfd_reloc_val;
474   unsigned char shcoff_reloc_val;
475 };
476 
477 #ifdef COFF_WITH_PE
478 /* An array mapping BFD reloc codes to SH PE relocs.  */
479 static const struct shcoff_reloc_map sh_reloc_map[] =
480 {
481   { BFD_RELOC_32, R_SH_IMM32CE },
482   { BFD_RELOC_RVA, R_SH_IMAGEBASE },
483   { BFD_RELOC_CTOR, R_SH_IMM32CE },
484 };
485 #else
/* An array mapping BFD reloc codes to SH COFF relocs.  */
487 static const struct shcoff_reloc_map sh_reloc_map[] =
488 {
489   { BFD_RELOC_32, R_SH_IMM32 },
490   { BFD_RELOC_CTOR, R_SH_IMM32 },
491 };
492 #endif
493 
/* Given a BFD reloc code, return the howto structure for the
   corresponding SH reloc.  */
496 #define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
497 
498 static reloc_howto_type *
sh_coff_reloc_type_lookup (abfd, code)
500      bfd * abfd ATTRIBUTE_UNUSED;
501      bfd_reloc_code_real_type code;
502 {
503   unsigned int i;
504 
505   for (i = ARRAY_SIZE (sh_reloc_map); i--;)
506     if (sh_reloc_map[i].bfd_reloc_val == code)
507       return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
508 
509   fprintf (stderr, "SH Error: unknown reloc type %d\n", code);
510   return NULL;
511 }
512 
513 /* This macro is used in coffcode.h to get the howto corresponding to
514    an internal reloc.  */
515 
516 #define RTYPE2HOWTO(relent, internal)		\
517   ((relent)->howto =				\
518    ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
519     ? &sh_coff_howtos[(internal)->r_type]	\
520     : (reloc_howto_type *) NULL))
521 
522 /* This is the same as the macro in coffcode.h, except that it copies
523    r_offset into reloc_entry->addend for some relocs.  */
524 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)                \
525   {                                                             \
526     coff_symbol_type *coffsym = (coff_symbol_type *) NULL;      \
527     if (ptr && bfd_asymbol_bfd (ptr) != abfd)                   \
528       coffsym = (obj_symbols (abfd)                             \
529                  + (cache_ptr->sym_ptr_ptr - symbols));         \
530     else if (ptr)                                               \
531       coffsym = coff_symbol_from (abfd, ptr);                   \
532     if (coffsym != (coff_symbol_type *) NULL                    \
533         && coffsym->native->u.syment.n_scnum == 0)              \
534       cache_ptr->addend = 0;                                    \
535     else if (ptr && bfd_asymbol_bfd (ptr) == abfd               \
536              && ptr->section != (asection *) NULL)              \
537       cache_ptr->addend = - (ptr->section->vma + ptr->value);   \
538     else                                                        \
539       cache_ptr->addend = 0;                                    \
540     if ((reloc).r_type == R_SH_SWITCH8				\
541 	|| (reloc).r_type == R_SH_SWITCH16			\
542 	|| (reloc).r_type == R_SH_SWITCH32			\
543 	|| (reloc).r_type == R_SH_USES				\
544 	|| (reloc).r_type == R_SH_COUNT				\
545 	|| (reloc).r_type == R_SH_ALIGN)			\
546       cache_ptr->addend = (reloc).r_offset;			\
547   }
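
/* The relocs special-cased above do not describe an ordinary symbol
   reference: R_SH_USES, R_SH_COUNT, R_SH_ALIGN and the R_SH_SWITCH
   relocs carry their operand (a PC relative offset, a use count, an
   alignment power, or the difference back to the label the switch
   table entry is measured from) in the r_offset field, which is why
   that value is copied into the BFD addend here.  */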
548 
549 /* This is the howto function for the SH relocations.  */
550 
551 static bfd_reloc_status_type
sh_reloc (abfd, reloc_entry, symbol_in, data, input_section, output_bfd,
553 	  error_message)
554      bfd *abfd;
555      arelent *reloc_entry;
556      asymbol *symbol_in;
557      PTR data;
558      asection *input_section;
559      bfd *output_bfd;
560      char **error_message ATTRIBUTE_UNUSED;
561 {
562   unsigned long insn;
563   bfd_vma sym_value;
564   unsigned short r_type;
565   bfd_vma addr = reloc_entry->address;
566   bfd_byte *hit_data = addr + (bfd_byte *) data;
567 
568   r_type = reloc_entry->howto->type;
569 
570   if (output_bfd != NULL)
571     {
572       /* Partial linking--do nothing.  */
573       reloc_entry->address += input_section->output_offset;
574       return bfd_reloc_ok;
575     }
576 
577   /* Almost all relocs have to do with relaxing.  If any work must be
578      done for them, it has been done in sh_relax_section.  */
579   if (r_type != R_SH_IMM32
580 #ifdef COFF_WITH_PE
581       && r_type != R_SH_IMM32CE
582       && r_type != R_SH_IMAGEBASE
583 #endif
584       && (r_type != R_SH_PCDISP
585 	  || (symbol_in->flags & BSF_LOCAL) != 0))
586     return bfd_reloc_ok;
587 
588   if (symbol_in != NULL
589       && bfd_is_und_section (symbol_in->section))
590     return bfd_reloc_undefined;
591 
592   sym_value = get_symbol_value (symbol_in);
593 
594   switch (r_type)
595     {
596     case R_SH_IMM32:
597 #ifdef COFF_WITH_PE
598     case R_SH_IMM32CE:
599 #endif
600       insn = bfd_get_32 (abfd, hit_data);
601       insn += sym_value + reloc_entry->addend;
602       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
603       break;
604 #ifdef COFF_WITH_PE
605     case R_SH_IMAGEBASE:
606       insn = bfd_get_32 (abfd, hit_data);
607       insn += sym_value + reloc_entry->addend;
608       insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
609       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
610       break;
611 #endif
612     case R_SH_PCDISP:
613       insn = bfd_get_16 (abfd, hit_data);
614       sym_value += reloc_entry->addend;
615       sym_value -= (input_section->output_section->vma
616 		    + input_section->output_offset
617 		    + addr
618 		    + 4);
619       sym_value += (insn & 0xfff) << 1;
620       if (insn & 0x800)
621 	sym_value -= 0x1000;
622       insn = (insn & 0xf000) | (sym_value & 0xfff);
623       bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
624       if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
625 	return bfd_reloc_overflow;
626       break;
627     default:
628       abort ();
629       break;
630     }
631 
632   return bfd_reloc_ok;
633 }
634 
635 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
636 
637 /* We can do relaxing.  */
638 #define coff_bfd_relax_section sh_relax_section
639 
640 /* We use the special COFF backend linker.  */
641 #define coff_relocate_section sh_relocate_section
642 
643 /* When relaxing, we need to use special code to get the relocated
644    section contents.  */
645 #define coff_bfd_get_relocated_section_contents \
646   sh_coff_get_relocated_section_contents
647 
648 #include "coffcode.h"
649 
650 /* This function handles relaxing on the SH.
651 
652    Function calls on the SH look like this:
653 
654        movl  L1,r0
655        ...
656        jsr   @r0
657        ...
658      L1:
659        .long function
660 
661    The compiler and assembler will cooperate to create R_SH_USES
662    relocs on the jsr instructions.  The r_offset field of the
663    R_SH_USES reloc is the PC relative offset to the instruction which
664    loads the register (the r_offset field is computed as though it
665    were a jump instruction, so the offset value is actually from four
666    bytes past the instruction).  The linker can use this reloc to
667    determine just which function is being called, and thus decide
668    whether it is possible to replace the jsr with a bsr.
669 
670    If multiple function calls are all based on a single register load
671    (i.e., the same function is called multiple times), the compiler
672    guarantees that each function call will have an R_SH_USES reloc.
673    Therefore, if the linker is able to convert each R_SH_USES reloc
674    which refers to that address, it can safely eliminate the register
675    load.
676 
677    When the assembler creates an R_SH_USES reloc, it examines it to
678    determine which address is being loaded (L1 in the above example).
679    It then counts the number of references to that address, and
680    creates an R_SH_COUNT reloc at that address.  The r_offset field of
681    the R_SH_COUNT reloc will be the number of references.  If the
682    linker is able to eliminate a register load, it can use the
683    R_SH_COUNT reloc to see whether it can also eliminate the function
684    address.
685 
686    SH relaxing also handles another, unrelated, matter.  On the SH, if
687    a load or store instruction is not aligned on a four byte boundary,
688    the memory cycle interferes with the 32 bit instruction fetch,
689    causing a one cycle bubble in the pipeline.  Therefore, we try to
690    align load and store instructions on four byte boundaries if we
691    can, by swapping them with one of the adjacent instructions.  */
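
/* A sketch of the effect of the jsr/bsr transformation described above
   (purely illustrative; the work is done by sh_relax_section and
   sh_relax_delete_bytes below):

       bsr   function
       ...

   The R_SH_USES reloc is turned into an R_SH_PCDISP reloc on the new
   bsr, the two byte register load is deleted, and, once the R_SH_COUNT
   use count at L1 reaches zero, the four byte function address is
   deleted as well.  */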
692 
693 static bfd_boolean
sh_relax_section (abfd, sec, link_info, again)
695      bfd *abfd;
696      asection *sec;
697      struct bfd_link_info *link_info;
698      bfd_boolean *again;
699 {
700   struct internal_reloc *internal_relocs;
701   bfd_boolean have_code;
702   struct internal_reloc *irel, *irelend;
703   bfd_byte *contents = NULL;
704 
705   *again = FALSE;
706 
707   if (link_info->relocatable
708       || (sec->flags & SEC_RELOC) == 0
709       || sec->reloc_count == 0)
710     return TRUE;
711 
712   if (coff_section_data (abfd, sec) == NULL)
713     {
714       bfd_size_type amt = sizeof (struct coff_section_tdata);
715       sec->used_by_bfd = (PTR) bfd_zalloc (abfd, amt);
716       if (sec->used_by_bfd == NULL)
717 	return FALSE;
718     }
719 
720   internal_relocs = (_bfd_coff_read_internal_relocs
721 		     (abfd, sec, link_info->keep_memory,
722 		      (bfd_byte *) NULL, FALSE,
723 		      (struct internal_reloc *) NULL));
724   if (internal_relocs == NULL)
725     goto error_return;
726 
727   have_code = FALSE;
728 
729   irelend = internal_relocs + sec->reloc_count;
730   for (irel = internal_relocs; irel < irelend; irel++)
731     {
732       bfd_vma laddr, paddr, symval;
733       unsigned short insn;
734       struct internal_reloc *irelfn, *irelscan, *irelcount;
735       struct internal_syment sym;
736       bfd_signed_vma foff;
737 
738       if (irel->r_type == R_SH_CODE)
739 	have_code = TRUE;
740 
741       if (irel->r_type != R_SH_USES)
742 	continue;
743 
744       /* Get the section contents.  */
745       if (contents == NULL)
746 	{
747 	  if (coff_section_data (abfd, sec)->contents != NULL)
748 	    contents = coff_section_data (abfd, sec)->contents;
749 	  else
750 	    {
751 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
752 		goto error_return;
753 	    }
754 	}
755 
      /* The r_offset field of the R_SH_USES reloc will point us to
         the register load.  The 4 is because the r_offset field is
         computed as though it were a jump offset, which is measured
         from 4 bytes after the jump instruction.  */
760       laddr = irel->r_vaddr - sec->vma + 4;
761       /* Careful to sign extend the 32-bit offset.  */
762       laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
763       if (laddr >= sec->size)
764 	{
765 	  (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
766 				 abfd, (unsigned long) irel->r_vaddr);
767 	  continue;
768 	}
769       insn = bfd_get_16 (abfd, contents + laddr);
770 
771       /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
772       if ((insn & 0xf000) != 0xd000)
773 	{
774 	  ((*_bfd_error_handler)
775 	   ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
776 	    abfd, (unsigned long) irel->r_vaddr, insn));
777 	  continue;
778 	}
779 
      /* Get the address from which the register is being loaded.  The
         displacement in the mov.l instruction is quadrupled.  It is a
         displacement from four bytes after the mov.l instruction, but,
         before adding in the PC address, the two least significant
         bits of the PC are cleared.  We assume that the section is
         aligned on a four byte boundary.  */
786       paddr = insn & 0xff;
787       paddr *= 4;
788       paddr += (laddr + 4) &~ (bfd_vma) 3;
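      /* For example (purely illustrative values): a mov.l at laddr 0x102
         with a displacement field of 3 gives
         paddr = 3 * 4 + ((0x102 + 4) & ~3) = 0xc + 0x104 = 0x110.  */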
789       if (paddr >= sec->size)
790 	{
791 	  ((*_bfd_error_handler)
792 	   ("%B: 0x%lx: warning: bad R_SH_USES load offset",
793 	    abfd, (unsigned long) irel->r_vaddr));
794 	  continue;
795 	}
796 
797       /* Get the reloc for the address from which the register is
798          being loaded.  This reloc will tell us which function is
799          actually being called.  */
800       paddr += sec->vma;
801       for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
802 	if (irelfn->r_vaddr == paddr
803 #ifdef COFF_WITH_PE
804 	    && (irelfn->r_type == R_SH_IMM32
805 		|| irelfn->r_type == R_SH_IMM32CE
806 		|| irelfn->r_type == R_SH_IMAGEBASE)
807 
808 #else
809 	    && irelfn->r_type == R_SH_IMM32
810 #endif
811 	    )
812 	  break;
813       if (irelfn >= irelend)
814 	{
815 	  ((*_bfd_error_handler)
816 	   ("%B: 0x%lx: warning: could not find expected reloc",
817 	    abfd, (unsigned long) paddr));
818 	  continue;
819 	}
820 
821       /* Get the value of the symbol referred to by the reloc.  */
822       if (! _bfd_coff_get_external_symbols (abfd))
823 	goto error_return;
824       bfd_coff_swap_sym_in (abfd,
825 			    ((bfd_byte *) obj_coff_external_syms (abfd)
826 			     + (irelfn->r_symndx
827 				* bfd_coff_symesz (abfd))),
828 			    &sym);
829       if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
830 	{
831 	  ((*_bfd_error_handler)
832 	   ("%B: 0x%lx: warning: symbol in unexpected section",
833 	    abfd, (unsigned long) paddr));
834 	  continue;
835 	}
836 
837       if (sym.n_sclass != C_EXT)
838 	{
839 	  symval = (sym.n_value
840 		    - sec->vma
841 		    + sec->output_section->vma
842 		    + sec->output_offset);
843 	}
844       else
845 	{
846 	  struct coff_link_hash_entry *h;
847 
848 	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
849 	  BFD_ASSERT (h != NULL);
850 	  if (h->root.type != bfd_link_hash_defined
851 	      && h->root.type != bfd_link_hash_defweak)
852 	    {
853 	      /* This appears to be a reference to an undefined
854                  symbol.  Just ignore it--it will be caught by the
855                  regular reloc processing.  */
856 	      continue;
857 	    }
858 
859 	  symval = (h->root.u.def.value
860 		    + h->root.u.def.section->output_section->vma
861 		    + h->root.u.def.section->output_offset);
862 	}
863 
864       symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
865 
866       /* See if this function call can be shortened.  */
867       foff = (symval
868 	      - (irel->r_vaddr
869 		 - sec->vma
870 		 + sec->output_section->vma
871 		 + sec->output_offset
872 		 + 4));
873       if (foff < -0x1000 || foff >= 0x1000)
874 	{
875 	  /* After all that work, we can't shorten this function call.  */
876 	  continue;
877 	}
878 
879       /* Shorten the function call.  */
880 
881       /* For simplicity of coding, we are going to modify the section
882 	 contents, the section relocs, and the BFD symbol table.  We
883 	 must tell the rest of the code not to free up this
884 	 information.  It would be possible to instead create a table
885 	 of changes which have to be made, as is done in coff-mips.c;
886 	 that would be more work, but would require less memory when
887 	 the linker is run.  */
888 
889       coff_section_data (abfd, sec)->relocs = internal_relocs;
890       coff_section_data (abfd, sec)->keep_relocs = TRUE;
891 
892       coff_section_data (abfd, sec)->contents = contents;
893       coff_section_data (abfd, sec)->keep_contents = TRUE;
894 
895       obj_coff_keep_syms (abfd) = TRUE;
896 
897       /* Replace the jsr with a bsr.  */
898 
899       /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
900          replace the jsr with a bsr.  */
901       irel->r_type = R_SH_PCDISP;
902       irel->r_symndx = irelfn->r_symndx;
903       if (sym.n_sclass != C_EXT)
904 	{
905 	  /* If this needs to be changed because of future relaxing,
906              it will be handled here like other internal PCDISP
907              relocs.  */
908 	  bfd_put_16 (abfd,
909 		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
910 		      contents + irel->r_vaddr - sec->vma);
911 	}
912       else
913 	{
914 	  /* We can't fully resolve this yet, because the external
915              symbol value may be changed by future relaxing.  We let
916              the final link phase handle it.  */
917 	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
918 		      contents + irel->r_vaddr - sec->vma);
919 	}
920 
921       /* See if there is another R_SH_USES reloc referring to the same
922          register load.  */
923       for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
924 	if (irelscan->r_type == R_SH_USES
925 	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
926 	  break;
927       if (irelscan < irelend)
928 	{
929 	  /* Some other function call depends upon this register load,
930 	     and we have not yet converted that function call.
931 	     Indeed, we may never be able to convert it.  There is
932 	     nothing else we can do at this point.  */
933 	  continue;
934 	}
935 
936       /* Look for a R_SH_COUNT reloc on the location where the
937          function address is stored.  Do this before deleting any
938          bytes, to avoid confusion about the address.  */
939       for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
940 	if (irelcount->r_vaddr == paddr
941 	    && irelcount->r_type == R_SH_COUNT)
942 	  break;
943 
944       /* Delete the register load.  */
945       if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
946 	goto error_return;
947 
948       /* That will change things, so, just in case it permits some
949          other function call to come within range, we should relax
950          again.  Note that this is not required, and it may be slow.  */
951       *again = TRUE;
952 
953       /* Now check whether we got a COUNT reloc.  */
954       if (irelcount >= irelend)
955 	{
956 	  ((*_bfd_error_handler)
957 	   ("%B: 0x%lx: warning: could not find expected COUNT reloc",
958 	    abfd, (unsigned long) paddr));
959 	  continue;
960 	}
961 
962       /* The number of uses is stored in the r_offset field.  We've
963          just deleted one.  */
964       if (irelcount->r_offset == 0)
965 	{
966 	  ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
967 				  abfd, (unsigned long) paddr));
968 	  continue;
969 	}
970 
971       --irelcount->r_offset;
972 
973       /* If there are no more uses, we can delete the address.  Reload
974          the address from irelfn, in case it was changed by the
975          previous call to sh_relax_delete_bytes.  */
976       if (irelcount->r_offset == 0)
977 	{
978 	  if (! sh_relax_delete_bytes (abfd, sec,
979 				       irelfn->r_vaddr - sec->vma, 4))
980 	    goto error_return;
981 	}
982 
983       /* We've done all we can with that function call.  */
984     }
985 
986   /* Look for load and store instructions that we can align on four
987      byte boundaries.  */
988   if (have_code)
989     {
990       bfd_boolean swapped;
991 
992       /* Get the section contents.  */
993       if (contents == NULL)
994 	{
995 	  if (coff_section_data (abfd, sec)->contents != NULL)
996 	    contents = coff_section_data (abfd, sec)->contents;
997 	  else
998 	    {
999 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1000 		goto error_return;
1001 	    }
1002 	}
1003 
1004       if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1005 	goto error_return;
1006 
1007       if (swapped)
1008 	{
1009 	  coff_section_data (abfd, sec)->relocs = internal_relocs;
1010 	  coff_section_data (abfd, sec)->keep_relocs = TRUE;
1011 
1012 	  coff_section_data (abfd, sec)->contents = contents;
1013 	  coff_section_data (abfd, sec)->keep_contents = TRUE;
1014 
1015 	  obj_coff_keep_syms (abfd) = TRUE;
1016 	}
1017     }
1018 
1019   if (internal_relocs != NULL
1020       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1021     {
1022       if (! link_info->keep_memory)
1023 	free (internal_relocs);
1024       else
1025 	coff_section_data (abfd, sec)->relocs = internal_relocs;
1026     }
1027 
1028   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1029     {
1030       if (! link_info->keep_memory)
1031 	free (contents);
1032       else
1033 	/* Cache the section contents for coff_link_input_bfd.  */
1034 	coff_section_data (abfd, sec)->contents = contents;
1035     }
1036 
1037   return TRUE;
1038 
1039  error_return:
1040   if (internal_relocs != NULL
1041       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1042     free (internal_relocs);
1043   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1044     free (contents);
1045   return FALSE;
1046 }
1047 
1048 /* Delete some bytes from a section while relaxing.  */
1049 
1050 static bfd_boolean
sh_relax_delete_bytes (abfd, sec, addr, count)
1052      bfd *abfd;
1053      asection *sec;
1054      bfd_vma addr;
1055      int count;
1056 {
1057   bfd_byte *contents;
1058   struct internal_reloc *irel, *irelend;
1059   struct internal_reloc *irelalign;
1060   bfd_vma toaddr;
1061   bfd_byte *esym, *esymend;
1062   bfd_size_type symesz;
1063   struct coff_link_hash_entry **sym_hash;
1064   asection *o;
1065 
1066   contents = coff_section_data (abfd, sec)->contents;
1067 
  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */
1070 
1071   irelalign = NULL;
1072   toaddr = sec->size;
1073 
1074   irel = coff_section_data (abfd, sec)->relocs;
1075   irelend = irel + sec->reloc_count;
1076   for (; irel < irelend; irel++)
1077     {
1078       if (irel->r_type == R_SH_ALIGN
1079 	  && irel->r_vaddr - sec->vma > addr
1080 	  && count < (1 << irel->r_offset))
1081 	{
1082 	  irelalign = irel;
1083 	  toaddr = irel->r_vaddr - sec->vma;
1084 	  break;
1085 	}
1086     }
1087 
1088   /* Actually delete the bytes.  */
1089   memmove (contents + addr, contents + addr + count,
1090 	   (size_t) (toaddr - addr - count));
1091   if (irelalign == NULL)
1092     sec->size -= count;
1093   else
1094     {
1095       int i;
1096 
1097 #define NOP_OPCODE (0x0009)
1098 
1099       BFD_ASSERT ((count & 1) == 0);
1100       for (i = 0; i < count; i += 2)
1101 	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1102     }
1103 
1104   /* Adjust all the relocs.  */
1105   for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1106     {
1107       bfd_vma nraddr, stop;
1108       bfd_vma start = 0;
1109       int insn = 0;
1110       struct internal_syment sym;
1111       int off, adjust, oinsn;
1112       bfd_signed_vma voff = 0;
1113       bfd_boolean overflow;
1114 
1115       /* Get the new reloc address.  */
1116       nraddr = irel->r_vaddr - sec->vma;
1117       if ((irel->r_vaddr - sec->vma > addr
1118 	   && irel->r_vaddr - sec->vma < toaddr)
1119 	  || (irel->r_type == R_SH_ALIGN
1120 	      && irel->r_vaddr - sec->vma == toaddr))
1121 	nraddr -= count;
1122 
1123       /* See if this reloc was for the bytes we have deleted, in which
1124 	 case we no longer care about it.  Don't delete relocs which
1125 	 represent addresses, though.  */
1126       if (irel->r_vaddr - sec->vma >= addr
1127 	  && irel->r_vaddr - sec->vma < addr + count
1128 	  && irel->r_type != R_SH_ALIGN
1129 	  && irel->r_type != R_SH_CODE
1130 	  && irel->r_type != R_SH_DATA
1131 	  && irel->r_type != R_SH_LABEL)
1132 	irel->r_type = R_SH_UNUSED;
1133 
1134       /* If this is a PC relative reloc, see if the range it covers
1135          includes the bytes we have deleted.  */
1136       switch (irel->r_type)
1137 	{
1138 	default:
1139 	  break;
1140 
1141 	case R_SH_PCDISP8BY2:
1142 	case R_SH_PCDISP:
1143 	case R_SH_PCRELIMM8BY2:
1144 	case R_SH_PCRELIMM8BY4:
1145 	  start = irel->r_vaddr - sec->vma;
1146 	  insn = bfd_get_16 (abfd, contents + nraddr);
1147 	  break;
1148 	}
1149 
1150       switch (irel->r_type)
1151 	{
1152 	default:
1153 	  start = stop = addr;
1154 	  break;
1155 
1156 	case R_SH_IMM32:
1157 #ifdef COFF_WITH_PE
1158 	case R_SH_IMM32CE:
1159 	case R_SH_IMAGEBASE:
1160 #endif
	  /* If this reloc is against a symbol defined in this
             section, and the symbol will not be adjusted below, we
             must check the addend to see whether it will put the value
             in range to be adjusted, and hence must be changed.  */
1165 	  bfd_coff_swap_sym_in (abfd,
1166 				((bfd_byte *) obj_coff_external_syms (abfd)
1167 				 + (irel->r_symndx
1168 				    * bfd_coff_symesz (abfd))),
1169 				&sym);
1170 	  if (sym.n_sclass != C_EXT
1171 	      && sym.n_scnum == sec->target_index
1172 	      && ((bfd_vma) sym.n_value <= addr
1173 		  || (bfd_vma) sym.n_value >= toaddr))
1174 	    {
1175 	      bfd_vma val;
1176 
1177 	      val = bfd_get_32 (abfd, contents + nraddr);
1178 	      val += sym.n_value;
1179 	      if (val > addr && val < toaddr)
1180 		bfd_put_32 (abfd, val - count, contents + nraddr);
1181 	    }
1182 	  start = stop = addr;
1183 	  break;
1184 
1185 	case R_SH_PCDISP8BY2:
1186 	  off = insn & 0xff;
1187 	  if (off & 0x80)
1188 	    off -= 0x100;
1189 	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1190 	  break;
1191 
1192 	case R_SH_PCDISP:
1193 	  bfd_coff_swap_sym_in (abfd,
1194 				((bfd_byte *) obj_coff_external_syms (abfd)
1195 				 + (irel->r_symndx
1196 				    * bfd_coff_symesz (abfd))),
1197 				&sym);
1198 	  if (sym.n_sclass == C_EXT)
1199 	    start = stop = addr;
1200 	  else
1201 	    {
1202 	      off = insn & 0xfff;
1203 	      if (off & 0x800)
1204 		off -= 0x1000;
1205 	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1206 	    }
1207 	  break;
1208 
1209 	case R_SH_PCRELIMM8BY2:
1210 	  off = insn & 0xff;
1211 	  stop = start + 4 + off * 2;
1212 	  break;
1213 
1214 	case R_SH_PCRELIMM8BY4:
1215 	  off = insn & 0xff;
1216 	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1217 	  break;
1218 
1219 	case R_SH_SWITCH8:
1220 	case R_SH_SWITCH16:
1221 	case R_SH_SWITCH32:
	  /* These reloc types represent
1223 	       .word L2-L1
1224 	     The r_offset field holds the difference between the reloc
1225 	     address and L1.  That is the start of the reloc, and
1226 	     adding in the contents gives us the top.  We must adjust
1227 	     both the r_offset field and the section contents.  */
1228 
1229 	  start = irel->r_vaddr - sec->vma;
1230 	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1231 
1232 	  if (start > addr
1233 	      && start < toaddr
1234 	      && (stop <= addr || stop >= toaddr))
1235 	    irel->r_offset += count;
1236 	  else if (stop > addr
1237 		   && stop < toaddr
1238 		   && (start <= addr || start >= toaddr))
1239 	    irel->r_offset -= count;
1240 
1241 	  start = stop;
1242 
1243 	  if (irel->r_type == R_SH_SWITCH16)
1244 	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
1245 	  else if (irel->r_type == R_SH_SWITCH8)
1246 	    voff = bfd_get_8 (abfd, contents + nraddr);
1247 	  else
1248 	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
1249 	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1250 
1251 	  break;
1252 
1253 	case R_SH_USES:
1254 	  start = irel->r_vaddr - sec->vma;
1255 	  stop = (bfd_vma) ((bfd_signed_vma) start
1256 			    + (long) irel->r_offset
1257 			    + 4);
1258 	  break;
1259 	}
1260 
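      /* If the reloc's source address lies in the region that is being
         shifted down (between ADDR and TOADDR) but its target does not,
         the displacement grows by COUNT; if only the target lies in
         that region, the displacement shrinks by COUNT; otherwise both
         or neither moved and no adjustment is needed.  */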
1261       if (start > addr
1262 	  && start < toaddr
1263 	  && (stop <= addr || stop >= toaddr))
1264 	adjust = count;
1265       else if (stop > addr
1266 	       && stop < toaddr
1267 	       && (start <= addr || start >= toaddr))
1268 	adjust = - count;
1269       else
1270 	adjust = 0;
1271 
1272       if (adjust != 0)
1273 	{
1274 	  oinsn = insn;
1275 	  overflow = FALSE;
1276 	  switch (irel->r_type)
1277 	    {
1278 	    default:
1279 	      abort ();
1280 	      break;
1281 
1282 	    case R_SH_PCDISP8BY2:
1283 	    case R_SH_PCRELIMM8BY2:
1284 	      insn += adjust / 2;
1285 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1286 		overflow = TRUE;
1287 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1288 	      break;
1289 
1290 	    case R_SH_PCDISP:
1291 	      insn += adjust / 2;
1292 	      if ((oinsn & 0xf000) != (insn & 0xf000))
1293 		overflow = TRUE;
1294 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1295 	      break;
1296 
1297 	    case R_SH_PCRELIMM8BY4:
1298 	      BFD_ASSERT (adjust == count || count >= 4);
1299 	      if (count >= 4)
1300 		insn += adjust / 4;
1301 	      else
1302 		{
1303 		  if ((irel->r_vaddr & 3) == 0)
1304 		    ++insn;
1305 		}
1306 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1307 		overflow = TRUE;
1308 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1309 	      break;
1310 
1311 	    case R_SH_SWITCH8:
1312 	      voff += adjust;
1313 	      if (voff < 0 || voff >= 0xff)
1314 		overflow = TRUE;
1315 	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1316 	      break;
1317 
1318 	    case R_SH_SWITCH16:
1319 	      voff += adjust;
1320 	      if (voff < - 0x8000 || voff >= 0x8000)
1321 		overflow = TRUE;
1322 	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1323 	      break;
1324 
1325 	    case R_SH_SWITCH32:
1326 	      voff += adjust;
1327 	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1328 	      break;
1329 
1330 	    case R_SH_USES:
1331 	      irel->r_offset += adjust;
1332 	      break;
1333 	    }
1334 
1335 	  if (overflow)
1336 	    {
1337 	      ((*_bfd_error_handler)
1338 	       ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1339 		abfd, (unsigned long) irel->r_vaddr));
1340 	      bfd_set_error (bfd_error_bad_value);
1341 	      return FALSE;
1342 	    }
1343 	}
1344 
1345       irel->r_vaddr = nraddr + sec->vma;
1346     }
1347 
  /* Look through all the other sections.  If they contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
1351   for (o = abfd->sections; o != NULL; o = o->next)
1352     {
1353       struct internal_reloc *internal_relocs;
1354       struct internal_reloc *irelscan, *irelscanend;
1355       bfd_byte *ocontents;
1356 
1357       if (o == sec
1358 	  || (o->flags & SEC_RELOC) == 0
1359 	  || o->reloc_count == 0)
1360 	continue;
1361 
      /* We always cache the relocs.  Perhaps, if info->keep_memory is
         FALSE, we should free them, if we are permitted to, when we
         leave sh_relax_section.  */
1365       internal_relocs = (_bfd_coff_read_internal_relocs
1366 			 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1367 			  (struct internal_reloc *) NULL));
1368       if (internal_relocs == NULL)
1369 	return FALSE;
1370 
1371       ocontents = NULL;
1372       irelscanend = internal_relocs + o->reloc_count;
1373       for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1374 	{
1375 	  struct internal_syment sym;
1376 
1377 #ifdef COFF_WITH_PE
1378 	  if (irelscan->r_type != R_SH_IMM32
1379 	      && irelscan->r_type != R_SH_IMAGEBASE
1380 	      && irelscan->r_type != R_SH_IMM32CE)
1381 #else
1382 	  if (irelscan->r_type != R_SH_IMM32)
1383 #endif
1384 	    continue;
1385 
1386 	  bfd_coff_swap_sym_in (abfd,
1387 				((bfd_byte *) obj_coff_external_syms (abfd)
1388 				 + (irelscan->r_symndx
1389 				    * bfd_coff_symesz (abfd))),
1390 				&sym);
1391 	  if (sym.n_sclass != C_EXT
1392 	      && sym.n_scnum == sec->target_index
1393 	      && ((bfd_vma) sym.n_value <= addr
1394 		  || (bfd_vma) sym.n_value >= toaddr))
1395 	    {
1396 	      bfd_vma val;
1397 
1398 	      if (ocontents == NULL)
1399 		{
1400 		  if (coff_section_data (abfd, o)->contents != NULL)
1401 		    ocontents = coff_section_data (abfd, o)->contents;
1402 		  else
1403 		    {
1404 		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1405 			return FALSE;
		      /* We always cache the section contents.
                         Perhaps, if info->keep_memory is FALSE, we
                         should free them, if we are permitted to,
                         when we leave sh_relax_section.  */
1410 		      coff_section_data (abfd, o)->contents = ocontents;
1411 		    }
1412 		}
1413 
1414 	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1415 	      val += sym.n_value;
1416 	      if (val > addr && val < toaddr)
1417 		bfd_put_32 (abfd, val - count,
1418 			    ocontents + irelscan->r_vaddr - o->vma);
1419 
1420 	      coff_section_data (abfd, o)->keep_contents = TRUE;
1421 	    }
1422 	}
1423     }
1424 
1425   /* Adjusting the internal symbols will not work if something has
1426      already retrieved the generic symbols.  It would be possible to
1427      make this work by adjusting the generic symbols at the same time.
1428      However, this case should not arise in normal usage.  */
1429   if (obj_symbols (abfd) != NULL
1430       || obj_raw_syments (abfd) != NULL)
1431     {
1432       ((*_bfd_error_handler)
1433        ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1434       bfd_set_error (bfd_error_invalid_operation);
1435       return FALSE;
1436     }
1437 
1438   /* Adjust all the symbols.  */
1439   sym_hash = obj_coff_sym_hashes (abfd);
1440   symesz = bfd_coff_symesz (abfd);
1441   esym = (bfd_byte *) obj_coff_external_syms (abfd);
1442   esymend = esym + obj_raw_syment_count (abfd) * symesz;
1443   while (esym < esymend)
1444     {
1445       struct internal_syment isym;
1446 
1447       bfd_coff_swap_sym_in (abfd, (PTR) esym, (PTR) &isym);
1448 
1449       if (isym.n_scnum == sec->target_index
1450 	  && (bfd_vma) isym.n_value > addr
1451 	  && (bfd_vma) isym.n_value < toaddr)
1452 	{
1453 	  isym.n_value -= count;
1454 
1455 	  bfd_coff_swap_sym_out (abfd, (PTR) &isym, (PTR) esym);
1456 
1457 	  if (*sym_hash != NULL)
1458 	    {
1459 	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1460 			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
1461 	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1462 			  && (*sym_hash)->root.u.def.value < toaddr);
1463 	      (*sym_hash)->root.u.def.value -= count;
1464 	    }
1465 	}
1466 
1467       esym += (isym.n_numaux + 1) * symesz;
1468       sym_hash += isym.n_numaux + 1;
1469     }
1470 
1471   /* See if we can move the ALIGN reloc forward.  We have adjusted
1472      r_vaddr for it already.  */
1473   if (irelalign != NULL)
1474     {
1475       bfd_vma alignto, alignaddr;
1476 
1477       alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1478       alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1479 			     1 << irelalign->r_offset);
1480       if (alignto != alignaddr)
1481 	{
1482 	  /* Tail recursion.  */
1483 	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
1484 					(int) (alignto - alignaddr));
1485 	}
1486     }
1487 
1488   return TRUE;
1489 }
1490 
1491 /* This is yet another version of the SH opcode table, used to rapidly
1492    get information about a particular instruction.  */
1493 
1494 /* The opcode map is represented by an array of these structures.  The
1495    array is indexed by the high order four bits in the instruction.  */
1496 
1497 struct sh_major_opcode
1498 {
1499   /* A pointer to the instruction list.  This is an array which
1500      contains all the instructions with this major opcode.  */
1501   const struct sh_minor_opcode *minor_opcodes;
1502   /* The number of elements in minor_opcodes.  */
1503   unsigned short count;
1504 };
1505 
/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */
1509 
1510 struct sh_minor_opcode
1511 {
1512   /* The sorted opcode list.  */
1513   const struct sh_opcode *opcodes;
1514   /* The number of elements in opcodes.  */
1515   unsigned short count;
1516   /* The mask value to use when searching the opcode list.  */
1517   unsigned short mask;
1518 };
1519 
1520 /* This structure holds information for an SH instruction.  An array
1521    of these structures is sorted in order by opcode.  */
1522 
1523 struct sh_opcode
1524 {
1525   /* The code for this instruction, after it has been anded with the
1526      mask value in the sh_major_opcode structure.  */
1527   unsigned short opcode;
1528   /* Flags for this instruction.  */
1529   unsigned long flags;
1530 };
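
/* A sketch of how these tables are used (illustrative only; the lookup
   itself is done by sh_insn_info): for an instruction word INSN, the
   major opcode table is indexed by the top four bits, (INSN >> 12) & 0xf;
   each sh_minor_opcode entry for that major opcode is then tried in
   turn, comparing INSN anded with the entry's mask against the opcodes
   in its sorted list, and the flags of the matching sh_opcode describe
   the instruction.  */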
1531 
/* Flags which appear in the sh_opcode structure.  */
1533 
1534 /* This instruction loads a value from memory.  */
1535 #define LOAD (0x1)
1536 
1537 /* This instruction stores a value to memory.  */
1538 #define STORE (0x2)
1539 
1540 /* This instruction is a branch.  */
1541 #define BRANCH (0x4)
1542 
1543 /* This instruction has a delay slot.  */
1544 #define DELAY (0x8)
1545 
1546 /* This instruction uses the value in the register in the field at
1547    mask 0x0f00 of the instruction.  */
1548 #define USES1 (0x10)
1549 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1550 
1551 /* This instruction uses the value in the register in the field at
1552    mask 0x00f0 of the instruction.  */
1553 #define USES2 (0x20)
1554 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1555 
1556 /* This instruction uses the value in register 0.  */
1557 #define USESR0 (0x40)
1558 
1559 /* This instruction sets the value in the register in the field at
1560    mask 0x0f00 of the instruction.  */
1561 #define SETS1 (0x80)
1562 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1563 
1564 /* This instruction sets the value in the register in the field at
1565    mask 0x00f0 of the instruction.  */
1566 #define SETS2 (0x100)
1567 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1568 
1569 /* This instruction sets register 0.  */
1570 #define SETSR0 (0x200)
1571 
1572 /* This instruction sets a special register.  */
1573 #define SETSSP (0x400)
1574 
1575 /* This instruction uses a special register.  */
1576 #define USESSP (0x800)
1577 
1578 /* This instruction uses the floating point register in the field at
1579    mask 0x0f00 of the instruction.  */
1580 #define USESF1 (0x1000)
1581 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1582 
1583 /* This instruction uses the floating point register in the field at
1584    mask 0x00f0 of the instruction.  */
1585 #define USESF2 (0x2000)
1586 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1587 
1588 /* This instruction uses floating point register 0.  */
1589 #define USESF0 (0x4000)
1590 
1591 /* This instruction sets the floating point register in the field at
1592    mask 0x0f00 of the instruction.  */
1593 #define SETSF1 (0x8000)
1594 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1595 
1596 #define USESAS (0x10000)
1597 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1598 #define USESR8 (0x20000)
1599 #define SETSAS (0x40000)
1600 #define SETSAS_REG(x) USESAS_REG (x)
1601 
1602 #define MAP(a) a, sizeof a / sizeof a[0]
1603 
1604 #ifndef COFF_IMAGE_WITH_PE
1605 static bfd_boolean sh_insn_uses_reg
1606   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1607 static bfd_boolean sh_insn_sets_reg
1608   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1609 static bfd_boolean sh_insn_uses_or_sets_reg
1610   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1611 static bfd_boolean sh_insn_uses_freg
1612   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1613 static bfd_boolean sh_insn_sets_freg
1614   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1615 static bfd_boolean sh_insn_uses_or_sets_freg
1616   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int));
1617 static bfd_boolean sh_insns_conflict
1618   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1619 	   const struct sh_opcode *));
1620 static bfd_boolean sh_load_use
1621   PARAMS ((unsigned int, const struct sh_opcode *, unsigned int,
1622 	   const struct sh_opcode *));
1623 
1624 /* The opcode maps.  */
1625 
1626 static const struct sh_opcode sh_opcode00[] =
1627 {
1628   { 0x0008, SETSSP },			/* clrt */
1629   { 0x0009, 0 },			/* nop */
1630   { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
1631   { 0x0018, SETSSP },			/* sett */
1632   { 0x0019, SETSSP },			/* div0u */
1633   { 0x001b, 0 },			/* sleep */
1634   { 0x0028, SETSSP },			/* clrmac */
1635   { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
1636   { 0x0038, USESSP | SETSSP },		/* ldtlb */
1637   { 0x0048, SETSSP },			/* clrs */
1638   { 0x0058, SETSSP }			/* sets */
1639 };
1640 
1641 static const struct sh_opcode sh_opcode01[] =
1642 {
1643   { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
1644   { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
1645   { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
1646   { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
1647   { 0x0029, SETS1 | USESSP },			/* movt rn */
1648   { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
1649   { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
1650   { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
1651   { 0x0083, LOAD | USES1 },			/* pref @rn */
1652   { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
1653   { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
1654   { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
1655   { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
1656   { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
1657 };
1658 
1659 static const struct sh_opcode sh_opcode02[] =
1660 {
1661   { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
1662   { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
1663   { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
1664   { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
1665   { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
1666   { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
1667   { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
1668   { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
1669   { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1670 };
1671 
1672 static const struct sh_minor_opcode sh_opcode0[] =
1673 {
1674   { MAP (sh_opcode00), 0xffff },
1675   { MAP (sh_opcode01), 0xf0ff },
1676   { MAP (sh_opcode02), 0xf00f }
1677 };
1678 
1679 static const struct sh_opcode sh_opcode10[] =
1680 {
1681   { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
1682 };
1683 
1684 static const struct sh_minor_opcode sh_opcode1[] =
1685 {
1686   { MAP (sh_opcode10), 0xf000 }
1687 };
1688 
1689 static const struct sh_opcode sh_opcode20[] =
1690 {
1691   { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
1692   { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
1693   { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
1694   { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
1695   { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
1696   { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
1697   { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
1698   { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
1699   { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
1700   { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
1701   { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
1702   { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
1703   { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
1704   { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
1705   { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
1706 };
1707 
1708 static const struct sh_minor_opcode sh_opcode2[] =
1709 {
1710   { MAP (sh_opcode20), 0xf00f }
1711 };
1712 
1713 static const struct sh_opcode sh_opcode30[] =
1714 {
1715   { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
1716   { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
1717   { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
1718   { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
1719   { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
1720   { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
1721   { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
1722   { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
1723   { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1724   { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
1725   { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
1726   { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
1727   { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1728   { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
1729 };
1730 
1731 static const struct sh_minor_opcode sh_opcode3[] =
1732 {
1733   { MAP (sh_opcode30), 0xf00f }
1734 };
1735 
1736 static const struct sh_opcode sh_opcode40[] =
1737 {
1738   { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
1739   { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
1740   { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
1741   { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
1742   { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
1743   { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
1744   { 0x4008, SETS1 | USES1 },			/* shll2 rn */
1745   { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
1746   { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
1747   { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
1748   { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
1749   { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
1750   { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
1751   { 0x4014, SETSSP | USES1 },			/* setrc rm */
1752   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1753   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1754   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1755   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1756   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1757   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1758   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1759   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1760   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1761   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1762   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1763   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1764   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1765   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1766   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1767   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1768   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1769   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1770   { 0x405a, SETSSP | USES1 },			/* lds.l rm,fpul */
1771   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1772   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1773   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1774   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1775   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1776   { 0x407a, SETSSP | USES1 },			/* lds.l rm,a0 */
1777   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1778   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1779   { 0x408a, SETSSP | USES1 },			/* lds.l rm,x0 */
1780   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1781   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1782   { 0x409a, SETSSP | USES1 },			/* lds.l rm,x1 */
1783   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1784   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1785   { 0x40aa, SETSSP | USES1 },			/* lds.l rm,y0 */
1786   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1787   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1788   { 0x40ba, SETSSP | USES1 }			/* lds.l rm,y1 */
1789 };
1790 
1791 static const struct sh_opcode sh_opcode41[] =
1792 {
1793   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1794   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1795   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1796   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1797   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1798   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1799 };
1800 
1801 static const struct sh_minor_opcode sh_opcode4[] =
1802 {
1803   { MAP (sh_opcode40), 0xf0ff },
1804   { MAP (sh_opcode41), 0xf00f }
1805 };
1806 
1807 static const struct sh_opcode sh_opcode50[] =
1808 {
1809   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1810 };
1811 
1812 static const struct sh_minor_opcode sh_opcode5[] =
1813 {
1814   { MAP (sh_opcode50), 0xf000 }
1815 };
1816 
1817 static const struct sh_opcode sh_opcode60[] =
1818 {
1819   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1820   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1821   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1822   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1823   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1824   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1825   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1826   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1827   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1828   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1829   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1830   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1831   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1832   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1833   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1834   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1835 };
1836 
1837 static const struct sh_minor_opcode sh_opcode6[] =
1838 {
1839   { MAP (sh_opcode60), 0xf00f }
1840 };
1841 
1842 static const struct sh_opcode sh_opcode70[] =
1843 {
1844   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1845 };
1846 
1847 static const struct sh_minor_opcode sh_opcode7[] =
1848 {
1849   { MAP (sh_opcode70), 0xf000 }
1850 };
1851 
1852 static const struct sh_opcode sh_opcode80[] =
1853 {
1854   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1855   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1856   { 0x8200, SETSSP },			/* setrc #imm */
1857   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1858   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rn),r0 */
1859   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1860   { 0x8900, BRANCH | USESSP },		/* bt label */
1861   { 0x8b00, BRANCH | USESSP },		/* bf label */
1862   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1863   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1864   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1865   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1866 };
1867 
1868 static const struct sh_minor_opcode sh_opcode8[] =
1869 {
1870   { MAP (sh_opcode80), 0xff00 }
1871 };
1872 
1873 static const struct sh_opcode sh_opcode90[] =
1874 {
1875   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1876 };
1877 
1878 static const struct sh_minor_opcode sh_opcode9[] =
1879 {
1880   { MAP (sh_opcode90), 0xf000 }
1881 };
1882 
1883 static const struct sh_opcode sh_opcodea0[] =
1884 {
1885   { 0xa000, BRANCH | DELAY }	/* bra label */
1886 };
1887 
1888 static const struct sh_minor_opcode sh_opcodea[] =
1889 {
1890   { MAP (sh_opcodea0), 0xf000 }
1891 };
1892 
1893 static const struct sh_opcode sh_opcodeb0[] =
1894 {
1895   { 0xb000, BRANCH | DELAY }	/* bsr label */
1896 };
1897 
1898 static const struct sh_minor_opcode sh_opcodeb[] =
1899 {
1900   { MAP (sh_opcodeb0), 0xf000 }
1901 };
1902 
1903 static const struct sh_opcode sh_opcodec0[] =
1904 {
1905   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1906   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1907   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1908   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1909   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1910   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1911   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1912   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1913   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1914   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1915   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1916   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1917   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1918   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1919   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1920   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1921 };
1922 
1923 static const struct sh_minor_opcode sh_opcodec[] =
1924 {
1925   { MAP (sh_opcodec0), 0xff00 }
1926 };
1927 
1928 static const struct sh_opcode sh_opcoded0[] =
1929 {
1930   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1931 };
1932 
1933 static const struct sh_minor_opcode sh_opcoded[] =
1934 {
1935   { MAP (sh_opcoded0), 0xf000 }
1936 };
1937 
1938 static const struct sh_opcode sh_opcodee0[] =
1939 {
1940   { 0xe000, SETS1 }		/* mov #imm,rn */
1941 };
1942 
1943 static const struct sh_minor_opcode sh_opcodee[] =
1944 {
1945   { MAP (sh_opcodee0), 0xf000 }
1946 };
1947 
1948 static const struct sh_opcode sh_opcodef0[] =
1949 {
1950   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1951   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1952   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1953   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1954   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1955   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1956   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1957   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1958   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1959   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1960   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1961   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1962   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1963   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1964 };
1965 
1966 static const struct sh_opcode sh_opcodef1[] =
1967 {
1968   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1969   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1970   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1971   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1972   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1973   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1974   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1975   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1976   { 0xf08d, SETSF1 },		/* fldi0 fn */
1977   { 0xf09d, SETSF1 }		/* fldi1 fn */
1978 };
1979 
1980 static const struct sh_minor_opcode sh_opcodef[] =
1981 {
1982   { MAP (sh_opcodef0), 0xf00f },
1983   { MAP (sh_opcodef1), 0xf0ff }
1984 };
1985 
1986 static struct sh_major_opcode sh_opcodes[] =
1987 {
1988   { MAP (sh_opcode0) },
1989   { MAP (sh_opcode1) },
1990   { MAP (sh_opcode2) },
1991   { MAP (sh_opcode3) },
1992   { MAP (sh_opcode4) },
1993   { MAP (sh_opcode5) },
1994   { MAP (sh_opcode6) },
1995   { MAP (sh_opcode7) },
1996   { MAP (sh_opcode8) },
1997   { MAP (sh_opcode9) },
1998   { MAP (sh_opcodea) },
1999   { MAP (sh_opcodeb) },
2000   { MAP (sh_opcodec) },
2001   { MAP (sh_opcoded) },
2002   { MAP (sh_opcodee) },
2003   { MAP (sh_opcodef) }
2004 };
2005 
2006 /* The double data transfer / parallel processing insns are not
2007    described here.  This will cause sh_align_load_span to leave them alone.  */
2008 
2009 static const struct sh_opcode sh_dsp_opcodef0[] =
2010 {
2011   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2012   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2013   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2014   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2015   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2016   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2017   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2018   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2019 };
2020 
2021 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2022 {
2023   { MAP (sh_dsp_opcodef0), 0xfc0d }
2024 };
2025 
2026 /* Given an instruction, return a pointer to the corresponding
2027    sh_opcode structure.  Return NULL if the instruction is not
2028    recognized.  */
2029 
2030 static const struct sh_opcode *
sh_insn_info (insn)
2032      unsigned int insn;
2033 {
2034   const struct sh_major_opcode *maj;
2035   const struct sh_minor_opcode *min, *minend;
2036 
2037   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2038   min = maj->minor_opcodes;
2039   minend = min + maj->count;
2040   for (; min < minend; min++)
2041     {
2042       unsigned int l;
2043       const struct sh_opcode *op, *opend;
2044 
2045       l = insn & min->mask;
2046       op = min->opcodes;
2047       opend = op + min->count;
2048 
2049       /* Since the opcodes tables are sorted, we could use a binary
2050          search here if the count were above some cutoff value.  */
2051       for (; op < opend; op++)
2052 	if (op->opcode == l)
2053 	  return op;
2054     }
2055 
2056   return NULL;
2057 }
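
/* Illustrative sketch only (kept out of the build, names assumed for
   the example): how the lookup above behaves.  For insn 0x353c
   ("add r3,r5") the major index is 3, and masking with the single
   minor mask 0xf00f yields 0x300c, the "add rm,rn" entry of
   sh_opcode30.  */
#if 0
static void
sh_insn_info_example (void)
{
  const struct sh_opcode *op = sh_insn_info (0x353c);

  /* op now points at { 0x300c, SETS1 | USES1 | USES2 }, so
     (op->flags & (LOAD | STORE)) == 0: this insn is neither a load
     nor a store.  */
}
#endif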
2058 
/* See whether an instruction uses or sets a general purpose register.  */
2060 
2061 static bfd_boolean
sh_insn_uses_or_sets_reg (insn, op, reg)
2063      unsigned int insn;
2064      const struct sh_opcode *op;
2065      unsigned int reg;
2066 {
2067   if (sh_insn_uses_reg (insn, op, reg))
2068     return TRUE;
2069 
2070   return sh_insn_sets_reg (insn, op, reg);
2071 }
2072 
2073 /* See whether an instruction uses a general purpose register.  */
2074 
2075 static bfd_boolean
sh_insn_uses_reg (insn, op, reg)
2077      unsigned int insn;
2078      const struct sh_opcode *op;
2079      unsigned int reg;
2080 {
2081   unsigned int f;
2082 
2083   f = op->flags;
2084 
2085   if ((f & USES1) != 0
2086       && USES1_REG (insn) == reg)
2087     return TRUE;
2088   if ((f & USES2) != 0
2089       && USES2_REG (insn) == reg)
2090     return TRUE;
2091   if ((f & USESR0) != 0
2092       && reg == 0)
2093     return TRUE;
2094   if ((f & USESAS) && reg == USESAS_REG (insn))
2095     return TRUE;
2096   if ((f & USESR8) && reg == 8)
2097     return TRUE;
2098 
2099   return FALSE;
2100 }
2101 
2102 /* See whether an instruction sets a general purpose register.  */
2103 
2104 static bfd_boolean
sh_insn_sets_reg (insn, op, reg)
2106      unsigned int insn;
2107      const struct sh_opcode *op;
2108      unsigned int reg;
2109 {
2110   unsigned int f;
2111 
2112   f = op->flags;
2113 
2114   if ((f & SETS1) != 0
2115       && SETS1_REG (insn) == reg)
2116     return TRUE;
2117   if ((f & SETS2) != 0
2118       && SETS2_REG (insn) == reg)
2119     return TRUE;
2120   if ((f & SETSR0) != 0
2121       && reg == 0)
2122     return TRUE;
2123   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2124     return TRUE;
2125 
2126   return FALSE;
2127 }
2128 
/* See whether an instruction uses or sets a floating point register.  */
2130 
2131 static bfd_boolean
sh_insn_uses_or_sets_freg (insn, op, reg)
2133      unsigned int insn;
2134      const struct sh_opcode *op;
2135      unsigned int reg;
2136 {
2137   if (sh_insn_uses_freg (insn, op, reg))
2138     return TRUE;
2139 
2140   return sh_insn_sets_freg (insn, op, reg);
2141 }
2142 
2143 /* See whether an instruction uses a floating point register.  */
2144 
2145 static bfd_boolean
sh_insn_uses_freg (insn, op, freg)
2147      unsigned int insn;
2148      const struct sh_opcode *op;
2149      unsigned int freg;
2150 {
2151   unsigned int f;
2152 
2153   f = op->flags;
2154 
  /* We can't tell whether this is a double-precision insn, so play it
     safe and assume that it might be.  That means we must not only
     test FREG against itself, but also match an even FREG against
     FREG+1 (in case the insn touches only the low half of a double
     precision value) and an odd FREG against FREG-1 (in case the
     setting insn set only the low half).  What this boils down to is
     that we have to ignore the lowest bit of the register number:
     e.g. FR4 and FR5 together form one double precision register
     pair, so a reference to either may clash with the other.  */
2163 
2164   if ((f & USESF1) != 0
2165       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2166     return TRUE;
2167   if ((f & USESF2) != 0
2168       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2169     return TRUE;
2170   if ((f & USESF0) != 0
2171       && freg == 0)
2172     return TRUE;
2173 
2174   return FALSE;
2175 }
2176 
2177 /* See whether an instruction sets a floating point register.  */
2178 
2179 static bfd_boolean
sh_insn_sets_freg (insn, op, freg)
2181      unsigned int insn;
2182      const struct sh_opcode *op;
2183      unsigned int freg;
2184 {
2185   unsigned int f;
2186 
2187   f = op->flags;
2188 
  /* As in sh_insn_uses_freg: we can't tell whether this is a
     double-precision insn, so play it safe and ignore the lowest bit
     of the register number, treating an even/odd pair such as FR4 and
     FR5 as a single double precision register.  */
2197 
2198   if ((f & SETSF1) != 0
2199       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2200     return TRUE;
2201 
2202   return FALSE;
2203 }
2204 
2205 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2206    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2207    This should return TRUE if there is a conflict, or FALSE if the
2208    instructions can be swapped safely.  */
2209 
2210 static bfd_boolean
sh_insns_conflict (i1, op1, i2, op2)
2212      unsigned int i1;
2213      const struct sh_opcode *op1;
2214      unsigned int i2;
2215      const struct sh_opcode *op2;
2216 {
2217   unsigned int f1, f2;
2218 
2219   f1 = op1->flags;
2220   f2 = op2->flags;
2221 
2222   /* Load of fpscr conflicts with floating point operations.
2223      FIXME: shouldn't test raw opcodes here.  */
2224   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2225       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2226     return TRUE;
2227 
2228   if ((f1 & (BRANCH | DELAY)) != 0
2229       || (f2 & (BRANCH | DELAY)) != 0)
2230     return TRUE;
2231 
2232   if (((f1 | f2) & SETSSP)
2233       && (f1 & (SETSSP | USESSP))
2234       && (f2 & (SETSSP | USESSP)))
2235     return TRUE;
2236 
2237   if ((f1 & SETS1) != 0
2238       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2239     return TRUE;
2240   if ((f1 & SETS2) != 0
2241       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2242     return TRUE;
2243   if ((f1 & SETSR0) != 0
2244       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2245     return TRUE;
2246   if ((f1 & SETSAS)
2247       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2248     return TRUE;
2249   if ((f1 & SETSF1) != 0
2250       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2251     return TRUE;
2252 
2253   if ((f2 & SETS1) != 0
2254       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2255     return TRUE;
2256   if ((f2 & SETS2) != 0
2257       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2258     return TRUE;
2259   if ((f2 & SETSR0) != 0
2260       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2261     return TRUE;
2262   if ((f2 & SETSAS)
2263       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2264     return TRUE;
2265   if ((f2 & SETSF1) != 0
2266       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2267     return TRUE;
2268 
2269   /* The instructions do not conflict.  */
2270   return FALSE;
2271 }
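
/* A worked example, purely illustrative: with i1 == 0x6532, assumed
   to encode "mov.l @r3,r5" (LOAD | SETS1 | USES2), and i2 == 0x375c,
   assumed to encode "add r5,r7" (SETS1 | USES1 | USES2), the function
   above returns TRUE because i1 sets r5 and i2 uses r5
   (USES2_REG (i2) == 5).  With i2 == 0x371c ("add r1,r7") instead, no
   register is shared and the pair may be swapped.  */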
2272 
2273 /* I1 is a load instruction, and I2 is some other instruction.  Return
2274    TRUE if I1 loads a register which I2 uses.  */
2275 
2276 static bfd_boolean
sh_load_use (i1, op1, i2, op2)
2278      unsigned int i1;
2279      const struct sh_opcode *op1;
2280      unsigned int i2;
2281      const struct sh_opcode *op2;
2282 {
2283   unsigned int f1;
2284 
2285   f1 = op1->flags;
2286 
2287   if ((f1 & LOAD) == 0)
2288     return FALSE;
2289 
2290   /* If both SETS1 and SETSSP are set, that means a load to a special
2291      register using postincrement addressing mode, which we don't care
2292      about here.  */
2293   if ((f1 & SETS1) != 0
2294       && (f1 & SETSSP) == 0
2295       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2296     return TRUE;
2297 
2298   if ((f1 & SETSR0) != 0
2299       && sh_insn_uses_reg (i2, op2, 0))
2300     return TRUE;
2301 
2302   if ((f1 & SETSF1) != 0
2303       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2304     return TRUE;
2305 
2306   return FALSE;
2307 }
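
/* Worked example, purely illustrative: with i1 == 0x6532, assumed to
   encode "mov.l @r3,r5", and i2 == 0x375c, assumed to encode
   "add r5,r7", this returns TRUE: i1 is a LOAD which sets r5 and i2
   uses r5, so executing i2 right after i1 would stall on the loaded
   value.  */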
2308 
2309 /* Try to align loads and stores within a span of memory.  This is
2310    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2311    the BFD and section we are examining.  CONTENTS is the contents of
2312    the section.  SWAP is the routine to call to swap two instructions.
2313    RELOCS is a pointer to the internal relocation information, to be
2314    passed to SWAP.  PLABEL is a pointer to the current label in a
2315    sorted list of labels; LABEL_END is the end of the list.  START and
2316    STOP are the range of memory to examine.  If a swap is made,
2317    *PSWAPPED is set to TRUE.  */
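
/* Illustrative sketch of the transformation (the offsets and insns
   are assumptions for the example): given

       0:  add r1,r2        <- not a load/store, no conflict, no label
       2:  mov.l @r3,r5     <- load sitting on a 2 mod 4 boundary

   the two instructions are swapped so that the load lands on a four
   byte boundary, which is what the relaxation pass prefers (see the
   comment above sh_relax_section).  */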
2318 
2319 #ifdef COFF_WITH_PE
2320 static
2321 #endif
2322 bfd_boolean
_bfd_sh_align_load_span (abfd, sec, contents, swap, relocs,
2324 			 plabel, label_end, start, stop, pswapped)
2325      bfd *abfd;
2326      asection *sec;
2327      bfd_byte *contents;
2328      bfd_boolean (*swap) PARAMS ((bfd *, asection *, PTR, bfd_byte *, bfd_vma));
2329      PTR relocs;
2330      bfd_vma **plabel;
2331      bfd_vma *label_end;
2332      bfd_vma start;
2333      bfd_vma stop;
2334      bfd_boolean *pswapped;
2335 {
2336   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2337 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2338   bfd_vma i;
2339 
2340   /* The SH4 has a Harvard architecture, hence aligning loads is not
2341      desirable.  In fact, it is counter-productive, since it interferes
2342      with the schedules generated by the compiler.  */
2343   if (abfd->arch_info->mach == bfd_mach_sh4)
2344     return TRUE;
2345 
2346   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2347      instructions.  */
2348   if (dsp)
2349     {
2350       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef[0];
2352     }
2353 
2354   /* Instructions should be aligned on 2 byte boundaries.  */
2355   if ((start & 1) == 1)
2356     ++start;
2357 
2358   /* Now look through the unaligned addresses.  */
2359   i = start;
2360   if ((i & 2) == 0)
2361     i += 2;
2362   for (; i < stop; i += 4)
2363     {
2364       unsigned int insn;
2365       const struct sh_opcode *op;
2366       unsigned int prev_insn = 0;
2367       const struct sh_opcode *prev_op = NULL;
2368 
2369       insn = bfd_get_16 (abfd, contents + i);
2370       op = sh_insn_info (insn);
2371       if (op == NULL
2372 	  || (op->flags & (LOAD | STORE)) == 0)
2373 	continue;
2374 
2375       /* This is a load or store which is not on a four byte boundary.  */
2376 
2377       while (*plabel < label_end && **plabel < i)
2378 	++*plabel;
2379 
2380       if (i > start)
2381 	{
2382 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2383 	  /* If INSN is the field b of a parallel processing insn, it is not
2384 	     a load / store after all.  Note that the test here might mistake
2385 	     the field_b of a pcopy insn for the starting code of a parallel
2386 	     processing insn; this might miss a swapping opportunity, but at
2387 	     least we're on the safe side.  */
2388 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2389 	    continue;
2390 
2391 	  /* Check if prev_insn is actually the field b of a parallel
2392 	     processing insn.  Again, this can give a spurious match
2393 	     after a pcopy.  */
2394 	  if (dsp && i - 2 > start)
2395 	    {
2396 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2397 
2398 	      if ((pprev_insn & 0xfc00) == 0xf800)
2399 		prev_op = NULL;
2400 	      else
2401 		prev_op = sh_insn_info (prev_insn);
2402 	    }
2403 	  else
2404 	    prev_op = sh_insn_info (prev_insn);
2405 
2406 	  /* If the load/store instruction is in a delay slot, we
2407 	     can't swap.  */
2408 	  if (prev_op == NULL
2409 	      || (prev_op->flags & DELAY) != 0)
2410 	    continue;
2411 	}
2412       if (i > start
2413 	  && (*plabel >= label_end || **plabel != i)
2414 	  && prev_op != NULL
2415 	  && (prev_op->flags & (LOAD | STORE)) == 0
2416 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2417 	{
2418 	  bfd_boolean ok;
2419 
2420 	  /* The load/store instruction does not have a label, and
2421 	     there is a previous instruction; PREV_INSN is not
2422 	     itself a load/store instruction, and PREV_INSN and
2423 	     INSN do not conflict.  */
2424 
2425 	  ok = TRUE;
2426 
2427 	  if (i >= start + 4)
2428 	    {
2429 	      unsigned int prev2_insn;
2430 	      const struct sh_opcode *prev2_op;
2431 
2432 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2433 	      prev2_op = sh_insn_info (prev2_insn);
2434 
2435 	      /* If the instruction before PREV_INSN has a delay
2436 		 slot--that is, PREV_INSN is in a delay slot--we
2437 		 can not swap.  */
2438 	      if (prev2_op == NULL
2439 		  || (prev2_op->flags & DELAY) != 0)
2440 		ok = FALSE;
2441 
2442 	      /* If the instruction before PREV_INSN is a load,
2443 		 and it sets a register which INSN uses, then
2444 		 putting INSN immediately after PREV_INSN will
2445 		 cause a pipeline bubble, so there is no point to
2446 		 making the swap.  */
2447 	      if (ok
2448 		  && (prev2_op->flags & LOAD) != 0
2449 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2450 		ok = FALSE;
2451 	    }
2452 
2453 	  if (ok)
2454 	    {
2455 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2456 		return FALSE;
2457 	      *pswapped = TRUE;
2458 	      continue;
2459 	    }
2460 	}
2461 
2462       while (*plabel < label_end && **plabel < i + 2)
2463 	++*plabel;
2464 
2465       if (i + 2 < stop
2466 	  && (*plabel >= label_end || **plabel != i + 2))
2467 	{
2468 	  unsigned int next_insn;
2469 	  const struct sh_opcode *next_op;
2470 
2471 	  /* There is an instruction after the load/store
2472 	     instruction, and it does not have a label.  */
2473 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2474 	  next_op = sh_insn_info (next_insn);
2475 	  if (next_op != NULL
2476 	      && (next_op->flags & (LOAD | STORE)) == 0
2477 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2478 	    {
2479 	      bfd_boolean ok;
2480 
2481 	      /* NEXT_INSN is not itself a load/store instruction,
2482 		 and it does not conflict with INSN.  */
2483 
2484 	      ok = TRUE;
2485 
2486 	      /* If PREV_INSN is a load, and it sets a register
2487 		 which NEXT_INSN uses, then putting NEXT_INSN
2488 		 immediately after PREV_INSN will cause a pipeline
2489 		 bubble, so there is no reason to make this swap.  */
2490 	      if (prev_op != NULL
2491 		  && (prev_op->flags & LOAD) != 0
2492 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2493 		ok = FALSE;
2494 
2495 	      /* If INSN is a load, and it sets a register which
2496 		 the insn after NEXT_INSN uses, then doing the
2497 		 swap will cause a pipeline bubble, so there is no
2498 		 reason to make the swap.  However, if the insn
2499 		 after NEXT_INSN is itself a load or store
2500 		 instruction, then it is misaligned, so
2501 		 optimistically hope that it will be swapped
2502 		 itself, and just live with the pipeline bubble if
2503 		 it isn't.  */
2504 	      if (ok
2505 		  && i + 4 < stop
2506 		  && (op->flags & LOAD) != 0)
2507 		{
2508 		  unsigned int next2_insn;
2509 		  const struct sh_opcode *next2_op;
2510 
2511 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2512 		  next2_op = sh_insn_info (next2_insn);
2513 		  if ((next2_op->flags & (LOAD | STORE)) == 0
2514 		      && sh_load_use (insn, op, next2_insn, next2_op))
2515 		    ok = FALSE;
2516 		}
2517 
2518 	      if (ok)
2519 		{
2520 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2521 		    return FALSE;
2522 		  *pswapped = TRUE;
2523 		  continue;
2524 		}
2525 	    }
2526 	}
2527     }
2528 
2529   return TRUE;
2530 }
2531 #endif /* not COFF_IMAGE_WITH_PE */
2532 
2533 /* Look for loads and stores which we can align to four byte
2534    boundaries.  See the longer comment above sh_relax_section for why
2535    this is desirable.  This sets *PSWAPPED if some instruction was
2536    swapped.  */
2537 
2538 static bfd_boolean
sh_align_loads (abfd, sec, internal_relocs, contents, pswapped)
2540      bfd *abfd;
2541      asection *sec;
2542      struct internal_reloc *internal_relocs;
2543      bfd_byte *contents;
2544      bfd_boolean *pswapped;
2545 {
2546   struct internal_reloc *irel, *irelend;
2547   bfd_vma *labels = NULL;
2548   bfd_vma *label, *label_end;
2549   bfd_size_type amt;
2550 
2551   *pswapped = FALSE;
2552 
2553   irelend = internal_relocs + sec->reloc_count;
2554 
2555   /* Get all the addresses with labels on them.  */
2556   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2557   labels = (bfd_vma *) bfd_malloc (amt);
2558   if (labels == NULL)
2559     goto error_return;
2560   label_end = labels;
2561   for (irel = internal_relocs; irel < irelend; irel++)
2562     {
2563       if (irel->r_type == R_SH_LABEL)
2564 	{
2565 	  *label_end = irel->r_vaddr - sec->vma;
2566 	  ++label_end;
2567 	}
2568     }
2569 
2570   /* Note that the assembler currently always outputs relocs in
2571      address order.  If that ever changes, this code will need to sort
2572      the label values and the relocs.  */
2573 
2574   label = labels;
2575 
2576   for (irel = internal_relocs; irel < irelend; irel++)
2577     {
2578       bfd_vma start, stop;
2579 
2580       if (irel->r_type != R_SH_CODE)
2581 	continue;
2582 
2583       start = irel->r_vaddr - sec->vma;
2584 
2585       for (irel++; irel < irelend; irel++)
2586 	if (irel->r_type == R_SH_DATA)
2587 	  break;
2588       if (irel < irelend)
2589 	stop = irel->r_vaddr - sec->vma;
2590       else
2591 	stop = sec->size;
2592 
2593       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2594 				     (PTR) internal_relocs, &label,
2595 				     label_end, start, stop, pswapped))
2596 	goto error_return;
2597     }
2598 
2599   free (labels);
2600 
2601   return TRUE;
2602 
2603  error_return:
2604   if (labels != NULL)
2605     free (labels);
2606   return FALSE;
2607 }
2608 
2609 /* Swap two SH instructions.  */
2610 
2611 static bfd_boolean
sh_swap_insns (abfd, sec, relocs, contents, addr)
2613      bfd *abfd;
2614      asection *sec;
2615      PTR relocs;
2616      bfd_byte *contents;
2617      bfd_vma addr;
2618 {
2619   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2620   unsigned short i1, i2;
2621   struct internal_reloc *irel, *irelend;
2622 
2623   /* Swap the instructions themselves.  */
2624   i1 = bfd_get_16 (abfd, contents + addr);
2625   i2 = bfd_get_16 (abfd, contents + addr + 2);
2626   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2627   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2628 
2629   /* Adjust all reloc addresses.  */
2630   irelend = internal_relocs + sec->reloc_count;
2631   for (irel = internal_relocs; irel < irelend; irel++)
2632     {
2633       int type, add;
2634 
2635       /* There are a few special types of relocs that we don't want to
2636          adjust.  These relocs do not apply to the instruction itself,
2637          but are only associated with the address.  */
2638       type = irel->r_type;
2639       if (type == R_SH_ALIGN
2640 	  || type == R_SH_CODE
2641 	  || type == R_SH_DATA
2642 	  || type == R_SH_LABEL)
2643 	continue;
2644 
2645       /* If an R_SH_USES reloc points to one of the addresses being
2646          swapped, we must adjust it.  It would be incorrect to do this
2647          for a jump, though, since we want to execute both
2648          instructions after the jump.  (We have avoided swapping
2649          around a label, so the jump will not wind up executing an
2650          instruction it shouldn't).  */
2651       if (type == R_SH_USES)
2652 	{
2653 	  bfd_vma off;
2654 
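	  /* For instance (with sec->vma taken as 0): a reloc at
	     ADDR - 6 with r_offset 2 points at ADDR - 6 + 4 + 2 ==
	     ADDR; the insn it refers to has moved to ADDR + 2, so
	     r_offset must grow by 2.  */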
2655 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2656 	  if (off == addr)
2657 	    irel->r_offset += 2;
2658 	  else if (off == addr + 2)
2659 	    irel->r_offset -= 2;
2660 	}
2661 
2662       if (irel->r_vaddr - sec->vma == addr)
2663 	{
2664 	  irel->r_vaddr += 2;
2665 	  add = -2;
2666 	}
2667       else if (irel->r_vaddr - sec->vma == addr + 2)
2668 	{
2669 	  irel->r_vaddr -= 2;
2670 	  add = 2;
2671 	}
2672       else
2673 	add = 0;
2674 
2675       if (add != 0)
2676 	{
2677 	  bfd_byte *loc;
2678 	  unsigned short insn, oinsn;
2679 	  bfd_boolean overflow;
2680 
2681 	  loc = contents + irel->r_vaddr - sec->vma;
2682 	  overflow = FALSE;
2683 	  switch (type)
2684 	    {
2685 	    default:
2686 	      break;
2687 
2688 	    case R_SH_PCDISP8BY2:
2689 	    case R_SH_PCRELIMM8BY2:
2690 	      insn = bfd_get_16 (abfd, loc);
2691 	      oinsn = insn;
2692 	      insn += add / 2;
2693 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2694 		overflow = TRUE;
2695 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2696 	      break;
2697 
2698 	    case R_SH_PCDISP:
2699 	      insn = bfd_get_16 (abfd, loc);
2700 	      oinsn = insn;
2701 	      insn += add / 2;
2702 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2703 		overflow = TRUE;
2704 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2705 	      break;
2706 
2707 	    case R_SH_PCRELIMM8BY4:
	      /* This reloc ignores the least significant 3 bits of
                 the program counter before adding in the offset.
                 This means that if ADDR is on a four byte boundary,
                 the swap will not affect the offset.  If ADDR is not
                 on a four byte boundary, then the instruction will be
                 crossing a four byte boundary, and must be adjusted.  */
2714 	      if ((addr & 3) != 0)
2715 		{
2716 		  insn = bfd_get_16 (abfd, loc);
2717 		  oinsn = insn;
2718 		  insn += add / 2;
2719 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2720 		    overflow = TRUE;
2721 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2722 		}
2723 
2724 	      break;
2725 	    }
2726 
2727 	  if (overflow)
2728 	    {
2729 	      ((*_bfd_error_handler)
2730 	       ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2731 		abfd, (unsigned long) irel->r_vaddr));
2732 	      bfd_set_error (bfd_error_bad_value);
2733 	      return FALSE;
2734 	    }
2735 	}
2736     }
2737 
2738   return TRUE;
2739 }
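
/* Worked example, purely illustrative (sec->vma taken as 0): if ADDR
   is 0x100 and a branch carrying an R_SH_PCDISP8BY2 reloc sits at
   0x102, the swap moves the branch to 0x100, so its r_vaddr is
   reduced by 2 and its displacement field, counted in two byte units,
   grows by add / 2 == 1 to keep the same target.  */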
2740 
2741 /* This is a modification of _bfd_coff_generic_relocate_section, which
2742    will handle SH relaxing.  */
2743 
2744 static bfd_boolean
sh_relocate_section (output_bfd, info, input_bfd, input_section, contents,
2746 		     relocs, syms, sections)
2747      bfd *output_bfd ATTRIBUTE_UNUSED;
2748      struct bfd_link_info *info;
2749      bfd *input_bfd;
2750      asection *input_section;
2751      bfd_byte *contents;
2752      struct internal_reloc *relocs;
2753      struct internal_syment *syms;
2754      asection **sections;
2755 {
2756   struct internal_reloc *rel;
2757   struct internal_reloc *relend;
2758 
2759   rel = relocs;
2760   relend = rel + input_section->reloc_count;
2761   for (; rel < relend; rel++)
2762     {
2763       long symndx;
2764       struct coff_link_hash_entry *h;
2765       struct internal_syment *sym;
2766       bfd_vma addend;
2767       bfd_vma val;
2768       reloc_howto_type *howto;
2769       bfd_reloc_status_type rstat;
2770 
2771       /* Almost all relocs have to do with relaxing.  If any work must
2772          be done for them, it has been done in sh_relax_section.  */
2773       if (rel->r_type != R_SH_IMM32
2774 #ifdef COFF_WITH_PE
2775 	  && rel->r_type != R_SH_IMM32CE
2776 	  && rel->r_type != R_SH_IMAGEBASE
2777 #endif
2778 	  && rel->r_type != R_SH_PCDISP)
2779 	continue;
2780 
2781       symndx = rel->r_symndx;
2782 
2783       if (symndx == -1)
2784 	{
2785 	  h = NULL;
2786 	  sym = NULL;
2787 	}
2788       else
2789 	{
2790 	  if (symndx < 0
2791 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2792 	    {
2793 	      (*_bfd_error_handler)
2794 		("%B: illegal symbol index %ld in relocs",
2795 		 input_bfd, symndx);
2796 	      bfd_set_error (bfd_error_bad_value);
2797 	      return FALSE;
2798 	    }
2799 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2800 	  sym = syms + symndx;
2801 	}
2802 
2803       if (sym != NULL && sym->n_scnum != 0)
2804 	addend = - sym->n_value;
2805       else
2806 	addend = 0;
2807 
2808       if (rel->r_type == R_SH_PCDISP)
2809 	addend -= 4;
2810 
2811       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2812 	howto = NULL;
2813       else
2814 	howto = &sh_coff_howtos[rel->r_type];
2815 
2816       if (howto == NULL)
2817 	{
2818 	  bfd_set_error (bfd_error_bad_value);
2819 	  return FALSE;
2820 	}
2821 
2822 #ifdef COFF_WITH_PE
2823       if (rel->r_type == R_SH_IMAGEBASE)
2824 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2825 #endif
2826 
2827       val = 0;
2828 
2829       if (h == NULL)
2830 	{
2831 	  asection *sec;
2832 
2833 	  /* There is nothing to do for an internal PCDISP reloc.  */
2834 	  if (rel->r_type == R_SH_PCDISP)
2835 	    continue;
2836 
2837 	  if (symndx == -1)
2838 	    {
2839 	      sec = bfd_abs_section_ptr;
2840 	      val = 0;
2841 	    }
2842 	  else
2843 	    {
2844 	      sec = sections[symndx];
2845               val = (sec->output_section->vma
2846 		     + sec->output_offset
2847 		     + sym->n_value
2848 		     - sec->vma);
2849 	    }
2850 	}
2851       else
2852 	{
2853 	  if (h->root.type == bfd_link_hash_defined
2854 	      || h->root.type == bfd_link_hash_defweak)
2855 	    {
2856 	      asection *sec;
2857 
2858 	      sec = h->root.u.def.section;
2859 	      val = (h->root.u.def.value
2860 		     + sec->output_section->vma
2861 		     + sec->output_offset);
2862 	    }
2863 	  else if (! info->relocatable)
2864 	    {
2865 	      if (! ((*info->callbacks->undefined_symbol)
2866 		     (info, h->root.root.string, input_bfd, input_section,
2867 		      rel->r_vaddr - input_section->vma, TRUE)))
2868 		return FALSE;
2869 	    }
2870 	}
2871 
2872       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2873 					contents,
2874 					rel->r_vaddr - input_section->vma,
2875 					val, addend);
2876 
2877       switch (rstat)
2878 	{
2879 	default:
2880 	  abort ();
2881 	case bfd_reloc_ok:
2882 	  break;
2883 	case bfd_reloc_overflow:
2884 	  {
2885 	    const char *name;
2886 	    char buf[SYMNMLEN + 1];
2887 
2888 	    if (symndx == -1)
2889 	      name = "*ABS*";
2890 	    else if (h != NULL)
2891 	      name = NULL;
2892 	    else if (sym->_n._n_n._n_zeroes == 0
2893 		     && sym->_n._n_n._n_offset != 0)
2894 	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2895 	    else
2896 	      {
2897  		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2898 		buf[SYMNMLEN] = '\0';
2899 		name = buf;
2900 	      }
2901 
2902 	    if (! ((*info->callbacks->reloc_overflow)
2903 		   (info, (h ? &h->root : NULL), name, howto->name,
2904 		    (bfd_vma) 0, input_bfd, input_section,
2905 		    rel->r_vaddr - input_section->vma)))
2906 	      return FALSE;
2907 	  }
2908 	}
2909     }
2910 
2911   return TRUE;
2912 }
2913 
2914 /* This is a version of bfd_generic_get_relocated_section_contents
2915    which uses sh_relocate_section.  */
2916 
2917 static bfd_byte *
sh_coff_get_relocated_section_contents (output_bfd, link_info, link_order,
2919 					data, relocatable, symbols)
2920      bfd *output_bfd;
2921      struct bfd_link_info *link_info;
2922      struct bfd_link_order *link_order;
2923      bfd_byte *data;
2924      bfd_boolean relocatable;
2925      asymbol **symbols;
2926 {
2927   asection *input_section = link_order->u.indirect.section;
2928   bfd *input_bfd = input_section->owner;
2929   asection **sections = NULL;
2930   struct internal_reloc *internal_relocs = NULL;
2931   struct internal_syment *internal_syms = NULL;
2932 
2933   /* We only need to handle the case of relaxing, or of having a
2934      particular set of section contents, specially.  */
2935   if (relocatable
2936       || coff_section_data (input_bfd, input_section) == NULL
2937       || coff_section_data (input_bfd, input_section)->contents == NULL)
2938     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2939 						       link_order, data,
2940 						       relocatable,
2941 						       symbols);
2942 
2943   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2944 	  (size_t) input_section->size);
2945 
2946   if ((input_section->flags & SEC_RELOC) != 0
2947       && input_section->reloc_count > 0)
2948     {
2949       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2950       bfd_byte *esym, *esymend;
2951       struct internal_syment *isymp;
2952       asection **secpp;
2953       bfd_size_type amt;
2954 
2955       if (! _bfd_coff_get_external_symbols (input_bfd))
2956 	goto error_return;
2957 
2958       internal_relocs = (_bfd_coff_read_internal_relocs
2959 			 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2960 			  FALSE, (struct internal_reloc *) NULL));
2961       if (internal_relocs == NULL)
2962 	goto error_return;
2963 
2964       amt = obj_raw_syment_count (input_bfd);
2965       amt *= sizeof (struct internal_syment);
2966       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2967       if (internal_syms == NULL)
2968 	goto error_return;
2969 
2970       amt = obj_raw_syment_count (input_bfd);
2971       amt *= sizeof (asection *);
2972       sections = (asection **) bfd_malloc (amt);
2973       if (sections == NULL)
2974 	goto error_return;
2975 
2976       isymp = internal_syms;
2977       secpp = sections;
2978       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2979       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2980       while (esym < esymend)
2981 	{
2982 	  bfd_coff_swap_sym_in (input_bfd, (PTR) esym, (PTR) isymp);
2983 
2984 	  if (isymp->n_scnum != 0)
2985 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2986 	  else
2987 	    {
2988 	      if (isymp->n_value == 0)
2989 		*secpp = bfd_und_section_ptr;
2990 	      else
2991 		*secpp = bfd_com_section_ptr;
2992 	    }
2993 
2994 	  esym += (isymp->n_numaux + 1) * symesz;
2995 	  secpp += isymp->n_numaux + 1;
2996 	  isymp += isymp->n_numaux + 1;
2997 	}
2998 
2999       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
3000 				 input_section, data, internal_relocs,
3001 				 internal_syms, sections))
3002 	goto error_return;
3003 
3004       free (sections);
3005       sections = NULL;
3006       free (internal_syms);
3007       internal_syms = NULL;
3008       free (internal_relocs);
3009       internal_relocs = NULL;
3010     }
3011 
3012   return data;
3013 
3014  error_return:
3015   if (internal_relocs != NULL)
3016     free (internal_relocs);
3017   if (internal_syms != NULL)
3018     free (internal_syms);
3019   if (sections != NULL)
3020     free (sections);
3021   return NULL;
3022 }
3023 
3024 /* The target vectors.  */
3025 
3026 #ifndef TARGET_SHL_SYM
3027 CREATE_BIG_COFF_TARGET_VEC (shcoff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3028 #endif
3029 
3030 #ifdef TARGET_SHL_SYM
3031 #define TARGET_SYM TARGET_SHL_SYM
3032 #else
3033 #define TARGET_SYM shlcoff_vec
3034 #endif
3035 
3036 #ifndef TARGET_SHL_NAME
3037 #define TARGET_SHL_NAME "coff-shl"
3038 #endif
3039 
3040 #ifdef COFF_WITH_PE
3041 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3042 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3043 #else
3044 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3045 			       0, '_', NULL, COFF_SWAP_TABLE)
3046 #endif
3047 
3048 #ifndef TARGET_SHL_SYM
3049 static const bfd_target * coff_small_object_p PARAMS ((bfd *));
3050 static bfd_boolean coff_small_new_section_hook PARAMS ((bfd *, asection *));
3051 /* Some people want versions of the SH COFF target which do not align
3052    to 16 byte boundaries.  We implement that by adding a couple of new
3053    target vectors.  These are just like the ones above, but they
3054    change the default section alignment.  To generate them in the
3055    assembler, use -small.  To use them in the linker, use -b
3056    coff-sh{l}-small and -oformat coff-sh{l}-small.
3057 
3058    Yes, this is a horrible hack.  A general solution for setting
3059    section alignment in COFF is rather complex.  ELF handles this
3060    correctly.  */
3061 
3062 /* Only recognize the small versions if the target was not defaulted.
3063    Otherwise we won't recognize the non default endianness.  */
3064 
3065 static const bfd_target *
coff_small_object_p (abfd)
3067      bfd *abfd;
3068 {
3069   if (abfd->target_defaulted)
3070     {
3071       bfd_set_error (bfd_error_wrong_format);
3072       return NULL;
3073     }
3074   return coff_object_p (abfd);
3075 }
3076 
3077 /* Set the section alignment for the small versions.  */
3078 
3079 static bfd_boolean
coff_small_new_section_hook (abfd, section)
3081      bfd *abfd;
3082      asection *section;
3083 {
3084   if (! coff_new_section_hook (abfd, section))
3085     return FALSE;
3086 
3087   /* We must align to at least a four byte boundary, because longword
3088      accesses must be on a four byte boundary.  */
3089   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3090     section->alignment_power = 2;
3091 
3092   return TRUE;
3093 }
3094 
3095 /* This is copied from bfd_coff_std_swap_table so that we can change
3096    the default section alignment power.  */
3097 
3098 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3099 {
3100   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3101   coff_swap_aux_out, coff_swap_sym_out,
3102   coff_swap_lineno_out, coff_swap_reloc_out,
3103   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3104   coff_swap_scnhdr_out,
3105   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3106 #ifdef COFF_LONG_FILENAMES
3107   TRUE,
3108 #else
3109   FALSE,
3110 #endif
3111 #ifdef COFF_LONG_SECTION_NAMES
3112   TRUE,
3113 #else
3114   FALSE,
3115 #endif
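  /* Default section alignment power: 2, i.e. 2**2 == 4 byte alignment,
     rather than COFF_DEFAULT_SECTION_ALIGNMENT_POWER.  This is the
     field the comment above refers to.  */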
3116   2,
3117 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3118   TRUE,
3119 #else
3120   FALSE,
3121 #endif
3122 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3123   4,
3124 #else
3125   2,
3126 #endif
3127   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3128   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3129   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3130   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3131   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3132   coff_classify_symbol, coff_compute_section_file_positions,
3133   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3134   coff_adjust_symndx, coff_link_add_one_symbol,
3135   coff_link_output_has_begun, coff_final_link_postscript
3136 };
3137 
3138 #define coff_small_close_and_cleanup \
3139   coff_close_and_cleanup
3140 #define coff_small_bfd_free_cached_info \
3141   coff_bfd_free_cached_info
3142 #define coff_small_get_section_contents \
3143   coff_get_section_contents
3144 #define coff_small_get_section_contents_in_window \
3145   coff_get_section_contents_in_window
3146 
3147 extern const bfd_target shlcoff_small_vec;
3148 
3149 const bfd_target shcoff_small_vec =
3150 {
3151   "coff-sh-small",		/* name */
3152   bfd_target_coff_flavour,
3153   BFD_ENDIAN_BIG,		/* data byte order is big */
3154   BFD_ENDIAN_BIG,		/* header byte order is big */
3155 
3156   (HAS_RELOC | EXEC_P |		/* object flags */
3157    HAS_LINENO | HAS_DEBUG |
3158    HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3159 
3160   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3161   '_',				/* leading symbol underscore */
3162   '/',				/* ar_pad_char */
3163   15,				/* ar_max_namelen */
3164   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3165   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3166   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3167   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3168   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3169   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3170 
3171   {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3172      bfd_generic_archive_p, _bfd_dummy_target},
3173   {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3174      bfd_false},
3175   {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3176      _bfd_write_archive_contents, bfd_false},
3177 
3178   BFD_JUMP_TABLE_GENERIC (coff_small),
3179   BFD_JUMP_TABLE_COPY (coff),
3180   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3181   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3182   BFD_JUMP_TABLE_SYMBOLS (coff),
3183   BFD_JUMP_TABLE_RELOCS (coff),
3184   BFD_JUMP_TABLE_WRITE (coff),
3185   BFD_JUMP_TABLE_LINK (coff),
3186   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3187 
3188   & shlcoff_small_vec,
3189 
3190   (PTR) &bfd_coff_small_swap_table
3191 };
3192 
3193 const bfd_target shlcoff_small_vec =
3194 {
3195   "coff-shl-small",		/* name */
3196   bfd_target_coff_flavour,
3197   BFD_ENDIAN_LITTLE,		/* data byte order is little */
  BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3199 
3200   (HAS_RELOC | EXEC_P |		/* object flags */
3201    HAS_LINENO | HAS_DEBUG |
3202    HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3203 
3204   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3205   '_',				/* leading symbol underscore */
3206   '/',				/* ar_pad_char */
3207   15,				/* ar_max_namelen */
3208   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3209   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3210   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3211   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3212   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3213   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3214 
3215   {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3216      bfd_generic_archive_p, _bfd_dummy_target},
3217   {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3218      bfd_false},
3219   {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3220      _bfd_write_archive_contents, bfd_false},
3221 
3222   BFD_JUMP_TABLE_GENERIC (coff_small),
3223   BFD_JUMP_TABLE_COPY (coff),
3224   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3225   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3226   BFD_JUMP_TABLE_SYMBOLS (coff),
3227   BFD_JUMP_TABLE_RELOCS (coff),
3228   BFD_JUMP_TABLE_WRITE (coff),
3229   BFD_JUMP_TABLE_LINK (coff),
3230   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3231 
3232   & shcoff_small_vec,
3233 
3234   (PTR) &bfd_coff_small_swap_table
3235 };
3236 #endif
3237