1 /* BFD back-end for Renesas Super-H COFF binaries.
2    Copyright (C) 1993-2016 Free Software Foundation, Inc.
3    Contributed by Cygnus Support.
4    Written by Steve Chamberlain, <sac@cygnus.com>.
5    Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6 
7    This file is part of BFD, the Binary File Descriptor library.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
22    MA 02110-1301, USA.  */
23 
24 #include "sysdep.h"
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31 
32 #undef  bfd_pe_print_pdata
33 
34 #ifdef COFF_WITH_PE
35 #include "coff/pe.h"
36 
37 #ifndef COFF_IMAGE_WITH_PE
38 static bfd_boolean sh_align_load_span
39   (bfd *, asection *, bfd_byte *,
40    bfd_boolean (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
41    void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *);
42 
43 #define _bfd_sh_align_load_span sh_align_load_span
44 #endif
45 
46 #define	bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata
47 
48 #else
49 
50 #define	bfd_pe_print_pdata   NULL
51 
52 #endif /* COFF_WITH_PE.  */
53 
54 #include "libcoff.h"
55 
56 /* Internal functions.  */
57 
58 #ifdef COFF_WITH_PE
59 /* Can't build import tables with 2**4 alignment.  */
60 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
61 #else
62 /* Default section alignment to 2**4.  */
63 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
64 #endif
65 
66 #ifdef COFF_IMAGE_WITH_PE
67 /* Align PE executables.  */
68 #define COFF_PAGE_SIZE 0x1000
69 #endif
70 
71 /* Generate long file names.  */
72 #define COFF_LONG_FILENAMES
73 
74 #ifdef COFF_WITH_PE
75 /* Return TRUE if this relocation should
76    appear in the output .reloc section.  */
77 
78 static bfd_boolean
79 in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
80 	    reloc_howto_type * howto)
81 {
82   return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
83 }
84 #endif
85 
86 static bfd_reloc_status_type
87 sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
88 static bfd_boolean
89 sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
90 		     bfd_byte *, struct internal_reloc *,
91 		     struct internal_syment *, asection **);
92 static bfd_boolean
93 sh_align_loads (bfd *, asection *, struct internal_reloc *,
94 		bfd_byte *, bfd_boolean *);
95 
96 /* The supported relocations.  There are a lot of relocations defined
97    in coff/internal.h which we do not expect to ever see.  */
98 static reloc_howto_type sh_coff_howtos[] =
99 {
100   EMPTY_HOWTO (0),
101   EMPTY_HOWTO (1),
102 #ifdef COFF_WITH_PE
103   /* Windows CE */
104   HOWTO (R_SH_IMM32CE,		/* type */
105 	 0,			/* rightshift */
106 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
107 	 32,			/* bitsize */
108 	 FALSE,			/* pc_relative */
109 	 0,			/* bitpos */
110 	 complain_overflow_bitfield, /* complain_on_overflow */
111 	 sh_reloc,		/* special_function */
112 	 "r_imm32ce",		/* name */
113 	 TRUE,			/* partial_inplace */
114 	 0xffffffff,		/* src_mask */
115 	 0xffffffff,		/* dst_mask */
116 	 FALSE),		/* pcrel_offset */
117 #else
118   EMPTY_HOWTO (2),
119 #endif
120   EMPTY_HOWTO (3), /* R_SH_PCREL8 */
121   EMPTY_HOWTO (4), /* R_SH_PCREL16 */
122   EMPTY_HOWTO (5), /* R_SH_HIGH8 */
123   EMPTY_HOWTO (6), /* R_SH_IMM24 */
124   EMPTY_HOWTO (7), /* R_SH_LOW16 */
125   EMPTY_HOWTO (8),
126   EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
127 
128   HOWTO (R_SH_PCDISP8BY2,	/* type */
129 	 1,			/* rightshift */
130 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
131 	 8,			/* bitsize */
132 	 TRUE,			/* pc_relative */
133 	 0,			/* bitpos */
134 	 complain_overflow_signed, /* complain_on_overflow */
135 	 sh_reloc,		/* special_function */
136 	 "r_pcdisp8by2",	/* name */
137 	 TRUE,			/* partial_inplace */
138 	 0xff,			/* src_mask */
139 	 0xff,			/* dst_mask */
140 	 TRUE),			/* pcrel_offset */
141 
142   EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
143 
144   HOWTO (R_SH_PCDISP,		/* type */
145 	 1,			/* rightshift */
146 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
147 	 12,			/* bitsize */
148 	 TRUE,			/* pc_relative */
149 	 0,			/* bitpos */
150 	 complain_overflow_signed, /* complain_on_overflow */
151 	 sh_reloc,		/* special_function */
152 	 "r_pcdisp12by2",	/* name */
153 	 TRUE,			/* partial_inplace */
154 	 0xfff,			/* src_mask */
155 	 0xfff,			/* dst_mask */
156 	 TRUE),			/* pcrel_offset */
157 
158   EMPTY_HOWTO (13),
159 
160   HOWTO (R_SH_IMM32,		/* type */
161 	 0,			/* rightshift */
162 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
163 	 32,			/* bitsize */
164 	 FALSE,			/* pc_relative */
165 	 0,			/* bitpos */
166 	 complain_overflow_bitfield, /* complain_on_overflow */
167 	 sh_reloc,		/* special_function */
168 	 "r_imm32",		/* name */
169 	 TRUE,			/* partial_inplace */
170 	 0xffffffff,		/* src_mask */
171 	 0xffffffff,		/* dst_mask */
172 	 FALSE),		/* pcrel_offset */
173 
174   EMPTY_HOWTO (15),
175 #ifdef COFF_WITH_PE
176   HOWTO (R_SH_IMAGEBASE,        /* type */
177 	 0,	                /* rightshift */
178 	 2,	                /* size (0 = byte, 1 = short, 2 = long) */
179 	 32,	                /* bitsize */
180 	 FALSE,	                /* pc_relative */
181 	 0,	                /* bitpos */
182 	 complain_overflow_bitfield, /* complain_on_overflow */
183 	 sh_reloc,       	/* special_function */
184 	 "rva32",	        /* name */
185 	 TRUE,	                /* partial_inplace */
186 	 0xffffffff,            /* src_mask */
187 	 0xffffffff,            /* dst_mask */
188 	 FALSE),                /* pcrel_offset */
189 #else
190   EMPTY_HOWTO (16), /* R_SH_IMM8 */
191 #endif
192   EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
193   EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
194   EMPTY_HOWTO (19), /* R_SH_IMM4 */
195   EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
196   EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
197 
198   HOWTO (R_SH_PCRELIMM8BY2,	/* type */
199 	 1,			/* rightshift */
200 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
201 	 8,			/* bitsize */
202 	 TRUE,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_unsigned, /* complain_on_overflow */
205 	 sh_reloc,		/* special_function */
206 	 "r_pcrelimm8by2",	/* name */
207 	 TRUE,			/* partial_inplace */
208 	 0xff,			/* src_mask */
209 	 0xff,			/* dst_mask */
210 	 TRUE),			/* pcrel_offset */
211 
212   HOWTO (R_SH_PCRELIMM8BY4,	/* type */
213 	 2,			/* rightshift */
214 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
215 	 8,			/* bitsize */
216 	 TRUE,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_unsigned, /* complain_on_overflow */
219 	 sh_reloc,		/* special_function */
220 	 "r_pcrelimm8by4",	/* name */
221 	 TRUE,			/* partial_inplace */
222 	 0xff,			/* src_mask */
223 	 0xff,			/* dst_mask */
224 	 TRUE),			/* pcrel_offset */
225 
226   HOWTO (R_SH_IMM16,		/* type */
227 	 0,			/* rightshift */
228 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
229 	 16,			/* bitsize */
230 	 FALSE,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_bitfield, /* complain_on_overflow */
233 	 sh_reloc,		/* special_function */
234 	 "r_imm16",		/* name */
235 	 TRUE,			/* partial_inplace */
236 	 0xffff,		/* src_mask */
237 	 0xffff,		/* dst_mask */
238 	 FALSE),		/* pcrel_offset */
239 
240   HOWTO (R_SH_SWITCH16,		/* type */
241 	 0,			/* rightshift */
242 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
243 	 16,			/* bitsize */
244 	 FALSE,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_bitfield, /* complain_on_overflow */
247 	 sh_reloc,		/* special_function */
248 	 "r_switch16",		/* name */
249 	 TRUE,			/* partial_inplace */
250 	 0xffff,		/* src_mask */
251 	 0xffff,		/* dst_mask */
252 	 FALSE),		/* pcrel_offset */
253 
254   HOWTO (R_SH_SWITCH32,		/* type */
255 	 0,			/* rightshift */
256 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
257 	 32,			/* bitsize */
258 	 FALSE,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_bitfield, /* complain_on_overflow */
261 	 sh_reloc,		/* special_function */
262 	 "r_switch32",		/* name */
263 	 TRUE,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 FALSE),		/* pcrel_offset */
267 
268   HOWTO (R_SH_USES,		/* type */
269 	 0,			/* rightshift */
270 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
271 	 16,			/* bitsize */
272 	 FALSE,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield, /* complain_on_overflow */
275 	 sh_reloc,		/* special_function */
276 	 "r_uses",		/* name */
277 	 TRUE,			/* partial_inplace */
278 	 0xffff,		/* src_mask */
279 	 0xffff,		/* dst_mask */
280 	 FALSE),		/* pcrel_offset */
281 
282   HOWTO (R_SH_COUNT,		/* type */
283 	 0,			/* rightshift */
284 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
285 	 32,			/* bitsize */
286 	 FALSE,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_bitfield, /* complain_on_overflow */
289 	 sh_reloc,		/* special_function */
290 	 "r_count",		/* name */
291 	 TRUE,			/* partial_inplace */
292 	 0xffffffff,		/* src_mask */
293 	 0xffffffff,		/* dst_mask */
294 	 FALSE),		/* pcrel_offset */
295 
296   HOWTO (R_SH_ALIGN,		/* type */
297 	 0,			/* rightshift */
298 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
299 	 32,			/* bitsize */
300 	 FALSE,			/* pc_relative */
301 	 0,			/* bitpos */
302 	 complain_overflow_bitfield, /* complain_on_overflow */
303 	 sh_reloc,		/* special_function */
304 	 "r_align",		/* name */
305 	 TRUE,			/* partial_inplace */
306 	 0xffffffff,		/* src_mask */
307 	 0xffffffff,		/* dst_mask */
308 	 FALSE),		/* pcrel_offset */
309 
310   HOWTO (R_SH_CODE,		/* type */
311 	 0,			/* rightshift */
312 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
313 	 32,			/* bitsize */
314 	 FALSE,			/* pc_relative */
315 	 0,			/* bitpos */
316 	 complain_overflow_bitfield, /* complain_on_overflow */
317 	 sh_reloc,		/* special_function */
318 	 "r_code",		/* name */
319 	 TRUE,			/* partial_inplace */
320 	 0xffffffff,		/* src_mask */
321 	 0xffffffff,		/* dst_mask */
322 	 FALSE),		/* pcrel_offset */
323 
324   HOWTO (R_SH_DATA,		/* type */
325 	 0,			/* rightshift */
326 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
327 	 32,			/* bitsize */
328 	 FALSE,			/* pc_relative */
329 	 0,			/* bitpos */
330 	 complain_overflow_bitfield, /* complain_on_overflow */
331 	 sh_reloc,		/* special_function */
332 	 "r_data",		/* name */
333 	 TRUE,			/* partial_inplace */
334 	 0xffffffff,		/* src_mask */
335 	 0xffffffff,		/* dst_mask */
336 	 FALSE),		/* pcrel_offset */
337 
338   HOWTO (R_SH_LABEL,		/* type */
339 	 0,			/* rightshift */
340 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
341 	 32,			/* bitsize */
342 	 FALSE,			/* pc_relative */
343 	 0,			/* bitpos */
344 	 complain_overflow_bitfield, /* complain_on_overflow */
345 	 sh_reloc,		/* special_function */
346 	 "r_label",		/* name */
347 	 TRUE,			/* partial_inplace */
348 	 0xffffffff,		/* src_mask */
349 	 0xffffffff,		/* dst_mask */
350 	 FALSE),		/* pcrel_offset */
351 
352   HOWTO (R_SH_SWITCH8,		/* type */
353 	 0,			/* rightshift */
354 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
355 	 8,			/* bitsize */
356 	 FALSE,			/* pc_relative */
357 	 0,			/* bitpos */
358 	 complain_overflow_bitfield, /* complain_on_overflow */
359 	 sh_reloc,		/* special_function */
360 	 "r_switch8",		/* name */
361 	 TRUE,			/* partial_inplace */
362 	 0xff,			/* src_mask */
363 	 0xff,			/* dst_mask */
364 	 FALSE)			/* pcrel_offset */
365 };
366 
367 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
368 
369 /* Check for a bad magic number.  */
370 #define BADMAG(x) SHBADMAG(x)
371 
372 /* Customize coffcode.h (this is not currently used).  */
373 #define SH 1
374 
375 /* FIXME: This should not be set here.  */
376 #define __A_MAGIC_SET__
377 
378 #ifndef COFF_WITH_PE
379 /* Swap the r_offset field in and out.  */
380 #define SWAP_IN_RELOC_OFFSET  H_GET_32
381 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
382 
383 /* Swap out extra information in the reloc structure.  */
384 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
385   do						\
386     {						\
387       dst->r_stuff[0] = 'S';			\
388       dst->r_stuff[1] = 'C';			\
389     }						\
390   while (0)
391 #endif
392 
393 /* Get the value of a symbol, when performing a relocation.  */
394 
395 static long
396 get_symbol_value (asymbol *symbol)
397 {
398   bfd_vma relocation;
399 
400   if (bfd_is_com_section (symbol->section))
401     relocation = 0;
402   else
403     relocation = (symbol->value +
404 		  symbol->section->output_section->vma +
405 		  symbol->section->output_offset);
406 
407   return relocation;
408 }
409 
410 #ifdef COFF_WITH_PE
411 /* Convert an rtype to howto for the COFF backend linker.
412    Copied from coff-i386.  */
413 #define coff_rtype_to_howto coff_sh_rtype_to_howto
414 
415 
416 static reloc_howto_type *
417 coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
418 			asection * sec,
419 			struct internal_reloc * rel,
420 			struct coff_link_hash_entry * h,
421 			struct internal_syment * sym,
422 			bfd_vma * addendp)
423 {
424   reloc_howto_type * howto;
425 
426   howto = sh_coff_howtos + rel->r_type;
427 
428   *addendp = 0;
429 
430   if (howto->pc_relative)
431     *addendp += sec->vma;
432 
433   if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
434     {
435       /* This is a common symbol.  The section contents include the
436 	 size (sym->n_value) as an addend.  The relocate_section
437 	 function will be adding in the final value of the symbol.  We
438 	 need to subtract out the current size in order to get the
439 	 correct result.  */
440       BFD_ASSERT (h != NULL);
441     }
442 
443   if (howto->pc_relative)
444     {
445       *addendp -= 4;
446 
447       /* If the symbol is defined, then the generic code is going to
448          add back the symbol value in order to cancel out an
449          adjustment it made to the addend.  However, we set the addend
450          to 0 at the start of this function.  We need to adjust here,
451          to avoid the adjustment the generic code will make.  FIXME:
452          This is getting a bit hackish.  */
453       if (sym != NULL && sym->n_scnum != 0)
454 	*addendp -= sym->n_value;
455     }
456 
457   if (rel->r_type == R_SH_IMAGEBASE)
458     *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
459 
460   return howto;
461 }
462 
463 #endif /* COFF_WITH_PE */
464 
465 /* This structure is used to map BFD reloc codes to SH PE relocs.  */
466 struct shcoff_reloc_map
467 {
468   bfd_reloc_code_real_type bfd_reloc_val;
469   unsigned char shcoff_reloc_val;
470 };
471 
472 #ifdef COFF_WITH_PE
473 /* An array mapping BFD reloc codes to SH PE relocs.  */
474 static const struct shcoff_reloc_map sh_reloc_map[] =
475 {
476   { BFD_RELOC_32, R_SH_IMM32CE },
477   { BFD_RELOC_RVA, R_SH_IMAGEBASE },
478   { BFD_RELOC_CTOR, R_SH_IMM32CE },
479 };
480 #else
481 /* An array mapping BFD reloc codes to SH PE relocs.  */
482 static const struct shcoff_reloc_map sh_reloc_map[] =
483 {
484   { BFD_RELOC_32, R_SH_IMM32 },
485   { BFD_RELOC_CTOR, R_SH_IMM32 },
486 };
487 #endif
488 
489 /* Given a BFD reloc code, return the howto structure for the
490    corresponding SH PE reloc.  */
491 #define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
492 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
493 
494 static reloc_howto_type *
495 sh_coff_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
496 			   bfd_reloc_code_real_type code)
497 {
498   unsigned int i;
499 
500   for (i = ARRAY_SIZE (sh_reloc_map); i--;)
501     if (sh_reloc_map[i].bfd_reloc_val == code)
502       return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
503 
504   (*_bfd_error_handler) (_("SH Error: unknown reloc type %d"), code);
505   return NULL;
506 }
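
/* For example (illustrative, not from the original sources), calling
   sh_coff_reloc_type_lookup with BFD_RELOC_32 returns the howto named
   "r_imm32ce" when building with COFF_WITH_PE and "r_imm32" otherwise,
   matching the sh_reloc_map tables above.  */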
507 
508 static reloc_howto_type *
509 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
510 			   const char *r_name)
511 {
512   unsigned int i;
513 
514   for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
515     if (sh_coff_howtos[i].name != NULL
516 	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
517       return &sh_coff_howtos[i];
518 
519   return NULL;
520 }
521 
522 /* This macro is used in coffcode.h to get the howto corresponding to
523    an internal reloc.  */
524 
525 #define RTYPE2HOWTO(relent, internal)		\
526   ((relent)->howto =				\
527    ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
528     ? &sh_coff_howtos[(internal)->r_type]	\
529     : (reloc_howto_type *) NULL))
530 
531 /* This is the same as the macro in coffcode.h, except that it copies
532    r_offset into reloc_entry->addend for some relocs.  */
533 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
534   {								\
535     coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
536     if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
537       coffsym = (obj_symbols (abfd)				\
538 		 + (cache_ptr->sym_ptr_ptr - symbols));		\
539     else if (ptr)						\
540       coffsym = coff_symbol_from (ptr);				\
541     if (coffsym != (coff_symbol_type *) NULL			\
542 	&& coffsym->native->u.syment.n_scnum == 0)		\
543       cache_ptr->addend = 0;					\
544     else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
545 	     && ptr->section != (asection *) NULL)		\
546       cache_ptr->addend = - (ptr->section->vma + ptr->value);	\
547     else							\
548       cache_ptr->addend = 0;					\
549     if ((reloc).r_type == R_SH_SWITCH8				\
550 	|| (reloc).r_type == R_SH_SWITCH16			\
551 	|| (reloc).r_type == R_SH_SWITCH32			\
552 	|| (reloc).r_type == R_SH_USES				\
553 	|| (reloc).r_type == R_SH_COUNT				\
554 	|| (reloc).r_type == R_SH_ALIGN)			\
555       cache_ptr->addend = (reloc).r_offset;			\
556   }
557 
558 /* This is the howto function for the SH relocations.  */
559 
560 static bfd_reloc_status_type
561 sh_reloc (bfd *      abfd,
562 	  arelent *  reloc_entry,
563 	  asymbol *  symbol_in,
564 	  void *     data,
565 	  asection * input_section,
566 	  bfd *      output_bfd,
567 	  char **    error_message ATTRIBUTE_UNUSED)
568 {
569   unsigned long insn;
570   bfd_vma sym_value;
571   unsigned short r_type;
572   bfd_vma addr = reloc_entry->address;
573   bfd_byte *hit_data = addr + (bfd_byte *) data;
574 
575   r_type = reloc_entry->howto->type;
576 
577   if (output_bfd != NULL)
578     {
579       /* Partial linking--do nothing.  */
580       reloc_entry->address += input_section->output_offset;
581       return bfd_reloc_ok;
582     }
583 
584   /* Almost all relocs have to do with relaxing.  If any work must be
585      done for them, it has been done in sh_relax_section.  */
586   if (r_type != R_SH_IMM32
587 #ifdef COFF_WITH_PE
588       && r_type != R_SH_IMM32CE
589       && r_type != R_SH_IMAGEBASE
590 #endif
591       && (r_type != R_SH_PCDISP
592 	  || (symbol_in->flags & BSF_LOCAL) != 0))
593     return bfd_reloc_ok;
594 
595   if (symbol_in != NULL
596       && bfd_is_und_section (symbol_in->section))
597     return bfd_reloc_undefined;
598 
599   sym_value = get_symbol_value (symbol_in);
600 
601   switch (r_type)
602     {
603     case R_SH_IMM32:
604 #ifdef COFF_WITH_PE
605     case R_SH_IMM32CE:
606 #endif
607       insn = bfd_get_32 (abfd, hit_data);
608       insn += sym_value + reloc_entry->addend;
609       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
610       break;
611 #ifdef COFF_WITH_PE
612     case R_SH_IMAGEBASE:
613       insn = bfd_get_32 (abfd, hit_data);
614       insn += sym_value + reloc_entry->addend;
615       insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
616       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
617       break;
618 #endif
619     case R_SH_PCDISP:
620       insn = bfd_get_16 (abfd, hit_data);
621       sym_value += reloc_entry->addend;
622       sym_value -= (input_section->output_section->vma
623 		    + input_section->output_offset
624 		    + addr
625 		    + 4);
626       sym_value += (insn & 0xfff) << 1;
627       if (insn & 0x800)
628 	sym_value -= 0x1000;
629       insn = (insn & 0xf000) | (sym_value & 0xfff);
630       bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
631       if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
632 	return bfd_reloc_overflow;
633       break;
634     default:
635       abort ();
636       break;
637     }
638 
639   return bfd_reloc_ok;
640 }
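
/* Illustrative note (not part of the original sources): an SH "bsr" or
   "bra" instruction encodes a signed 12-bit displacement D counted in
   halfwords, and the branch target is the address of the branch plus 4
   plus 2 * D.  For instance, a bsr at 0x1000 with D = 0x010 transfers
   control to 0x1000 + 4 + 0x20 = 0x1024.  The R_SH_PCDISP case above
   rebuilds D from the relocated symbol value using that formula.  */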
641 
642 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
643 
644 /* We can do relaxing.  */
645 #define coff_bfd_relax_section sh_relax_section
646 
647 /* We use the special COFF backend linker.  */
648 #define coff_relocate_section sh_relocate_section
649 
650 /* When relaxing, we need to use special code to get the relocated
651    section contents.  */
652 #define coff_bfd_get_relocated_section_contents \
653   sh_coff_get_relocated_section_contents
654 
655 #include "coffcode.h"
656 
657 static bfd_boolean
658 sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
659 
660 /* This function handles relaxing on the SH.
661 
662    Function calls on the SH look like this:
663 
664        mov.l L1,r0
665        ...
666        jsr   @r0
667        ...
668      L1:
669        .long function
670 
671    The compiler and assembler will cooperate to create R_SH_USES
672    relocs on the jsr instructions.  The r_offset field of the
673    R_SH_USES reloc is the PC relative offset to the instruction which
674    loads the register (the r_offset field is computed as though it
675    were a jump instruction, so the offset value is actually from four
676    bytes past the instruction).  The linker can use this reloc to
677    determine just which function is being called, and thus decide
678    whether it is possible to replace the jsr with a bsr.
679 
680    If multiple function calls are all based on a single register load
681    (i.e., the same function is called multiple times), the compiler
682    guarantees that each function call will have an R_SH_USES reloc.
683    Therefore, if the linker is able to convert each R_SH_USES reloc
684    which refers to that address, it can safely eliminate the register
685    load.
686 
687    When the assembler creates an R_SH_USES reloc, it examines it to
688    determine which address is being loaded (L1 in the above example).
689    It then counts the number of references to that address, and
690    creates an R_SH_COUNT reloc at that address.  The r_offset field of
691    the R_SH_COUNT reloc will be the number of references.  If the
692    linker is able to eliminate a register load, it can use the
693    R_SH_COUNT reloc to see whether it can also eliminate the function
694    address.
695 
696    SH relaxing also handles another, unrelated, matter.  On the SH, if
697    a load or store instruction is not aligned on a four byte boundary,
698    the memory cycle interferes with the 32 bit instruction fetch,
699    causing a one cycle bubble in the pipeline.  Therefore, we try to
700    align load and store instructions on four byte boundaries if we
701    can, by swapping them with one of the adjacent instructions.  */
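
/* Illustrative sketch of the transformation described above (not part
   of the original comment).  Assuming the callee ends up within bsr
   range, the sequence

       mov.l L1,r0
       ...
       jsr   @r0
       ...
     L1:
       .long function

   is relaxed by rewriting the jsr as

       bsr   function

   after which the mov.l is deleted, and once the R_SH_COUNT reloc at
   L1 shows no remaining uses the four byte constant at L1 is deleted
   as well (see sh_relax_delete_bytes below).  */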
702 
703 static bfd_boolean
704 sh_relax_section (bfd *abfd,
705 		  asection *sec,
706 		  struct bfd_link_info *link_info,
707 		  bfd_boolean *again)
708 {
709   struct internal_reloc *internal_relocs;
710   bfd_boolean have_code;
711   struct internal_reloc *irel, *irelend;
712   bfd_byte *contents = NULL;
713 
714   *again = FALSE;
715 
716   if (bfd_link_relocatable (link_info)
717       || (sec->flags & SEC_RELOC) == 0
718       || sec->reloc_count == 0)
719     return TRUE;
720 
721   if (coff_section_data (abfd, sec) == NULL)
722     {
723       bfd_size_type amt = sizeof (struct coff_section_tdata);
724       sec->used_by_bfd = bfd_zalloc (abfd, amt);
725       if (sec->used_by_bfd == NULL)
726 	return FALSE;
727     }
728 
729   internal_relocs = (_bfd_coff_read_internal_relocs
730 		     (abfd, sec, link_info->keep_memory,
731 		      (bfd_byte *) NULL, FALSE,
732 		      (struct internal_reloc *) NULL));
733   if (internal_relocs == NULL)
734     goto error_return;
735 
736   have_code = FALSE;
737 
738   irelend = internal_relocs + sec->reloc_count;
739   for (irel = internal_relocs; irel < irelend; irel++)
740     {
741       bfd_vma laddr, paddr, symval;
742       unsigned short insn;
743       struct internal_reloc *irelfn, *irelscan, *irelcount;
744       struct internal_syment sym;
745       bfd_signed_vma foff;
746 
747       if (irel->r_type == R_SH_CODE)
748 	have_code = TRUE;
749 
750       if (irel->r_type != R_SH_USES)
751 	continue;
752 
753       /* Get the section contents.  */
754       if (contents == NULL)
755 	{
756 	  if (coff_section_data (abfd, sec)->contents != NULL)
757 	    contents = coff_section_data (abfd, sec)->contents;
758 	  else
759 	    {
760 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
761 		goto error_return;
762 	    }
763 	}
764 
765       /* The r_offset field of the R_SH_USES reloc will point us to
766          the register load.  The 4 is because the r_offset field is
767          computed as though it were a jump offset, which is based
768          from 4 bytes after the jump instruction.  */
769       laddr = irel->r_vaddr - sec->vma + 4;
770       /* Careful to sign extend the 32-bit offset.  */
771       laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
772       if (laddr >= sec->size)
773 	{
774 	  (*_bfd_error_handler) ("%B: 0x%lx: warning: bad R_SH_USES offset",
775 				 abfd, (unsigned long) irel->r_vaddr);
776 	  continue;
777 	}
778       insn = bfd_get_16 (abfd, contents + laddr);
779 
780       /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
781       if ((insn & 0xf000) != 0xd000)
782 	{
783 	  ((*_bfd_error_handler)
784 	   ("%B: 0x%lx: warning: R_SH_USES points to unrecognized insn 0x%x",
785 	    abfd, (unsigned long) irel->r_vaddr, insn));
786 	  continue;
787 	}
788 
789       /* Get the address from which the register is being loaded.  The
790       	 displacement in the mov.l instruction is quadrupled.  It is a
791       	 displacement from four bytes after the movl instruction, but,
792       	 before adding in the PC address, two least significant bits
793       	 of the PC are cleared.  We assume that the section is aligned
794       	 on a four byte boundary.  */
795       paddr = insn & 0xff;
796       paddr *= 4;
797       paddr += (laddr + 4) &~ (bfd_vma) 3;
798       if (paddr >= sec->size)
799 	{
800 	  ((*_bfd_error_handler)
801 	   ("%B: 0x%lx: warning: bad R_SH_USES load offset",
802 	    abfd, (unsigned long) irel->r_vaddr));
803 	  continue;
804 	}
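
      /* Worked example (illustrative): for a mov.l @(disp,pc),rn at
         section offset 0x102 with an 8-bit displacement of 3, the
         literal is fetched from ((0x102 + 4) & ~3) + 3 * 4 = 0x110,
         which is the section offset computed into PADDR below.  */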
805 
806       /* Get the reloc for the address from which the register is
807          being loaded.  This reloc will tell us which function is
808          actually being called.  */
809       paddr += sec->vma;
810       for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
811 	if (irelfn->r_vaddr == paddr
812 #ifdef COFF_WITH_PE
813 	    && (irelfn->r_type == R_SH_IMM32
814 		|| irelfn->r_type == R_SH_IMM32CE
815 		|| irelfn->r_type == R_SH_IMAGEBASE)
816 
817 #else
818 	    && irelfn->r_type == R_SH_IMM32
819 #endif
820 	    )
821 	  break;
822       if (irelfn >= irelend)
823 	{
824 	  ((*_bfd_error_handler)
825 	   ("%B: 0x%lx: warning: could not find expected reloc",
826 	    abfd, (unsigned long) paddr));
827 	  continue;
828 	}
829 
830       /* Get the value of the symbol referred to by the reloc.  */
831       if (! _bfd_coff_get_external_symbols (abfd))
832 	goto error_return;
833       bfd_coff_swap_sym_in (abfd,
834 			    ((bfd_byte *) obj_coff_external_syms (abfd)
835 			     + (irelfn->r_symndx
836 				* bfd_coff_symesz (abfd))),
837 			    &sym);
838       if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
839 	{
840 	  ((*_bfd_error_handler)
841 	   ("%B: 0x%lx: warning: symbol in unexpected section",
842 	    abfd, (unsigned long) paddr));
843 	  continue;
844 	}
845 
846       if (sym.n_sclass != C_EXT)
847 	{
848 	  symval = (sym.n_value
849 		    - sec->vma
850 		    + sec->output_section->vma
851 		    + sec->output_offset);
852 	}
853       else
854 	{
855 	  struct coff_link_hash_entry *h;
856 
857 	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
858 	  BFD_ASSERT (h != NULL);
859 	  if (h->root.type != bfd_link_hash_defined
860 	      && h->root.type != bfd_link_hash_defweak)
861 	    {
862 	      /* This appears to be a reference to an undefined
863                  symbol.  Just ignore it--it will be caught by the
864                  regular reloc processing.  */
865 	      continue;
866 	    }
867 
868 	  symval = (h->root.u.def.value
869 		    + h->root.u.def.section->output_section->vma
870 		    + h->root.u.def.section->output_offset);
871 	}
872 
873       symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
874 
875       /* See if this function call can be shortened.  */
876       foff = (symval
877 	      - (irel->r_vaddr
878 		 - sec->vma
879 		 + sec->output_section->vma
880 		 + sec->output_offset
881 		 + 4));
882       if (foff < -0x1000 || foff >= 0x1000)
883 	{
884 	  /* After all that work, we can't shorten this function call.  */
885 	  continue;
886 	}
887 
888       /* Shorten the function call.  */
889 
890       /* For simplicity of coding, we are going to modify the section
891 	 contents, the section relocs, and the BFD symbol table.  We
892 	 must tell the rest of the code not to free up this
893 	 information.  It would be possible to instead create a table
894 	 of changes which have to be made, as is done in coff-mips.c;
895 	 that would be more work, but would require less memory when
896 	 the linker is run.  */
897 
898       coff_section_data (abfd, sec)->relocs = internal_relocs;
899       coff_section_data (abfd, sec)->keep_relocs = TRUE;
900 
901       coff_section_data (abfd, sec)->contents = contents;
902       coff_section_data (abfd, sec)->keep_contents = TRUE;
903 
904       obj_coff_keep_syms (abfd) = TRUE;
905 
906       /* Replace the jsr with a bsr.  */
907 
908       /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
909          replace the jsr with a bsr.  */
910       irel->r_type = R_SH_PCDISP;
911       irel->r_symndx = irelfn->r_symndx;
912       if (sym.n_sclass != C_EXT)
913 	{
914 	  /* If this needs to be changed because of future relaxing,
915              it will be handled here like other internal PCDISP
916              relocs.  */
917 	  bfd_put_16 (abfd,
918 		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
919 		      contents + irel->r_vaddr - sec->vma);
920 	}
921       else
922 	{
923 	  /* We can't fully resolve this yet, because the external
924              symbol value may be changed by future relaxing.  We let
925              the final link phase handle it.  */
926 	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
927 		      contents + irel->r_vaddr - sec->vma);
928 	}
929 
930       /* See if there is another R_SH_USES reloc referring to the same
931          register load.  */
932       for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
933 	if (irelscan->r_type == R_SH_USES
934 	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
935 	  break;
936       if (irelscan < irelend)
937 	{
938 	  /* Some other function call depends upon this register load,
939 	     and we have not yet converted that function call.
940 	     Indeed, we may never be able to convert it.  There is
941 	     nothing else we can do at this point.  */
942 	  continue;
943 	}
944 
945       /* Look for a R_SH_COUNT reloc on the location where the
946          function address is stored.  Do this before deleting any
947          bytes, to avoid confusion about the address.  */
948       for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
949 	if (irelcount->r_vaddr == paddr
950 	    && irelcount->r_type == R_SH_COUNT)
951 	  break;
952 
953       /* Delete the register load.  */
954       if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
955 	goto error_return;
956 
957       /* That will change things, so, just in case it permits some
958          other function call to come within range, we should relax
959          again.  Note that this is not required, and it may be slow.  */
960       *again = TRUE;
961 
962       /* Now check whether we got a COUNT reloc.  */
963       if (irelcount >= irelend)
964 	{
965 	  ((*_bfd_error_handler)
966 	   ("%B: 0x%lx: warning: could not find expected COUNT reloc",
967 	    abfd, (unsigned long) paddr));
968 	  continue;
969 	}
970 
971       /* The number of uses is stored in the r_offset field.  We've
972          just deleted one.  */
973       if (irelcount->r_offset == 0)
974 	{
975 	  ((*_bfd_error_handler) ("%B: 0x%lx: warning: bad count",
976 				  abfd, (unsigned long) paddr));
977 	  continue;
978 	}
979 
980       --irelcount->r_offset;
981 
982       /* If there are no more uses, we can delete the address.  Reload
983          the address from irelfn, in case it was changed by the
984          previous call to sh_relax_delete_bytes.  */
985       if (irelcount->r_offset == 0)
986 	{
987 	  if (! sh_relax_delete_bytes (abfd, sec,
988 				       irelfn->r_vaddr - sec->vma, 4))
989 	    goto error_return;
990 	}
991 
992       /* We've done all we can with that function call.  */
993     }
994 
995   /* Look for load and store instructions that we can align on four
996      byte boundaries.  */
997   if (have_code)
998     {
999       bfd_boolean swapped;
1000 
1001       /* Get the section contents.  */
1002       if (contents == NULL)
1003 	{
1004 	  if (coff_section_data (abfd, sec)->contents != NULL)
1005 	    contents = coff_section_data (abfd, sec)->contents;
1006 	  else
1007 	    {
1008 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1009 		goto error_return;
1010 	    }
1011 	}
1012 
1013       if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1014 	goto error_return;
1015 
1016       if (swapped)
1017 	{
1018 	  coff_section_data (abfd, sec)->relocs = internal_relocs;
1019 	  coff_section_data (abfd, sec)->keep_relocs = TRUE;
1020 
1021 	  coff_section_data (abfd, sec)->contents = contents;
1022 	  coff_section_data (abfd, sec)->keep_contents = TRUE;
1023 
1024 	  obj_coff_keep_syms (abfd) = TRUE;
1025 	}
1026     }
1027 
1028   if (internal_relocs != NULL
1029       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1030     {
1031       if (! link_info->keep_memory)
1032 	free (internal_relocs);
1033       else
1034 	coff_section_data (abfd, sec)->relocs = internal_relocs;
1035     }
1036 
1037   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1038     {
1039       if (! link_info->keep_memory)
1040 	free (contents);
1041       else
1042 	/* Cache the section contents for coff_link_input_bfd.  */
1043 	coff_section_data (abfd, sec)->contents = contents;
1044     }
1045 
1046   return TRUE;
1047 
1048  error_return:
1049   if (internal_relocs != NULL
1050       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1051     free (internal_relocs);
1052   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1053     free (contents);
1054   return FALSE;
1055 }
1056 
1057 /* Delete some bytes from a section while relaxing.  */
1058 
1059 static bfd_boolean
1060 sh_relax_delete_bytes (bfd *abfd,
1061 		       asection *sec,
1062 		       bfd_vma addr,
1063 		       int count)
1064 {
1065   bfd_byte *contents;
1066   struct internal_reloc *irel, *irelend;
1067   struct internal_reloc *irelalign;
1068   bfd_vma toaddr;
1069   bfd_byte *esym, *esymend;
1070   bfd_size_type symesz;
1071   struct coff_link_hash_entry **sym_hash;
1072   asection *o;
1073 
1074   contents = coff_section_data (abfd, sec)->contents;
1075 
1076   /* The deletion must stop at the next ALIGN reloc for an alignment
1077      power larger than the number of bytes we are deleting.  */
1078 
1079   irelalign = NULL;
1080   toaddr = sec->size;
1081 
1082   irel = coff_section_data (abfd, sec)->relocs;
1083   irelend = irel + sec->reloc_count;
1084   for (; irel < irelend; irel++)
1085     {
1086       if (irel->r_type == R_SH_ALIGN
1087 	  && irel->r_vaddr - sec->vma > addr
1088 	  && count < (1 << irel->r_offset))
1089 	{
1090 	  irelalign = irel;
1091 	  toaddr = irel->r_vaddr - sec->vma;
1092 	  break;
1093 	}
1094     }
1095 
1096   /* Actually delete the bytes.  */
1097   memmove (contents + addr, contents + addr + count,
1098 	   (size_t) (toaddr - addr - count));
1099   if (irelalign == NULL)
1100     sec->size -= count;
1101   else
1102     {
1103       int i;
1104 
1105 #define NOP_OPCODE (0x0009)
1106 
1107       BFD_ASSERT ((count & 1) == 0);
1108       for (i = 0; i < count; i += 2)
1109 	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1110     }
1111 
1112   /* Adjust all the relocs.  */
1113   for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1114     {
1115       bfd_vma nraddr, stop;
1116       bfd_vma start = 0;
1117       int insn = 0;
1118       struct internal_syment sym;
1119       int off, adjust, oinsn;
1120       bfd_signed_vma voff = 0;
1121       bfd_boolean overflow;
1122 
1123       /* Get the new reloc address.  */
1124       nraddr = irel->r_vaddr - sec->vma;
1125       if ((irel->r_vaddr - sec->vma > addr
1126 	   && irel->r_vaddr - sec->vma < toaddr)
1127 	  || (irel->r_type == R_SH_ALIGN
1128 	      && irel->r_vaddr - sec->vma == toaddr))
1129 	nraddr -= count;
1130 
1131       /* See if this reloc was for the bytes we have deleted, in which
1132 	 case we no longer care about it.  Don't delete relocs which
1133 	 represent addresses, though.  */
1134       if (irel->r_vaddr - sec->vma >= addr
1135 	  && irel->r_vaddr - sec->vma < addr + count
1136 	  && irel->r_type != R_SH_ALIGN
1137 	  && irel->r_type != R_SH_CODE
1138 	  && irel->r_type != R_SH_DATA
1139 	  && irel->r_type != R_SH_LABEL)
1140 	irel->r_type = R_SH_UNUSED;
1141 
1142       /* If this is a PC relative reloc, see if the range it covers
1143          includes the bytes we have deleted.  */
1144       switch (irel->r_type)
1145 	{
1146 	default:
1147 	  break;
1148 
1149 	case R_SH_PCDISP8BY2:
1150 	case R_SH_PCDISP:
1151 	case R_SH_PCRELIMM8BY2:
1152 	case R_SH_PCRELIMM8BY4:
1153 	  start = irel->r_vaddr - sec->vma;
1154 	  insn = bfd_get_16 (abfd, contents + nraddr);
1155 	  break;
1156 	}
1157 
1158       switch (irel->r_type)
1159 	{
1160 	default:
1161 	  start = stop = addr;
1162 	  break;
1163 
1164 	case R_SH_IMM32:
1165 #ifdef COFF_WITH_PE
1166 	case R_SH_IMM32CE:
1167 	case R_SH_IMAGEBASE:
1168 #endif
1169 	  /* If this reloc is against a symbol defined in this
1170              section, and the symbol will not be adjusted below, we
1171              must check the addend to see whether it will put the value in
1172              range to be adjusted, and hence must be changed.  */
1173 	  bfd_coff_swap_sym_in (abfd,
1174 				((bfd_byte *) obj_coff_external_syms (abfd)
1175 				 + (irel->r_symndx
1176 				    * bfd_coff_symesz (abfd))),
1177 				&sym);
1178 	  if (sym.n_sclass != C_EXT
1179 	      && sym.n_scnum == sec->target_index
1180 	      && ((bfd_vma) sym.n_value <= addr
1181 		  || (bfd_vma) sym.n_value >= toaddr))
1182 	    {
1183 	      bfd_vma val;
1184 
1185 	      val = bfd_get_32 (abfd, contents + nraddr);
1186 	      val += sym.n_value;
1187 	      if (val > addr && val < toaddr)
1188 		bfd_put_32 (abfd, val - count, contents + nraddr);
1189 	    }
1190 	  start = stop = addr;
1191 	  break;
1192 
1193 	case R_SH_PCDISP8BY2:
1194 	  off = insn & 0xff;
1195 	  if (off & 0x80)
1196 	    off -= 0x100;
1197 	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1198 	  break;
1199 
1200 	case R_SH_PCDISP:
1201 	  bfd_coff_swap_sym_in (abfd,
1202 				((bfd_byte *) obj_coff_external_syms (abfd)
1203 				 + (irel->r_symndx
1204 				    * bfd_coff_symesz (abfd))),
1205 				&sym);
1206 	  if (sym.n_sclass == C_EXT)
1207 	    start = stop = addr;
1208 	  else
1209 	    {
1210 	      off = insn & 0xfff;
1211 	      if (off & 0x800)
1212 		off -= 0x1000;
1213 	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1214 	    }
1215 	  break;
1216 
1217 	case R_SH_PCRELIMM8BY2:
1218 	  off = insn & 0xff;
1219 	  stop = start + 4 + off * 2;
1220 	  break;
1221 
1222 	case R_SH_PCRELIMM8BY4:
1223 	  off = insn & 0xff;
1224 	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1225 	  break;
1226 
1227 	case R_SH_SWITCH8:
1228 	case R_SH_SWITCH16:
1229 	case R_SH_SWITCH32:
1230 	  /* These reloc types represent
1231 	       .word L2-L1
1232 	     The r_offset field holds the difference between the reloc
1233 	     address and L1.  That is the start of the reloc, and
1234 	     adding in the contents gives us the top.  We must adjust
1235 	     both the r_offset field and the section contents.  */
1236 
1237 	  start = irel->r_vaddr - sec->vma;
1238 	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1239 
1240 	  if (start > addr
1241 	      && start < toaddr
1242 	      && (stop <= addr || stop >= toaddr))
1243 	    irel->r_offset += count;
1244 	  else if (stop > addr
1245 		   && stop < toaddr
1246 		   && (start <= addr || start >= toaddr))
1247 	    irel->r_offset -= count;
1248 
1249 	  start = stop;
1250 
1251 	  if (irel->r_type == R_SH_SWITCH16)
1252 	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
1253 	  else if (irel->r_type == R_SH_SWITCH8)
1254 	    voff = bfd_get_8 (abfd, contents + nraddr);
1255 	  else
1256 	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
1257 	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1258 
1259 	  break;
1260 
1261 	case R_SH_USES:
1262 	  start = irel->r_vaddr - sec->vma;
1263 	  stop = (bfd_vma) ((bfd_signed_vma) start
1264 			    + (long) irel->r_offset
1265 			    + 4);
1266 	  break;
1267 	}
1268 
1269       if (start > addr
1270 	  && start < toaddr
1271 	  && (stop <= addr || stop >= toaddr))
1272 	adjust = count;
1273       else if (stop > addr
1274 	       && stop < toaddr
1275 	       && (start <= addr || start >= toaddr))
1276 	adjust = - count;
1277       else
1278 	adjust = 0;
1279 
1280       if (adjust != 0)
1281 	{
1282 	  oinsn = insn;
1283 	  overflow = FALSE;
1284 	  switch (irel->r_type)
1285 	    {
1286 	    default:
1287 	      abort ();
1288 	      break;
1289 
1290 	    case R_SH_PCDISP8BY2:
1291 	    case R_SH_PCRELIMM8BY2:
1292 	      insn += adjust / 2;
1293 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1294 		overflow = TRUE;
1295 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1296 	      break;
1297 
1298 	    case R_SH_PCDISP:
1299 	      insn += adjust / 2;
1300 	      if ((oinsn & 0xf000) != (insn & 0xf000))
1301 		overflow = TRUE;
1302 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1303 	      break;
1304 
1305 	    case R_SH_PCRELIMM8BY4:
1306 	      BFD_ASSERT (adjust == count || count >= 4);
1307 	      if (count >= 4)
1308 		insn += adjust / 4;
1309 	      else
1310 		{
1311 		  if ((irel->r_vaddr & 3) == 0)
1312 		    ++insn;
1313 		}
1314 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1315 		overflow = TRUE;
1316 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1317 	      break;
1318 
1319 	    case R_SH_SWITCH8:
1320 	      voff += adjust;
1321 	      if (voff < 0 || voff >= 0xff)
1322 		overflow = TRUE;
1323 	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1324 	      break;
1325 
1326 	    case R_SH_SWITCH16:
1327 	      voff += adjust;
1328 	      if (voff < - 0x8000 || voff >= 0x8000)
1329 		overflow = TRUE;
1330 	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1331 	      break;
1332 
1333 	    case R_SH_SWITCH32:
1334 	      voff += adjust;
1335 	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1336 	      break;
1337 
1338 	    case R_SH_USES:
1339 	      irel->r_offset += adjust;
1340 	      break;
1341 	    }
1342 
1343 	  if (overflow)
1344 	    {
1345 	      ((*_bfd_error_handler)
1346 	       ("%B: 0x%lx: fatal: reloc overflow while relaxing",
1347 		abfd, (unsigned long) irel->r_vaddr));
1348 	      bfd_set_error (bfd_error_bad_value);
1349 	      return FALSE;
1350 	    }
1351 	}
1352 
1353       irel->r_vaddr = nraddr + sec->vma;
1354     }
1355 
1356   /* Look through all the other sections.  If they contain any IMM32
1357      relocs against internal symbols which we are not going to adjust
1358      below, we may need to adjust the addends.  */
1359   for (o = abfd->sections; o != NULL; o = o->next)
1360     {
1361       struct internal_reloc *internal_relocs;
1362       struct internal_reloc *irelscan, *irelscanend;
1363       bfd_byte *ocontents;
1364 
1365       if (o == sec
1366 	  || (o->flags & SEC_RELOC) == 0
1367 	  || o->reloc_count == 0)
1368 	continue;
1369 
1370       /* We always cache the relocs.  Perhaps, if info->keep_memory is
1371          FALSE, we should free them, if we are permitted to, when we
1372          leave sh_coff_relax_section.  */
1373       internal_relocs = (_bfd_coff_read_internal_relocs
1374 			 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1375 			  (struct internal_reloc *) NULL));
1376       if (internal_relocs == NULL)
1377 	return FALSE;
1378 
1379       ocontents = NULL;
1380       irelscanend = internal_relocs + o->reloc_count;
1381       for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1382 	{
1383 	  struct internal_syment sym;
1384 
1385 #ifdef COFF_WITH_PE
1386 	  if (irelscan->r_type != R_SH_IMM32
1387 	      && irelscan->r_type != R_SH_IMAGEBASE
1388 	      && irelscan->r_type != R_SH_IMM32CE)
1389 #else
1390 	  if (irelscan->r_type != R_SH_IMM32)
1391 #endif
1392 	    continue;
1393 
1394 	  bfd_coff_swap_sym_in (abfd,
1395 				((bfd_byte *) obj_coff_external_syms (abfd)
1396 				 + (irelscan->r_symndx
1397 				    * bfd_coff_symesz (abfd))),
1398 				&sym);
1399 	  if (sym.n_sclass != C_EXT
1400 	      && sym.n_scnum == sec->target_index
1401 	      && ((bfd_vma) sym.n_value <= addr
1402 		  || (bfd_vma) sym.n_value >= toaddr))
1403 	    {
1404 	      bfd_vma val;
1405 
1406 	      if (ocontents == NULL)
1407 		{
1408 		  if (coff_section_data (abfd, o)->contents != NULL)
1409 		    ocontents = coff_section_data (abfd, o)->contents;
1410 		  else
1411 		    {
1412 		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1413 			return FALSE;
1414 		      /* We always cache the section contents.
1415                          Perhaps, if info->keep_memory is FALSE, we
1416                          should free them, if we are permitted to,
1417                          when we leave sh_coff_relax_section.  */
1418 		      coff_section_data (abfd, o)->contents = ocontents;
1419 		    }
1420 		}
1421 
1422 	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1423 	      val += sym.n_value;
1424 	      if (val > addr && val < toaddr)
1425 		bfd_put_32 (abfd, val - count,
1426 			    ocontents + irelscan->r_vaddr - o->vma);
1427 
1428 	      coff_section_data (abfd, o)->keep_contents = TRUE;
1429 	    }
1430 	}
1431     }
1432 
1433   /* Adjusting the internal symbols will not work if something has
1434      already retrieved the generic symbols.  It would be possible to
1435      make this work by adjusting the generic symbols at the same time.
1436      However, this case should not arise in normal usage.  */
1437   if (obj_symbols (abfd) != NULL
1438       || obj_raw_syments (abfd) != NULL)
1439     {
1440       ((*_bfd_error_handler)
1441        ("%B: fatal: generic symbols retrieved before relaxing", abfd));
1442       bfd_set_error (bfd_error_invalid_operation);
1443       return FALSE;
1444     }
1445 
1446   /* Adjust all the symbols.  */
1447   sym_hash = obj_coff_sym_hashes (abfd);
1448   symesz = bfd_coff_symesz (abfd);
1449   esym = (bfd_byte *) obj_coff_external_syms (abfd);
1450   esymend = esym + obj_raw_syment_count (abfd) * symesz;
1451   while (esym < esymend)
1452     {
1453       struct internal_syment isym;
1454 
1455       bfd_coff_swap_sym_in (abfd, esym, &isym);
1456 
1457       if (isym.n_scnum == sec->target_index
1458 	  && (bfd_vma) isym.n_value > addr
1459 	  && (bfd_vma) isym.n_value < toaddr)
1460 	{
1461 	  isym.n_value -= count;
1462 
1463 	  bfd_coff_swap_sym_out (abfd, &isym, esym);
1464 
1465 	  if (*sym_hash != NULL)
1466 	    {
1467 	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1468 			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
1469 	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1470 			  && (*sym_hash)->root.u.def.value < toaddr);
1471 	      (*sym_hash)->root.u.def.value -= count;
1472 	    }
1473 	}
1474 
1475       esym += (isym.n_numaux + 1) * symesz;
1476       sym_hash += isym.n_numaux + 1;
1477     }
1478 
1479   /* See if we can move the ALIGN reloc forward.  We have adjusted
1480      r_vaddr for it already.  */
1481   if (irelalign != NULL)
1482     {
1483       bfd_vma alignto, alignaddr;
1484 
1485       alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1486       alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1487 			     1 << irelalign->r_offset);
1488       if (alignto != alignaddr)
1489 	{
1490 	  /* Tail recursion.  */
1491 	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
1492 					(int) (alignto - alignaddr));
1493 	}
1494     }
1495 
1496   return TRUE;
1497 }
1498 
1499 /* This is yet another version of the SH opcode table, used to rapidly
1500    get information about a particular instruction.  */
1501 
1502 /* The opcode map is represented by an array of these structures.  The
1503    array is indexed by the high order four bits in the instruction.  */
1504 
1505 struct sh_major_opcode
1506 {
1507   /* A pointer to the instruction list.  This is an array which
1508      contains all the instructions with this major opcode.  */
1509   const struct sh_minor_opcode *minor_opcodes;
1510   /* The number of elements in minor_opcodes.  */
1511   unsigned short count;
1512 };
1513 
1514 /* This structure holds information for a set of SH opcodes.  The
1515    instruction code is anded with the mask value, and the resulting
1516    value is used to search the sorted opcode list.  */
1517 
1518 struct sh_minor_opcode
1519 {
1520   /* The sorted opcode list.  */
1521   const struct sh_opcode *opcodes;
1522   /* The number of elements in opcodes.  */
1523   unsigned short count;
1524   /* The mask value to use when searching the opcode list.  */
1525   unsigned short mask;
1526 };
1527 
1528 /* This structure holds information for an SH instruction.  An array
1529    of these structures is sorted in order by opcode.  */
1530 
1531 struct sh_opcode
1532 {
1533   /* The code for this instruction, after it has been anded with the
1534      mask value in the sh_major_opcode structure.  */
1535   unsigned short opcode;
1536   /* Flags for this instruction.  */
1537   unsigned long flags;
1538 };
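
/* A minimal, illustrative sketch (not part of the original sources) of
   how the three structures above combine into an instruction lookup:
   index the major table by the top four bits of the instruction, then
   scan each minor table for an entry whose masked code matches.  The
   lookup routine used later in this file follows the same pattern; the
   function below is only an example and is never called.  */

static const struct sh_opcode *sh_lookup_insn_example
  (const struct sh_major_opcode *, unsigned int) ATTRIBUTE_UNUSED;

static const struct sh_opcode *
sh_lookup_insn_example (const struct sh_major_opcode *map, unsigned int insn)
{
  const struct sh_major_opcode *major = &map[(insn >> 12) & 0xf];
  unsigned short i;

  for (i = 0; i < major->count; i++)
    {
      const struct sh_minor_opcode *minor = &major->minor_opcodes[i];
      unsigned short j;

      for (j = 0; j < minor->count; j++)
	if ((insn & minor->mask) == minor->opcodes[j].opcode)
	  return &minor->opcodes[j];
    }

  return NULL;
}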
1539 
1540 /* Flags which appear in the sh_opcode structure.  */
1541 
1542 /* This instruction loads a value from memory.  */
1543 #define LOAD (0x1)
1544 
1545 /* This instruction stores a value to memory.  */
1546 #define STORE (0x2)
1547 
1548 /* This instruction is a branch.  */
1549 #define BRANCH (0x4)
1550 
1551 /* This instruction has a delay slot.  */
1552 #define DELAY (0x8)
1553 
1554 /* This instruction uses the value in the register in the field at
1555    mask 0x0f00 of the instruction.  */
1556 #define USES1 (0x10)
1557 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1558 
1559 /* This instruction uses the value in the register in the field at
1560    mask 0x00f0 of the instruction.  */
1561 #define USES2 (0x20)
1562 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1563 
1564 /* This instruction uses the value in register 0.  */
1565 #define USESR0 (0x40)
1566 
1567 /* This instruction sets the value in the register in the field at
1568    mask 0x0f00 of the instruction.  */
1569 #define SETS1 (0x80)
1570 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1571 
1572 /* This instruction sets the value in the register in the field at
1573    mask 0x00f0 of the instruction.  */
1574 #define SETS2 (0x100)
1575 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1576 
1577 /* This instruction sets register 0.  */
1578 #define SETSR0 (0x200)
1579 
1580 /* This instruction sets a special register.  */
1581 #define SETSSP (0x400)
1582 
1583 /* This instruction uses a special register.  */
1584 #define USESSP (0x800)
1585 
1586 /* This instruction uses the floating point register in the field at
1587    mask 0x0f00 of the instruction.  */
1588 #define USESF1 (0x1000)
1589 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1590 
1591 /* This instruction uses the floating point register in the field at
1592    mask 0x00f0 of the instruction.  */
1593 #define USESF2 (0x2000)
1594 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1595 
1596 /* This instruction uses floating point register 0.  */
1597 #define USESF0 (0x4000)
1598 
1599 /* This instruction sets the floating point register in the field at
1600    mask 0x0f00 of the instruction.  */
1601 #define SETSF1 (0x8000)
1602 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1603 
1604 #define USESAS (0x10000)
1605 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1606 #define USESR8 (0x20000)
1607 #define SETSAS (0x40000)
1608 #define SETSAS_REG(x) USESAS_REG (x)
1609 
1610 #define MAP(a) a, sizeof a / sizeof a[0]
1611 
1612 #ifndef COFF_IMAGE_WITH_PE
1613 
1614 /* The opcode maps.  */
1615 
1616 static const struct sh_opcode sh_opcode00[] =
1617 {
1618   { 0x0008, SETSSP },			/* clrt */
1619   { 0x0009, 0 },			/* nop */
1620   { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
1621   { 0x0018, SETSSP },			/* sett */
1622   { 0x0019, SETSSP },			/* div0u */
1623   { 0x001b, 0 },			/* sleep */
1624   { 0x0028, SETSSP },			/* clrmac */
1625   { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
1626   { 0x0038, USESSP | SETSSP },		/* ldtlb */
1627   { 0x0048, SETSSP },			/* clrs */
1628   { 0x0058, SETSSP }			/* sets */
1629 };
1630 
1631 static const struct sh_opcode sh_opcode01[] =
1632 {
1633   { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
1634   { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
1635   { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
1636   { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
1637   { 0x0029, SETS1 | USESSP },			/* movt rn */
1638   { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
1639   { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
1640   { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
1641   { 0x0083, LOAD | USES1 },			/* pref @rn */
1642   { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
1643   { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
1644   { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
1645   { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
1646   { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
1647 };
1648 
1649 static const struct sh_opcode sh_opcode02[] =
1650 {
1651   { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
1652   { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
1653   { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
1654   { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
1655   { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
1656   { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
1657   { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
1658   { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
1659   { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1660 };
1661 
1662 static const struct sh_minor_opcode sh_opcode0[] =
1663 {
1664   { MAP (sh_opcode00), 0xffff },
1665   { MAP (sh_opcode01), 0xf0ff },
1666   { MAP (sh_opcode02), 0xf00f }
1667 };
1668 
1669 static const struct sh_opcode sh_opcode10[] =
1670 {
1671   { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
1672 };
1673 
1674 static const struct sh_minor_opcode sh_opcode1[] =
1675 {
1676   { MAP (sh_opcode10), 0xf000 }
1677 };
1678 
1679 static const struct sh_opcode sh_opcode20[] =
1680 {
1681   { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
1682   { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
1683   { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
1684   { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
1685   { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
1686   { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
1687   { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
1688   { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
1689   { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
1690   { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
1691   { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
1692   { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
1693   { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
1694   { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
1695   { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
1696 };
1697 
1698 static const struct sh_minor_opcode sh_opcode2[] =
1699 {
1700   { MAP (sh_opcode20), 0xf00f }
1701 };
1702 
1703 static const struct sh_opcode sh_opcode30[] =
1704 {
1705   { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
1706   { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
1707   { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
1708   { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
1709   { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
1710   { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
1711   { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
1712   { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
1713   { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1714   { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
1715   { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
1716   { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
1717   { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1718   { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
1719 };
1720 
1721 static const struct sh_minor_opcode sh_opcode3[] =
1722 {
1723   { MAP (sh_opcode30), 0xf00f }
1724 };
1725 
1726 static const struct sh_opcode sh_opcode40[] =
1727 {
1728   { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
1729   { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
1730   { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
1731   { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
1732   { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
1733   { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
1734   { 0x4008, SETS1 | USES1 },			/* shll2 rn */
1735   { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
1736   { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
1737   { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
1738   { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
1739   { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
1740   { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
1741   { 0x4014, SETSSP | USES1 },			/* setrc rm */
1742   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1743   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1744   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1745   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1746   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1747   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1748   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1749   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1750   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1751   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1752   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1753   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1754   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1755   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1756   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1757   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1758   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1759   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1760   { 0x405a, SETSSP | USES1 },			/* lds rm,fpul */
1761   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1762   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1763   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1764   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1765   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1766   { 0x407a, SETSSP | USES1 },			/* lds rm,a0 */
1767   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1768   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1769   { 0x408a, SETSSP | USES1 },			/* lds rm,x0 */
1770   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1771   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1772   { 0x409a, SETSSP | USES1 },			/* lds rm,x1 */
1773   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1774   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1775   { 0x40aa, SETSSP | USES1 },			/* lds rm,y0 */
1776   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1777   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1778   { 0x40ba, SETSSP | USES1 }			/* lds rm,y1 */
1779 };
1780 
1781 static const struct sh_opcode sh_opcode41[] =
1782 {
1783   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1784   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1785   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1786   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1787   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1788   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1789 };
1790 
1791 static const struct sh_minor_opcode sh_opcode4[] =
1792 {
1793   { MAP (sh_opcode40), 0xf0ff },
1794   { MAP (sh_opcode41), 0xf00f }
1795 };
1796 
1797 static const struct sh_opcode sh_opcode50[] =
1798 {
1799   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1800 };
1801 
1802 static const struct sh_minor_opcode sh_opcode5[] =
1803 {
1804   { MAP (sh_opcode50), 0xf000 }
1805 };
1806 
1807 static const struct sh_opcode sh_opcode60[] =
1808 {
1809   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1810   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1811   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1812   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1813   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1814   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1815   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1816   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1817   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1818   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1819   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1820   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1821   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1822   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1823   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1824   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1825 };
1826 
1827 static const struct sh_minor_opcode sh_opcode6[] =
1828 {
1829   { MAP (sh_opcode60), 0xf00f }
1830 };
1831 
1832 static const struct sh_opcode sh_opcode70[] =
1833 {
1834   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1835 };
1836 
1837 static const struct sh_minor_opcode sh_opcode7[] =
1838 {
1839   { MAP (sh_opcode70), 0xf000 }
1840 };
1841 
1842 static const struct sh_opcode sh_opcode80[] =
1843 {
1844   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1845   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1846   { 0x8200, SETSSP },			/* setrc #imm */
1847   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1848   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rm),r0 */
1849   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1850   { 0x8900, BRANCH | USESSP },		/* bt label */
1851   { 0x8b00, BRANCH | USESSP },		/* bf label */
1852   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1853   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1854   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1855   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1856 };
1857 
1858 static const struct sh_minor_opcode sh_opcode8[] =
1859 {
1860   { MAP (sh_opcode80), 0xff00 }
1861 };
1862 
1863 static const struct sh_opcode sh_opcode90[] =
1864 {
1865   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1866 };
1867 
1868 static const struct sh_minor_opcode sh_opcode9[] =
1869 {
1870   { MAP (sh_opcode90), 0xf000 }
1871 };
1872 
1873 static const struct sh_opcode sh_opcodea0[] =
1874 {
1875   { 0xa000, BRANCH | DELAY }	/* bra label */
1876 };
1877 
1878 static const struct sh_minor_opcode sh_opcodea[] =
1879 {
1880   { MAP (sh_opcodea0), 0xf000 }
1881 };
1882 
1883 static const struct sh_opcode sh_opcodeb0[] =
1884 {
1885   { 0xb000, BRANCH | DELAY }	/* bsr label */
1886 };
1887 
1888 static const struct sh_minor_opcode sh_opcodeb[] =
1889 {
1890   { MAP (sh_opcodeb0), 0xf000 }
1891 };
1892 
1893 static const struct sh_opcode sh_opcodec0[] =
1894 {
1895   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1896   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1897   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1898   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1899   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1900   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1901   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1902   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1903   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1904   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1905   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1906   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1907   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1908   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1909   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1910   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1911 };
1912 
1913 static const struct sh_minor_opcode sh_opcodec[] =
1914 {
1915   { MAP (sh_opcodec0), 0xff00 }
1916 };
1917 
1918 static const struct sh_opcode sh_opcoded0[] =
1919 {
1920   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1921 };
1922 
1923 static const struct sh_minor_opcode sh_opcoded[] =
1924 {
1925   { MAP (sh_opcoded0), 0xf000 }
1926 };
1927 
1928 static const struct sh_opcode sh_opcodee0[] =
1929 {
1930   { 0xe000, SETS1 }		/* mov #imm,rn */
1931 };
1932 
1933 static const struct sh_minor_opcode sh_opcodee[] =
1934 {
1935   { MAP (sh_opcodee0), 0xf000 }
1936 };
1937 
1938 static const struct sh_opcode sh_opcodef0[] =
1939 {
1940   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1941   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1942   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1943   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1944   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1945   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1946   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1947   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1948   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1949   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1950   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1951   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1952   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1953   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1954 };
1955 
1956 static const struct sh_opcode sh_opcodef1[] =
1957 {
1958   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1959   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1960   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1961   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1962   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1963   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1964   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1965   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1966   { 0xf08d, SETSF1 },		/* fldi0 fn */
1967   { 0xf09d, SETSF1 }		/* fldi1 fn */
1968 };
1969 
1970 static const struct sh_minor_opcode sh_opcodef[] =
1971 {
1972   { MAP (sh_opcodef0), 0xf00f },
1973   { MAP (sh_opcodef1), 0xf0ff }
1974 };
1975 
1976 static struct sh_major_opcode sh_opcodes[] =
1977 {
1978   { MAP (sh_opcode0) },
1979   { MAP (sh_opcode1) },
1980   { MAP (sh_opcode2) },
1981   { MAP (sh_opcode3) },
1982   { MAP (sh_opcode4) },
1983   { MAP (sh_opcode5) },
1984   { MAP (sh_opcode6) },
1985   { MAP (sh_opcode7) },
1986   { MAP (sh_opcode8) },
1987   { MAP (sh_opcode9) },
1988   { MAP (sh_opcodea) },
1989   { MAP (sh_opcodeb) },
1990   { MAP (sh_opcodec) },
1991   { MAP (sh_opcoded) },
1992   { MAP (sh_opcodee) },
1993   { MAP (sh_opcodef) }
1994 };
1995 
1996 /* The double data transfer / parallel processing insns are not
1997    described here.  This will cause sh_align_load_span to leave them alone.  */
1998 
1999 static const struct sh_opcode sh_dsp_opcodef0[] =
2000 {
2001   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2002   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2003   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2004   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2005   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2006   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2007   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2008   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2009 };
2010 
2011 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2012 {
2013   { MAP (sh_dsp_opcodef0), 0xfc0d }
2014 };
2015 
2016 /* Given an instruction, return a pointer to the corresponding
2017    sh_opcode structure.  Return NULL if the instruction is not
2018    recognized.  */
2019 
2020 static const struct sh_opcode *
2021 sh_insn_info (unsigned int insn)
2022 {
2023   const struct sh_major_opcode *maj;
2024   const struct sh_minor_opcode *min, *minend;
2025 
2026   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2027   min = maj->minor_opcodes;
2028   minend = min + maj->count;
2029   for (; min < minend; min++)
2030     {
2031       unsigned int l;
2032       const struct sh_opcode *op, *opend;
2033 
2034       l = insn & min->mask;
2035       op = min->opcodes;
2036       opend = op + min->count;
2037 
2038       /* Since the opcode tables are sorted, we could use a binary
2039          search here if the count were above some cutoff value.  */
2040       for (; op < opend; op++)
2041 	if (op->opcode == l)
2042 	  return op;
2043     }
2044 
2045   return NULL;
2046 }
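
/* Illustrative sketch only (kept out of the build): how the tables
   above classify a concrete instruction.  For 0x6503 (mov r0,r5) the
   major index is 6, the minor mask 0xf00f reduces the insn to 0x6003,
   and the matching sh_opcode60 entry carries SETS1 | USES2.  */
#if 0
static void
example_sh_insn_info (void)
{
  unsigned int insn = 0x6503;		/* mov r0,r5 */
  const struct sh_opcode *op = sh_insn_info (insn);

  /* Here op->flags == (SETS1 | USES2), SETS1_REG (insn) == 5 (the
     destination r5) and USES2_REG (insn) == 0 (the source r0).  */
  (void) op;
}
#endif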
2047 
2048 /* See whether an instruction uses a general purpose register.  */
2049 
2050 static bfd_boolean
2051 sh_insn_uses_reg (unsigned int insn,
2052 		  const struct sh_opcode *op,
2053 		  unsigned int reg)
2054 {
2055   unsigned int f;
2056 
2057   f = op->flags;
2058 
2059   if ((f & USES1) != 0
2060       && USES1_REG (insn) == reg)
2061     return TRUE;
2062   if ((f & USES2) != 0
2063       && USES2_REG (insn) == reg)
2064     return TRUE;
2065   if ((f & USESR0) != 0
2066       && reg == 0)
2067     return TRUE;
2068   if ((f & USESAS) && reg == USESAS_REG (insn))
2069     return TRUE;
2070   if ((f & USESR8) && reg == 8)
2071     return TRUE;
2072 
2073   return FALSE;
2074 }
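
/* For example, with the mov r0,r5 (0x6503) entry from sh_opcode60,
   sh_insn_uses_reg reports a use of r0 (through USES2) but not of r3:
   sh_insn_uses_reg (0x6503, op, 0) is TRUE while
   sh_insn_uses_reg (0x6503, op, 3) is FALSE.  */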
2075 
2076 /* See whether an instruction sets a general purpose register.  */
2077 
2078 static bfd_boolean
2079 sh_insn_sets_reg (unsigned int insn,
2080 		  const struct sh_opcode *op,
2081 		  unsigned int reg)
2082 {
2083   unsigned int f;
2084 
2085   f = op->flags;
2086 
2087   if ((f & SETS1) != 0
2088       && SETS1_REG (insn) == reg)
2089     return TRUE;
2090   if ((f & SETS2) != 0
2091       && SETS2_REG (insn) == reg)
2092     return TRUE;
2093   if ((f & SETSR0) != 0
2094       && reg == 0)
2095     return TRUE;
2096   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2097     return TRUE;
2098 
2099   return FALSE;
2100 }
2101 
2102 /* See whether an instruction uses or sets a general purpose register.  */
2103 
2104 static bfd_boolean
2105 sh_insn_uses_or_sets_reg (unsigned int insn,
2106 			  const struct sh_opcode *op,
2107 			  unsigned int reg)
2108 {
2109   if (sh_insn_uses_reg (insn, op, reg))
2110     return TRUE;
2111 
2112   return sh_insn_sets_reg (insn, op, reg);
2113 }
2114 
2115 /* See whether an instruction uses a floating point register.  */
2116 
2117 static bfd_boolean
2118 sh_insn_uses_freg (unsigned int insn,
2119 		   const struct sh_opcode *op,
2120 		   unsigned int freg)
2121 {
2122   unsigned int f;
2123 
2124   f = op->flags;
2125 
2126   /* We can't tell if this is a double-precision insn, so just play safe
2127      and assume that it might be.  That means we must not only test FREG
2128      itself, but also an even FREG against FREG+1 - in case the using
2129      insn reads just the low part of a double precision value - and an
2130      odd FREG against FREG-1 - in case the setting insn writes just the
2131      low part of a double precision value.
2132      What this all boils down to is that we have to ignore the lowest
2133      bit of the register number.  */
2134 
2135   if ((f & USESF1) != 0
2136       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2137     return TRUE;
2138   if ((f & USESF2) != 0
2139       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2140     return TRUE;
2141   if ((f & USESF0) != 0
2142       && freg == 0)
2143     return TRUE;
2144 
2145   return FALSE;
2146 }
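
/* For example, fmov fm,fn (0xf00c) only records USESF2 for fm, but
   because the low bit of the register number is ignored a query for
   either half of a register pair matches: for insn 0xf2ac (fmov
   fr10,fr2) both sh_insn_uses_freg (insn, op, 10) and
   sh_insn_uses_freg (insn, op, 11) return TRUE.  */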
2147 
2148 /* See whether an instruction sets a floating point register.  */
2149 
2150 static bfd_boolean
2151 sh_insn_sets_freg (unsigned int insn,
2152 		   const struct sh_opcode *op,
2153 		   unsigned int freg)
2154 {
2155   unsigned int f;
2156 
2157   f = op->flags;
2158 
2159   /* We can't tell if this is a double-precision insn, so just play safe
2160      and assume that it might be.  That means we must not only test FREG
2161      itself, but also an even FREG against FREG+1 - in case the using
2162      insn reads just the low part of a double precision value - and an
2163      odd FREG against FREG-1 - in case the setting insn writes just the
2164      low part of a double precision value.
2165      What this all boils down to is that we have to ignore the lowest
2166      bit of the register number.  */
2167 
2168   if ((f & SETSF1) != 0
2169       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2170     return TRUE;
2171 
2172   return FALSE;
2173 }
2174 
2175 /* See whether an instruction uses or sets a floating point register.  */
2176 
2177 static bfd_boolean
2178 sh_insn_uses_or_sets_freg (unsigned int insn,
2179 			   const struct sh_opcode *op,
2180 			   unsigned int reg)
2181 {
2182   if (sh_insn_uses_freg (insn, op, reg))
2183     return TRUE;
2184 
2185   return sh_insn_sets_freg (insn, op, reg);
2186 }
2187 
2188 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2189    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2190    This should return TRUE if there is a conflict, or FALSE if the
2191    instructions can be swapped safely.  */
2192 
2193 static bfd_boolean
2194 sh_insns_conflict (unsigned int i1,
2195 		   const struct sh_opcode *op1,
2196 		   unsigned int i2,
2197 		   const struct sh_opcode *op2)
2198 {
2199   unsigned int f1, f2;
2200 
2201   f1 = op1->flags;
2202   f2 = op2->flags;
2203 
2204   /* Load of fpscr conflicts with floating point operations.
2205      FIXME: shouldn't test raw opcodes here.  */
2206   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2207       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2208     return TRUE;
2209 
2210   if ((f1 & (BRANCH | DELAY)) != 0
2211       || (f2 & (BRANCH | DELAY)) != 0)
2212     return TRUE;
2213 
2214   if (((f1 | f2) & SETSSP)
2215       && (f1 & (SETSSP | USESSP))
2216       && (f2 & (SETSSP | USESSP)))
2217     return TRUE;
2218 
2219   if ((f1 & SETS1) != 0
2220       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2221     return TRUE;
2222   if ((f1 & SETS2) != 0
2223       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2224     return TRUE;
2225   if ((f1 & SETSR0) != 0
2226       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2227     return TRUE;
2228   if ((f1 & SETSAS)
2229       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2230     return TRUE;
2231   if ((f1 & SETSF1) != 0
2232       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2233     return TRUE;
2234 
2235   if ((f2 & SETS1) != 0
2236       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2237     return TRUE;
2238   if ((f2 & SETS2) != 0
2239       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2240     return TRUE;
2241   if ((f2 & SETSR0) != 0
2242       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2243     return TRUE;
2244   if ((f2 & SETSAS)
2245       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2246     return TRUE;
2247   if ((f2 & SETSF1) != 0
2248       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2249     return TRUE;
2250 
2251   /* The instructions do not conflict.  */
2252   return FALSE;
2253 }
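
/* Illustrative sketch only (kept out of the build): a pair of
   instructions that may be swapped and a pair that may not.  */
#if 0
static void
example_sh_insns_conflict (void)
{
  unsigned int i1 = 0x6103;		/* mov r0,r1   (sets r1, uses r0)  */
  unsigned int i2 = 0x7220;		/* add #32,r2  (sets and uses r2)  */
  unsigned int i3 = 0x7120;		/* add #32,r1  (sets and uses r1)  */
  const struct sh_opcode *op1 = sh_insn_info (i1);
  const struct sh_opcode *op2 = sh_insn_info (i2);
  const struct sh_opcode *op3 = sh_insn_info (i3);

  /* sh_insns_conflict (i1, op1, i2, op2) is FALSE: the two touch
     disjoint registers.  sh_insns_conflict (i1, op1, i3, op3) is TRUE:
     i1 sets r1 and i3 both uses and sets r1.  */
  (void) op1; (void) op2; (void) op3;
}
#endif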
2254 
2255 /* I1 is a load instruction, and I2 is some other instruction.  Return
2256    TRUE if I1 loads a register which I2 uses.  */
2257 
2258 static bfd_boolean
2259 sh_load_use (unsigned int i1,
2260 	     const struct sh_opcode *op1,
2261 	     unsigned int i2,
2262 	     const struct sh_opcode *op2)
2263 {
2264   unsigned int f1;
2265 
2266   f1 = op1->flags;
2267 
2268   if ((f1 & LOAD) == 0)
2269     return FALSE;
2270 
2271   /* If both SETS1 and SETSSP are set, that means a load to a special
2272      register using postincrement addressing mode, which we don't care
2273      about here.  */
2274   if ((f1 & SETS1) != 0
2275       && (f1 & SETSSP) == 0
2276       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2277     return TRUE;
2278 
2279   if ((f1 & SETSR0) != 0
2280       && sh_insn_uses_reg (i2, op2, 0))
2281     return TRUE;
2282 
2283   if ((f1 & SETSF1) != 0
2284       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2285     return TRUE;
2286 
2287   return FALSE;
2288 }
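
/* Illustrative sketch only (kept out of the build): a load followed by
   an instruction that consumes the loaded register.  */
#if 0
static void
example_sh_load_use (void)
{
  unsigned int i1 = 0x6132;		/* mov.l @r3,r1  (loads into r1)  */
  unsigned int i2 = 0x7108;		/* add #8,r1     (uses r1)        */
  const struct sh_opcode *op1 = sh_insn_info (i1);
  const struct sh_opcode *op2 = sh_insn_info (i2);

  /* sh_load_use (i1, op1, i2, op2) is TRUE, so placing i2 directly
     after i1 would only create a load-use stall, and a swap that does
     so is not worthwhile.  */
  (void) op1; (void) op2;
}
#endif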
2289 
2290 /* Try to align loads and stores within a span of memory.  This is
2291    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2292    the BFD and section we are examining.  CONTENTS is the contents of
2293    the section.  SWAP is the routine to call to swap two instructions.
2294    RELOCS is a pointer to the internal relocation information, to be
2295    passed to SWAP.  PLABEL is a pointer to the current label in a
2296    sorted list of labels; LABEL_END is the end of the list.  START and
2297    STOP are the range of memory to examine.  If a swap is made,
2298    *PSWAPPED is set to TRUE.  */
2299 
2300 #ifdef COFF_WITH_PE
2301 static
2302 #endif
2303 bfd_boolean
2304 _bfd_sh_align_load_span (bfd *abfd,
2305 			 asection *sec,
2306 			 bfd_byte *contents,
2307 			 bfd_boolean (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2308 			 void * relocs,
2309 			 bfd_vma **plabel,
2310 			 bfd_vma *label_end,
2311 			 bfd_vma start,
2312 			 bfd_vma stop,
2313 			 bfd_boolean *pswapped)
2314 {
2315   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2316 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2317   bfd_vma i;
2318 
2319   /* The SH4 has a Harvard architecture, hence aligning loads is not
2320      desirable.  In fact, it is counter-productive, since it interferes
2321      with the schedules generated by the compiler.  */
2322   if (abfd->arch_info->mach == bfd_mach_sh4)
2323     return TRUE;
2324 
2325   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2326      instructions.  */
2327   if (dsp)
2328     {
2329       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2330       sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2331     }
2332 
2333   /* Instructions should be aligned on 2 byte boundaries.  */
2334   if ((start & 1) == 1)
2335     ++start;
2336 
2337   /* Now look through the unaligned addresses.  */
2338   i = start;
2339   if ((i & 2) == 0)
2340     i += 2;
2341   for (; i < stop; i += 4)
2342     {
2343       unsigned int insn;
2344       const struct sh_opcode *op;
2345       unsigned int prev_insn = 0;
2346       const struct sh_opcode *prev_op = NULL;
2347 
2348       insn = bfd_get_16 (abfd, contents + i);
2349       op = sh_insn_info (insn);
2350       if (op == NULL
2351 	  || (op->flags & (LOAD | STORE)) == 0)
2352 	continue;
2353 
2354       /* This is a load or store which is not on a four byte boundary.  */
2355 
2356       while (*plabel < label_end && **plabel < i)
2357 	++*plabel;
2358 
2359       if (i > start)
2360 	{
2361 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2362 	  /* If INSN is the field b of a parallel processing insn, it is not
2363 	     a load / store after all.  Note that the test here might mistake
2364 	     the field_b of a pcopy insn for the starting code of a parallel
2365 	     processing insn; this might miss a swapping opportunity, but at
2366 	     least we're on the safe side.  */
2367 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2368 	    continue;
2369 
2370 	  /* Check if prev_insn is actually the field b of a parallel
2371 	     processing insn.  Again, this can give a spurious match
2372 	     after a pcopy.  */
2373 	  if (dsp && i - 2 > start)
2374 	    {
2375 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2376 
2377 	      if ((pprev_insn & 0xfc00) == 0xf800)
2378 		prev_op = NULL;
2379 	      else
2380 		prev_op = sh_insn_info (prev_insn);
2381 	    }
2382 	  else
2383 	    prev_op = sh_insn_info (prev_insn);
2384 
2385 	  /* If the load/store instruction is in a delay slot, we
2386 	     can't swap.  */
2387 	  if (prev_op == NULL
2388 	      || (prev_op->flags & DELAY) != 0)
2389 	    continue;
2390 	}
2391       if (i > start
2392 	  && (*plabel >= label_end || **plabel != i)
2393 	  && prev_op != NULL
2394 	  && (prev_op->flags & (LOAD | STORE)) == 0
2395 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2396 	{
2397 	  bfd_boolean ok;
2398 
2399 	  /* The load/store instruction does not have a label, and
2400 	     there is a previous instruction; PREV_INSN is not
2401 	     itself a load/store instruction, and PREV_INSN and
2402 	     INSN do not conflict.  */
2403 
2404 	  ok = TRUE;
2405 
2406 	  if (i >= start + 4)
2407 	    {
2408 	      unsigned int prev2_insn;
2409 	      const struct sh_opcode *prev2_op;
2410 
2411 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2412 	      prev2_op = sh_insn_info (prev2_insn);
2413 
2414 	      /* If the instruction before PREV_INSN has a delay
2415 		 slot--that is, PREV_INSN is in a delay slot--we
2416 		 can not swap.  */
2417 	      if (prev2_op == NULL
2418 		  || (prev2_op->flags & DELAY) != 0)
2419 		ok = FALSE;
2420 
2421 	      /* If the instruction before PREV_INSN is a load,
2422 		 and it sets a register which INSN uses, then
2423 		 putting INSN immediately after PREV_INSN will
2424 		 cause a pipeline bubble, so there is no point to
2425 		 making the swap.  */
2426 	      if (ok
2427 		  && (prev2_op->flags & LOAD) != 0
2428 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2429 		ok = FALSE;
2430 	    }
2431 
2432 	  if (ok)
2433 	    {
2434 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2435 		return FALSE;
2436 	      *pswapped = TRUE;
2437 	      continue;
2438 	    }
2439 	}
2440 
2441       while (*plabel < label_end && **plabel < i + 2)
2442 	++*plabel;
2443 
2444       if (i + 2 < stop
2445 	  && (*plabel >= label_end || **plabel != i + 2))
2446 	{
2447 	  unsigned int next_insn;
2448 	  const struct sh_opcode *next_op;
2449 
2450 	  /* There is an instruction after the load/store
2451 	     instruction, and it does not have a label.  */
2452 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2453 	  next_op = sh_insn_info (next_insn);
2454 	  if (next_op != NULL
2455 	      && (next_op->flags & (LOAD | STORE)) == 0
2456 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2457 	    {
2458 	      bfd_boolean ok;
2459 
2460 	      /* NEXT_INSN is not itself a load/store instruction,
2461 		 and it does not conflict with INSN.  */
2462 
2463 	      ok = TRUE;
2464 
2465 	      /* If PREV_INSN is a load, and it sets a register
2466 		 which NEXT_INSN uses, then putting NEXT_INSN
2467 		 immediately after PREV_INSN will cause a pipeline
2468 		 bubble, so there is no reason to make this swap.  */
2469 	      if (prev_op != NULL
2470 		  && (prev_op->flags & LOAD) != 0
2471 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2472 		ok = FALSE;
2473 
2474 	      /* If INSN is a load, and it sets a register which
2475 		 the insn after NEXT_INSN uses, then doing the
2476 		 swap will cause a pipeline bubble, so there is no
2477 		 reason to make the swap.  However, if the insn
2478 		 after NEXT_INSN is itself a load or store
2479 		 instruction, then it is misaligned, so
2480 		 optimistically hope that it will be swapped
2481 		 itself, and just live with the pipeline bubble if
2482 		 it isn't.  */
2483 	      if (ok
2484 		  && i + 4 < stop
2485 		  && (op->flags & LOAD) != 0)
2486 		{
2487 		  unsigned int next2_insn;
2488 		  const struct sh_opcode *next2_op;
2489 
2490 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2491 		  next2_op = sh_insn_info (next2_insn);
2492 		  if (next2_op == NULL
2493 		      || ((next2_op->flags & (LOAD | STORE)) == 0
2494 			  && sh_load_use (insn, op, next2_insn, next2_op)))
2495 		    ok = FALSE;
2496 		}
2497 
2498 	      if (ok)
2499 		{
2500 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2501 		    return FALSE;
2502 		  *pswapped = TRUE;
2503 		  continue;
2504 		}
2505 	    }
2506 	}
2507     }
2508 
2509   return TRUE;
2510 }
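
/* A sketch of the effect of the loop above: if a code span starts at
   offset 0x100 with

     0x100: add #1,r2		(independent arithmetic)
     0x102: mov.l @r3,r1	(load, not on a four byte boundary)

   and no label falls on 0x102, the two instructions are swapped so that
   the load ends up four byte aligned at 0x100.  */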
2511 #endif /* not COFF_IMAGE_WITH_PE */
2512 
2513 /* Swap two SH instructions.  */
2514 
2515 static bfd_boolean
2516 sh_swap_insns (bfd *      abfd,
2517 	       asection * sec,
2518 	       void *     relocs,
2519 	       bfd_byte * contents,
2520 	       bfd_vma    addr)
2521 {
2522   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2523   unsigned short i1, i2;
2524   struct internal_reloc *irel, *irelend;
2525 
2526   /* Swap the instructions themselves.  */
2527   i1 = bfd_get_16 (abfd, contents + addr);
2528   i2 = bfd_get_16 (abfd, contents + addr + 2);
2529   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2530   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2531 
2532   /* Adjust all reloc addresses.  */
2533   irelend = internal_relocs + sec->reloc_count;
2534   for (irel = internal_relocs; irel < irelend; irel++)
2535     {
2536       int type, add;
2537 
2538       /* There are a few special types of relocs that we don't want to
2539          adjust.  These relocs do not apply to the instruction itself,
2540          but are only associated with the address.  */
2541       type = irel->r_type;
2542       if (type == R_SH_ALIGN
2543 	  || type == R_SH_CODE
2544 	  || type == R_SH_DATA
2545 	  || type == R_SH_LABEL)
2546 	continue;
2547 
2548       /* If an R_SH_USES reloc points to one of the addresses being
2549          swapped, we must adjust it.  It would be incorrect to do this
2550          for a jump, though, since we want to execute both
2551          instructions after the jump.  (We have avoided swapping
2552          around a label, so the jump will not wind up executing an
2553          instruction it shouldn't).  */
2554       if (type == R_SH_USES)
2555 	{
2556 	  bfd_vma off;
2557 
2558 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2559 	  if (off == addr)
2560 	    irel->r_offset += 2;
2561 	  else if (off == addr + 2)
2562 	    irel->r_offset -= 2;
2563 	}
2564 
2565       if (irel->r_vaddr - sec->vma == addr)
2566 	{
2567 	  irel->r_vaddr += 2;
2568 	  add = -2;
2569 	}
2570       else if (irel->r_vaddr - sec->vma == addr + 2)
2571 	{
2572 	  irel->r_vaddr -= 2;
2573 	  add = 2;
2574 	}
2575       else
2576 	add = 0;
2577 
2578       if (add != 0)
2579 	{
2580 	  bfd_byte *loc;
2581 	  unsigned short insn, oinsn;
2582 	  bfd_boolean overflow;
2583 
2584 	  loc = contents + irel->r_vaddr - sec->vma;
2585 	  overflow = FALSE;
2586 	  switch (type)
2587 	    {
2588 	    default:
2589 	      break;
2590 
2591 	    case R_SH_PCDISP8BY2:
2592 	    case R_SH_PCRELIMM8BY2:
2593 	      insn = bfd_get_16 (abfd, loc);
2594 	      oinsn = insn;
2595 	      insn += add / 2;
2596 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2597 		overflow = TRUE;
2598 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2599 	      break;
2600 
2601 	    case R_SH_PCDISP:
2602 	      insn = bfd_get_16 (abfd, loc);
2603 	      oinsn = insn;
2604 	      insn += add / 2;
2605 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2606 		overflow = TRUE;
2607 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2608 	      break;
2609 
2610 	    case R_SH_PCRELIMM8BY4:
2611 	      /* This reloc ignores the least significant two bits of
2612                  the program counter before adding in the offset.
2613                  This means that if ADDR is on a four byte boundary,
2614                  the swap will not affect the offset.  If ADDR is not
2615                  on a four byte boundary, then the instruction will be
2616                  crossing a four byte boundary, and must be adjusted.  */
2617 	      if ((addr & 3) != 0)
2618 		{
2619 		  insn = bfd_get_16 (abfd, loc);
2620 		  oinsn = insn;
2621 		  insn += add / 2;
2622 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2623 		    overflow = TRUE;
2624 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2625 		}
2626 
2627 	      break;
2628 	    }
2629 
2630 	  if (overflow)
2631 	    {
2632 	      ((*_bfd_error_handler)
2633 	       ("%B: 0x%lx: fatal: reloc overflow while relaxing",
2634 		abfd, (unsigned long) irel->r_vaddr));
2635 	      bfd_set_error (bfd_error_bad_value);
2636 	      return FALSE;
2637 	    }
2638 	}
2639     }
2640 
2641   return TRUE;
2642 }
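
/* A sketch of the reloc adjustment above: when the instructions at ADDR
   and ADDR + 2 trade places, a reloc whose r_vaddr pointed at ADDR now
   points at ADDR + 2 (and vice versa), and a PC-relative branch that
   moved forward by two bytes has its displacement field decremented by
   one two-byte unit (ADD / 2 == -1) so that it still reaches the same
   target.  */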
2643 
2644 /* Look for loads and stores which we can align to four byte
2645    boundaries.  See the longer comment above sh_relax_section for why
2646    this is desirable.  This sets *PSWAPPED if some instruction was
2647    swapped.  */
2648 
2649 static bfd_boolean
2650 sh_align_loads (bfd *abfd,
2651 		asection *sec,
2652 		struct internal_reloc *internal_relocs,
2653 		bfd_byte *contents,
2654 		bfd_boolean *pswapped)
2655 {
2656   struct internal_reloc *irel, *irelend;
2657   bfd_vma *labels = NULL;
2658   bfd_vma *label, *label_end;
2659   bfd_size_type amt;
2660 
2661   *pswapped = FALSE;
2662 
2663   irelend = internal_relocs + sec->reloc_count;
2664 
2665   /* Get all the addresses with labels on them.  */
2666   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2667   labels = (bfd_vma *) bfd_malloc (amt);
2668   if (labels == NULL)
2669     goto error_return;
2670   label_end = labels;
2671   for (irel = internal_relocs; irel < irelend; irel++)
2672     {
2673       if (irel->r_type == R_SH_LABEL)
2674 	{
2675 	  *label_end = irel->r_vaddr - sec->vma;
2676 	  ++label_end;
2677 	}
2678     }
2679 
2680   /* Note that the assembler currently always outputs relocs in
2681      address order.  If that ever changes, this code will need to sort
2682      the label values and the relocs.  */
2683 
2684   label = labels;
2685 
2686   for (irel = internal_relocs; irel < irelend; irel++)
2687     {
2688       bfd_vma start, stop;
2689 
2690       if (irel->r_type != R_SH_CODE)
2691 	continue;
2692 
2693       start = irel->r_vaddr - sec->vma;
2694 
2695       for (irel++; irel < irelend; irel++)
2696 	if (irel->r_type == R_SH_DATA)
2697 	  break;
2698       if (irel < irelend)
2699 	stop = irel->r_vaddr - sec->vma;
2700       else
2701 	stop = sec->size;
2702 
2703       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2704 				     internal_relocs, &label,
2705 				     label_end, start, stop, pswapped))
2706 	goto error_return;
2707     }
2708 
2709   free (labels);
2710 
2711   return TRUE;
2712 
2713  error_return:
2714   if (labels != NULL)
2715     free (labels);
2716   return FALSE;
2717 }
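
/* A sketch of how the spans above are derived from the relocs: with an
   R_SH_CODE reloc at offset 0x0, an R_SH_DATA reloc at 0x40 and
   R_SH_LABEL relocs at 0x10 and 0x20, the labels array holds { 0x10,
   0x20 } and _bfd_sh_align_load_span is called on the range from 0x0 to
   0x40, declining to swap at the two labelled addresses.  */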
2718 
2719 /* This is a modification of _bfd_coff_generic_relocate_section, which
2720    will handle SH relaxing.  */
2721 
2722 static bfd_boolean
2723 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2724 		     struct bfd_link_info *info,
2725 		     bfd *input_bfd,
2726 		     asection *input_section,
2727 		     bfd_byte *contents,
2728 		     struct internal_reloc *relocs,
2729 		     struct internal_syment *syms,
2730 		     asection **sections)
2731 {
2732   struct internal_reloc *rel;
2733   struct internal_reloc *relend;
2734 
2735   rel = relocs;
2736   relend = rel + input_section->reloc_count;
2737   for (; rel < relend; rel++)
2738     {
2739       long symndx;
2740       struct coff_link_hash_entry *h;
2741       struct internal_syment *sym;
2742       bfd_vma addend;
2743       bfd_vma val;
2744       reloc_howto_type *howto;
2745       bfd_reloc_status_type rstat;
2746 
2747       /* Almost all relocs have to do with relaxing.  If any work must
2748          be done for them, it has been done in sh_relax_section.  */
2749       if (rel->r_type != R_SH_IMM32
2750 #ifdef COFF_WITH_PE
2751 	  && rel->r_type != R_SH_IMM32CE
2752 	  && rel->r_type != R_SH_IMAGEBASE
2753 #endif
2754 	  && rel->r_type != R_SH_PCDISP)
2755 	continue;
2756 
2757       symndx = rel->r_symndx;
2758 
2759       if (symndx == -1)
2760 	{
2761 	  h = NULL;
2762 	  sym = NULL;
2763 	}
2764       else
2765 	{
2766 	  if (symndx < 0
2767 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2768 	    {
2769 	      (*_bfd_error_handler)
2770 		("%B: illegal symbol index %ld in relocs",
2771 		 input_bfd, symndx);
2772 	      bfd_set_error (bfd_error_bad_value);
2773 	      return FALSE;
2774 	    }
2775 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2776 	  sym = syms + symndx;
2777 	}
2778 
2779       if (sym != NULL && sym->n_scnum != 0)
2780 	addend = - sym->n_value;
2781       else
2782 	addend = 0;
2783 
2784       if (rel->r_type == R_SH_PCDISP)
2785 	addend -= 4;
2786 
2787       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2788 	howto = NULL;
2789       else
2790 	howto = &sh_coff_howtos[rel->r_type];
2791 
2792       if (howto == NULL)
2793 	{
2794 	  bfd_set_error (bfd_error_bad_value);
2795 	  return FALSE;
2796 	}
2797 
2798 #ifdef COFF_WITH_PE
2799       if (rel->r_type == R_SH_IMAGEBASE)
2800 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2801 #endif
2802 
2803       val = 0;
2804 
2805       if (h == NULL)
2806 	{
2807 	  asection *sec;
2808 
2809 	  /* There is nothing to do for an internal PCDISP reloc.  */
2810 	  if (rel->r_type == R_SH_PCDISP)
2811 	    continue;
2812 
2813 	  if (symndx == -1)
2814 	    {
2815 	      sec = bfd_abs_section_ptr;
2816 	      val = 0;
2817 	    }
2818 	  else
2819 	    {
2820 	      sec = sections[symndx];
2821               val = (sec->output_section->vma
2822 		     + sec->output_offset
2823 		     + sym->n_value
2824 		     - sec->vma);
2825 	    }
2826 	}
2827       else
2828 	{
2829 	  if (h->root.type == bfd_link_hash_defined
2830 	      || h->root.type == bfd_link_hash_defweak)
2831 	    {
2832 	      asection *sec;
2833 
2834 	      sec = h->root.u.def.section;
2835 	      val = (h->root.u.def.value
2836 		     + sec->output_section->vma
2837 		     + sec->output_offset);
2838 	    }
2839 	  else if (! bfd_link_relocatable (info))
2840 	    (*info->callbacks->undefined_symbol)
2841 	      (info, h->root.root.string, input_bfd, input_section,
2842 	       rel->r_vaddr - input_section->vma, TRUE);
2843 	}
2844 
2845       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2846 					contents,
2847 					rel->r_vaddr - input_section->vma,
2848 					val, addend);
2849 
2850       switch (rstat)
2851 	{
2852 	default:
2853 	  abort ();
2854 	case bfd_reloc_ok:
2855 	  break;
2856 	case bfd_reloc_overflow:
2857 	  {
2858 	    const char *name;
2859 	    char buf[SYMNMLEN + 1];
2860 
2861 	    if (symndx == -1)
2862 	      name = "*ABS*";
2863 	    else if (h != NULL)
2864 	      name = NULL;
2865 	    else if (sym->_n._n_n._n_zeroes == 0
2866 		     && sym->_n._n_n._n_offset != 0)
2867 	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2868 	    else
2869 	      {
2870  		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2871 		buf[SYMNMLEN] = '\0';
2872 		name = buf;
2873 	      }
2874 
2875 	    (*info->callbacks->reloc_overflow)
2876 	      (info, (h ? &h->root : NULL), name, howto->name,
2877 	       (bfd_vma) 0, input_bfd, input_section,
2878 	       rel->r_vaddr - input_section->vma);
2879 	  }
2880 	}
2881     }
2882 
2883   return TRUE;
2884 }
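
/* A note on the addend handling above, by way of example: for
   R_SH_PCDISP the extra "addend -= 4" reflects the SH convention that a
   branch displacement is taken relative to the address of the branch
   plus four, so the generic _bfd_final_link_relocate call ends up
   computing the value relative to the instruction address plus four.  */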
2885 
2886 /* This is a version of bfd_generic_get_relocated_section_contents
2887    which uses sh_relocate_section.  */
2888 
2889 static bfd_byte *
2890 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2891 					struct bfd_link_info *link_info,
2892 					struct bfd_link_order *link_order,
2893 					bfd_byte *data,
2894 					bfd_boolean relocatable,
2895 					asymbol **symbols)
2896 {
2897   asection *input_section = link_order->u.indirect.section;
2898   bfd *input_bfd = input_section->owner;
2899   asection **sections = NULL;
2900   struct internal_reloc *internal_relocs = NULL;
2901   struct internal_syment *internal_syms = NULL;
2902 
2903   /* We only need to handle the case of relaxing, or of having a
2904      particular set of section contents, specially.  */
2905   if (relocatable
2906       || coff_section_data (input_bfd, input_section) == NULL
2907       || coff_section_data (input_bfd, input_section)->contents == NULL)
2908     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2909 						       link_order, data,
2910 						       relocatable,
2911 						       symbols);
2912 
2913   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2914 	  (size_t) input_section->size);
2915 
2916   if ((input_section->flags & SEC_RELOC) != 0
2917       && input_section->reloc_count > 0)
2918     {
2919       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2920       bfd_byte *esym, *esymend;
2921       struct internal_syment *isymp;
2922       asection **secpp;
2923       bfd_size_type amt;
2924 
2925       if (! _bfd_coff_get_external_symbols (input_bfd))
2926 	goto error_return;
2927 
2928       internal_relocs = (_bfd_coff_read_internal_relocs
2929 			 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2930 			  FALSE, (struct internal_reloc *) NULL));
2931       if (internal_relocs == NULL)
2932 	goto error_return;
2933 
2934       amt = obj_raw_syment_count (input_bfd);
2935       amt *= sizeof (struct internal_syment);
2936       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2937       if (internal_syms == NULL)
2938 	goto error_return;
2939 
2940       amt = obj_raw_syment_count (input_bfd);
2941       amt *= sizeof (asection *);
2942       sections = (asection **) bfd_malloc (amt);
2943       if (sections == NULL)
2944 	goto error_return;
2945 
2946       isymp = internal_syms;
2947       secpp = sections;
2948       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2949       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2950       while (esym < esymend)
2951 	{
2952 	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2953 
2954 	  if (isymp->n_scnum != 0)
2955 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2956 	  else
2957 	    {
2958 	      if (isymp->n_value == 0)
2959 		*secpp = bfd_und_section_ptr;
2960 	      else
2961 		*secpp = bfd_com_section_ptr;
2962 	    }
2963 
2964 	  esym += (isymp->n_numaux + 1) * symesz;
2965 	  secpp += isymp->n_numaux + 1;
2966 	  isymp += isymp->n_numaux + 1;
2967 	}
2968 
2969       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2970 				 input_section, data, internal_relocs,
2971 				 internal_syms, sections))
2972 	goto error_return;
2973 
2974       free (sections);
2975       sections = NULL;
2976       free (internal_syms);
2977       internal_syms = NULL;
2978       free (internal_relocs);
2979       internal_relocs = NULL;
2980     }
2981 
2982   return data;
2983 
2984  error_return:
2985   if (internal_relocs != NULL)
2986     free (internal_relocs);
2987   if (internal_syms != NULL)
2988     free (internal_syms);
2989   if (sections != NULL)
2990     free (sections);
2991   return NULL;
2992 }
2993 
2994 /* The target vectors.  */
2995 
2996 #ifndef TARGET_SHL_SYM
2997 CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
2998 #endif
2999 
3000 #ifdef TARGET_SHL_SYM
3001 #define TARGET_SYM TARGET_SHL_SYM
3002 #else
3003 #define TARGET_SYM sh_coff_le_vec
3004 #endif
3005 
3006 #ifndef TARGET_SHL_NAME
3007 #define TARGET_SHL_NAME "coff-shl"
3008 #endif
3009 
3010 #ifdef COFF_WITH_PE
3011 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3012 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3013 #else
3014 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3015 			       0, '_', NULL, COFF_SWAP_TABLE)
3016 #endif
3017 
3018 #ifndef TARGET_SHL_SYM
3019 
3020 /* Some people want versions of the SH COFF target which do not align
3021    to 16 byte boundaries.  We implement that by adding a couple of new
3022    target vectors.  These are just like the ones above, but they
3023    change the default section alignment.  To generate them in the
3024    assembler, use -small.  To use them in the linker, use -b
3025    coff-sh{l}-small and -oformat coff-sh{l}-small.
3026 
3027    Yes, this is a horrible hack.  A general solution for setting
3028    section alignment in COFF is rather complex.  ELF handles this
3029    correctly.  */
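
/* For example (the tool names here are only illustrative; the flags are
   the ones described above):

     as -small foo.s -o foo.o
     ld -b coff-sh-small -oformat coff-sh-small foo.o -o foo.out  */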
3030 
3031 /* Only recognize the small versions if the target was not defaulted.
3032    Otherwise we won't recognize the non-default endianness.  */
3033 
3034 static const bfd_target *
3035 coff_small_object_p (bfd *abfd)
3036 {
3037   if (abfd->target_defaulted)
3038     {
3039       bfd_set_error (bfd_error_wrong_format);
3040       return NULL;
3041     }
3042   return coff_object_p (abfd);
3043 }
3044 
3045 /* Set the section alignment for the small versions.  */
3046 
3047 static bfd_boolean
3048 coff_small_new_section_hook (bfd *abfd, asection *section)
3049 {
3050   if (! coff_new_section_hook (abfd, section))
3051     return FALSE;
3052 
3053   /* We must align to at least a four byte boundary, because longword
3054      accesses must be on a four byte boundary.  */
3055   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3056     section->alignment_power = 2;
3057 
3058   return TRUE;
3059 }
3060 
3061 /* This is copied from bfd_coff_std_swap_table so that we can change
3062    the default section alignment power.  */
3063 
3064 static bfd_coff_backend_data bfd_coff_small_swap_table =
3065 {
3066   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3067   coff_swap_aux_out, coff_swap_sym_out,
3068   coff_swap_lineno_out, coff_swap_reloc_out,
3069   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3070   coff_swap_scnhdr_out,
3071   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3072 #ifdef COFF_LONG_FILENAMES
3073   TRUE,
3074 #else
3075   FALSE,
3076 #endif
3077   COFF_DEFAULT_LONG_SECTION_NAMES,
3078   2,
3079 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3080   TRUE,
3081 #else
3082   FALSE,
3083 #endif
3084 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3085   4,
3086 #else
3087   2,
3088 #endif
3089   32768,
3090   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3091   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3092   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3093   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3094   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3095   coff_classify_symbol, coff_compute_section_file_positions,
3096   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3097   coff_adjust_symndx, coff_link_add_one_symbol,
3098   coff_link_output_has_begun, coff_final_link_postscript,
3099   bfd_pe_print_pdata
3100 };
3101 
3102 #define coff_small_close_and_cleanup \
3103   coff_close_and_cleanup
3104 #define coff_small_bfd_free_cached_info \
3105   coff_bfd_free_cached_info
3106 #define coff_small_get_section_contents \
3107   coff_get_section_contents
3108 #define coff_small_get_section_contents_in_window \
3109   coff_get_section_contents_in_window
3110 
3111 extern const bfd_target sh_coff_small_le_vec;
3112 
3113 const bfd_target sh_coff_small_vec =
3114 {
3115   "coff-sh-small",		/* name */
3116   bfd_target_coff_flavour,
3117   BFD_ENDIAN_BIG,		/* data byte order is big */
3118   BFD_ENDIAN_BIG,		/* header byte order is big */
3119 
3120   (HAS_RELOC | EXEC_P |		/* object flags */
3121    HAS_LINENO | HAS_DEBUG |
3122    HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3123 
3124   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3125   '_',				/* leading symbol underscore */
3126   '/',				/* ar_pad_char */
3127   15,				/* ar_max_namelen */
3128   0,				/* match priority.  */
3129   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3130   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3131   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3132   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3133   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3134   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3135 
3136   {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3137      bfd_generic_archive_p, _bfd_dummy_target},
3138   {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3139      bfd_false},
3140   {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3141      _bfd_write_archive_contents, bfd_false},
3142 
3143   BFD_JUMP_TABLE_GENERIC (coff_small),
3144   BFD_JUMP_TABLE_COPY (coff),
3145   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3146   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3147   BFD_JUMP_TABLE_SYMBOLS (coff),
3148   BFD_JUMP_TABLE_RELOCS (coff),
3149   BFD_JUMP_TABLE_WRITE (coff),
3150   BFD_JUMP_TABLE_LINK (coff),
3151   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3152 
3153   & sh_coff_small_le_vec,
3154 
3155   & bfd_coff_small_swap_table
3156 };
3157 
3158 const bfd_target sh_coff_small_le_vec =
3159 {
3160   "coff-shl-small",		/* name */
3161   bfd_target_coff_flavour,
3162   BFD_ENDIAN_LITTLE,		/* data byte order is little */
3163   BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3164 
3165   (HAS_RELOC | EXEC_P |		/* object flags */
3166    HAS_LINENO | HAS_DEBUG |
3167    HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3168 
3169   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3170   '_',				/* leading symbol underscore */
3171   '/',				/* ar_pad_char */
3172   15,				/* ar_max_namelen */
3173   0,				/* match priority.  */
3174   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3175   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3176   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3177   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3178   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3179   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3180 
3181   {_bfd_dummy_target, coff_small_object_p, /* bfd_check_format */
3182      bfd_generic_archive_p, _bfd_dummy_target},
3183   {bfd_false, coff_mkobject, _bfd_generic_mkarchive, /* bfd_set_format */
3184      bfd_false},
3185   {bfd_false, coff_write_object_contents, /* bfd_write_contents */
3186      _bfd_write_archive_contents, bfd_false},
3187 
3188   BFD_JUMP_TABLE_GENERIC (coff_small),
3189   BFD_JUMP_TABLE_COPY (coff),
3190   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3191   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3192   BFD_JUMP_TABLE_SYMBOLS (coff),
3193   BFD_JUMP_TABLE_RELOCS (coff),
3194   BFD_JUMP_TABLE_WRITE (coff),
3195   BFD_JUMP_TABLE_LINK (coff),
3196   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3197 
3198   & sh_coff_small_vec,
3199 
3200   & bfd_coff_small_swap_table
3201 };
3202 #endif
3203