/* BFD back-end for Renesas Super-H COFF binaries.
   Copyright (C) 1993-2021 Free Software Foundation, Inc.
   Contributed by Cygnus Support.
   Written by Steve Chamberlain, <sac@cygnus.com>.
   Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "bfdlink.h"
#include "coff/sh.h"
#include "coff/internal.h"

#undef  bfd_pe_print_pdata

#ifdef COFF_WITH_PE
#include "coff/pe.h"

#ifndef COFF_IMAGE_WITH_PE
static bool sh_align_load_span
  (bfd *, asection *, bfd_byte *,
   bool (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
   void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bool *);

#define _bfd_sh_align_load_span sh_align_load_span
#endif

#define	bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata

#else

#define	bfd_pe_print_pdata   NULL

#endif /* COFF_WITH_PE.  */

#include "libcoff.h"

/* Internal functions.  */

#ifdef COFF_WITH_PE
/* Can't build import tables with 2**4 alignment.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
#else
/* Default section alignment to 2**4.  */
#define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
#endif

#ifdef COFF_IMAGE_WITH_PE
/* Align PE executables.  */
#define COFF_PAGE_SIZE 0x1000
#endif

/* Generate long file names.  */
#define COFF_LONG_FILENAMES

#ifdef COFF_WITH_PE
/* Return TRUE if this relocation should
   appear in the output .reloc section.  */

static bool
in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
	    reloc_howto_type * howto)
{
  return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
}
#endif

static bfd_reloc_status_type
sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
static bool
sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
		     bfd_byte *, struct internal_reloc *,
		     struct internal_syment *, asection **);
static bool
sh_align_loads (bfd *, asection *, struct internal_reloc *,
		bfd_byte *, bool *);

/* The supported relocations.  There are a lot of relocations defined
   in coff/internal.h which we do not expect to ever see.  */
static reloc_howto_type sh_coff_howtos[] =
{
  EMPTY_HOWTO (0),
  EMPTY_HOWTO (1),
#ifdef COFF_WITH_PE
  /* Windows CE */
  HOWTO (R_SH_IMM32CE,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm32ce",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
#else
  EMPTY_HOWTO (2),
#endif
  EMPTY_HOWTO (3), /* R_SH_PCREL8 */
  EMPTY_HOWTO (4), /* R_SH_PCREL16 */
  EMPTY_HOWTO (5), /* R_SH_HIGH8 */
  EMPTY_HOWTO (6), /* R_SH_IMM24 */
  EMPTY_HOWTO (7), /* R_SH_LOW16 */
  EMPTY_HOWTO (8),
  EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */

  HOWTO (R_SH_PCDISP8BY2,	/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcdisp8by2",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  EMPTY_HOWTO (11), /* R_SH_PCDISP8 */

  HOWTO (R_SH_PCDISP,		/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcdisp12by2",	/* name */
	 true,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 true),			/* pcrel_offset */

  EMPTY_HOWTO (13),

  HOWTO (R_SH_IMM32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  EMPTY_HOWTO (15),
#ifdef COFF_WITH_PE
  HOWTO (R_SH_IMAGEBASE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "rva32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
#else
  EMPTY_HOWTO (16), /* R_SH_IMM8 */
#endif
  EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
  EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
  EMPTY_HOWTO (19), /* R_SH_IMM4 */
  EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
  EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */

  HOWTO (R_SH_PCRELIMM8BY2,	/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcrelimm8by2",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (R_SH_PCRELIMM8BY4,	/* type */
	 2,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcrelimm8by4",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  HOWTO (R_SH_IMM16,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm16",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_SWITCH16,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch16",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_SWITCH32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_USES,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_uses",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_COUNT,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_count",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_ALIGN,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_align",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_CODE,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_code",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_DATA,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_data",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_LABEL,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_label",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_SH_SWITCH8,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch8",		/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 false)			/* pcrel_offset */
};

#define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])

/* Check for a bad magic number.  */
#define BADMAG(x) SHBADMAG(x)

/* Customize coffcode.h (this is not currently used).  */
#define SH 1

/* FIXME: This should not be set here.  */
#define __A_MAGIC_SET__

#ifndef COFF_WITH_PE
/* Swap the r_offset field in and out.  */
#define SWAP_IN_RELOC_OFFSET  H_GET_32
#define SWAP_OUT_RELOC_OFFSET H_PUT_32

/* Swap out extra information in the reloc structure.  */
#define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
  do						\
    {						\
      dst->r_stuff[0] = 'S';			\
      dst->r_stuff[1] = 'C';			\
    }						\
  while (0)
#endif

/* Get the value of a symbol, when performing a relocation.  */

static long
get_symbol_value (asymbol *symbol)
{
  bfd_vma relocation;

  if (bfd_is_com_section (symbol->section))
    relocation = 0;
  else
    relocation = (symbol->value +
		  symbol->section->output_section->vma +
		  symbol->section->output_offset);

  return relocation;
}

#ifdef COFF_WITH_PE
/* Convert an rtype to howto for the COFF backend linker.
   Copied from coff-i386.  */
#define coff_rtype_to_howto coff_sh_rtype_to_howto


static reloc_howto_type *
coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
			asection * sec,
			struct internal_reloc * rel,
			struct coff_link_hash_entry * h,
			struct internal_syment * sym,
			bfd_vma * addendp)
{
  reloc_howto_type * howto;

  howto = sh_coff_howtos + rel->r_type;

  *addendp = 0;

  if (howto->pc_relative)
    *addendp += sec->vma;

  if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
    {
      /* This is a common symbol.  The section contents include the
	 size (sym->n_value) as an addend.  The relocate_section
	 function will be adding in the final value of the symbol.  We
	 need to subtract out the current size in order to get the
	 correct result.  */
      BFD_ASSERT (h != NULL);
    }

  if (howto->pc_relative)
    {
      *addendp -= 4;

      /* If the symbol is defined, then the generic code is going to
	 add back the symbol value in order to cancel out an
	 adjustment it made to the addend.  However, we set the addend
	 to 0 at the start of this function.  We need to adjust here,
	 to avoid the adjustment the generic code will make.  FIXME:
	 This is getting a bit hackish.  */
      if (sym != NULL && sym->n_scnum != 0)
	*addendp -= sym->n_value;
    }

  if (rel->r_type == R_SH_IMAGEBASE)
    *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;

  return howto;
}

#endif /* COFF_WITH_PE */

/* This structure is used to map BFD reloc codes to SH PE relocs.  */
struct shcoff_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned char shcoff_reloc_val;
};

#ifdef COFF_WITH_PE
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32CE },
  { BFD_RELOC_RVA, R_SH_IMAGEBASE },
  { BFD_RELOC_CTOR, R_SH_IMM32CE },
};
#else
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32 },
  { BFD_RELOC_CTOR, R_SH_IMM32 },
};
#endif

/* Given a BFD reloc code, return the howto structure for the
   corresponding SH PE reloc.  */
#define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
#define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup

static reloc_howto_type *
sh_coff_reloc_type_lookup (bfd *abfd,
			   bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = ARRAY_SIZE (sh_reloc_map); i--;)
    if (sh_reloc_map[i].bfd_reloc_val == code)
      return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];

  _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
		      abfd, (unsigned int) code);
  return NULL;
}

static reloc_howto_type *
sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
    if (sh_coff_howtos[i].name != NULL
	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
      return &sh_coff_howtos[i];

  return NULL;
}

/* This macro is used in coffcode.h to get the howto corresponding to
   an internal reloc.  */

#define RTYPE2HOWTO(relent, internal)		\
  ((relent)->howto =				\
   ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
    ? &sh_coff_howtos[(internal)->r_type]	\
    : (reloc_howto_type *) NULL))

/* This is the same as the macro in coffcode.h, except that it copies
   r_offset into reloc_entry->addend for some relocs.  */
#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
  {								\
    coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
    if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
      coffsym = (obj_symbols (abfd)				\
		 + (cache_ptr->sym_ptr_ptr - symbols));		\
    else if (ptr)						\
      coffsym = coff_symbol_from (ptr);				\
    if (coffsym != (coff_symbol_type *) NULL			\
	&& coffsym->native->u.syment.n_scnum == 0)		\
      cache_ptr->addend = 0;					\
    else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
	     && ptr->section != (asection *) NULL)		\
      cache_ptr->addend = - (ptr->section->vma + ptr->value);	\
    else							\
      cache_ptr->addend = 0;					\
    if ((reloc).r_type == R_SH_SWITCH8				\
	|| (reloc).r_type == R_SH_SWITCH16			\
	|| (reloc).r_type == R_SH_SWITCH32			\
	|| (reloc).r_type == R_SH_USES				\
	|| (reloc).r_type == R_SH_COUNT				\
	|| (reloc).r_type == R_SH_ALIGN)			\
      cache_ptr->addend = (reloc).r_offset;			\
  }

/* This is the howto function for the SH relocations.  */

static bfd_reloc_status_type
sh_reloc (bfd *      abfd,
	  arelent *  reloc_entry,
	  asymbol *  symbol_in,
	  void *     data,
	  asection * input_section,
	  bfd *      output_bfd,
	  char **    error_message ATTRIBUTE_UNUSED)
{
  bfd_vma insn;
  bfd_vma sym_value;
  unsigned short r_type;
  bfd_vma addr = reloc_entry->address;
  bfd_byte *hit_data = addr + (bfd_byte *) data;

  r_type = reloc_entry->howto->type;

  if (output_bfd != NULL)
    {
      /* Partial linking--do nothing.  */
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Almost all relocs have to do with relaxing.  If any work must be
     done for them, it has been done in sh_relax_section.  */
  if (r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
      && r_type != R_SH_IMM32CE
      && r_type != R_SH_IMAGEBASE
#endif
      && (r_type != R_SH_PCDISP
	  || (symbol_in->flags & BSF_LOCAL) != 0))
    return bfd_reloc_ok;

  if (symbol_in != NULL
      && bfd_is_und_section (symbol_in->section))
    return bfd_reloc_undefined;

  if (addr > input_section->size)
    return bfd_reloc_outofrange;

  sym_value = get_symbol_value (symbol_in);

  switch (r_type)
    {
    case R_SH_IMM32:
#ifdef COFF_WITH_PE
    case R_SH_IMM32CE:
#endif
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      bfd_put_32 (abfd, insn, hit_data);
      break;
#ifdef COFF_WITH_PE
    case R_SH_IMAGEBASE:
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
      bfd_put_32 (abfd, insn, hit_data);
      break;
#endif
    case R_SH_PCDISP:
      insn = bfd_get_16 (abfd, hit_data);
      sym_value += reloc_entry->addend;
      sym_value -= (input_section->output_section->vma
		    + input_section->output_offset
		    + addr
		    + 4);
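      /* The low 12 bits of the insn hold the existing displacement as
	 a signed count of 2-byte units; the XOR/subtract below
	 sign-extends it and the shift by one converts it to a byte
	 offset before it is folded into sym_value.  */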
      sym_value += (((insn & 0xfff) ^ 0x800) - 0x800) << 1;
      insn = (insn & 0xf000) | ((sym_value >> 1) & 0xfff);
      bfd_put_16 (abfd, insn, hit_data);
      if (sym_value + 0x1000 >= 0x2000 || (sym_value & 1) != 0)
	return bfd_reloc_overflow;
      break;
    default:
      abort ();
      break;
    }

  return bfd_reloc_ok;
}

#define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match

/* We can do relaxing.  */
#define coff_bfd_relax_section sh_relax_section

/* We use the special COFF backend linker.  */
#define coff_relocate_section sh_relocate_section

/* When relaxing, we need to use special code to get the relocated
   section contents.  */
#define coff_bfd_get_relocated_section_contents \
  sh_coff_get_relocated_section_contents

#include "coffcode.h"

static bool
sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);

/* This function handles relaxing on the SH.

   Function calls on the SH look like this:

       movl  L1,r0
       ...
       jsr   @r0
       ...
     L1:
       .long function

   The compiler and assembler will cooperate to create R_SH_USES
   relocs on the jsr instructions.  The r_offset field of the
   R_SH_USES reloc is the PC relative offset to the instruction which
   loads the register (the r_offset field is computed as though it
   were a jump instruction, so the offset value is actually from four
   bytes past the instruction).  The linker can use this reloc to
   determine just which function is being called, and thus decide
   whether it is possible to replace the jsr with a bsr.

   If multiple function calls are all based on a single register load
   (i.e., the same function is called multiple times), the compiler
   guarantees that each function call will have an R_SH_USES reloc.
   Therefore, if the linker is able to convert each R_SH_USES reloc
   which refers to that address, it can safely eliminate the register
   load.

   When the assembler creates an R_SH_USES reloc, it examines it to
   determine which address is being loaded (L1 in the above example).
   It then counts the number of references to that address, and
   creates an R_SH_COUNT reloc at that address.  The r_offset field of
   the R_SH_COUNT reloc will be the number of references.  If the
   linker is able to eliminate a register load, it can use the
   R_SH_COUNT reloc to see whether it can also eliminate the function
   address.

   SH relaxing also handles another, unrelated, matter.  On the SH, if
   a load or store instruction is not aligned on a four byte boundary,
   the memory cycle interferes with the 32 bit instruction fetch,
   causing a one cycle bubble in the pipeline.  Therefore, we try to
   align load and store instructions on four byte boundaries if we
   can, by swapping them with one of the adjacent instructions.  */
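
/* As a concrete illustration of the scheme described above (the label
   and register names simply follow the example in the comment):

       mov.l  L1,r0	! the R_SH_USES on the jsr points back here
       jsr    @r0	! rewritten to "bsr function" when within range
       ...
     L1:
       .long  function	! the R_SH_COUNT here counts the references

   Each time a jsr is converted and its register load is no longer
   referenced by any other R_SH_USES, the mov.l is deleted and the
   R_SH_COUNT value is decremented; when that count reaches zero the
   .long at L1 is deleted as well.  */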

static bool
sh_relax_section (bfd *abfd,
		  asection *sec,
		  struct bfd_link_info *link_info,
		  bool *again)
{
  struct internal_reloc *internal_relocs;
  bool have_code;
  struct internal_reloc *irel, *irelend;
  bfd_byte *contents = NULL;

  *again = false;

  if (bfd_link_relocatable (link_info)
      || (sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0)
    return true;

  if (coff_section_data (abfd, sec) == NULL)
    {
      size_t amt = sizeof (struct coff_section_tdata);
      sec->used_by_bfd = bfd_zalloc (abfd, amt);
      if (sec->used_by_bfd == NULL)
	return false;
    }

  internal_relocs = (_bfd_coff_read_internal_relocs
		     (abfd, sec, link_info->keep_memory,
		      (bfd_byte *) NULL, false,
		      (struct internal_reloc *) NULL));
  if (internal_relocs == NULL)
    goto error_return;

  have_code = false;

  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma laddr, paddr, symval;
      unsigned short insn;
      struct internal_reloc *irelfn, *irelscan, *irelcount;
      struct internal_syment sym;
      bfd_signed_vma foff;

      if (irel->r_type == R_SH_CODE)
	have_code = true;

      if (irel->r_type != R_SH_USES)
	continue;

      /* Get the section contents.  */
      if (contents == NULL)
	{
	  if (coff_section_data (abfd, sec)->contents != NULL)
	    contents = coff_section_data (abfd, sec)->contents;
	  else
	    {
	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
		goto error_return;
	    }
	}

      /* The r_offset field of the R_SH_USES reloc will point us to
	 the register load.  The 4 is because the r_offset field is
	 computed as though it were a jump offset, which is measured
	 from 4 bytes after the jump instruction.  */
      laddr = irel->r_vaddr - sec->vma + 4;
      /* Careful to sign extend the 32-bit offset.  */
      laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (laddr >= sec->size)
	{
	  /* xgettext: c-format */
	  _bfd_error_handler
	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES offset"),
	     abfd, (uint64_t) irel->r_vaddr);
	  continue;
	}
      insn = bfd_get_16 (abfd, contents + laddr);

      /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
      if ((insn & 0xf000) != 0xd000)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: R_SH_USES points to unrecognized insn %#x"),
	     abfd, (uint64_t) irel->r_vaddr, insn);
	  continue;
	}

      /* Get the address from which the register is being loaded.  The
	 displacement in the mov.l instruction is quadrupled.  It is a
	 displacement from four bytes after the movl instruction, but,
	 before adding in the PC address, two least significant bits
	 of the PC are cleared.  We assume that the section is aligned
	 on a four byte boundary.  */
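      /* For example (values chosen purely for illustration): with the
	 mov.l at laddr == 0x102 and insn == 0xd305, i.e. mov.l
	 @(5*4,pc),r3, the computation below yields
	 paddr = ((0x102 + 4) & ~3) + 5 * 4 == 0x118.  */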
      paddr = insn & 0xff;
      paddr *= 4;
      paddr += (laddr + 4) &~ (bfd_vma) 3;
      if (paddr >= sec->size)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES load offset"),
	     abfd, (uint64_t) irel->r_vaddr);
	  continue;
	}

      /* Get the reloc for the address from which the register is
	 being loaded.  This reloc will tell us which function is
	 actually being called.  */
      paddr += sec->vma;
      for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
	if (irelfn->r_vaddr == paddr
#ifdef COFF_WITH_PE
	    && (irelfn->r_type == R_SH_IMM32
		|| irelfn->r_type == R_SH_IMM32CE
		|| irelfn->r_type == R_SH_IMAGEBASE)

#else
	    && irelfn->r_type == R_SH_IMM32
#endif
	    )
	  break;
      if (irelfn >= irelend)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: could not find expected reloc"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      /* Get the value of the symbol referred to by the reloc.  */
      if (! _bfd_coff_get_external_symbols (abfd))
	goto error_return;
      bfd_coff_swap_sym_in (abfd,
			    ((bfd_byte *) obj_coff_external_syms (abfd)
			     + (irelfn->r_symndx
				* bfd_coff_symesz (abfd))),
			    &sym);
      if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: symbol in unexpected section"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      if (sym.n_sclass != C_EXT)
	{
	  symval = (sym.n_value
		    - sec->vma
		    + sec->output_section->vma
		    + sec->output_offset);
	}
      else
	{
	  struct coff_link_hash_entry *h;

	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
	  BFD_ASSERT (h != NULL);
	  if (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	    {
	      /* This appears to be a reference to an undefined
		 symbol.  Just ignore it--it will be caught by the
		 regular reloc processing.  */
	      continue;
	    }

	  symval = (h->root.u.def.value
		    + h->root.u.def.section->output_section->vma
		    + h->root.u.def.section->output_offset);
	}

      symval += bfd_get_32 (abfd, contents + paddr - sec->vma);

      /* See if this function call can be shortened.  */
      foff = (symval
	      - (irel->r_vaddr
		 - sec->vma
		 + sec->output_section->vma
		 + sec->output_offset
		 + 4));
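      /* A bsr has a 12-bit signed displacement counted in 2-byte units
	 from four bytes past the branch, so the target is reachable
	 only when foff is roughly within -0x1000..0xffe bytes;
	 otherwise the call is left as an indirect jsr.  */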
      if (foff < -0x1000 || foff >= 0x1000)
	{
	  /* After all that work, we can't shorten this function call.  */
	  continue;
	}

      /* Shorten the function call.  */

      /* For simplicity of coding, we are going to modify the section
	 contents, the section relocs, and the BFD symbol table.  We
	 must tell the rest of the code not to free up this
	 information.  It would be possible to instead create a table
	 of changes which have to be made, as is done in coff-mips.c;
	 that would be more work, but would require less memory when
	 the linker is run.  */

      coff_section_data (abfd, sec)->relocs = internal_relocs;
      coff_section_data (abfd, sec)->keep_relocs = true;

      coff_section_data (abfd, sec)->contents = contents;
      coff_section_data (abfd, sec)->keep_contents = true;

      obj_coff_keep_syms (abfd) = true;

      /* Replace the jsr with a bsr.  */

      /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
	 replace the jsr with a bsr.  */
      irel->r_type = R_SH_PCDISP;
      irel->r_symndx = irelfn->r_symndx;
      if (sym.n_sclass != C_EXT)
	{
	  /* If this needs to be changed because of future relaxing,
	     it will be handled here like other internal PCDISP
	     relocs.  */
	  bfd_put_16 (abfd,
		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
		      contents + irel->r_vaddr - sec->vma);
	}
      else
	{
	  /* We can't fully resolve this yet, because the external
	     symbol value may be changed by future relaxing.  We let
	     the final link phase handle it.  */
	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
		      contents + irel->r_vaddr - sec->vma);
	}

      /* See if there is another R_SH_USES reloc referring to the same
	 register load.  */
      for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
	if (irelscan->r_type == R_SH_USES
	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
	  break;
      if (irelscan < irelend)
	{
	  /* Some other function call depends upon this register load,
	     and we have not yet converted that function call.
	     Indeed, we may never be able to convert it.  There is
	     nothing else we can do at this point.  */
	  continue;
	}

      /* Look for a R_SH_COUNT reloc on the location where the
	 function address is stored.  Do this before deleting any
	 bytes, to avoid confusion about the address.  */
      for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
	if (irelcount->r_vaddr == paddr
	    && irelcount->r_type == R_SH_COUNT)
	  break;

      /* Delete the register load.  */
      if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
	goto error_return;

      /* That will change things, so, just in case it permits some
	 other function call to come within range, we should relax
	 again.  Note that this is not required, and it may be slow.  */
      *again = true;

      /* Now check whether we got a COUNT reloc.  */
      if (irelcount >= irelend)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: could not find expected COUNT reloc"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      /* The number of uses is stored in the r_offset field.  We've
	 just deleted one.  */
      if (irelcount->r_offset == 0)
	{
	  /* xgettext: c-format */
	  _bfd_error_handler (_("%pB: %#" PRIx64 ": warning: bad count"),
			      abfd, (uint64_t) paddr);
	  continue;
	}

      --irelcount->r_offset;

      /* If there are no more uses, we can delete the address.  Reload
	 the address from irelfn, in case it was changed by the
	 previous call to sh_relax_delete_bytes.  */
      if (irelcount->r_offset == 0)
	{
	  if (! sh_relax_delete_bytes (abfd, sec,
				       irelfn->r_vaddr - sec->vma, 4))
	    goto error_return;
	}

      /* We've done all we can with that function call.  */
    }

  /* Look for load and store instructions that we can align on four
     byte boundaries.  */
  if (have_code)
    {
      bool swapped;

      /* Get the section contents.  */
      if (contents == NULL)
	{
	  if (coff_section_data (abfd, sec)->contents != NULL)
	    contents = coff_section_data (abfd, sec)->contents;
	  else
	    {
	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
		goto error_return;
	    }
	}

      if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
	goto error_return;

      if (swapped)
	{
	  coff_section_data (abfd, sec)->relocs = internal_relocs;
	  coff_section_data (abfd, sec)->keep_relocs = true;

	  coff_section_data (abfd, sec)->contents = contents;
	  coff_section_data (abfd, sec)->keep_contents = true;

	  obj_coff_keep_syms (abfd) = true;
	}
    }

  if (internal_relocs != NULL
      && internal_relocs != coff_section_data (abfd, sec)->relocs)
    {
      if (! link_info->keep_memory)
	free (internal_relocs);
      else
	coff_section_data (abfd, sec)->relocs = internal_relocs;
    }

  if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
    {
      if (! link_info->keep_memory)
	free (contents);
      else
	/* Cache the section contents for coff_link_input_bfd.  */
	coff_section_data (abfd, sec)->contents = contents;
    }

  return true;

 error_return:
  if (internal_relocs != coff_section_data (abfd, sec)->relocs)
    free (internal_relocs);
  if (contents != coff_section_data (abfd, sec)->contents)
    free (contents);
  return false;
}

/* Delete some bytes from a section while relaxing.  */

static bool
sh_relax_delete_bytes (bfd *abfd,
		       asection *sec,
		       bfd_vma addr,
		       int count)
{
  bfd_byte *contents;
  struct internal_reloc *irel, *irelend;
  struct internal_reloc *irelalign;
  bfd_vma toaddr;
  bfd_byte *esym, *esymend;
  bfd_size_type symesz;
  struct coff_link_hash_entry **sym_hash;
  asection *o;

  contents = coff_section_data (abfd, sec)->contents;

  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */

  irelalign = NULL;
  toaddr = sec->size;

  irel = coff_section_data (abfd, sec)->relocs;
  irelend = irel + sec->reloc_count;
  for (; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_ALIGN
	  && irel->r_vaddr - sec->vma > addr
	  && count < (1 << irel->r_offset))
	{
	  irelalign = irel;
	  toaddr = irel->r_vaddr - sec->vma;
	  break;
	}
    }

  /* Actually delete the bytes.  */
  memmove (contents + addr, contents + addr + count,
	   (size_t) (toaddr - addr - count));
  if (irelalign == NULL)
    sec->size -= count;
  else
    {
      int i;

#define NOP_OPCODE (0x0009)

      BFD_ASSERT ((count & 1) == 0);
      for (i = 0; i < count; i += 2)
	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
    }

  /* Adjust all the relocs.  */
  for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
    {
      bfd_vma nraddr, stop;
      bfd_vma start = 0;
      int insn = 0;
      struct internal_syment sym;
      int off, adjust, oinsn;
      bfd_signed_vma voff = 0;
      bool overflow;

      /* Get the new reloc address.  */
      nraddr = irel->r_vaddr - sec->vma;
      if ((irel->r_vaddr - sec->vma > addr
	   && irel->r_vaddr - sec->vma < toaddr)
	  || (irel->r_type == R_SH_ALIGN
	      && irel->r_vaddr - sec->vma == toaddr))
	nraddr -= count;

      /* See if this reloc was for the bytes we have deleted, in which
	 case we no longer care about it.  Don't delete relocs which
	 represent addresses, though.  */
      if (irel->r_vaddr - sec->vma >= addr
	  && irel->r_vaddr - sec->vma < addr + count
	  && irel->r_type != R_SH_ALIGN
	  && irel->r_type != R_SH_CODE
	  && irel->r_type != R_SH_DATA
	  && irel->r_type != R_SH_LABEL)
	irel->r_type = R_SH_UNUSED;

      /* If this is a PC relative reloc, see if the range it covers
	 includes the bytes we have deleted.  */
      switch (irel->r_type)
	{
	default:
	  break;

	case R_SH_PCDISP8BY2:
	case R_SH_PCDISP:
	case R_SH_PCRELIMM8BY2:
	case R_SH_PCRELIMM8BY4:
	  start = irel->r_vaddr - sec->vma;
	  insn = bfd_get_16 (abfd, contents + nraddr);
	  break;
	}

      switch (irel->r_type)
	{
	default:
	  start = stop = addr;
	  break;

	case R_SH_IMM32:
#ifdef COFF_WITH_PE
	case R_SH_IMM32CE:
	case R_SH_IMAGEBASE:
#endif
	  /* If this reloc is against a symbol defined in this
	     section, and the symbol will not be adjusted below, we
	     must check the addend to see whether it will put the value
	     in the range being adjusted, and hence must be changed.  */
	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irel->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass != C_EXT
	      && sym.n_scnum == sec->target_index
	      && ((bfd_vma) sym.n_value <= addr
		  || (bfd_vma) sym.n_value >= toaddr))
	    {
	      bfd_vma val;

	      val = bfd_get_32 (abfd, contents + nraddr);
	      val += sym.n_value;
	      if (val > addr && val < toaddr)
		bfd_put_32 (abfd, val - count, contents + nraddr);
	    }
	  start = stop = addr;
	  break;

	case R_SH_PCDISP8BY2:
	  off = insn & 0xff;
	  if (off & 0x80)
	    off -= 0x100;
	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
	  break;

	case R_SH_PCDISP:
	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irel->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass == C_EXT)
	    start = stop = addr;
	  else
	    {
	      off = insn & 0xfff;
	      if (off & 0x800)
		off -= 0x1000;
	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
	    }
	  break;

	case R_SH_PCRELIMM8BY2:
	  off = insn & 0xff;
	  stop = start + 4 + off * 2;
	  break;

	case R_SH_PCRELIMM8BY4:
	  off = insn & 0xff;
	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
	  break;

	case R_SH_SWITCH8:
	case R_SH_SWITCH16:
	case R_SH_SWITCH32:
	  /* These reloc types represent
	       .word L2-L1
	     The r_offset field holds the difference between the reloc
	     address and L1.  That is the start of the reloc, and
	     adding in the contents gives us the top.  We must adjust
	     both the r_offset field and the section contents.  */
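	  /* For instance (offsets purely illustrative): a .word at
	     section offset 0x20 describing L2-L1 with L1 at 0x10 and
	     L2 at 0x30 has r_offset == 0x10 and contents == 0x20, so
	     the code below recovers start == L1 == 0x10 and then
	     stop == L2 == 0x30.  */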

	  start = irel->r_vaddr - sec->vma;
	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);

	  if (start > addr
	      && start < toaddr
	      && (stop <= addr || stop >= toaddr))
	    irel->r_offset += count;
	  else if (stop > addr
		   && stop < toaddr
		   && (start <= addr || start >= toaddr))
	    irel->r_offset -= count;

	  start = stop;

	  if (irel->r_type == R_SH_SWITCH16)
	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
	  else if (irel->r_type == R_SH_SWITCH8)
	    voff = bfd_get_8 (abfd, contents + nraddr);
	  else
	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);

	  break;

	case R_SH_USES:
	  start = irel->r_vaddr - sec->vma;
	  stop = (bfd_vma) ((bfd_signed_vma) start
			    + (long) irel->r_offset
			    + 4);
	  break;
	}

      if (start > addr
	  && start < toaddr
	  && (stop <= addr || stop >= toaddr))
	adjust = count;
      else if (stop > addr
	       && stop < toaddr
	       && (start <= addr || start >= toaddr))
	adjust = - count;
      else
	adjust = 0;

      if (adjust != 0)
	{
	  oinsn = insn;
	  overflow = false;
	  switch (irel->r_type)
	    {
	    default:
	      abort ();
	      break;

	    case R_SH_PCDISP8BY2:
	    case R_SH_PCRELIMM8BY2:
	      insn += adjust / 2;
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_PCDISP:
	      insn += adjust / 2;
	      if ((oinsn & 0xf000) != (insn & 0xf000))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_PCRELIMM8BY4:
	      BFD_ASSERT (adjust == count || count >= 4);
	      if (count >= 4)
		insn += adjust / 4;
	      else
		{
		  if ((irel->r_vaddr & 3) == 0)
		    ++insn;
		}
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_SWITCH8:
	      voff += adjust;
	      if (voff < 0 || voff >= 0xff)
		overflow = true;
	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_SWITCH16:
	      voff += adjust;
	      if (voff < - 0x8000 || voff >= 0x8000)
		overflow = true;
	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_SWITCH32:
	      voff += adjust;
	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_USES:
	      irel->r_offset += adjust;
	      break;
	    }

	  if (overflow)
	    {
	      _bfd_error_handler
		/* xgettext: c-format */
		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
		 abfd, (uint64_t) irel->r_vaddr);
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	}

      irel->r_vaddr = nraddr + sec->vma;
    }

  /* Look through all the other sections.  If they contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
  for (o = abfd->sections; o != NULL; o = o->next)
    {
      struct internal_reloc *internal_relocs;
      struct internal_reloc *irelscan, *irelscanend;
      bfd_byte *ocontents;

      if (o == sec
	  || (o->flags & SEC_RELOC) == 0
	  || o->reloc_count == 0)
	continue;

      /* We always cache the relocs.  Perhaps, if info->keep_memory is
	 FALSE, we should free them, if we are permitted to, when we
	 leave sh_coff_relax_section.  */
      internal_relocs = (_bfd_coff_read_internal_relocs
			 (abfd, o, true, (bfd_byte *) NULL, false,
			  (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
	return false;

      ocontents = NULL;
      irelscanend = internal_relocs + o->reloc_count;
      for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
	{
	  struct internal_syment sym;

#ifdef COFF_WITH_PE
	  if (irelscan->r_type != R_SH_IMM32
	      && irelscan->r_type != R_SH_IMAGEBASE
	      && irelscan->r_type != R_SH_IMM32CE)
#else
	  if (irelscan->r_type != R_SH_IMM32)
#endif
	    continue;

	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irelscan->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass != C_EXT
	      && sym.n_scnum == sec->target_index
	      && ((bfd_vma) sym.n_value <= addr
		  || (bfd_vma) sym.n_value >= toaddr))
	    {
	      bfd_vma val;

	      if (ocontents == NULL)
		{
		  if (coff_section_data (abfd, o)->contents != NULL)
		    ocontents = coff_section_data (abfd, o)->contents;
		  else
		    {
		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
			return false;
		      /* We always cache the section contents.
			 Perhaps, if info->keep_memory is FALSE, we
			 should free them, if we are permitted to,
			 when we leave sh_coff_relax_section.  */
		      coff_section_data (abfd, o)->contents = ocontents;
		    }
		}

	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
	      val += sym.n_value;
	      if (val > addr && val < toaddr)
		bfd_put_32 (abfd, val - count,
			    ocontents + irelscan->r_vaddr - o->vma);

	      coff_section_data (abfd, o)->keep_contents = true;
	    }
	}
    }

  /* Adjusting the internal symbols will not work if something has
     already retrieved the generic symbols.  It would be possible to
     make this work by adjusting the generic symbols at the same time.
     However, this case should not arise in normal usage.  */
  if (obj_symbols (abfd) != NULL
      || obj_raw_syments (abfd) != NULL)
    {
      _bfd_error_handler
	(_("%pB: fatal: generic symbols retrieved before relaxing"), abfd);
      bfd_set_error (bfd_error_invalid_operation);
      return false;
    }

  /* Adjust all the symbols.  */
  sym_hash = obj_coff_sym_hashes (abfd);
  symesz = bfd_coff_symesz (abfd);
  esym = (bfd_byte *) obj_coff_external_syms (abfd);
  esymend = esym + obj_raw_syment_count (abfd) * symesz;
  while (esym < esymend)
    {
      struct internal_syment isym;

      bfd_coff_swap_sym_in (abfd, esym, &isym);

      if (isym.n_scnum == sec->target_index
	  && (bfd_vma) isym.n_value > addr
	  && (bfd_vma) isym.n_value < toaddr)
	{
	  isym.n_value -= count;

	  bfd_coff_swap_sym_out (abfd, &isym, esym);

	  if (*sym_hash != NULL)
	    {
	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
			  && (*sym_hash)->root.u.def.value < toaddr);
	      (*sym_hash)->root.u.def.value -= count;
	    }
	}

      esym += (isym.n_numaux + 1) * symesz;
      sym_hash += isym.n_numaux + 1;
    }

  /* See if we can move the ALIGN reloc forward.  We have adjusted
     r_vaddr for it already.  */
  if (irelalign != NULL)
    {
      bfd_vma alignto, alignaddr;

      alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
      alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
			     1 << irelalign->r_offset);
      if (alignto != alignaddr)
	{
	  /* Tail recursion.  */
	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
					(int) (alignto - alignaddr));
	}
    }

  return true;
}

/* This is yet another version of the SH opcode table, used to rapidly
   get information about a particular instruction.  */

/* The opcode map is represented by an array of these structures.  The
   array is indexed by the high order four bits in the instruction.  */

struct sh_major_opcode
{
  /* A pointer to the instruction list.  This is an array which
     contains all the instructions with this major opcode.  */
  const struct sh_minor_opcode *minor_opcodes;
  /* The number of elements in minor_opcodes.  */
  unsigned short count;
};

/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the sorted opcode list.  */

struct sh_minor_opcode
{
  /* The sorted opcode list.  */
  const struct sh_opcode *opcodes;
  /* The number of elements in opcodes.  */
  unsigned short count;
  /* The mask value to use when searching the opcode list.  */
  unsigned short mask;
};

/* This structure holds information for an SH instruction.  An array
   of these structures is sorted in order by opcode.  */

struct sh_opcode
{
  /* The code for this instruction, after it has been anded with the
     mask value in the sh_minor_opcode structure.  */
  unsigned short opcode;
  /* Flags for this instruction.  */
  unsigned long flags;
};

/* Flags which appear in the sh_opcode structure.  */

/* This instruction loads a value from memory.  */
#define LOAD (0x1)

/* This instruction stores a value to memory.  */
#define STORE (0x2)

/* This instruction is a branch.  */
#define BRANCH (0x4)

/* This instruction has a delay slot.  */
#define DELAY (0x8)

/* This instruction uses the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define USES1 (0x10)
#define USES1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define USES2 (0x20)
#define USES2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses the value in register 0.  */
#define USESR0 (0x40)

/* This instruction sets the value in the register in the field at
   mask 0x0f00 of the instruction.  */
#define SETS1 (0x80)
#define SETS1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction sets the value in the register in the field at
   mask 0x00f0 of the instruction.  */
#define SETS2 (0x100)
#define SETS2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction sets register 0.  */
#define SETSR0 (0x200)

/* This instruction sets a special register.  */
#define SETSSP (0x400)

/* This instruction uses a special register.  */
#define USESSP (0x800)

/* This instruction uses the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define USESF1 (0x1000)
#define USESF1_REG(x) ((x & 0x0f00) >> 8)

/* This instruction uses the floating point register in the field at
   mask 0x00f0 of the instruction.  */
#define USESF2 (0x2000)
#define USESF2_REG(x) ((x & 0x00f0) >> 4)

/* This instruction uses floating point register 0.  */
#define USESF0 (0x4000)

/* This instruction sets the floating point register in the field at
   mask 0x0f00 of the instruction.  */
#define SETSF1 (0x8000)
#define SETSF1_REG(x) ((x & 0x0f00) >> 8)

#define USESAS (0x10000)
#define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
#define USESR8 (0x20000)
#define SETSAS (0x40000)
#define SETSAS_REG(x) USESAS_REG (x)

#define MAP(a) a, sizeof a / sizeof a[0]

#ifndef COFF_IMAGE_WITH_PE

/* The opcode maps.  */

static const struct sh_opcode sh_opcode00[] =
{
  { 0x0008, SETSSP },			/* clrt */
  { 0x0009, 0 },			/* nop */
  { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
  { 0x0018, SETSSP },			/* sett */
  { 0x0019, SETSSP },			/* div0u */
  { 0x001b, 0 },			/* sleep */
  { 0x0028, SETSSP },			/* clrmac */
  { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
  { 0x0038, USESSP | SETSSP },		/* ldtlb */
  { 0x0048, SETSSP },			/* clrs */
  { 0x0058, SETSSP }			/* sets */
};

static const struct sh_opcode sh_opcode01[] =
{
  { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
  { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
  { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
  { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
  { 0x0029, SETS1 | USESSP },			/* movt rn */
  { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
  { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
  { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
  { 0x0083, LOAD | USES1 },			/* pref @rn */
  { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
  { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
  { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
  { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
  { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
};

static const struct sh_opcode sh_opcode02[] =
{
  { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
  { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
  { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
  { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
  { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
  { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
  { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
  { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
  { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
};

static const struct sh_minor_opcode sh_opcode0[] =
{
  { MAP (sh_opcode00), 0xffff },
  { MAP (sh_opcode01), 0xf0ff },
  { MAP (sh_opcode02), 0xf00f }
};

static const struct sh_opcode sh_opcode10[] =
{
  { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
};

static const struct sh_minor_opcode sh_opcode1[] =
{
  { MAP (sh_opcode10), 0xf000 }
};

static const struct sh_opcode sh_opcode20[] =
{
  { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
  { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
  { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
  { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
  { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
  { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
  { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
  { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
  { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
  { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
  { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
  { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
  { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
  { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
  { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
};

static const struct sh_minor_opcode sh_opcode2[] =
{
  { MAP (sh_opcode20), 0xf00f }
};

static const struct sh_opcode sh_opcode30[] =
{
  { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
  { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
  { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
  { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
  { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
  { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
  { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
  { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
  { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
  { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
  { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
  { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
  { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
  { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
};

static const struct sh_minor_opcode sh_opcode3[] =
{
  { MAP (sh_opcode30), 0xf00f }
};

static const struct sh_opcode sh_opcode40[] =
{
  { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
  { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
  { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
  { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
  { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
  { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
  { 0x4008, SETS1 | USES1 },			/* shll2 rn */
  { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
  { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
  { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
  { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
  { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
  { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
  { 0x4014, SETSSP | USES1 },			/* setrc rm */
1752   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1753   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1754   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1755   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1756   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1757   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1758   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1759   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1760   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1761   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1762   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1763   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1764   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1765   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1766   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1767   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1768   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1769   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1770   { 0x405a, SETSSP | USES1 },			/* lds.l rm,fpul */
1771   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1772   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1773   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1774   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1775   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1776   { 0x407a, SETSSP | USES1 },			/* lds.l rm,a0 */
1777   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1778   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1779   { 0x408a, SETSSP | USES1 },			/* lds.l rm,x0 */
1780   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1781   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1782   { 0x409a, SETSSP | USES1 },			/* lds.l rm,x1 */
1783   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1784   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1785   { 0x40aa, SETSSP | USES1 },			/* lds.l rm,y0 */
1786   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1787   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1788   { 0x40ba, SETSSP | USES1 }			/* lds.l rm,y1 */
1789 };
1790 
1791 static const struct sh_opcode sh_opcode41[] =
1792 {
1793   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1794   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1795   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1796   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1797   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1798   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1799 };
1800 
1801 static const struct sh_minor_opcode sh_opcode4[] =
1802 {
1803   { MAP (sh_opcode40), 0xf0ff },
1804   { MAP (sh_opcode41), 0xf00f }
1805 };
1806 
1807 static const struct sh_opcode sh_opcode50[] =
1808 {
1809   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1810 };
1811 
1812 static const struct sh_minor_opcode sh_opcode5[] =
1813 {
1814   { MAP (sh_opcode50), 0xf000 }
1815 };
1816 
1817 static const struct sh_opcode sh_opcode60[] =
1818 {
1819   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1820   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1821   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1822   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1823   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1824   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1825   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1826   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1827   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1828   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1829   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1830   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1831   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1832   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1833   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1834   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1835 };
1836 
1837 static const struct sh_minor_opcode sh_opcode6[] =
1838 {
1839   { MAP (sh_opcode60), 0xf00f }
1840 };
1841 
1842 static const struct sh_opcode sh_opcode70[] =
1843 {
1844   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1845 };
1846 
1847 static const struct sh_minor_opcode sh_opcode7[] =
1848 {
1849   { MAP (sh_opcode70), 0xf000 }
1850 };
1851 
1852 static const struct sh_opcode sh_opcode80[] =
1853 {
1854   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1855   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1856   { 0x8200, SETSSP },			/* setrc #imm */
1857   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1858   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rn),r0 */
1859   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1860   { 0x8900, BRANCH | USESSP },		/* bt label */
1861   { 0x8b00, BRANCH | USESSP },		/* bf label */
1862   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1863   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1864   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1865   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1866 };
1867 
1868 static const struct sh_minor_opcode sh_opcode8[] =
1869 {
1870   { MAP (sh_opcode80), 0xff00 }
1871 };
1872 
1873 static const struct sh_opcode sh_opcode90[] =
1874 {
1875   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1876 };
1877 
1878 static const struct sh_minor_opcode sh_opcode9[] =
1879 {
1880   { MAP (sh_opcode90), 0xf000 }
1881 };
1882 
1883 static const struct sh_opcode sh_opcodea0[] =
1884 {
1885   { 0xa000, BRANCH | DELAY }	/* bra label */
1886 };
1887 
1888 static const struct sh_minor_opcode sh_opcodea[] =
1889 {
1890   { MAP (sh_opcodea0), 0xf000 }
1891 };
1892 
1893 static const struct sh_opcode sh_opcodeb0[] =
1894 {
1895   { 0xb000, BRANCH | DELAY }	/* bsr label */
1896 };
1897 
1898 static const struct sh_minor_opcode sh_opcodeb[] =
1899 {
1900   { MAP (sh_opcodeb0), 0xf000 }
1901 };
1902 
1903 static const struct sh_opcode sh_opcodec0[] =
1904 {
1905   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1906   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1907   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1908   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1909   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1910   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1911   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1912   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1913   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1914   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1915   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1916   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1917   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1918   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1919   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1920   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1921 };
1922 
1923 static const struct sh_minor_opcode sh_opcodec[] =
1924 {
1925   { MAP (sh_opcodec0), 0xff00 }
1926 };
1927 
1928 static const struct sh_opcode sh_opcoded0[] =
1929 {
1930   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1931 };
1932 
1933 static const struct sh_minor_opcode sh_opcoded[] =
1934 {
1935   { MAP (sh_opcoded0), 0xf000 }
1936 };
1937 
1938 static const struct sh_opcode sh_opcodee0[] =
1939 {
1940   { 0xe000, SETS1 }		/* mov #imm,rn */
1941 };
1942 
1943 static const struct sh_minor_opcode sh_opcodee[] =
1944 {
1945   { MAP (sh_opcodee0), 0xf000 }
1946 };
1947 
1948 static const struct sh_opcode sh_opcodef0[] =
1949 {
1950   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1951   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1952   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1953   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1954   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1955   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1956   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1957   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1958   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1959   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1960   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1961   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1962   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1963   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1964 };
1965 
1966 static const struct sh_opcode sh_opcodef1[] =
1967 {
1968   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1969   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1970   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1971   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1972   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1973   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1974   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1975   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1976   { 0xf08d, SETSF1 },		/* fldi0 fn */
1977   { 0xf09d, SETSF1 }		/* fldi1 fn */
1978 };
1979 
1980 static const struct sh_minor_opcode sh_opcodef[] =
1981 {
1982   { MAP (sh_opcodef0), 0xf00f },
1983   { MAP (sh_opcodef1), 0xf0ff }
1984 };
1985 
1986 static struct sh_major_opcode sh_opcodes[] =
1987 {
1988   { MAP (sh_opcode0) },
1989   { MAP (sh_opcode1) },
1990   { MAP (sh_opcode2) },
1991   { MAP (sh_opcode3) },
1992   { MAP (sh_opcode4) },
1993   { MAP (sh_opcode5) },
1994   { MAP (sh_opcode6) },
1995   { MAP (sh_opcode7) },
1996   { MAP (sh_opcode8) },
1997   { MAP (sh_opcode9) },
1998   { MAP (sh_opcodea) },
1999   { MAP (sh_opcodeb) },
2000   { MAP (sh_opcodec) },
2001   { MAP (sh_opcoded) },
2002   { MAP (sh_opcodee) },
2003   { MAP (sh_opcodef) }
2004 };
2005 
2006 /* The double data transfer / parallel processing insns are not
2007    described here.  This will cause sh_align_load_span to leave them alone.  */
2008 
2009 static const struct sh_opcode sh_dsp_opcodef0[] =
2010 {
2011   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2012   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2013   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2014   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2015   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2016   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2017   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2018   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2019 };
2020 
2021 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2022 {
2023   { MAP (sh_dsp_opcodef0), 0xfc0d }
2024 };
2025 
2026 /* Given an instruction, return a pointer to the corresponding
2027    sh_opcode structure.  Return NULL if the instruction is not
2028    recognized.  */
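/* For example, for the insn 0x310c (add r0,r1) the major index is 3;
   sh_opcode3 applies the mask 0xf00f, giving 0x300c, which matches the
   "add rm,rn" entry in sh_opcode30, so the returned opcode carries
   SETS1 | USES1 | USES2.  */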
2029 
2030 static const struct sh_opcode *
2031 sh_insn_info (unsigned int insn)
2032 {
2033   const struct sh_major_opcode *maj;
2034   const struct sh_minor_opcode *min, *minend;
2035 
2036   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2037   min = maj->minor_opcodes;
2038   minend = min + maj->count;
2039   for (; min < minend; min++)
2040     {
2041       unsigned int l;
2042       const struct sh_opcode *op, *opend;
2043 
2044       l = insn & min->mask;
2045       op = min->opcodes;
2046       opend = op + min->count;
2047 
2048       /* Since the opcode tables are sorted, we could use a binary
2049 	 search here if the count were above some cutoff value.  */
2050       for (; op < opend; op++)
2051 	if (op->opcode == l)
2052 	  return op;
2053     }
2054 
2055   return NULL;
2056 }
2057 
2058 /* See whether an instruction uses a general purpose register.  */
2059 
2060 static bool
2061 sh_insn_uses_reg (unsigned int insn,
2062 		  const struct sh_opcode *op,
2063 		  unsigned int reg)
2064 {
2065   unsigned int f;
2066 
2067   f = op->flags;
2068 
2069   if ((f & USES1) != 0
2070       && USES1_REG (insn) == reg)
2071     return true;
2072   if ((f & USES2) != 0
2073       && USES2_REG (insn) == reg)
2074     return true;
2075   if ((f & USESR0) != 0
2076       && reg == 0)
2077     return true;
2078   if ((f & USESAS) && reg == USESAS_REG (insn))
2079     return true;
2080   if ((f & USESR8) && reg == 8)
2081     return true;
2082 
2083   return false;
2084 }
2085 
2086 /* See whether an instruction sets a general purpose register.  */
2087 
2088 static bool
2089 sh_insn_sets_reg (unsigned int insn,
2090 		  const struct sh_opcode *op,
2091 		  unsigned int reg)
2092 {
2093   unsigned int f;
2094 
2095   f = op->flags;
2096 
2097   if ((f & SETS1) != 0
2098       && SETS1_REG (insn) == reg)
2099     return true;
2100   if ((f & SETS2) != 0
2101       && SETS2_REG (insn) == reg)
2102     return true;
2103   if ((f & SETSR0) != 0
2104       && reg == 0)
2105     return true;
2106   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2107     return true;
2108 
2109   return false;
2110 }
2111 
2112 /* See whether an instruction uses or sets a general purpose register.  */
2113 
2114 static bool
2115 sh_insn_uses_or_sets_reg (unsigned int insn,
2116 			  const struct sh_opcode *op,
2117 			  unsigned int reg)
2118 {
2119   if (sh_insn_uses_reg (insn, op, reg))
2120     return true;
2121 
2122   return sh_insn_sets_reg (insn, op, reg);
2123 }
2124 
2125 /* See whether an instruction uses a floating point register.  */
2126 
2127 static bool
2128 sh_insn_uses_freg (unsigned int insn,
2129 		   const struct sh_opcode *op,
2130 		   unsigned int freg)
2131 {
2132   unsigned int f;
2133 
2134   f = op->flags;
2135 
2136   /* We can't tell if this is a double-precision insn, so just play safe
2137      and assume that it might be.  That means we must not only test FREG
2138      against itself, but also test an even FREG against FREG+1 - in case
2139      the using insn uses just the low part of a double precision value -
2140      and an odd FREG against FREG-1 - in case the setting insn sets just
2141      the low part of a double precision value.
2142      What this all boils down to is that we have to ignore the lowest
2143      bit of the register number.  */
2144 
2145   if ((f & USESF1) != 0
2146       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2147     return true;
2148   if ((f & USESF2) != 0
2149       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2150     return true;
2151   if ((f & USESF0) != 0
2152       && freg == 0)
2153     return true;
2154 
2155   return false;
2156 }
2157 
2158 /* See whether an instruction sets a floating point register.  */
2159 
2160 static bool
2161 sh_insn_sets_freg (unsigned int insn,
2162 		   const struct sh_opcode *op,
2163 		   unsigned int freg)
2164 {
2165   unsigned int f;
2166 
2167   f = op->flags;
2168 
2169   /* We can't tell if this is a double-precision insn, so just play safe
2170      and assume that it might be.  That means we must not only test FREG
2171      against itself, but also test an even FREG against FREG+1 - in case
2172      the using insn uses just the low part of a double precision value -
2173      and an odd FREG against FREG-1 - in case the setting insn sets just
2174      the low part of a double precision value.
2175      What this all boils down to is that we have to ignore the lowest
2176      bit of the register number.  */
2177 
2178   if ((f & SETSF1) != 0
2179       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2180     return true;
2181 
2182   return false;
2183 }
2184 
2185 /* See whether an instruction uses or sets a floating point register.  */
2186 
2187 static bool
2188 sh_insn_uses_or_sets_freg (unsigned int insn,
2189 			   const struct sh_opcode *op,
2190 			   unsigned int reg)
2191 {
2192   if (sh_insn_uses_freg (insn, op, reg))
2193     return true;
2194 
2195   return sh_insn_sets_freg (insn, op, reg);
2196 }
2197 
2198 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2199    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2200    This should return TRUE if there is a conflict, or FALSE if the
2201    instructions can be swapped safely.  */
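/* For example, 0x6212 (mov.l @r1,r2) followed by 0x332c (add r2,r3)
   conflict: the first sets r2 (SETS1) and the second uses it (USES2),
   so the pair cannot be reordered.  */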
2202 
2203 static bool
2204 sh_insns_conflict (unsigned int i1,
2205 		   const struct sh_opcode *op1,
2206 		   unsigned int i2,
2207 		   const struct sh_opcode *op2)
2208 {
2209   unsigned int f1, f2;
2210 
2211   f1 = op1->flags;
2212   f2 = op2->flags;
2213 
2214   /* Load of fpscr conflicts with floating point operations.
2215      FIXME: shouldn't test raw opcodes here.  */
2216   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2217       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2218     return true;
2219 
2220   if ((f1 & (BRANCH | DELAY)) != 0
2221       || (f2 & (BRANCH | DELAY)) != 0)
2222     return true;
2223 
2224   if (((f1 | f2) & SETSSP)
2225       && (f1 & (SETSSP | USESSP))
2226       && (f2 & (SETSSP | USESSP)))
2227     return true;
2228 
2229   if ((f1 & SETS1) != 0
2230       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2231     return true;
2232   if ((f1 & SETS2) != 0
2233       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2234     return true;
2235   if ((f1 & SETSR0) != 0
2236       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2237     return true;
2238   if ((f1 & SETSAS)
2239       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2240     return true;
2241   if ((f1 & SETSF1) != 0
2242       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2243     return true;
2244 
2245   if ((f2 & SETS1) != 0
2246       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2247     return true;
2248   if ((f2 & SETS2) != 0
2249       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2250     return true;
2251   if ((f2 & SETSR0) != 0
2252       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2253     return true;
2254   if ((f2 & SETSAS)
2255       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2256     return true;
2257   if ((f2 & SETSF1) != 0
2258       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2259     return true;
2260 
2261   /* The instructions do not conflict.  */
2262   return false;
2263 }
2264 
2265 /* I1 is a load instruction, and I2 is some other instruction.  Return
2266    TRUE if I1 loads a register which I2 uses.  */
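/* For example, sh_load_use returns TRUE for 0x6212 (mov.l @r1,r2)
   followed by 0x332c (add r2,r3): the load sets r2, which the add uses,
   so placing the two back to back would cause a pipeline bubble.  */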
2267 
2268 static bool
2269 sh_load_use (unsigned int i1,
2270 	     const struct sh_opcode *op1,
2271 	     unsigned int i2,
2272 	     const struct sh_opcode *op2)
2273 {
2274   unsigned int f1;
2275 
2276   f1 = op1->flags;
2277 
2278   if ((f1 & LOAD) == 0)
2279     return false;
2280 
2281   /* If both SETS1 and SETSSP are set, that means a load to a special
2282      register using postincrement addressing mode, which we don't care
2283      about here.  */
2284   if ((f1 & SETS1) != 0
2285       && (f1 & SETSSP) == 0
2286       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2287     return true;
2288 
2289   if ((f1 & SETSR0) != 0
2290       && sh_insn_uses_reg (i2, op2, 0))
2291     return true;
2292 
2293   if ((f1 & SETSF1) != 0
2294       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2295     return true;
2296 
2297   return false;
2298 }
2299 
2300 /* Try to align loads and stores within a span of memory.  This is
2301    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2302    the BFD and section we are examining.  CONTENTS is the contents of
2303    the section.  SWAP is the routine to call to swap two instructions.
2304    RELOCS is a pointer to the internal relocation information, to be
2305    passed to SWAP.  PLABEL is a pointer to the current label in a
2306    sorted list of labels; LABEL_END is the end of the list.  START and
2307    STOP are the range of memory to examine.  If a swap is made,
2308    *PSWAPPED is set to TRUE.  */
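/* The loop below only visits the addresses in [START, STOP) whose low
   two bits are binary 10, i.e. the halfword slots that are not on a
   four byte boundary.  When a load or store is found there, we try to
   swap it with the preceding or following non-load/store instruction so
   that it lands on a four byte boundary; see the longer comment above
   sh_relax_section for why this is worthwhile.  */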
2309 
2310 #ifdef COFF_WITH_PE
2311 static
2312 #endif
2313 bool
2314 _bfd_sh_align_load_span (bfd *abfd,
2315 			 asection *sec,
2316 			 bfd_byte *contents,
2317 			 bool (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2318 			 void * relocs,
2319 			 bfd_vma **plabel,
2320 			 bfd_vma *label_end,
2321 			 bfd_vma start,
2322 			 bfd_vma stop,
2323 			 bool *pswapped)
2324 {
2325   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2326 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2327   bfd_vma i;
2328 
2329   /* The SH4 has a Harvard architecture, hence aligning loads is not
2330      desirable.  In fact, it is counter-productive, since it interferes
2331      with the schedules generated by the compiler.  */
2332   if (abfd->arch_info->mach == bfd_mach_sh4)
2333     return true;
2334 
2335   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2336      instructions.  */
2337   if (dsp)
2338     {
2339       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2340       sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2341     }
2342 
2343   /* Instructions should be aligned on 2 byte boundaries.  */
2344   if ((start & 1) == 1)
2345     ++start;
2346 
2347   /* Now look through the unaligned addresses.  */
2348   i = start;
2349   if ((i & 2) == 0)
2350     i += 2;
2351   for (; i < stop; i += 4)
2352     {
2353       unsigned int insn;
2354       const struct sh_opcode *op;
2355       unsigned int prev_insn = 0;
2356       const struct sh_opcode *prev_op = NULL;
2357 
2358       insn = bfd_get_16 (abfd, contents + i);
2359       op = sh_insn_info (insn);
2360       if (op == NULL
2361 	  || (op->flags & (LOAD | STORE)) == 0)
2362 	continue;
2363 
2364       /* This is a load or store which is not on a four byte boundary.  */
2365 
2366       while (*plabel < label_end && **plabel < i)
2367 	++*plabel;
2368 
2369       if (i > start)
2370 	{
2371 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2372 	  /* If INSN is the field b of a parallel processing insn, it is not
2373 	     a load / store after all.  Note that the test here might mistake
2374 	     the field_b of a pcopy insn for the starting code of a parallel
2375 	     processing insn; this might miss a swapping opportunity, but at
2376 	     least we're on the safe side.  */
2377 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2378 	    continue;
2379 
2380 	  /* Check if prev_insn is actually the field b of a parallel
2381 	     processing insn.  Again, this can give a spurious match
2382 	     after a pcopy.  */
2383 	  if (dsp && i - 2 > start)
2384 	    {
2385 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2386 
2387 	      if ((pprev_insn & 0xfc00) == 0xf800)
2388 		prev_op = NULL;
2389 	      else
2390 		prev_op = sh_insn_info (prev_insn);
2391 	    }
2392 	  else
2393 	    prev_op = sh_insn_info (prev_insn);
2394 
2395 	  /* If the load/store instruction is in a delay slot, we
2396 	     can't swap.  */
2397 	  if (prev_op == NULL
2398 	      || (prev_op->flags & DELAY) != 0)
2399 	    continue;
2400 	}
2401       if (i > start
2402 	  && (*plabel >= label_end || **plabel != i)
2403 	  && prev_op != NULL
2404 	  && (prev_op->flags & (LOAD | STORE)) == 0
2405 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2406 	{
2407 	  bool ok;
2408 
2409 	  /* The load/store instruction does not have a label, and
2410 	     there is a previous instruction; PREV_INSN is not
2411 	     itself a load/store instruction, and PREV_INSN and
2412 	     INSN do not conflict.  */
2413 
2414 	  ok = true;
2415 
2416 	  if (i >= start + 4)
2417 	    {
2418 	      unsigned int prev2_insn;
2419 	      const struct sh_opcode *prev2_op;
2420 
2421 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2422 	      prev2_op = sh_insn_info (prev2_insn);
2423 
2424 	      /* If the instruction before PREV_INSN has a delay
2425 		 slot--that is, PREV_INSN is in a delay slot--we
2426 		 can not swap.  */
2427 	      if (prev2_op == NULL
2428 		  || (prev2_op->flags & DELAY) != 0)
2429 		ok = false;
2430 
2431 	      /* If the instruction before PREV_INSN is a load,
2432 		 and it sets a register which INSN uses, then
2433 		 putting INSN immediately after PREV_INSN will
2434 		 cause a pipeline bubble, so there is no point to
2435 		 making the swap.  */
2436 	      if (ok
2437 		  && (prev2_op->flags & LOAD) != 0
2438 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2439 		ok = false;
2440 	    }
2441 
2442 	  if (ok)
2443 	    {
2444 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2445 		return false;
2446 	      *pswapped = true;
2447 	      continue;
2448 	    }
2449 	}
2450 
2451       while (*plabel < label_end && **plabel < i + 2)
2452 	++*plabel;
2453 
2454       if (i + 2 < stop
2455 	  && (*plabel >= label_end || **plabel != i + 2))
2456 	{
2457 	  unsigned int next_insn;
2458 	  const struct sh_opcode *next_op;
2459 
2460 	  /* There is an instruction after the load/store
2461 	     instruction, and it does not have a label.  */
2462 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2463 	  next_op = sh_insn_info (next_insn);
2464 	  if (next_op != NULL
2465 	      && (next_op->flags & (LOAD | STORE)) == 0
2466 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2467 	    {
2468 	      bool ok;
2469 
2470 	      /* NEXT_INSN is not itself a load/store instruction,
2471 		 and it does not conflict with INSN.  */
2472 
2473 	      ok = true;
2474 
2475 	      /* If PREV_INSN is a load, and it sets a register
2476 		 which NEXT_INSN uses, then putting NEXT_INSN
2477 		 immediately after PREV_INSN will cause a pipeline
2478 		 bubble, so there is no reason to make this swap.  */
2479 	      if (prev_op != NULL
2480 		  && (prev_op->flags & LOAD) != 0
2481 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2482 		ok = false;
2483 
2484 	      /* If INSN is a load, and it sets a register which
2485 		 the insn after NEXT_INSN uses, then doing the
2486 		 swap will cause a pipeline bubble, so there is no
2487 		 reason to make the swap.  However, if the insn
2488 		 after NEXT_INSN is itself a load or store
2489 		 instruction, then it is misaligned, so
2490 		 optimistically hope that it will be swapped
2491 		 itself, and just live with the pipeline bubble if
2492 		 it isn't.  */
2493 	      if (ok
2494 		  && i + 4 < stop
2495 		  && (op->flags & LOAD) != 0)
2496 		{
2497 		  unsigned int next2_insn;
2498 		  const struct sh_opcode *next2_op;
2499 
2500 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2501 		  next2_op = sh_insn_info (next2_insn);
2502 		  if (next2_op == NULL
2503 		      || ((next2_op->flags & (LOAD | STORE)) == 0
2504 			  && sh_load_use (insn, op, next2_insn, next2_op)))
2505 		    ok = false;
2506 		}
2507 
2508 	      if (ok)
2509 		{
2510 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2511 		    return false;
2512 		  *pswapped = true;
2513 		  continue;
2514 		}
2515 	    }
2516 	}
2517     }
2518 
2519   return true;
2520 }
2521 #endif /* not COFF_IMAGE_WITH_PE */
2522 
2523 /* Swap two SH instructions.  */
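/* Besides exchanging the two halfwords, any reloc that applies to one
   of the swapped addresses must move with its instruction, and PC
   relative displacements must be corrected.  For example, when the
   instruction carrying an R_SH_PCDISP8BY2 reloc moves forward by two
   bytes, its target is now two bytes closer; the 8-bit displacement
   counts halfwords, so the field is decremented by one.  That is what
   the "insn += add / 2" adjustments below implement.  */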
2524 
2525 static bool
2526 sh_swap_insns (bfd *      abfd,
2527 	       asection * sec,
2528 	       void *     relocs,
2529 	       bfd_byte * contents,
2530 	       bfd_vma    addr)
2531 {
2532   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2533   unsigned short i1, i2;
2534   struct internal_reloc *irel, *irelend;
2535 
2536   /* Swap the instructions themselves.  */
2537   i1 = bfd_get_16 (abfd, contents + addr);
2538   i2 = bfd_get_16 (abfd, contents + addr + 2);
2539   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2540   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2541 
2542   /* Adjust all reloc addresses.  */
2543   irelend = internal_relocs + sec->reloc_count;
2544   for (irel = internal_relocs; irel < irelend; irel++)
2545     {
2546       int type, add;
2547 
2548       /* There are a few special types of relocs that we don't want to
2549 	 adjust.  These relocs do not apply to the instruction itself,
2550 	 but are only associated with the address.  */
2551       type = irel->r_type;
2552       if (type == R_SH_ALIGN
2553 	  || type == R_SH_CODE
2554 	  || type == R_SH_DATA
2555 	  || type == R_SH_LABEL)
2556 	continue;
2557 
2558       /* If an R_SH_USES reloc points to one of the addresses being
2559 	 swapped, we must adjust it.  It would be incorrect to do this
2560 	 for a jump, though, since we want to execute both
2561 	 instructions after the jump.  (We have avoided swapping
2562 	 around a label, so the jump will not wind up executing an
2563 	 instruction it shouldn't).  */
2564       if (type == R_SH_USES)
2565 	{
2566 	  bfd_vma off;
2567 
2568 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2569 	  if (off == addr)
2570 	    irel->r_offset += 2;
2571 	  else if (off == addr + 2)
2572 	    irel->r_offset -= 2;
2573 	}
2574 
2575       if (irel->r_vaddr - sec->vma == addr)
2576 	{
2577 	  irel->r_vaddr += 2;
2578 	  add = -2;
2579 	}
2580       else if (irel->r_vaddr - sec->vma == addr + 2)
2581 	{
2582 	  irel->r_vaddr -= 2;
2583 	  add = 2;
2584 	}
2585       else
2586 	add = 0;
2587 
2588       if (add != 0)
2589 	{
2590 	  bfd_byte *loc;
2591 	  unsigned short insn, oinsn;
2592 	  bool overflow;
2593 
2594 	  loc = contents + irel->r_vaddr - sec->vma;
2595 	  overflow = false;
2596 	  switch (type)
2597 	    {
2598 	    default:
2599 	      break;
2600 
2601 	    case R_SH_PCDISP8BY2:
2602 	    case R_SH_PCRELIMM8BY2:
2603 	      insn = bfd_get_16 (abfd, loc);
2604 	      oinsn = insn;
2605 	      insn += add / 2;
2606 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2607 		overflow = true;
2608 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2609 	      break;
2610 
2611 	    case R_SH_PCDISP:
2612 	      insn = bfd_get_16 (abfd, loc);
2613 	      oinsn = insn;
2614 	      insn += add / 2;
2615 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2616 		overflow = true;
2617 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2618 	      break;
2619 
2620 	    case R_SH_PCRELIMM8BY4:
2621 	      /* This reloc ignores the least significant 2 bits of
2622 		 the program counter before adding in the offset.
2623 		 This means that if ADDR is on a four byte boundary,
2624 		 the swap will not affect the offset.  If ADDR is not
2625 		 on a four byte boundary, then the instruction will be
2626 		 crossing a four byte boundary, and must be adjusted.  */
2627 	      if ((addr & 3) != 0)
2628 		{
2629 		  insn = bfd_get_16 (abfd, loc);
2630 		  oinsn = insn;
2631 		  insn += add / 2;
2632 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2633 		    overflow = true;
2634 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2635 		}
2636 
2637 	      break;
2638 	    }
2639 
2640 	  if (overflow)
2641 	    {
2642 	      _bfd_error_handler
2643 		/* xgettext: c-format */
2644 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
2645 		 abfd, (uint64_t) irel->r_vaddr);
2646 	      bfd_set_error (bfd_error_bad_value);
2647 	      return false;
2648 	    }
2649 	}
2650     }
2651 
2652   return true;
2653 }
2654 
2655 /* Look for loads and stores which we can align to four byte
2656    boundaries.  See the longer comment above sh_relax_section for why
2657    this is desirable.  This sets *PSWAPPED if some instruction was
2658    swapped.  */
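/* The R_SH_CODE and R_SH_DATA relocs emitted by the assembler bracket
   the runs of instructions in the section: each scan below starts at an
   R_SH_CODE reloc and stops at the next R_SH_DATA reloc (or the end of
   the section), while the R_SH_LABEL relocs supply the label addresses
   that instructions must not be swapped across.  */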
2659 
2660 static bool
2661 sh_align_loads (bfd *abfd,
2662 		asection *sec,
2663 		struct internal_reloc *internal_relocs,
2664 		bfd_byte *contents,
2665 		bool *pswapped)
2666 {
2667   struct internal_reloc *irel, *irelend;
2668   bfd_vma *labels = NULL;
2669   bfd_vma *label, *label_end;
2670   bfd_size_type amt;
2671 
2672   *pswapped = false;
2673 
2674   irelend = internal_relocs + sec->reloc_count;
2675 
2676   /* Get all the addresses with labels on them.  */
2677   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2678   labels = (bfd_vma *) bfd_malloc (amt);
2679   if (labels == NULL)
2680     goto error_return;
2681   label_end = labels;
2682   for (irel = internal_relocs; irel < irelend; irel++)
2683     {
2684       if (irel->r_type == R_SH_LABEL)
2685 	{
2686 	  *label_end = irel->r_vaddr - sec->vma;
2687 	  ++label_end;
2688 	}
2689     }
2690 
2691   /* Note that the assembler currently always outputs relocs in
2692      address order.  If that ever changes, this code will need to sort
2693      the label values and the relocs.  */
2694 
2695   label = labels;
2696 
2697   for (irel = internal_relocs; irel < irelend; irel++)
2698     {
2699       bfd_vma start, stop;
2700 
2701       if (irel->r_type != R_SH_CODE)
2702 	continue;
2703 
2704       start = irel->r_vaddr - sec->vma;
2705 
2706       for (irel++; irel < irelend; irel++)
2707 	if (irel->r_type == R_SH_DATA)
2708 	  break;
2709       if (irel < irelend)
2710 	stop = irel->r_vaddr - sec->vma;
2711       else
2712 	stop = sec->size;
2713 
2714       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2715 				     internal_relocs, &label,
2716 				     label_end, start, stop, pswapped))
2717 	goto error_return;
2718     }
2719 
2720   free (labels);
2721 
2722   return true;
2723 
2724  error_return:
2725   free (labels);
2726   return false;
2727 }
2728 
2729 /* This is a modification of _bfd_coff_generic_relocate_section, which
2730    will handle SH relaxing.  */
2731 
2732 static bool
2733 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2734 		     struct bfd_link_info *info,
2735 		     bfd *input_bfd,
2736 		     asection *input_section,
2737 		     bfd_byte *contents,
2738 		     struct internal_reloc *relocs,
2739 		     struct internal_syment *syms,
2740 		     asection **sections)
2741 {
2742   struct internal_reloc *rel;
2743   struct internal_reloc *relend;
2744 
2745   rel = relocs;
2746   relend = rel + input_section->reloc_count;
2747   for (; rel < relend; rel++)
2748     {
2749       long symndx;
2750       struct coff_link_hash_entry *h;
2751       struct internal_syment *sym;
2752       bfd_vma addend;
2753       bfd_vma val;
2754       reloc_howto_type *howto;
2755       bfd_reloc_status_type rstat;
2756 
2757       /* Almost all relocs have to do with relaxing.  If any work must
2758 	 be done for them, it has been done in sh_relax_section.  */
2759       if (rel->r_type != R_SH_IMM32
2760 #ifdef COFF_WITH_PE
2761 	  && rel->r_type != R_SH_IMM32CE
2762 	  && rel->r_type != R_SH_IMAGEBASE
2763 #endif
2764 	  && rel->r_type != R_SH_PCDISP)
2765 	continue;
2766 
2767       symndx = rel->r_symndx;
2768 
2769       if (symndx == -1)
2770 	{
2771 	  h = NULL;
2772 	  sym = NULL;
2773 	}
2774       else
2775 	{
2776 	  if (symndx < 0
2777 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2778 	    {
2779 	      _bfd_error_handler
2780 		/* xgettext: c-format */
2781 		(_("%pB: illegal symbol index %ld in relocs"),
2782 		 input_bfd, symndx);
2783 	      bfd_set_error (bfd_error_bad_value);
2784 	      return false;
2785 	    }
2786 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2787 	  sym = syms + symndx;
2788 	}
2789 
2790       if (sym != NULL && sym->n_scnum != 0)
2791 	addend = - sym->n_value;
2792       else
2793 	addend = 0;
2794 
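      /* A PC relative branch displacement is taken relative to the
	 address of the branch plus four, so compensate for those four
	 bytes here.  */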
2795       if (rel->r_type == R_SH_PCDISP)
2796 	addend -= 4;
2797 
2798       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2799 	howto = NULL;
2800       else
2801 	howto = &sh_coff_howtos[rel->r_type];
2802 
2803       if (howto == NULL)
2804 	{
2805 	  bfd_set_error (bfd_error_bad_value);
2806 	  return false;
2807 	}
2808 
2809 #ifdef COFF_WITH_PE
2810       if (rel->r_type == R_SH_IMAGEBASE)
2811 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2812 #endif
2813 
2814       val = 0;
2815 
2816       if (h == NULL)
2817 	{
2818 	  asection *sec;
2819 
2820 	  /* There is nothing to do for an internal PCDISP reloc.  */
2821 	  if (rel->r_type == R_SH_PCDISP)
2822 	    continue;
2823 
2824 	  if (symndx == -1)
2825 	    {
2826 	      sec = bfd_abs_section_ptr;
2827 	      val = 0;
2828 	    }
2829 	  else
2830 	    {
2831 	      sec = sections[symndx];
2832 	      val = (sec->output_section->vma
2833 		     + sec->output_offset
2834 		     + sym->n_value
2835 		     - sec->vma);
2836 	    }
2837 	}
2838       else
2839 	{
2840 	  if (h->root.type == bfd_link_hash_defined
2841 	      || h->root.type == bfd_link_hash_defweak)
2842 	    {
2843 	      asection *sec;
2844 
2845 	      sec = h->root.u.def.section;
2846 	      val = (h->root.u.def.value
2847 		     + sec->output_section->vma
2848 		     + sec->output_offset);
2849 	    }
2850 	  else if (! bfd_link_relocatable (info))
2851 	    (*info->callbacks->undefined_symbol)
2852 	      (info, h->root.root.string, input_bfd, input_section,
2853 	       rel->r_vaddr - input_section->vma, true);
2854 	}
2855 
2856       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2857 					contents,
2858 					rel->r_vaddr - input_section->vma,
2859 					val, addend);
2860 
2861       switch (rstat)
2862 	{
2863 	default:
2864 	  abort ();
2865 	case bfd_reloc_ok:
2866 	  break;
2867 	case bfd_reloc_overflow:
2868 	  {
2869 	    const char *name;
2870 	    char buf[SYMNMLEN + 1];
2871 
2872 	    if (symndx == -1)
2873 	      name = "*ABS*";
2874 	    else if (h != NULL)
2875 	      name = NULL;
2876 	    else if (sym->_n._n_n._n_zeroes == 0
2877 		     && sym->_n._n_n._n_offset != 0)
2878 	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2879 	    else
2880 	      {
2881 		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2882 		buf[SYMNMLEN] = '\0';
2883 		name = buf;
2884 	      }
2885 
2886 	    (*info->callbacks->reloc_overflow)
2887 	      (info, (h ? &h->root : NULL), name, howto->name,
2888 	       (bfd_vma) 0, input_bfd, input_section,
2889 	       rel->r_vaddr - input_section->vma);
2890 	  }
2891 	}
2892     }
2893 
2894   return true;
2895 }
2896 
2897 /* This is a version of bfd_generic_get_relocated_section_contents
2898    which uses sh_relocate_section.  */
2899 
2900 static bfd_byte *
2901 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2902 					struct bfd_link_info *link_info,
2903 					struct bfd_link_order *link_order,
2904 					bfd_byte *data,
2905 					bool relocatable,
2906 					asymbol **symbols)
2907 {
2908   asection *input_section = link_order->u.indirect.section;
2909   bfd *input_bfd = input_section->owner;
2910   asection **sections = NULL;
2911   struct internal_reloc *internal_relocs = NULL;
2912   struct internal_syment *internal_syms = NULL;
2913 
2914   /* We only need to handle the case of relaxing, or of having a
2915      particular set of section contents, specially.  */
2916   if (relocatable
2917       || coff_section_data (input_bfd, input_section) == NULL
2918       || coff_section_data (input_bfd, input_section)->contents == NULL)
2919     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2920 						       link_order, data,
2921 						       relocatable,
2922 						       symbols);
2923 
2924   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2925 	  (size_t) input_section->size);
2926 
2927   if ((input_section->flags & SEC_RELOC) != 0
2928       && input_section->reloc_count > 0)
2929     {
2930       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2931       bfd_byte *esym, *esymend;
2932       struct internal_syment *isymp;
2933       asection **secpp;
2934       bfd_size_type amt;
2935 
2936       if (! _bfd_coff_get_external_symbols (input_bfd))
2937 	goto error_return;
2938 
2939       internal_relocs = (_bfd_coff_read_internal_relocs
2940 			 (input_bfd, input_section, false, (bfd_byte *) NULL,
2941 			  false, (struct internal_reloc *) NULL));
2942       if (internal_relocs == NULL)
2943 	goto error_return;
2944 
2945       amt = obj_raw_syment_count (input_bfd);
2946       amt *= sizeof (struct internal_syment);
2947       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2948       if (internal_syms == NULL)
2949 	goto error_return;
2950 
2951       amt = obj_raw_syment_count (input_bfd);
2952       amt *= sizeof (asection *);
2953       sections = (asection **) bfd_malloc (amt);
2954       if (sections == NULL)
2955 	goto error_return;
2956 
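      /* Swap in every external symbol and record which section it lives
	 in, so that sh_relocate_section can compute section relative
	 values; a symbol with n_scnum == 0 is treated as undefined when
	 its value is zero and as common otherwise.  */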
2957       isymp = internal_syms;
2958       secpp = sections;
2959       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2960       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2961       while (esym < esymend)
2962 	{
2963 	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2964 
2965 	  if (isymp->n_scnum != 0)
2966 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2967 	  else
2968 	    {
2969 	      if (isymp->n_value == 0)
2970 		*secpp = bfd_und_section_ptr;
2971 	      else
2972 		*secpp = bfd_com_section_ptr;
2973 	    }
2974 
2975 	  esym += (isymp->n_numaux + 1) * symesz;
2976 	  secpp += isymp->n_numaux + 1;
2977 	  isymp += isymp->n_numaux + 1;
2978 	}
2979 
2980       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2981 				 input_section, data, internal_relocs,
2982 				 internal_syms, sections))
2983 	goto error_return;
2984 
2985       free (sections);
2986       sections = NULL;
2987       free (internal_syms);
2988       internal_syms = NULL;
2989       free (internal_relocs);
2990       internal_relocs = NULL;
2991     }
2992 
2993   return data;
2994 
2995  error_return:
2996   free (internal_relocs);
2997   free (internal_syms);
2998   free (sections);
2999   return NULL;
3000 }
3001 
3002 /* The target vectors.  */
3003 
3004 #ifndef TARGET_SHL_SYM
3005 CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3006 #endif
3007 
3008 #ifdef TARGET_SHL_SYM
3009 #define TARGET_SYM TARGET_SHL_SYM
3010 #else
3011 #define TARGET_SYM sh_coff_le_vec
3012 #endif
3013 
3014 #ifndef TARGET_SHL_NAME
3015 #define TARGET_SHL_NAME "coff-shl"
3016 #endif
3017 
3018 #ifdef COFF_WITH_PE
3019 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3020 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3021 #else
3022 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3023 			       0, '_', NULL, COFF_SWAP_TABLE)
3024 #endif
3025 
3026 #ifndef TARGET_SHL_SYM
3027 
3028 /* Some people want versions of the SH COFF target which do not align
3029    to 16 byte boundaries.  We implement that by adding a couple of new
3030    target vectors.  These are just like the ones above, but they
3031    change the default section alignment.  To generate them in the
3032    assembler, use -small.  To use them in the linker, use -b
3033    coff-sh{l}-small and -oformat coff-sh{l}-small.
3034 
3035    Yes, this is a horrible hack.  A general solution for setting
3036    section alignment in COFF is rather complex.  ELF handles this
3037    correctly.  */
3038 
3039 /* Only recognize the small versions if the target was not defaulted.
3040    Otherwise we won't recognize the non default endianness.  */
3041 
3042 static bfd_cleanup
3043 coff_small_object_p (bfd *abfd)
3044 {
3045   if (abfd->target_defaulted)
3046     {
3047       bfd_set_error (bfd_error_wrong_format);
3048       return NULL;
3049     }
3050   return coff_object_p (abfd);
3051 }
3052 
3053 /* Set the section alignment for the small versions.  */
3054 
3055 static bool
3056 coff_small_new_section_hook (bfd *abfd, asection *section)
3057 {
3058   if (! coff_new_section_hook (abfd, section))
3059     return false;
3060 
3061   /* We must align to at least a four byte boundary, because longword
3062      accesses must be on a four byte boundary.  */
3063   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3064     section->alignment_power = 2;
3065 
3066   return true;
3067 }
3068 
3069 /* This is copied from bfd_coff_std_swap_table so that we can change
3070    the default section alignment power.  */
3071 
3072 static bfd_coff_backend_data bfd_coff_small_swap_table =
3073 {
3074   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3075   coff_swap_aux_out, coff_swap_sym_out,
3076   coff_swap_lineno_out, coff_swap_reloc_out,
3077   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3078   coff_swap_scnhdr_out,
3079   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3080 #ifdef COFF_LONG_FILENAMES
3081   true,
3082 #else
3083   false,
3084 #endif
3085   COFF_DEFAULT_LONG_SECTION_NAMES,
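  /* This entry is the default section alignment power, hard-wired to 2
     instead of COFF_DEFAULT_SECTION_ALIGNMENT_POWER; changing it is the
     whole reason this table is copied rather than shared (see the
     comment above).  */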
3086   2,
3087 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3088   true,
3089 #else
3090   false,
3091 #endif
3092 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3093   4,
3094 #else
3095   2,
3096 #endif
3097   32768,
3098   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3099   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3100   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3101   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3102   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3103   coff_classify_symbol, coff_compute_section_file_positions,
3104   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3105   coff_adjust_symndx, coff_link_add_one_symbol,
3106   coff_link_output_has_begun, coff_final_link_postscript,
3107   bfd_pe_print_pdata
3108 };
3109 
3110 #define coff_small_close_and_cleanup \
3111   coff_close_and_cleanup
3112 #define coff_small_bfd_free_cached_info \
3113   coff_bfd_free_cached_info
3114 #define coff_small_get_section_contents \
3115   coff_get_section_contents
3116 #define coff_small_get_section_contents_in_window \
3117   coff_get_section_contents_in_window
3118 
3119 extern const bfd_target sh_coff_small_le_vec;
3120 
3121 const bfd_target sh_coff_small_vec =
3122 {
3123   "coff-sh-small",		/* name */
3124   bfd_target_coff_flavour,
3125   BFD_ENDIAN_BIG,		/* data byte order is big */
3126   BFD_ENDIAN_BIG,		/* header byte order is big */
3127 
3128   (HAS_RELOC | EXEC_P		/* object flags */
3129    | HAS_LINENO | HAS_DEBUG
3130    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3131 
3132   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3133   '_',				/* leading symbol underscore */
3134   '/',				/* ar_pad_char */
3135   15,				/* ar_max_namelen */
3136   0,				/* match priority.  */
3137   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3138   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3139   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3140   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3141   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3142   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3143   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3144 
3145   {				/* bfd_check_format */
3146     _bfd_dummy_target,
3147     coff_small_object_p,
3148     bfd_generic_archive_p,
3149     _bfd_dummy_target
3150   },
3151   {				/* bfd_set_format */
3152     _bfd_bool_bfd_false_error,
3153     coff_mkobject,
3154     _bfd_generic_mkarchive,
3155     _bfd_bool_bfd_false_error
3156   },
3157   {				/* bfd_write_contents */
3158     _bfd_bool_bfd_false_error,
3159     coff_write_object_contents,
3160     _bfd_write_archive_contents,
3161     _bfd_bool_bfd_false_error
3162   },
3163 
3164   BFD_JUMP_TABLE_GENERIC (coff_small),
3165   BFD_JUMP_TABLE_COPY (coff),
3166   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3167   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3168   BFD_JUMP_TABLE_SYMBOLS (coff),
3169   BFD_JUMP_TABLE_RELOCS (coff),
3170   BFD_JUMP_TABLE_WRITE (coff),
3171   BFD_JUMP_TABLE_LINK (coff),
3172   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3173 
3174   &sh_coff_small_le_vec,
3175 
3176   &bfd_coff_small_swap_table
3177 };
3178 
3179 const bfd_target sh_coff_small_le_vec =
3180 {
3181   "coff-shl-small",		/* name */
3182   bfd_target_coff_flavour,
3183   BFD_ENDIAN_LITTLE,		/* data byte order is little */
3184   BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3185 
3186   (HAS_RELOC | EXEC_P		/* object flags */
3187    | HAS_LINENO | HAS_DEBUG
3188    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3189 
3190   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3191   '_',				/* leading symbol underscore */
3192   '/',				/* ar_pad_char */
3193   15,				/* ar_max_namelen */
3194   0,				/* match priority.  */
3195   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3196   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3197   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3198   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3199   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3200   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3201   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3202 
3203   {				/* bfd_check_format */
3204     _bfd_dummy_target,
3205     coff_small_object_p,
3206     bfd_generic_archive_p,
3207     _bfd_dummy_target
3208   },
3209   {				/* bfd_set_format */
3210     _bfd_bool_bfd_false_error,
3211     coff_mkobject,
3212     _bfd_generic_mkarchive,
3213     _bfd_bool_bfd_false_error
3214   },
3215   {				/* bfd_write_contents */
3216     _bfd_bool_bfd_false_error,
3217     coff_write_object_contents,
3218     _bfd_write_archive_contents,
3219     _bfd_bool_bfd_false_error
3220   },
3221 
3222   BFD_JUMP_TABLE_GENERIC (coff_small),
3223   BFD_JUMP_TABLE_COPY (coff),
3224   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3225   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3226   BFD_JUMP_TABLE_SYMBOLS (coff),
3227   BFD_JUMP_TABLE_RELOCS (coff),
3228   BFD_JUMP_TABLE_WRITE (coff),
3229   BFD_JUMP_TABLE_LINK (coff),
3230   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3231 
3232   &sh_coff_small_vec,
3233 
3234   &bfd_coff_small_swap_table
3235 };
3236 #endif
3237