/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#define RO_EXCEPTION_TABLE_ALIGN	8

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include "image.h"

/* .exit.text needed in case of alternative patching */
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)

OUTPUT_ARCH(aarch64)
ENTRY(_text)

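/*
 * Note: arm64 is 64-bit, so 'jiffies' can simply alias 'jiffies_64'
 * directly (no separate low-word alias is needed as on 32-bit
 * architectures).
 */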
jiffies = jiffies_64;

#define HYPERVISOR_TEXT					\
	/*						\
	 * Align to 4 KB so that			\
	 * a) the HYP vector table is at its minimum	\
	 *    alignment of 2048 bytes			\
	 * b) the HYP init code will not cross a page	\
	 *    boundary if its size does not exceed	\
	 *    4 KB (see related ASSERT() below)		\
	 */						\
	. = ALIGN(SZ_4K);				\
	__hyp_idmap_text_start = .;			\
	*(.hyp.idmap.text)				\
	__hyp_idmap_text_end = .;			\
	__hyp_text_start = .;				\
	*(.hyp.text)					\
	__hyp_text_end = .;
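
/*
 * Note on the ALIGN(SZ_4K) above: 4 KB alignment trivially covers (a),
 * since 4096 is a multiple of 2048; (b) is enforced by the "HYP init
 * code too big or misaligned" ASSERT() at the end of this file.
 */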

#define IDMAP_TEXT					\
	. = ALIGN(SZ_4K);				\
	__idmap_text_start = .;				\
	*(.idmap.text)					\
	__idmap_text_end = .;

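/*
 * The hibernate resume path copies this code into a single freshly
 * allocated page and runs it from there, hence the 4 KB alignment here
 * and the "Hibernate exit text too big or misaligned" ASSERT() below.
 */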
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT					\
	. = ALIGN(SZ_4K);				\
	__hibernate_exit_text_start = .;		\
	*(.hibernate_exit.text)				\
	__hibernate_exit_text_end = .;
#else
#define HIBERNATE_TEXT
#endif

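/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), the exception entry trampoline
 * is the only kernel text mapped while running user space.  Padding it
 * out to exactly one page (see the PAGE_SIZE ASSERT() below) keeps that
 * mapping to a single, self-contained page.
 */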
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT					\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_start = .;			\
	*(.entry.tramp.text)				\
	. = ALIGN(PAGE_SIZE);				\
	__entry_tramp_text_end = .;
#else
#define TRAMP_TEXT
#endif

/*
 * The size of the PE/COFF section that covers the kernel image, which
 * runs from stext to _edata, must be a round multiple of the PE/COFF
 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
 * boundary should be sufficient.
 */
PECOFF_FILE_ALIGNMENT = 0x200;
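
/*
 * For example, if _edata would otherwise land at ...0x1a31, the padding
 * below advances it to ...0x1c00, the next 0x200 boundary.  The BYTE(0)
 * in PECOFF_EDATA_PADDING keeps the padding section from being empty,
 * so the alignment is actually emitted into the file image.
 */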

#ifdef CONFIG_EFI
#define PECOFF_EDATA_PADDING	\
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#else
#define PECOFF_EDATA_PADDING
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 */
	/DISCARD/ : {
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
		*(.discard)
		*(.discard.*)
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash .gnu.hash)
		*(.eh_frame)
	}

	. = KIMAGE_VADDR + TEXT_OFFSET;

	.head.text : {
		_text = .;
		HEAD_TEXT
	}
	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			ENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			KPROBES_TEXT
			HYPERVISOR_TEXT
			IDMAP_TEXT
			HIBERNATE_TEXT
			TRAMP_TEXT
			*(.fixup)
			*(.gnu.warning)
		. = ALIGN(16);
		*(.got)			/* Global offset table		*/
	}

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text section */

	/* everything from this point to __init_begin will be marked RO NX */
	RO_DATA(PAGE_SIZE)

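	/*
	 * Statically reserved page tables.  Roughly: idmap_pg_dir holds
	 * the identity mapping used while the MMU is being toggled,
	 * tramp_pg_dir maps only the KPTI entry trampoline for use while
	 * in user space, reserved_ttbr0 is the empty table installed in
	 * TTBR0_EL1 by the software PAN emulation, and swapper_pg_dir is
	 * the kernel's top-level page table.
	 */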
	idmap_pg_dir = .;
	. += IDMAP_DIR_SIZE;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_pg_dir = .;
	. += PAGE_SIZE;
#endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	reserved_ttbr0 = .;
	. += RESERVED_TTBR0_SIZE;
#endif
	swapper_pg_dir = .;
	. += PAGE_SIZE;
	swapper_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
	__inittext_begin = .;

	INIT_TEXT_SECTION(8)

	__exittext_begin = .;
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	__exittext_end = .;

	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	.altinstr_replacement : {
		*(.altinstr_replacement)
	}

	. = ALIGN(PAGE_SIZE);
	__inittext_end = .;
	__initdata_begin = .;

	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		INIT_RAM_FS
		*(.init.rodata.* .init.bss)	/* from the EFI stub */
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

	PERCPU_SECTION(L1_CACHE_BYTES)

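	/*
	 * RELA relocations for the relocatable (KASLR-capable) kernel
	 * image.  __rela_offset and __rela_size are consumed by the
	 * early relocation code in head.S, which applies the
	 * relocations during boot.
	 */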
	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

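	/*
	 * With CONFIG_RELR, relative relocations are additionally
	 * emitted in the more compact RELR encoding; the early
	 * relocation code processes this table as well.
	 */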
#ifdef CONFIG_RELR
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

	_data = .;
	_sdata = .;
	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)

	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
	 * Granule (CWG) of data from the cache. Keep the section that
	 * requires this type of maintenance to be in its own Cache Writeback
	 * Granule (CWG) area so the cache maintenance operations don't
	 * interfere with adjacent data.
	 */
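	/*
	 * SZ_2K is used here because 2 KB is the largest CWG the
	 * architecture permits (as reported by CTR_EL0.CWG), so this
	 * alignment is safe on any implementation.
	 */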
	.mmuoff.data.write : ALIGN(SZ_2K) {
		__mmuoff_data_start = .;
		*(.mmuoff.data.write)
	}
	. = ALIGN(SZ_2K);
	.mmuoff.data.read : {
		*(.mmuoff.data.read)
		__mmuoff_data_end = .;
	}

	PECOFF_EDATA_PADDING
	__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
	_edata = .;

	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	init_pg_dir = .;
	. += INIT_DIR_SIZE;
	init_pg_end = .;

	__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
	_end = .;
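
	/*
	 * __pecoff_data_rawsize covers only the data actually present in
	 * the file image (up to _edata), while __pecoff_data_size also
	 * includes the BSS and the init page tables above; the EFI
	 * PE/COFF header uses them as the .data section's SizeOfRawData
	 * and VirtualSize respectively.
	 */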

	STABS_DEBUG

	HEAD_SYMBOLS
}

#include "image-vars.h"

/*
 * The HYP init code and ID map text can't be longer than a page each,
 * and should not cross a page boundary.
 */
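/*
 * In the checks below, 'x & ~(SZ_4K - 1)' rounds x down to the start of
 * its 4 KB page, so each ASSERT() verifies that the range ends within
 * the same page in which it starts.
 */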
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
	<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
	"Entry trampoline text too big")
#endif
/*
 * If padding is applied before .head.text, virt<->phys conversions will fail.
 */
ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
