/*
 * Provenance (scrape residue converted to a comment):
 * arch/arm/kernel/vmlinux.lds.S from mirror_ubuntu-disco-kernel.git
 * (git.proxmox.com), as of the "License cleanup: add SPDX GPL-2.0
 * license identifier to files with no license" commit.
 */
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

/*
 * XIP (execute-in-place) kernels are laid out very differently; they
 * use a dedicated script and skip everything below.
 */
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
#else

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/*
 * Collect the .proc.info.init records (one per supported CPU type),
 * bracketed by __proc_info_begin/__proc_info_end so early boot code
 * can scan them.  Word-aligned; the records are linked, not loaded code.
 */
#define PROC_INFO							\
		. = ALIGN(4);						\
		VMLINUX_SYMBOL(__proc_info_begin) = .;			\
		*(.proc.info.init)					\
		VMLINUX_SYMBOL(__proc_info_end) = .;

/*
 * Gather the HYP-mode text (.hyp.text) between
 * __hyp_text_start/__hyp_text_end marker symbols.
 */
#define HYPERVISOR_TEXT						\
		VMLINUX_SYMBOL(__hyp_text_start) = .;		\
		*(.hyp.text)					\
		VMLINUX_SYMBOL(__hyp_text_end) = .;

/*
 * Identity-mapped text: first the kernel's own .idmap.text, then the
 * HYP idmap text pushed to a fresh page boundary.  Each region is
 * bracketed by start/end symbols; the PAGE_SIZE alignment keeps the
 * HYP idmap code from sharing a page with the preceding text (see the
 * page-size ASSERT at the bottom of this script).
 */
#define IDMAP_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__idmap_text_start) = .;			\
		*(.idmap.text)						\
		VMLINUX_SYMBOL(__idmap_text_end) = .;			\
		. = ALIGN(PAGE_SIZE);					\
		VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
		*(.hyp.idmap.text)					\
		VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

/*
 * CPU-teardown material (cpuexit text/data, PROC_INFO) must survive in
 * the image when CPUs can be hot-plugged back in; otherwise it may be
 * discarded at link time.  KEEP/DISCARD are complementary: exactly one
 * of the pair expands to its argument.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

/*
 * Normally __exit text/data can be discarded at link time, but some
 * configurations keep references into it (SMP_ON_UP fixups without
 * spinlock debugging, generic BUG tables, jump labels), so it must be
 * retained.  As above, exactly one of KEEP/DISCARD expands to x.
 */
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)

/*
 * Alias the 32-bit "jiffies" onto the low word of the 64-bit
 * jiffies_64 counter: on big-endian (__ARMEB__) the low word sits 4
 * bytes in, on little-endian it is at offset 0.
 */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
	/*
	 * XXX: The linker does not define how output sections are
	 * assigned to input sections when there are multiple statements
	 * matching the same input section name.  There is no documented
	 * order of matching.
	 *
	 * unwind exit sections must be discarded before the rest of the
	 * unwind sections get included.
	 */
	/DISCARD/ : {
		*(.ARM.exidx.exit.text)
		*(.ARM.extab.exit.text)
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
		ARM_EXIT_DISCARD(EXIT_TEXT)
		ARM_EXIT_DISCARD(EXIT_DATA)
		EXIT_CALL
#ifndef CONFIG_MMU
		*(.text.fixup)
		*(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
		*(.alt.smp.init)
#endif
		*(.discard)
		*(.discard.*)
	}

	/* Kernel image starts TEXT_OFFSET bytes into the linear map. */
	. = PAGE_OFFSET + TEXT_OFFSET;
	.head.text : {
		_text = .;
		HEAD_TEXT
	}

#ifdef CONFIG_STRICT_KERNEL_RWX
	/* Section-align so text can get its own RO mapping permissions. */
	. = ALIGN(1<<SECTION_SHIFT);
#endif

	.text : {			/* Real text segment		*/
		_stext = .;		/* Text and read-only data	*/
			IDMAP_TEXT
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
			IRQENTRY_TEXT
			SOFTIRQENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			CPUIDLE_TEXT
			LOCK_TEXT
			HYPERVISOR_TEXT
			KPROBES_TEXT
		*(.gnu.warning)
		*(.glue_7)
		*(.glue_7t)
		. = ALIGN(4);
		*(.got)			/* Global offset table		*/
			ARM_CPU_KEEP(PROC_INFO)
	}

#ifdef CONFIG_DEBUG_ALIGN_RODATA
	. = ALIGN(1<<SECTION_SHIFT);
#endif
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)

	/* Exception fixup table; only populated when an MMU can fault. */
	. = ALIGN(4);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
#ifdef CONFIG_MMU
		*(__ex_table)
#endif
		__stop___ex_table = .;
	}

#ifdef CONFIG_ARM_UNWIND
	/*
	 * Stack unwinding tables
	 */
	. = ALIGN(8);
	.ARM.unwind_idx : {
		__start_unwind_idx = .;
		*(.ARM.exidx*)
		__stop_unwind_idx = .;
	}
	.ARM.unwind_tab : {
		__start_unwind_tab = .;
		*(.ARM.extab*)
		__stop_unwind_tab = .;
	}
#endif

	NOTES

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(PAGE_SIZE);
#endif
	__init_begin = .;

	/*
	 * The vectors and stubs are relocatable code, and the
	 * only thing that matters is their relative offsets.
	 * VMA is pinned at 0xffff0000 (and 0x1000 above for the stubs);
	 * the LMA follows the normal image layout, so boot code copies
	 * them into place from __vectors_start/__stubs_start.
	 */
	__vectors_start = .;
	.vectors 0xffff0000 : AT(__vectors_start) {
		*(.vectors)
	}
	. = __vectors_start + SIZEOF(.vectors);
	__vectors_end = .;

	__stubs_start = .;
	.stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
		*(.stubs)
	}
	. = __stubs_start + SIZEOF(.stubs);
	__stubs_end = .;

	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));

	INIT_TEXT_SECTION(8)
	.exit.text : {
		ARM_EXIT_KEEP(EXIT_TEXT)
	}
	.init.proc.info : {
		ARM_CPU_DISCARD(PROC_INFO)
	}
	.init.arch.info : {
		__arch_info_begin = .;
		*(.arch.info.init)
		__arch_info_end = .;
	}
	.init.tagtable : {
		__tagtable_begin = .;
		*(.taglist.init)
		__tagtable_end = .;
	}
#ifdef CONFIG_SMP_ON_UP
	.init.smpalt : {
		__smpalt_begin = .;
		*(.alt.smp.init)
		__smpalt_end = .;
	}
#endif
	.init.pv_table : {
		__pv_table_begin = .;
		*(.pv_table)
		__pv_table_end = .;
	}
	.init.data : {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}
	.exit.data : {
		ARM_EXIT_KEEP(EXIT_DATA)
	}

#ifdef CONFIG_SMP
	PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
	. = ALIGN(1<<SECTION_SHIFT);
#else
	. = ALIGN(THREAD_SIZE);
#endif
	/* End of the init region freed after boot; data follows. */
	__init_end = .;
	__data_loc = .;

	.data : AT(__data_loc) {
		_data = .;		/* address in memory */
		_sdata = .;

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		INIT_TASK_DATA(THREAD_SIZE)

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)

		/*
		 * and the usual data section
		 */
		DATA_DATA
		CONSTRUCTORS

		_edata = .;
	}
	_edata_loc = __data_loc + SIZEOF(.data);

	BUG_TABLE

#ifdef CONFIG_HAVE_TCM
	/*
	 * We align everything to a page boundary so we can
	 * free it after init has commenced and TCM contents have
	 * been copied to its destination.
	 */
	.tcm_start : {
		. = ALIGN(PAGE_SIZE);
		__tcm_start = .;
		__itcm_start = .;
	}

	/*
	 * Link these to the ITCM RAM
	 * Put VMA to the TCM address and LMA to the common RAM
	 * and we'll upload the contents from RAM to TCM and free
	 * the used RAM after that.
	 */
	.text_itcm ITCM_OFFSET : AT(__itcm_start)
	{
		__sitcm_text = .;
		*(.tcm.text)
		*(.tcm.rodata)
		. = ALIGN(4);
		__eitcm_text = .;
	}

	/*
	 * Reset the dot pointer, this is needed to create the
	 * relative __dtcm_start below (to be used as extern in code).
	 */
	. = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

	.dtcm_start : {
		__dtcm_start = .;
	}

	/* TODO: add remainder of ITCM as well, that can be used for data! */
	.data_dtcm DTCM_OFFSET : AT(__dtcm_start)
	{
		. = ALIGN(4);
		__sdtcm_data = .;
		*(.tcm.data)
		. = ALIGN(4);
		__edtcm_data = .;
	}

	/* Reset the dot pointer or the linker gets confused */
	. = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

	/* End marker for freeing TCM copy in linked object */
	.tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
		. = ALIGN(PAGE_SIZE);
		__tcm_end = .;
	}
#endif

	BSS_SECTION(0, 0, 0)
	_end = .;

	STABS_DEBUG
}
330
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
 * be the first section-aligned location after __start_rodata. Otherwise,
 * it will be equal to __start_rodata.
 */
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif
339
/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */