include/asm-generic/vmlinux.lds.h
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
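
VMLINUX_SYMBOL() exists so that a port whose toolchain prefixes C identifiers
with an underscore can make the symbols defined in this linker script match
what its compiler emits. A minimal sketch of such an override, assuming a
hypothetical underscore-prefixing architecture:

/* In that architecture's vmlinux.lds.S, before including this header: */
#define VMLINUX_SYMBOL(_sym_) _##_sym_
#include <asm-generic/vmlinux.lds.h>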
/* Align . to an 8-byte boundary, the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
#define RODATA \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)	/* Kernel version magic */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: CRCs of normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: CRCs of GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
	}
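
The four __start/__end pairs above bracket per-phase arrays of struct
pci_fixup entries, which the PCI core walks at enumeration time via
pci_fixup_device(). Drivers add entries with the DECLARE_PCI_FIXUP_* macros
from <linux/pci.h>; a hedged sketch (the device ID 0x1234 and the hook are
made up):

#include <linux/pci.h>

/* Hypothetical quirk, run once per matching device during early fixup. */
static void __devinit quirk_example(struct pci_dev *dev)
{
	dev_info(&dev->dev, "applying example quirk\n");
}
/* Emits a struct pci_fixup into .pci_fixup_early, i.e. between
 * __start_pci_fixups_early and __end_pci_fixups_early. */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);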
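
EXPORT_SYMBOL() is what populates these tables: it places a struct
kernel_symbol in __ksymtab (or __ksymtab_gpl), the name string in
__ksymtab_strings, and, under CONFIG_MODVERSIONS, a CRC in __kcrctab. The
__start/__stop markers then behave like array bounds for the module loader;
a sketch of the lookup pattern (find_exported_symbol is a hypothetical
helper, not the kernel's actual resolver):

#include <linux/module.h>	/* struct kernel_symbol */
#include <linux/string.h>

extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];

/* Scan the section-delimited table the way the module loader does. */
static const struct kernel_symbol *find_exported_symbol(const char *name)
{
	const struct kernel_symbol *s;

	for (s = __start___ksymtab; s < __stop___ksymtab; s++)
		if (strcmp(s->name, name) == 0)
			return s;
	return NULL;
}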
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}
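
security_initcall() (defined in <linux/init.h> in this era) drops an
initcall_t function pointer into .security_initcall.init, and the two marker
symbols let boot code invoke every registered entry. A hedged sketch of both
sides; my_security_init is hypothetical:

#include <linux/init.h>

/* Registration side: the pointer lands between the two markers. */
static int __init my_security_init(void)
{
	return 0;	/* hypothetical security-module setup */
}
security_initcall(my_security_init);

/* Consumption side, roughly as the kernel's boot path does it: walk the
 * marker-delimited array of initcalls and run each one. */
extern initcall_t __security_initcall_start[], __security_initcall_end[];

static void __init do_security_initcalls(void)
{
	initcall_t *call;

	for (call = __security_initcall_start;
	     call < __security_initcall_end; call++)
		(*call)();
}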
/* .sched.text is aligned to the function alignment so that we get the same
 * address even on the second ld pass when generating System.map. */
#define SCHED_TEXT \
		ALIGN_FUNCTION(); \
		VMLINUX_SYMBOL(__sched_text_start) = .; \
		*(.sched.text) \
		VMLINUX_SYMBOL(__sched_text_end) = .;
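
The bracketing symbols let the unwinder recognize scheduler internals, e.g.
when computing a blocked task's wchan; kernel/sched.c of this era uses them
roughly like this:

/* True if addr lies inside scheduler (or spinlock) text, so backtraces
 * can skip past those frames. */
extern int in_lock_functions(unsigned long addr);
extern char __sched_text_start[], __sched_text_end[];

int in_sched_functions(unsigned long addr)
{
	return in_lock_functions(addr) ||
		(addr >= (unsigned long)__sched_text_start &&
		 addr < (unsigned long)__sched_text_end);
}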
/* .spinlock.text is aligned to the function alignment so that we get the same
 * address even on the second ld pass when generating System.map. */
#define LOCK_TEXT \
		ALIGN_FUNCTION(); \
		VMLINUX_SYMBOL(__lock_text_start) = .; \
		*(.spinlock.text) \
		VMLINUX_SYMBOL(__lock_text_end) = .;
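
Lock primitives are tagged __lockfunc, whose section attribute routes them
into .spinlock.text; kernel/spinlock.c then answers "is this address inside
a lock routine?" with a plain range check, roughly:

extern char __lock_text_start[], __lock_text_end[];

int in_lock_functions(unsigned long addr)
{
	/* The linker script above provides the two bounds. */
	return addr >= (unsigned long)__lock_text_start &&
	       addr < (unsigned long)__lock_text_end;
}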

#define KPROBES_TEXT \
		ALIGN_FUNCTION(); \
		VMLINUX_SYMBOL(__kprobes_text_start) = .; \
		*(.kprobes.text) \
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
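
Code that must never be probed (the kprobes machinery itself) is tagged
__kprobes so it lands in this section; probe registration can then refuse any
address inside the bracketed range. A hedged sketch of the check (the real
helper in kernel/kprobes.c returns an errno rather than a boolean):

/* __kprobes from <linux/kprobes.h> places a function in .kprobes.text. */
extern char __kprobes_text_start[], __kprobes_text_end[];

static int in_kprobes_functions(unsigned long addr)
{
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}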