/* Mirrored from git.proxmox.com (mirror_ubuntu-kernels.git)
 * Path: arch/blackfin/kernel/vmlinux.lds.S
 * Branch note: "Merge branches 'release', 'ejd', 'sony' and 'wmi' into release"
 */
1 /*
2 * File: arch/blackfin/kernel/vmlinux.lds.S
3 * Based on: none - original work
4 * Author:
5 *
6 * Created: Tue Sep 21 2004
7 * Description: Master linker script for blackfin architecture
8 *
9 * Modified:
10 * Copyright 2004-2007 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
/* The toolchain prefixes C symbols with a leading underscore on this
 * target, so make the generic linker-script macros (vmlinux.lds.h,
 * included just below) emit "_sym" for every VMLINUX_SYMBOL(sym).
 * This define must come before the asm-generic include.
 */
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
/* Alias the 32-bit jiffies onto the 64-bit counter so readers see its
 * low word directly.  NOTE(review): this relies on the low word coming
 * first in memory (little-endian layout, matching elf32-bfin) -- confirm.
 */
_jiffies = _jiffies_64;
40
SECTIONS
{
	/* Link (and run) the kernel starting at the configured boot load
	 * address.
	 */
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)

		/* Exception fixup table lives inside .text, delimited by the
		 * ___start/___stop symbols; kept 16-byte aligned.
		 */
		. = ALIGN(16);
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)

	/* NOTE(review): .bss is placed between the read-only and
	 * read-write data here -- unusual ordering, but consistent with
	 * the "pack back to back" comment above; confirm the boot code's
	 * bss-clearing range matches ___bss_start..___bss_stop.
	 */
	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
		___bss_stop = .;
	}

	.data :
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

		DATA_DATA
		*(.data.*)
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
	{
		. = ALIGN(PAGE_SIZE);
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
	}
	.init.data :
	{
		. = ALIGN(16);
		INIT_DATA
	}
	/* Table of kernel command-line "param=value" handlers (__setup()). */
	.init.setup :
	{
		. = ALIGN(16);
		___setup_start = .;
		*(.init.setup)
		___setup_end = .;
	}
	/* Ordered table of initcall function pointers, walked at boot. */
	.initcall.init :
	{
		___initcall_start = .;
		INITCALLS
		___initcall_end = .;
	}
	/* Console driver registration table (console_initcall()). */
	.con_initcall.init :
	{
		___con_initcall_start = .;
		*(.con_initcall.init)
		___con_initcall_end = .;
	}
	SECURITY_INIT
	/* Built-in initramfs image, if any; freed with the rest of init. */
	.init.ramfs :
	{
		. = ALIGN(4);
		___initramfs_start = .;
		*(.init.ramfs)
		___initramfs_end = .;
	}

	/* Load-address (LMA) anchor for the on-chip L1 overlay sections
	 * below: their images are stored from here in the init region and
	 * run from L1 SRAM.  Presumably early boot code copies them into
	 * place -- the copy loop is not in this file; confirm there.
	 */
	__l1_lma_start = .;

	/* Only glob .l1.text input sections when the part actually has L1
	 * instruction SRAM (L1_CODE_LENGTH != 0); otherwise leave the
	 * section empty.
	 */
#if L1_CODE_LENGTH
# define LDS_L1_CODE *(.l1.text)
#else
# define LDS_L1_CODE
#endif
	/* VMA = L1 instruction SRAM; LMA = immediately after initramfs. */
	.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		LDS_L1_CODE
		. = ALIGN(4);
		__etext_l1 = .;
	}

#if L1_DATA_A_LENGTH
# define LDS_L1_A_DATA *(.l1.data)
# define LDS_L1_A_BSS *(.l1.bss)
# define LDS_L1_A_CACHE *(.data_l1.cacheline_aligned)
#else
# define LDS_L1_A_DATA
# define LDS_L1_A_BSS
# define LDS_L1_A_CACHE
#endif
	/* L1 data bank A: data, bss, then 32-byte-aligned cacheline data.
	 * Its load image follows the .text_l1 image.
	 */
	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		LDS_L1_A_DATA
		__edata_l1 = .;

		. = ALIGN(4);
		__sbss_l1 = .;
		LDS_L1_A_BSS

		. = ALIGN(32);
		LDS_L1_A_CACHE

		/* NOTE(review): the cacheline-aligned data above falls inside
		 * the __sbss_l1..__ebss_l1 range, so a zeroing loop over that
		 * range would clobber it -- confirm against the boot code.
		 */
		. = ALIGN(4);
		__ebss_l1 = .;
	}

#if L1_DATA_B_LENGTH
# define LDS_L1_B_DATA *(.l1.data.B)
# define LDS_L1_B_BSS *(.l1.bss.B)
#else
# define LDS_L1_B_DATA
# define LDS_L1_B_BSS
#endif
	/* L1 data bank B: same layout as bank A, without the
	 * cacheline-aligned input sections.  Load image follows bank A's.
	 */
	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		LDS_L1_B_DATA
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		LDS_L1_B_BSS

		. = ALIGN(4);
		__ebss_b_l1 = .;
	}

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
	. = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end =.;

	STABS_DEBUG

	DWARF_DEBUG

	NOTES

	/* Discard exit-path code/data and the exitcall table at link time;
	 * they are never needed by built-in (non-module) code.
	 */
	/DISCARD/ :
	{
		EXIT_TEXT
		EXIT_DATA
		*(.exitcall.exit)
	}
}