2a485352ec14680553de6f925a0a00281ae782c5
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / blackfin / kernel / vmlinux.lds.S
1 /*
2 * File: arch/blackfin/kernel/vmlinux.lds.S
3 * Based on: none - original work
4 * Author:
5 *
6 * Created: Tue Sep 21 2004
7 * Description: Master linker script for blackfin architecture
8 *
9 * Modified:
10 * Copyright 2004-2007 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
/* The Blackfin (bfin-elf) toolchain decorates every C-visible identifier
 * with a leading underscore, so the generic symbol names emitted by
 * asm-generic/vmlinux.lds.h must become _<sym> here.  This macro must be
 * defined BEFORE the vmlinux.lds.h include below.
 */
#define VMLINUX_SYMBOL(_sym_) _##_sym_

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

/* Produce Blackfin ELF objects; the kernel entry point is the asm label
 * __start (the underscore-prefixed form of the C-level "start").
 */
OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
/* Alias jiffies onto jiffies_64 so both names resolve to one counter.
 * NOTE(review): sharing the address gives jiffies the low 32 bits only on a
 * little-endian layout -- Blackfin is little-endian, but confirm if reusing.
 */
_jiffies = _jiffies_64;
40
/* Memory layout:
 *  - Regular sections (.text/RO_DATA/.bss/.data and the init sections) are
 *    linked and loaded at CONFIG_BOOT_LOAD onwards.
 *  - The on-chip L1/L2 SRAM sections are linked at their SRAM VMAs
 *    (L1_CODE_START etc.) but loaded (AT) back-to-back after .init.ramfs;
 *    early boot code relocates them from the load image into SRAM, after
 *    which the load copies live inside the freeable init region.
 */
SECTIONS
{
	. = CONFIG_BOOT_LOAD;
	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		*(.text.*)
		*(.fixup)

		/* No dedicated L1 instruction SRAM on this part: keep code
		 * tagged for L1 in the ordinary text section instead.
		 */
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif

		/* Exception (fixup) table, bounded by the usual symbols */
		. = ALIGN(16);
		___start___ex_table = .;
		*(__ex_table)
		___stop___ex_table = .;

		__etext = .;
	}

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)

	.bss :
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
		/* Fold L1 bss into the ordinary bss when the corresponding
		 * L1 data bank does not exist on this part.
		 */
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

	.data :
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		. = ALIGN(32);
		*(.data.cacheline_aligned)

		/* Banks of on-chip SRAM that this part lacks fall back to
		 * ordinary .data; cacheline-aligned input comes first so the
		 * alignment is not wasted.
		 */
#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		*(.data)
		CONSTRUCTORS

		/* make sure the init_task is aligned to the
		 * kernel thread size so we can locate the kernel
		 * stack properly and quickly.
		 */
		. = ALIGN(THREAD_SIZE);
		*(.init_task.data)

		__edata = .;
	}

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

	.init.text :
	{
		. = ALIGN(PAGE_SIZE);
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
	}
	.init.data :
	{
		. = ALIGN(16);
		INIT_DATA
	}
	.init.setup :
	{
		. = ALIGN(16);
		___setup_start = .;
		*(.init.setup)
		___setup_end = .;
	}
	.initcall.init :
	{
		___initcall_start = .;
		INITCALLS
		___initcall_end = .;
	}
	.con_initcall.init :
	{
		___con_initcall_start = .;
		*(.con_initcall.init)
		___con_initcall_end = .;
	}
	PERCPU(4)
	SECURITY_INIT
	.init.ramfs :
	{
		. = ALIGN(4);
		___initramfs_start = .;
		*(.init.ramfs)
		___initramfs_end = .;
	}

	/* Everything below is linked at its SRAM VMA but loaded (AT) in a
	 * contiguous chain starting right after .init.ramfs; boot code copies
	 * each piece from [__l1_lma_start ...] into the SRAM banks.
	 */
	__l1_lma_start = .;

	.text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs))
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text)
		. = ALIGN(4);
		__etext_l1 = .;
	}

	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}

	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}

	__l2_lma_start = .;

	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 *
	 * BUGFIX: the location counter must be advanced past the load image
	 * of the LAST AT() section in the chain, which is .text_data_l2 --
	 * not .data_b_l1.  Using .data_b_l1 left ___init_end short of the L2
	 * load data, so that data could be clobbered once the init region was
	 * freed (or overlapped by anything appended after the image).  When
	 * the part has no L2 SRAM, .text_data_l2 is empty and this reduces to
	 * the old value, so the change is safe for all parts.
	 */
	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG

	DWARF_DEBUG

	/* Init-only exit paths are never used on a non-modular boot image */
	/DISCARD/ :
	{
		EXIT_TEXT
		EXIT_DATA
		*(.exitcall.exit)
	}
}