2 * Copyright 2004-2009 Analog Devices Inc.
4 * Licensed under the GPL-2 or later
7 #include <asm-generic/vmlinux.lds.h>
8 #include <asm/mem_map.h>
10 #include <asm/thread_info.h>
12 OUTPUT_FORMAT("elf32-bfin")
14 _jiffies = _jiffies_64;
18 #ifdef CONFIG_RAMKERNEL
24 /* Neither the text, ro_data, nor bss sections need to be aligned,
25 * so pack them back to back
33 #ifndef CONFIG_SCHEDULE_L1
41 #ifdef CONFIG_ROMKERNEL
59 /* Just in case the first read-only is a 32-bit access */
63 #ifdef CONFIG_ROMKERNEL
65 .bss : AT(__rodata_end)
84 #if defined(CONFIG_ROMKERNEL)
85 .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
91 /* This gets done first, so the glob doesn't suck it in */
92 CACHELINE_ALIGNED_DATA(32)
96 *(.data_l1.cacheline_aligned)
104 *(.data_l2.cacheline_aligned)
111 INIT_TASK_DATA(THREAD_SIZE)
115 __data_lma = LOADADDR(.data);
116 __data_len = SIZEOF(.data);
118 /* The init section should be last, so when we free it, it goes into
119 * the general memory pool, and (hopefully) will decrease fragmentation
120 * a tiny bit. The init section has a _requirement_ that it be
123 . = ALIGN(PAGE_SIZE);
126 #ifdef CONFIG_RAMKERNEL
127 INIT_TEXT_SECTION(PAGE_SIZE)
129 /* We have to discard exit text and such at runtime, not link time, to
130 * handle embedded cross-section references (alt instructions, bug
131 * table, eh_frame, etc...). We need all of our .text up front and
132 * .data after it for PCREL call issues.
140 INIT_DATA_SECTION(16)
148 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
150 .init.data : AT(__data_lma + __data_len + 32)
160 . = ALIGN(PAGE_SIZE);
167 __init_data_lma = LOADADDR(.init.data);
168 __init_data_len = SIZEOF(.init.data);
171 .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
178 #ifdef CONFIG_SCHEDULE_L1
184 __text_l1_lma = LOADADDR(.text_l1);
185 __text_l1_len = SIZEOF(.text_l1);
186 ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
188 .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
196 *(.data_l1.cacheline_aligned)
204 __data_l1_lma = LOADADDR(.data_l1);
205 __data_l1_len = SIZEOF(.data_l1);
206 ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
208 .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
221 __data_b_l1_lma = LOADADDR(.data_b_l1);
222 __data_b_l1_len = SIZEOF(.data_b_l1);
223 ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
225 .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
239 *(.data_l2.cacheline_aligned)
247 __l2_lma = LOADADDR(.text_data_l2);
248 __l2_len = SIZEOF(.text_data_l2);
249 ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
251 /* Force trailing alignment of our init section so that when we
252 * free our init memory, we don't leave behind a partial page.
254 #ifdef CONFIG_RAMKERNEL
255 . = __l2_lma + __l2_len;
259 . = ALIGN(PAGE_SIZE);