select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_IDE
- select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_BZIP2
- select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_GZIP if RAMKERNEL
+ select HAVE_KERNEL_BZIP2 if RAMKERNEL
+ select HAVE_KERNEL_LZMA if RAMKERNEL
select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB
config ROM_BASE
hex "Kernel ROM Base"
depends on ROMKERNEL
- default "0x20040000"
+ default "0x20040040"
range 0x20000000 0x20400000 if !(BF54x || BF561)
range 0x20000000 0x30000000 if (BF54x || BF561)
help
+ Make sure your ROM base does not include any file header that is
+ prepended to the kernel image.
+
+ For example, the bootable U-Boot format (created with mkimage)
+ has a 64 byte (0x40) header. So if the image you write to flash
+ starts at, say, 0x20080000, you must add 0x40 and use 0x20080040
+ as the kernel's ROM base, since the kernel itself comes after
+ the header.
comment "Clock/PLL Setup"
GZFLAGS := -9
KBUILD_CFLAGS += $(call cc-option,-mno-fdpic)
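+# Code that executes in place from flash can be too far from its call
+# targets in RAM for PC-relative calls, so build ROM kernels with
+# -mlong-calls (calls made indirectly through a register).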
+ifeq ($(CONFIG_ROMKERNEL),y)
+KBUILD_CFLAGS += -mlong-calls
+endif
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
CFLAGS_MODULE += -mlong-calls
LDFLAGS_MODULE += -m elf32bfin
INSTALL_PATH ?= /tftpboot
boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma
+BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip
PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(boot)/vmImage
echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
+ echo ' vmImage.xip - XIP Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.xip)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) PATH: $(INSTALLKERNEL) or'
MKIMAGE := $(srctree)/scripts/mkuboot.sh
-targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma
-extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
+targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip
+extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xip
+
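+# RAM kernels are loaded into SDRAM at CONFIG_BOOT_LOAD; XIP kernels are
+# marked execute-in-place (-x) and located at CONFIG_ROM_BASE.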
+UIMAGE_OPTS-y :=
+UIMAGE_OPTS-$(CONFIG_RAMKERNEL) += -a $(CONFIG_BOOT_LOAD)
+UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
- -d $< $@
+ $(UIMAGE_OPTS-y) -d $< $@
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
+# The mkimage tool wants 64 bytes prepended to the image
+quiet_cmd_mk_bin_xip = BIN $@
+ cmd_mk_bin_xip = ( printf '%64s' | tr ' ' '\377' ; cat $< ) > $@
+$(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,mk_bin_xip)
+
$(obj)/vmImage.bin: $(obj)/vmlinux.bin
$(call if_changed,uimage,none)
$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
$(call if_changed,uimage,lzma)
+$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
+ $(call if_changed,uimage,none)
+
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_ROMKERNEL) := xip
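+# ROM kernels execute in place and are not compressed, so they always
+# use the XIP image.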
+
$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
@ln -sf $(notdir $<) $@
(R7:0, P5:0) = [SP++];
.endm
+
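+/*
+ * With a ROM (XIP) kernel, the caller in flash and the callee in RAM can
+ * be too far apart for a PC-relative call, so load the target address
+ * into a scratch P register and call through it.  For RAM kernels this
+ * is just a plain call.
+ */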
+.macro pseudo_long_call func:req, scratch:req
+#ifdef CONFIG_ROMKERNEL
+ \scratch\().l = \func;
+ \scratch\().h = \func;
+ call (\scratch);
+#else
+ call \func;
+#endif
+.endm
extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[];
extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
_data_l1_lma[], __weak _data_l1_len[];
+#ifdef CONFIG_ROMKERNEL
+extern char _data_lma[], _data_len[], _sinitdata[], _einitdata[], _init_data_lma[], _init_data_len[];
+#endif
extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
_data_b_l1_lma[], __weak _data_b_l1_len[];
extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
}
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
+#endif
+
/* Cover L1 memory. One 4M area for code and data each is enough. */
#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
}
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ d_tbl[i_d].addr = addr;
+ d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+ i_tbl[i_i].addr = addr;
+ i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+#endif
+
/* Cover L1 memory. One 4M area for code and data each is enough. */
if (cpu == 0) {
if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
sti r4;
#endif /* CONFIG_IPIPE */
SP += -12;
- call _schedule_tail;
+ pseudo_long_call _schedule_tail, p5;
SP += 12;
r0 = [sp + PT_IPEND];
cc = bittst(r0,1);
r0 += 24;
[--sp] = rets;
SP += -12;
- call _bfin_vfork;
+ pseudo_long_call _bfin_vfork, p2;
SP += 12;
rets = [sp++];
rts;
r0 += 24;
[--sp] = rets;
SP += -12;
- call _bfin_clone;
+ pseudo_long_call _bfin_clone, p2;
SP += 12;
rets = [sp++];
rts;
r0 += 24;
[--sp] = rets;
SP += -12;
- call _do_rt_sigreturn;
+ pseudo_long_call _do_rt_sigreturn, p2;
SP += 12;
rets = [sp++];
rts;
memcpy(_stext_l2, _l2_lma, l2_len);
}
+#ifdef CONFIG_ROMKERNEL
+void __init bfin_relocate_xip_data(void)
+{
+ early_shadow_stamp();
+
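+ /*
+  * .data and .init.data are linked at RAM addresses but stored in
+  * flash, so copy them into place.  The end of .data holds the init
+  * task stack, which is already live, so only its thread_info is
+  * copied rather than the full THREAD_SIZE.
+  */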
+ memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
+ memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
+}
+#endif
+
/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
unsigned long long size, int type)
#endif
unsigned long max_mem;
- _rambase = (unsigned long)_stext;
+ _rambase = CONFIG_BOOT_LOAD;
_ramstart = (unsigned long)_end;
if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
- ((int)memory_end - (int)_stext) >> 10,
- _stext,
+ ((int)memory_end - (int)_rambase) >> 10,
+ (void *)_rambase,
(void *)memory_end);
seq_printf(m, "\n");
SECTIONS
{
+#ifdef CONFIG_RAMKERNEL
. = CONFIG_BOOT_LOAD;
+#else
+ . = CONFIG_ROM_BASE;
+#endif
+
/* Neither the text, ro_data or bss section need to be aligned
* So pack them back to back
*/
LOCK_TEXT
IRQENTRY_TEXT
KPROBES_TEXT
+#ifdef CONFIG_ROMKERNEL
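+ /* XIP kernels keep init code in flash with the rest of .text rather
+  * than in the discardable init area.
+  */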
+ __sinittext = .;
+ INIT_TEXT
+ __einittext = .;
+ EXIT_TEXT
+#endif
*(.text.*)
*(.fixup)
/* Just in case the first read only is a 32-bit access */
RO_DATA(4)
+ __rodata_end = .;
+#ifdef CONFIG_ROMKERNEL
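+ /*
+  * XIP: from here on sections are linked (VMA) at RAM addresses
+  * starting at CONFIG_BOOT_LOAD, while .data and the init data keep
+  * flash load addresses (LMAs) and are copied into RAM at boot by
+  * bfin_relocate_xip_data().
+  */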
+ . = CONFIG_BOOT_LOAD;
+ .bss : AT(__rodata_end)
+#else
.bss :
+#endif
{
. = ALIGN(4);
___bss_start = .;
___bss_stop = .;
}
+#if defined(CONFIG_ROMKERNEL)
+ .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
+#else
.data :
+#endif
{
__sdata = .;
/* This gets done first, so the glob doesn't suck it in */
__edata = .;
}
+ __data_lma = LOADADDR(.data);
+ __data_len = SIZEOF(.data);
/* The init section should be last, so when we free it, it goes into
* the general memory pool, and (hopefully) will decrease fragmentation
. = ALIGN(PAGE_SIZE);
___init_begin = .;
+#ifdef CONFIG_RAMKERNEL
INIT_TEXT_SECTION(PAGE_SIZE)
/* We have to discard exit text and such at runtime, not link time, to
}
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
+#else
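+ /*
+  * XIP: init data (setup data, initcalls, the per-cpu area, initramfs)
+  * is linked in RAM after .data and copied out of flash at boot.
+  */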
+ .init.data : AT(__data_lma + __data_len)
+ {
+ __sinitdata = .;
+ INIT_DATA
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+
+ . = ALIGN(4);
+ ___per_cpu_load = .;
+ ___per_cpu_start = .;
+ *(.data.percpu.first)
+ *(.data.percpu.page_aligned)
+ *(.data.percpu)
+ *(.data.percpu.shared_aligned)
+ ___per_cpu_end = .;
+
+ EXIT_DATA
+ __einitdata = .;
+ }
+ __init_data_lma = LOADADDR(.init.data);
+ __init_data_len = SIZEOF(.init.data);
+ __init_data_end = .;
+
+ .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
+#endif
{
. = ALIGN(4);
__stext_l1 = .;
/* Force trailing alignment of our init section so that when we
* free our init memory, we don't leave behind a partial page.
*/
+#ifdef CONFIG_RAMKERNEL
. = __l2_lma + __l2_len;
+#else
+ . = __init_data_end;
+#endif
. = ALIGN(PAGE_SIZE);
___init_end = .;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _double_fault_c;
+ pseudo_long_call _double_fault_c, p5;
SP += 12;
.L_double_fault_panic:
JUMP .L_double_fault_panic
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _trap_c;
+ pseudo_long_call _trap_c, p4;
SP += 12;
/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
p0 = sp;
sp += -16;
[sp + 12] = p0;
- call _do_execve;
+ pseudo_long_call _do_execve, p5;
SP += 16;
cc = r0 == 0;
if ! cc jump .Lexecve_failed;
sp += 4;
SP += -12;
- call _schedule;
+ pseudo_long_call _schedule, p4;
SP += 12;
jump .Lresume_userspace_1;
r0 = sp;
SP += -12;
- call _do_notify_resume;
+ pseudo_long_call _do_notify_resume, p5;
SP += 12;
.Lsyscall_really_exit:
* this symbol need not be global anyways, so ...
*/
_sys_trace:
- call _syscall_trace;
+ pseudo_long_call _syscall_trace, p5;
/* Execute the appropriate system call */
SP += 24;
[sp + PT_R0] = r0;
- call _syscall_trace;
+ pseudo_long_call _syscall_trace, p5;
jump .Lresume_userspace;
ENDPROC(_sys_trace)
r0 = sp;
sp += -12;
- call _finish_atomic_sections;
+ pseudo_long_call _finish_atomic_sections, p5;
sp += 12;
jump.s .Lresume_userspace;
ENDPROC(_schedule_and_signal_from_int)
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
+
+#ifdef CONFIG_ROMKERNEL
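+ /* Copy .data and .init.data out of flash and into RAM */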
+ call _bfin_relocate_xip_data;
+#endif
+
#ifdef CONFIG_BFIN_KERNEL_CLOCK
/* Only use on-chip scratch space for stack when absolutely required
* to avoid Anomaly 05000227 ... we know the init_clocks() func only
cc = r0 == 0;
if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
- call _do_irq;
+ pseudo_long_call _do_irq, p2;
SP += 12;
#endif /* CONFIG_IPIPE */
- call _return_from_int;
+ pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
RESTORE_CONTEXT
rti;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _trap_c;
+ pseudo_long_call _trap_c, p5;
SP += 12;
#ifdef EBIU_ERRMST
w[p0] = r0.l;
#endif
- call _ret_from_exception;
+ pseudo_long_call _ret_from_exception, p2;
.Lcommon_restore_all_sys:
RESTORE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
- call _system_call;
+ pseudo_long_call _system_call, p2;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)