#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
#define _ASM_MICROBLAZE_MEMBLOCK_H
-/* MEMBLOCK limit is OFF */
-#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF
-
#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT 0
-#endif
-
#endif /* _ASM_POWERPC_MEMBLOCK_H */
if ((memory_limit && (start + size) > memory_limit) ||
overlaps_crashkernel(start, size)) {
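+		/* The default memblock limit keeps this copy accessible */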
- p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
+ p = __va(memblock_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = (struct boot_param_header *)p;
DBG("Moved device tree to 0x%p\n", p);
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
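+
+/*
+ * Tell memblock how much memory the CPU can safely access this early in
+ * boot: the RMO region on 64-bit, a CPU-dependent amount of lowmem on
+ * 32-bit.
+ */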
+static void set_boot_memory_limit(void)
+{
+#ifdef CONFIG_PPC32
+ /* 601 can only access 16MB at the moment */
+ if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+ memblock_set_current_limit(0x01000000);
+ /* 8xx can only access 8MB at the moment */
+ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
+ memblock_set_current_limit(0x00800000);
+ else
+		memblock_set_current_limit(0x10000000);	/* 256MB */
+#else
+ memblock_set_current_limit(memblock.rmo_size);
+#endif
+}
void __init early_init_devtree(void *params)
{
/* Scan memory nodes and rebuild MEMBLOCKs */
memblock_init();
+
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
+ set_boot_memory_limit();
+
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
move_device_tree();
unsigned int i;
/* interrupt stacks must be in lowmem, we get that for free on ppc32
- * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
+ * as the memblock is limited to lowmem by default */
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
+
#include "mmu_decl.h"
extern int __map_without_ltlbs;
* coverage with normal-sized pages (or other reasons) do not
* attempt to allocate outside the allowed range.
*/
-
- __initial_memory_limit_addr = memstart_addr + mapped;
+ memblock_set_current_limit(memstart_addr + mapped);
return mapped;
}
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
(unsigned int)((total_lowmem - __max_low_memory) >> 20));
- __initial_memory_limit_addr = memstart_addr + __max_low_memory;
+ memblock_set_current_limit(memstart_addr + __max_low_memory);
}
#endif /* CONFIG_U3_DART */
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
- }
+ }
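+
+	/* The linear mapping is now bolted in the hash table; allocations
+	 * no longer need to stay within the RMO region.
+	 */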
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
/*
* If we have a memory_limit and we've allocated TCEs then we need to
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
-/*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
/*
* Check for command-line options that affect what MMU_init will do.
*/
if (ppc_md.progress)
ppc_md.progress("MMU:enter", 0x111);
- /* 601 can only access 16MB at the moment */
- if (PVR_VER(mfspr(SPRN_PVR)) == 1)
- __initial_memory_limit_addr = 0x01000000;
- /* 8xx can only access 8MB at the moment */
- if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
- __initial_memory_limit_addr = 0x00800000;
-
/* parse args from command line */
MMU_setup();
#ifdef CONFIG_BOOTX_TEXT
btext_unmap();
#endif
+
+ /* Shortly after that, the entire linear mapping will be available */
+ memblock_set_current_limit(lowmem_end_addr);
}
/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
- void *p;
-
- if (init_bootmem_done) {
- p = alloc_bootmem_pages(PAGE_SIZE);
- } else {
- p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
- __initial_memory_limit_addr));
- }
- return p;
+ if (init_bootmem_done)
+ return alloc_bootmem_pages(PAGE_SIZE);
+ else
+ return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
}
/* Free up now-unused memory */
* Find some memory for the hash table.
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
- Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
- __initial_memory_limit_addr));
+ Hash = __va(memblock_alloc(Hash_size, Hash_size));
cacheable_memzero(Hash, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
* the MMU configuration
*/
mb();
+
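+	/* Boot-time allocations must stay within the initial linear mapping */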
+ memblock_set_current_limit(linear_map_top);
}
void __init early_init_mmu(void)
#ifndef __ASM_SH_MEMBLOCK_H
#define __ASM_SH_MEMBLOCK_H
-#define MEMBLOCK_REAL_LIMIT 0
-
#endif /* __ASM_SH_MEMBLOCK_H */
#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
-#define MEMBLOCK_REAL_LIMIT 0
-
#endif /* !(_SPARC64_MEMBLOCK_H) */
struct memblock {
unsigned long debug;
u64 rmo_size;
+	u64 current_limit;	/* default limit for boot-time allocations */
struct memblock_type memory;
struct memblock_type reserved;
};
extern long memblock_remove(u64 base, u64 size);
extern long __init memblock_free(u64 base, u64 size);
extern long __init memblock_reserve(u64 base, u64 size);
+
extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid);
extern u64 __init memblock_alloc(u64 size, u64 align);
+
+/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(u64)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
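+/*
+ * Passing MEMBLOCK_ALLOC_ACCESSIBLE as max_addr means "below
+ * memblock.current_limit", while MEMBLOCK_ALLOC_ANYWHERE lifts any
+ * upper bound on the search.
+ */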
+
extern u64 __init memblock_alloc_base(u64 size,
		u64 align, u64 max_addr);
-#define MEMBLOCK_ALLOC_ANYWHERE 0
extern u64 __init __memblock_alloc_base(u64 size,
u64 align, u64 max_addr);
extern u64 __init memblock_phys_mem_size(void);
/* Provided by the architecture */
extern u64 memblock_nid_range(u64 start, u64 end, int *nid);
+/**
+ * memblock_set_current_limit - Set the default limit for boot-time
+ *                              allocations, restricting them to what is
+ *                              currently accessible
+ * @limit: New limit value (physical address)
+ */
+extern void memblock_set_current_limit(u64 limit);
+
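+/*
+ * Illustrative use: a platform whose early MMU setup only covers the
+ * first 256MB would call
+ *
+ *	memblock_set_current_limit(0x10000000);
+ *
+ * before its first memblock_alloc(), and lift the restriction once the
+ * full linear mapping is available:
+ *
+ *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ */
+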
/*
* pfn conversion functions
memblock.reserved.regions[0].base = 0;
memblock.reserved.regions[0].size = 0;
memblock.reserved.cnt = 1;
+
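+	/* Anything goes until an architecture lowers the limit */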
+ memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
void __init memblock_analyze(void)
u64 __init memblock_alloc(u64 size, u64 align)
{
- return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+ return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
size = memblock_align_up(size, align);
- /* On some platforms, make sure we allocate lowmem */
- /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
- if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
- max_addr = MEMBLOCK_REAL_LIMIT;
-
/* Pump up max_addr */
- if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
- max_addr = ~(u64)0;
+ if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+ max_addr = memblock.current_limit;
	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
+
+void __init memblock_set_current_limit(u64 limit)
+{
+ memblock.current_limit = limit;
+}
+