- unsigned long v, p, s;
+ unsigned long v, s;
+ phys_addr_t p;
v = KERNELBASE;
- p = PPC_MEMSTART;
+ p = 0;
s = total_lowmem;
if (__map_without_ltlbs)
#include <asm/machdep.h>
#include <asm/setup.h>
+#include "mmu_decl.h"
+
extern void loadcam_entry(unsigned int index);
unsigned int tlbcam_index;
unsigned int num_tlbcam_entries;
static unsigned long __cam0, __cam1, __cam2;
-extern unsigned long total_lowmem;
-extern unsigned long __max_low_memory;
-extern unsigned long __initial_memory_limit;
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
#define NUM_TLBCAMS (16)
void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
unsigned long cam2)
{
- settlbcam(0, PAGE_OFFSET, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+ settlbcam(0, PAGE_OFFSET, memstart_addr, cam0, _PAGE_KERNEL, 0);
tlbcam_index++;
if (cam1) {
tlbcam_index++;
- settlbcam(1, PAGE_OFFSET+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+ settlbcam(1, PAGE_OFFSET+cam0, memstart_addr+cam0, cam1, _PAGE_KERNEL, 0);
}
if (cam2) {
tlbcam_index++;
- settlbcam(2, PAGE_OFFSET+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+ settlbcam(2, PAGE_OFFSET+cam0+cam1, memstart_addr+cam0+cam1, cam2, _PAGE_KERNEL, 0);
}
}
unsigned long total_memory;
unsigned long total_lowmem;
-unsigned long ppc_memstart;
-unsigned long ppc_memoffset = PAGE_OFFSET;
+phys_addr_t memstart_addr;
+phys_addr_t lowmem_end_addr;
int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
printk(KERN_WARNING "Only using first contiguous memory region");
}
- total_memory = lmb_end_of_DRAM();
- total_lowmem = total_memory;
+ total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
#ifdef CONFIG_FSL_BOOKE
/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
extern unsigned long __initial_memory_limit;
extern unsigned long total_memory;
extern unsigned long total_lowmem;
+extern phys_addr_t memstart_addr;
/* ...and now those things that may be slightly different between processor
* architectures. -- Dan
*/
void __init mapin_ram(void)
{
- unsigned long v, p, s, f;
+ unsigned long v, s, f;
+ phys_addr_t p;
int ktext;
s = mmu_mapin_ram();
v = KERNELBASE + s;
- p = PPC_MEMSTART + s;
+ p = memstart_addr + s;
for (; s < total_lowmem; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
#else
unsigned long tot, bl, done;
unsigned long max_size = (256<<20);
- unsigned long align;
if (__map_without_bats) {
printk(KERN_DEBUG "RAM mapped without BATs\n");
/* Make sure we don't map a block larger than the
smallest alignment of the physical address. */
- /* alignment of PPC_MEMSTART */
- align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
- /* set BAT block size to MIN(max_size, align) */
- if (align && align < max_size)
- max_size = align;
-
tot = total_lowmem;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > tot)
break;
}
- setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
+ setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
- setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
+ setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
}
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
-#define PPC_MEMSTART 0
-
#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
#endif