#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__
+/* These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
//=============================================================================
//
// This control block contains the data that is shared between the
extern struct slb_shadow slb_shadow[];
+#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h>
-#elif defined(CONFIG_PPC_STD_MMU)
+#elif defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
# include <asm/mmu-hash32.h>
#elif defined(CONFIG_40x)
#error TASK_SIZE_USER64 exceeds pagetable range
#endif
+#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
+#endif
/*
* Define the address range of the vmalloc VM area.
if (!huge)
assert_pte_locked(mm, addr);
+#ifdef CONFIG_PPC_STD_MMU_64
if (old & _PAGE_HASHPTE)
hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
return old;
}
if (!en)
return;
+#ifdef CONFIG_PPC_STD_MMU_64
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
/*
* Do we need to disable preemption here? Not really: in the
if (local_paca->lppaca_ptr->int_dword.any_int)
iseries_handle_interrupts();
}
+#endif /* CONFIG_PPC_STD_MMU_64 */
/*
* if (get_paca()->hard_enabled) return;
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
+ *
+ * Note: If we ever support P2P hotplug on Book3E, we'll have
+ * to do an appropriate TLB flush here too
*/
if (bus->self) {
struct resource *res = bus->resource[0];
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
+#ifdef CONFIG_PPC_STD_MMU_64
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
+#endif
return 0;
}
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
printk("physical_start = 0x%lx\n",
PHYSICAL_START);
irqstack_early_init();
emergency_stack_init();
+#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc();
-
+#endif
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
-obj-$(CONFIG_PPC64) += hash_utils_64.o \
+obj-$(CONFIG_PPC64) += mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
- mmap_64.o $(hash-y)
+				   $(hash64-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \
#include "mmu_decl.h"
+#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
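
For context, the CONFIG_PPC_STD_MMU_64 symbol used by the guards above is the Kconfig symbol selected only on 64-bit builds that use the classic hash MMU; it comes from arch/powerpc/platforms/Kconfig.cputype, roughly as in the sketch below (exact dependencies vary between kernel versions):

config PPC_STD_MMU_64
	def_bool y
	depends on PPC_STD_MMU && PPC64

Because the symbol is off for non-hash 64-bit targets, code such as stabs_alloc(), hpte_need_flush() and the SLB/hash Makefile objects simply drops out of those builds.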