static inline int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags)
{
+ if (radix_enabled()) {
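+ /*
+ * The radix path below always maps with PAGE_SIZE, so under VM
+ * debugging warn if the expected I/O page size ever differs.
+ */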
+#if defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_DEBUG_VM)
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
+ WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
+#endif
+ return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+ }
return hash__map_kernel_page(ea, pa, flags);
}
static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
+ if (radix_enabled())
+ return radix__vmemmap_create_mapping(start, page_size, phys);
return hash__vmemmap_create_mapping(start, page_size, phys);
}
static inline void vmemmap_remove_mapping(unsigned long start,
unsigned long page_size)
{
+ if (radix_enabled())
+ return radix__vmemmap_remove_mapping(start, page_size);
return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
#endif
+extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys);
+extern void radix__vmemmap_remove_mapping(unsigned long start,
+ unsigned long page_size);
+
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
#endif /* __ASSEMBLY__ */
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + first_memblock_size);
}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit radix__vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys)
+{
+ /* PTE encoding: present, accessed, kernel read/write */
+ unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
+
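+ /* Failing to map the vmemmap during init or hotplug is fatal. */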
+ BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
+ return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
+{
+ /* FIXME: x86 does more here. Should we free the page tables mapping the vmemmap? */
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */