/* These actually do the work of building the kernel identity maps. */
#include <asm/init.h>
#include <asm/pgtable.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
/* Used by pgtable.h asm code to force instruction serialization. */
unsigned long __force_order;
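
The #undef/#define pair above works because the decompression stub pulls
ident_map.c in via #include: whatever __PAGE_OFFSET expands to at inclusion
time is what the shared code compiles against, and page_offset_base is not
usable this early in boot. A stand-alone sketch of the pattern (my own
model, not kernel code):

  #include <stdio.h>

  unsigned long page_offset_base;	/* runtime base: not valid this early */

  #define __PAGE_OFFSET_BASE	0xffff880000000000UL
  #define __PAGE_OFFSET		page_offset_base

  /* Boot-stub style override: pin the macro back to the constant. */
  #undef  __PAGE_OFFSET
  #define __PAGE_OFFSET		__PAGE_OFFSET_BASE

  int main(void)
  {
  	/* Code compiled after the override sees the constant: */
  	printf("%#lx\n", (unsigned long)__PAGE_OFFSET);
  	return 0;
  }
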
unsigned long kaslr_get_random_long(const char *purpose);
#ifdef CONFIG_RANDOMIZE_MEMORY
+extern unsigned long page_offset_base;
+
void kernel_randomize_memory(void);
#else
static inline void kernel_randomize_memory(void) { }
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
+#ifndef __ASSEMBLY__
+#include <asm/kaslr.h>
+#endif
+
#ifdef CONFIG_KASAN
#define KASAN_STACK_ORDER 1
#else
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
-#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE _AC(0xffff880000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET page_offset_base
+#else
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
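
The point of the new indirection: everything built on __PAGE_OFFSET, such
as __va() in asm/page.h, now reads a base that can move at boot instead of
a build-time constant. A minimal user-space model of that behaviour (names
borrowed from the patch; the kernel's real __va() lives elsewhere):

  #include <stdio.h>

  unsigned long page_offset_base = 0xffff880000000000UL;	/* default */

  #define __PAGE_OFFSET	page_offset_base	/* CONFIG_RANDOMIZE_MEMORY=y case */
  #define __va(x)	((void *)((unsigned long)(x) + __PAGE_OFFSET))

  int main(void)
  {
  	printf("%p\n", __va(0x1000UL));		/* 0xffff880000001000 */
  	page_offset_base += 0x8000000000UL;	/* pretend KASLR moved the base */
  	printf("%p\n", __va(0x1000UL));		/* follows the new base */
  	return 0;
  }
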
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
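
head_64.S keeps the static base here because these early page tables are
built before randomization runs; the final mapping is rebuilt later, once
page_offset_base is settled. For reference, pgd_index() takes bits 47:39
of the address, so __PAGE_OFFSET_BASE always selects PGD slot 272. A quick
throwaway check (mine, not part of the patch):

  #include <stdio.h>

  #define PGDIR_SHIFT	39
  #define PTRS_PER_PGD	512
  #define pgd_index(x)	(((x) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

  int main(void)
  {
  	printf("%lu\n", pgd_index(0xffff880000000000UL));	/* prints 272 */
  	return 0;
  }
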
* before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
* ensure that this order is correct and won't be changed.
*/
-static const unsigned long vaddr_start;
-static const unsigned long vaddr_end;
+static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+static const unsigned long vaddr_end = VMALLOC_START;
+
+/* Default values */
+unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);
/*
* Memory regions randomized by KASLR (except modules that use a separate logic
unsigned long *base;
unsigned long size_tb;
} kaslr_regions[] = {
+ { &page_offset_base, 64 /* Maximum */ },
};
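
The table stores a pointer to each base variable so the randomizer can
update it in place; later users then pick up the new value through the
__PAGE_OFFSET machinery above. A stand-alone model of the indirection
(simplified, names taken from the patch):

  #include <stdio.h>

  unsigned long page_offset_base = 0xffff880000000000UL;

  static struct {
  	unsigned long *base;
  	unsigned long size_tb;
  } kaslr_regions[] = {
  	{ &page_offset_base, 64 },
  };

  int main(void)
  {
  	*kaslr_regions[0].base += 0x40000000UL;	/* stand-in for randomization */
  	printf("%#lx\n", page_offset_base);	/* the variable moved with it */
  	return 0;
  }
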
/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

void __init kernel_randomize_memory(void)
{
size_t i;
unsigned long vaddr = vaddr_start;
- unsigned long rand;
+ unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
if (!kaslr_memory_enabled())
return;
+ BUG_ON(kaslr_regions[0].base != &page_offset_base);
+ memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+ 	CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+
+ /* Adapt physical memory region size based on available memory */
+ if (memory_tb < kaslr_regions[0].size_tb)
+ 	kaslr_regions[0].size_tb = memory_tb;
+
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
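
The excerpt cuts off at the header of a loop whose body subtracts each
region's reserved span (get_padding()) from the entropy budget. After
that, the upstream function continues roughly as follows, per my reading
of the rest of kernel_randomize_memory() (a sketch for context, not part
of this patch):

  	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

  	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
  		unsigned long entropy;

  		/* Give this region an even share of the remaining entropy */
  		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
  		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
  		entropy = (rand % (entropy + 1)) & PUD_MASK;
  		vaddr += entropy;
  		*kaslr_regions[i].base = vaddr;	/* publish the new base */

  		/* Jump past the region, realign, account the entropy used */
  		vaddr += get_padding(&kaslr_regions[i]);
  		vaddr = round_up(vaddr + 1, PUD_SIZE);
  		remain_entropy -= entropy;
  	}
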